code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from enum import IntEnum
from typing import Dict, Set, List, Optional
from .isa_loader import InstructionSet
from .interfaces import Coverage, EquivalenceClass, TestCase, Executor, Model, Analyser, \
ExecutionTrace, TracedInstruction, Instruction, RegisterOperand, OT
from .x86.x86_generator import X86TargetDesc
from .util import STAT
# ==================================================================================================
# Coverage Disabled
# ==================================================================================================
class NoCoverage(Coverage):
    """No-op coverage tracker used when fuzzing with coverage disabled.

    Every hook is an empty stub and the reported coverage is always zero.
    """

    def load_test_case(self, test_case):
        pass

    def generator_hook(self, feedback):
        pass

    def model_hook(self, feedback):
        pass

    def executor_hook(self, feedback):
        pass

    def analyser_hook(self, feedback):
        pass

    def get(self) -> int:
        # no coverage is ever collected
        return 0
# ==================================================================================================
# DependentPairCoverage
# ==================================================================================================
class DT(IntEnum):
    """Dependency Type between a pair of consecutively executed instructions."""
    REG_GPR = 1         # data dependency through a general-purpose register
    REG_FLAGS = 2       # data dependency through the FLAGS register
    # value 3 is intentionally unused in the original numbering
    MEM_LL = 4          # memory: load followed by load
    MEM_LS = 5          # memory: load followed by store
    MEM_SL = 6          # memory: store followed by load
    MEM_SS = 7          # memory: store followed by store
    CONTROL_DIRECT = 8  # direct (unconditional) control-flow dependency
    CONTROL_COND = 9    # conditional control-flow dependency
class DependentPairCoverage(Coverage):
    """Coverage of pairs of instructions with a data or control-flow dependency.

    Every pair of consecutively executed (non-instrumentation) instructions is
    hashed and recorded into one bucket per detected dependency type (see DT).
    Total coverage is the number of unique pairs across all buckets.
    """
    coverage: Dict[DT, Set[int]]  # observed pair hashes, bucketed by dependency type
    max_coverage: Dict[DT, int]   # static per-bucket upper bound, estimated from the ISA
    execution_traces: List[ExecutionTrace]  # traces received from the model hook
    test_case: TestCase  # the test case currently being measured

    def __init__(self, instruction_set: InstructionSet, executor: Executor, model: Model,
                 analyser: Analyser):
        super().__init__(instruction_set, executor, model, analyser)
        self.coverage = {k: set() for k in DT}
        self.max_coverage = {}
        self._calculate_max_coverage()

    def _update_coverage(self, effective_traces: List[ExecutionTrace]) -> None:
        """ The main function of this class - calculates coverage based on the collected traces """
        # get rid of instrumentation in the traces
        # - collect the addresses of instrumentation instructions (set: O(1) membership below)
        instrumentation_addresses = {
            addr for addr, instr in self.test_case.address_map.items()
            if instr.is_instrumentation
        }
        # - remove those addresses from traces
        effective_traces = [
            [t for t in trace if t.i_address not in instrumentation_addresses]
            for trace in effective_traces
        ]

        # process all pairs of the executed instructions
        addr1: TracedInstruction
        addr2: TracedInstruction
        for trace in effective_traces:
            for addr1, addr2 in zip(trace, trace[1:]):
                instr1 = self.test_case.address_map[addr1.i_address]
                instr2 = self.test_case.address_map[addr2.i_address]
                type_: Optional[DT]
                # pair identity is based on the instruction "shapes", not addresses
                key = hash(self._get_instruction_key(instr1) + self._get_instruction_key(instr2))

                # control flow dependency
                if instr1.control_flow:
                    type_ = DT.CONTROL_DIRECT if instr1.category == "BASE-UNCOND_BR" \
                        else DT.CONTROL_COND
                    self.coverage[type_].add(key)

                # potential memory dependency
                if addr1.accesses and addr2.accesses:
                    for type_ in self._search_memory_dependency(addr1, addr2):
                        self.coverage[type_].add(key)

                # potential reg dependency
                if self._search_reg_dependency(instr1, instr2):
                    self.coverage[DT.REG_GPR].add(key)
                if self._search_flag_dependency(instr1, instr2):
                    self.coverage[DT.REG_FLAGS].add(key)

    def _calculate_max_coverage(self):
        """Estimate the maximum possible size of each coverage bucket.

        Counts how many ISA instructions can act as the source/destination of
        each dependency kind and approximates bucket limits as products of
        those counts.
        """
        all_, reg_src, reg_dest, flags_src, flags_dest, mem_src, mem_dest, control_cond = (0,) * 8
        # NOTE(review): starts at 1 in the original - presumably to keep the
        # CONTROL_DIRECT bucket non-empty; confirm against the generator
        control_direct = 1
        for inst in self.instruction_set.instructions:
            all_ += 1
            reg_ops = [r for r in inst.operands + inst.implicit_operands if r.type == OT.REG]
            if any(reg.src for reg in reg_ops):
                reg_src += 1
            if any(reg.dest for reg in reg_ops):
                reg_dest += 1
            flag_ops = [r for r in inst.operands + inst.implicit_operands if r.type == OT.FLAGS]
            if flag_ops:
                # flag operand values encode per-flag read ('r') / write ('w') behavior
                has_src = any('r' in v for v in flag_ops[0].values)
                has_dest = any('w' in v for v in flag_ops[0].values)
                if has_src:
                    flags_src += 1
                if has_dest:
                    flags_dest += 1
            if inst.has_write:
                mem_dest += 1
            if any(r.type == OT.MEM and r.src
                   for r in inst.operands + inst.implicit_operands):
                mem_src += 1
            if inst.control_flow:
                if inst.category == "BASE-UNCOND_BR":
                    control_direct += 1
                else:
                    control_cond += 1
        # a GPR written by inst1 may feed a register read or a memory address of inst2
        self.max_coverage[DT.REG_GPR] = reg_src * (reg_dest + mem_src + mem_dest)
        self.max_coverage[DT.REG_FLAGS] = flags_src * flags_dest
        self.max_coverage[DT.MEM_LL] = mem_src * mem_src
        self.max_coverage[DT.MEM_LS] = mem_src * mem_dest
        self.max_coverage[DT.MEM_SL] = mem_dest * mem_src
        self.max_coverage[DT.MEM_SS] = mem_dest * mem_dest
        self.max_coverage[DT.CONTROL_DIRECT] = control_direct * all_
        self.max_coverage[DT.CONTROL_COND] = control_cond * all_

    def get(self) -> int:
        """Total coverage: the number of unique dependent pairs across all buckets."""
        return sum(len(c) for c in self.coverage.values())

    def _percentage(self, type_: DT) -> float:
        """Covered share of one bucket, in percent (0.0 for an empty/impossible bucket)."""
        maximum = self.max_coverage[type_]
        if not maximum:
            # avoid ZeroDivisionError for ISAs that cannot produce this pair type
            return 0.0
        return (len(self.coverage[type_]) / maximum) * 100

    def get_brief(self):
        """Return a one-line summary of per-type coverage percentages."""
        flags = self._percentage(DT.REG_FLAGS)
        gpr = self._percentage(DT.REG_GPR)
        ll = self._percentage(DT.MEM_LL)
        ls = self._percentage(DT.MEM_LS)
        sl = self._percentage(DT.MEM_SL)
        ss = self._percentage(DT.MEM_SS)
        cond = self._percentage(DT.CONTROL_COND)
        dire = self._percentage(DT.CONTROL_DIRECT)
        return (f"{flags:.2f}, {gpr:.2f}, {ll:.2f}, {ls:.2f},"
                f" {sl:.2f}, {ss:.2f}, {cond:.2f}, {dire:.2f}")

    def load_test_case(self, test_case: TestCase):
        """Remember the test case so traced addresses can be mapped to instructions."""
        self.test_case = test_case

    def model_hook(self, execution_traces: List[ExecutionTrace]):
        """Store the model's execution traces until the analyser reports classes."""
        self.execution_traces = execution_traces

    def analyser_hook(self, classes: List[EquivalenceClass]):
        """Update coverage from the traces that belong to effective classes."""
        # ignore those traces that belong to ineffective classes
        effective_traces = [
            self.execution_traces[eq_cls.measurements[0].input_id]
            for eq_cls in classes if len(eq_cls) > 1
        ]
        if not effective_traces:
            return

        # we're done with this test case and are ready to collect coverage
        self._update_coverage(effective_traces)
        STAT.coverage = self.get()

    def executor_hook(self, _):
        pass

    def _get_instruction_key(self, instruction: Instruction) -> str:
        """Build the instruction "shape": name plus width/type of every operand."""
        key = instruction.name
        for op in instruction.get_all_operands():
            key += "-" + str(op.width) + str(op.type)
        return key

    def _search_memory_dependency(self, traced_instr1: TracedInstruction,
                                  traced_instr2: TracedInstruction) -> List[DT]:
        """Classify potential memory dependencies between two traced instructions.

        Returns a list of DT.MEM_* types; e.g., MEM_SL means the first
        instruction stored to an address that the second one loaded from.
        """
        read_addresses1 = []
        write_addresses1 = []
        for access in traced_instr1.accesses:
            (write_addresses1 if access.is_store else read_addresses1).append(access.m_address)

        read_addresses2 = []
        write_addresses2 = []
        for access in traced_instr2.accesses:
            # BUG FIX: the original appended stores to read_addresses2 and loads
            # to write_addresses2, inverting the load/store classification of
            # the second instruction (MEM_LL actually recorded MEM_LS, etc.)
            (write_addresses2 if access.is_store else read_addresses2).append(access.m_address)

        types = []
        if any(i in read_addresses2 for i in read_addresses1):
            types.append(DT.MEM_LL)
        if any(i in write_addresses2 for i in read_addresses1):
            types.append(DT.MEM_LS)
        if any(i in read_addresses2 for i in write_addresses1):
            types.append(DT.MEM_SL)
        if any(i in write_addresses2 for i in write_addresses1):
            types.append(DT.MEM_SS)
        return types

    def _search_reg_dependency(self, inst1: Instruction, inst2: Instruction) -> Optional[DT]:
        """Detect a GPR dependency: inst1 writes a register that inst2 reads,
        either directly or as part of a memory-address expression."""
        # normal register dependencies
        dest_regs = [
            X86TargetDesc.gpr_normalized[r.value]
            for r in inst1.get_dest_operands(True) if isinstance(r, RegisterOperand)
        ]
        src_regs = [
            X86TargetDesc.gpr_normalized[r.value]
            for r in inst2.get_src_operands(True) if isinstance(r, RegisterOperand)
        ]
        if any(r in src_regs for r in dest_regs):
            return DT.REG_GPR

        # address dependency: a written register appears in inst2's memory operand
        mem_operands = [m.value for m in inst2.get_mem_operands()]
        for r in dest_regs:
            if any(r in mem for mem in mem_operands):
                return DT.REG_GPR
        return None

    def _search_flag_dependency(self, instr1: Instruction, instr2: Instruction) -> Optional[DT]:
        """Detect a dependency through the FLAGS register."""
        flags1 = instr1.get_flags_operand()
        flags2 = instr2.get_flags_operand()
        if flags1 and flags2 and flags2.is_dependent(flags1):
            return DT.REG_FLAGS
        return None

    def _dbg_print_coverage_by_type(self):
        """Debug helper: print the size and fill ratio of every bucket."""
        print("")
        for k in self.coverage:
            size = len(self.coverage[k])
            ratio = self._percentage(k)
            print(f"- {str(k)}: {size} [{ratio:.3}%]")
from typing import Tuple, Dict, Type, List, Callable
from . import input_generator, analyser, coverage, postprocessor, interfaces, model
from .x86 import x86_model, x86_executor, x86_fuzzer, x86_generator, get_spec
from .config import CONF, ConfigException
# Registries mapping configuration-option values (from CONF) to the classes
# that implement them; resolved via _get_from_config() below.
GENERATORS: Dict[str, Type[interfaces.Generator]] = {
    "x86-64-random": x86_generator.X86RandomGenerator
}

INPUT_GENERATORS: Dict[str, Type[interfaces.InputGenerator]] = {
    'random': input_generator.NumpyRandomInputGenerator,
    'legacy-random': input_generator.LegacyRandomInputGenerator,
}

# Observation clauses of the contract: what the model's tracer records
TRACERS: Dict[str, Type[model.UnicornTracer]] = {
    "l1d": model.L1DTracer,
    "pc": model.PCTracer,
    "memory": model.MemoryTracer,
    "ct": model.CTTracer,
    "loads+stores+pc": model.CTTracer,  # alias of "ct"
    "ct-nonspecstore": model.CTNonSpecStoreTracer,
    "ctr": model.CTRTracer,
    "arch": model.ArchTracer,
    "gpr": model.GPRTracer,
}

# Execution clauses of the contract: which speculation/fault behavior the
# Unicorn-based model emulates; several keys are aliases of the same class
X86_SIMPLE_EXECUTION_CLAUSES: Dict[str, Type[x86_model.X86UnicornModel]] = {
    "seq": x86_model.X86UnicornSeq,
    "no_speculation": x86_model.X86UnicornSeq,  # alias of "seq"
    "seq-assist": x86_model.X86SequentialAssist,
    "cond": x86_model.X86UnicornCond,
    "conditional_br_misprediction": x86_model.X86UnicornCond,  # alias of "cond"
    "bpas": x86_model.X86UnicornBpas,
    "nullinj-fault": x86_model.X86UnicornNull,
    "nullinj-assist": x86_model.X86UnicornNullAssist,
    "delayed-exception-handling": x86_model.X86UnicornDEH,
    "div-zero": x86_model.X86UnicornDivZero,
    "div-overflow": x86_model.X86UnicornDivOverflow,
    "meltdown": x86_model.X86Meltdown,
    "fault-skip": x86_model.X86FaultSkip,
    "noncanonical": x86_model.X86NonCanonicalAddress,
}

EXECUTORS = {
    'x86-64': x86_executor.X86IntelExecutor,
}

ANALYSERS: Dict[str, Type[interfaces.Analyser]] = {
    'equivalence-classes': analyser.EquivalenceAnalyser,
}

COVERAGE: Dict[str, Type[interfaces.Coverage]] = {
    'dependent-pairs': coverage.DependentPairCoverage,
    'none': coverage.NoCoverage
}

MINIMIZERS: Dict[str, Type[interfaces.Minimizer]] = {
    'violation': postprocessor.MinimizerViolation,
}

SPEC_DOWNLOADERS: Dict[str, Type] = {
    'x86-64': get_spec.Downloader,
}
def _get_from_config(options: Dict, key: str, conf_option_name: str, *args):
GenCls = options.get(key, None)
if GenCls:
return GenCls(*args)
raise ConfigException(
f"ERROR: unknown value {key} of `{conf_option_name}` configuration option")
def get_fuzzer(instruction_set, working_directory, testcase, inputs):
    """Instantiate the fuzzer selected by CONF.fuzzer for CONF.instruction_set."""
    if CONF.fuzzer == "architectural":
        if CONF.instruction_set != "x86-64":
            raise ConfigException("ERROR: unknown value of `instruction_set` configuration option")
        return x86_fuzzer.X86ArchitecturalFuzzer(instruction_set, working_directory, testcase,
                                                 inputs)
    if CONF.fuzzer == "basic":
        if CONF.instruction_set != "x86-64":
            raise ConfigException("ERROR: unknown value of `instruction_set` configuration option")
        return x86_fuzzer.X86Fuzzer(instruction_set, working_directory, testcase, inputs)
    raise ConfigException("ERROR: unknown value of `fuzzer` configuration option")
def get_program_generator(instruction_set: interfaces.InstructionSetAbstract,
                          seed: int) -> interfaces.Generator:
    """Instantiate the program generator selected by the ISA + generator options."""
    generator_key = f"{CONF.instruction_set}-{CONF.generator}"
    return _get_from_config(GENERATORS, generator_key, "instruction_set", instruction_set, seed)
def get_input_generator(seed: int) -> interfaces.InputGenerator:
    """Instantiate the input generator selected by CONF.input_generator."""
    return _get_from_config(INPUT_GENERATORS, CONF.input_generator, "input_generator", seed)
def get_model(bases: Tuple[int, int]) -> interfaces.Model:
    """Build the contract model: execution clause(s) + observation clause.

    `bases` carries the two sandbox base addresses passed to the model.
    """
    if CONF.instruction_set != 'x86-64':
        raise ConfigException("ERROR: unknown value of `model` configuration option")

    clauses = CONF.contract_execution_clause
    model_instance: model.UnicornModel
    if "cond" in clauses and "bpas" in clauses:
        # the only supported combination of execution clauses
        model_instance = x86_model.X86UnicornCondBpas(bases[0], bases[1])
    elif len(clauses) == 1:
        model_instance = _get_from_config(X86_SIMPLE_EXECUTION_CLAUSES, clauses[0],
                                          "contract_execution_clause", bases[0], bases[1])
    else:
        raise ConfigException(
            "ERROR: unknown value of `contract_execution_clause` configuration option")
    model_instance.taint_tracker_cls = x86_model.X86TaintTracker

    # observational part of the contract
    model_instance.tracer = _get_from_config(TRACERS, CONF.contract_observation_clause,
                                             "contract_observation_clause")
    return model_instance
def get_executor() -> interfaces.Executor:
    """Instantiate the hardware executor for CONF.instruction_set."""
    if CONF.executor != 'default':
        # only one executor implementation exists per ISA so far
        raise ConfigException("ERROR: unknown value of `executor` configuration option")
    return _get_from_config(EXECUTORS, CONF.instruction_set, "instruction_set")
def get_analyser() -> interfaces.Analyser:
    """Instantiate the analyser selected by CONF.analyser."""
    return _get_from_config(ANALYSERS, CONF.analyser, "analyser")
def get_coverage(instruction_set: interfaces.InstructionSetAbstract, executor_: interfaces.Executor,
                 model: interfaces.Model, analyser: interfaces.Analyser) -> interfaces.Coverage:
    """Instantiate the coverage tracker selected by CONF.coverage_type."""
    return _get_from_config(COVERAGE, CONF.coverage_type, "coverage_type", instruction_set,
                            executor_, model, analyser)
def get_minimizer(instruction_set: interfaces.InstructionSetAbstract) -> interfaces.Minimizer:
    """Instantiate the test-case minimizer selected by CONF.minimizer."""
    return _get_from_config(MINIMIZERS, CONF.minimizer, "minimizer", instruction_set)
def get_downloader(arch: str, extensions: List[str], out_file: str) -> Callable:
    """Instantiate the ISA-spec downloader for `arch`.

    NOTE(review): annotated as Callable, but _get_from_config returns an
    instance of the registered class - presumably the instance is callable;
    confirm against get_spec.Downloader.
    """
    return _get_from_config(SPEC_DOWNLOADERS, arch, "architecture", extensions, out_file)
from typing import List
# x86_option_values attribute MUST be the first attribute in the file
x86_option_values = {
    # permitted values of the corresponding configuration options
    'executor_mode': ['P+P', 'F+R', 'E+R', 'PP+P'],  # 'GPR' is intentionally left out
    'permitted_faults': [
        'DE-zero', 'DE-overflow', 'UD', 'UD-vtx', 'UD-svm', 'PF-present', 'PF-writable', 'PF-smap',
        'GP-noncanonical', 'BP', 'DB-instruction', 'assist-accessed', 'assist-dirty'
    ],
}

x86_executor_enable_prefetcher: bool = False
""" x86_executor_enable_prefetcher: enable all prefetchers"""

x86_executor_enable_ssbp_patch: bool = True
""" x86_executor_enable_ssbp_patch: enable a patch against Speculative Store Bypass"""

# NOTE(review): undocumented in the original; presumably disables 64-bit
# division instructions in generated programs - confirm against the generator
x86_disable_div64: bool = True

# Instruction categories permitted in generated test cases (XED-style naming);
# commented-out entries document why a category is excluded
x86_instruction_categories: List[str] = [
    # Base x86 - main instructions
    "BASE-BINARY",
    "BASE-BITBYTE",
    "BASE-CMOV",
    "BASE-COND_BR",
    "BASE-CONVERT",
    "BASE-DATAXFER",
    "BASE-FLAGOP",
    "BASE-LOGICAL",
    "BASE-MISC",
    "BASE-NOP",
    "BASE-POP",
    "BASE-PUSH",
    "BASE-SEMAPHORE",
    "BASE-SETCC",
    "BASE-STRINGOP",

    # Base x86 - system instructions
    "BASE-INTERRUPT",
    # "BASE-ROTATE",      # Unknown bug in Unicorn - emulated incorrectly
    # "BASE-SHIFT",       # Unknown bug in Unicorn - emulated incorrectly
    # "BASE-UNCOND_BR",   # Not supported: Complex control flow
    # "BASE-CALL",        # Not supported: Complex control flow
    # "BASE-RET",         # Not supported: Complex control flow
    # "BASE-SEGOP",       # Not supported: System instructions
    # "BASE-IO",          # Not supported: System instructions
    # "BASE-IOSTRINGOP",  # Not supported: System instructions
    # "BASE-SYSCALL",     # Not supported: System instructions
    # "BASE-SYSRET",      # Not supported: System instructions
    # "BASE-SYSTEM",      # Not supported: System instructions

    # Extensions
    "SSE-MISC",   # SFENCE
    "SSE2-MISC",  # LFENCE, MFENCE
    # "CLFLUSHOPT-CLFLUSHOPT",
    # "CLFSH-MISC",
    # "BMI1",
    "SGX-SGX",
    "VTX-VTX",
    "SVM-SYSTEM",
]

# Individual instructions that must never be generated, with the reason for each
x86_instruction_blocklist: List[str] = [
    # Hard to fix:
    # - STI - enables interrupts, thus corrupting the measurements; CLI - just in case
    "STI", "CLI",
    # - CMPXCHG8B - Unicorn doesn't execute the mem. access hook
    #   bug: https://github.com/unicorn-engine/unicorn/issues/990
    "CMPXCHG8B", "LOCK CMPXCHG8B",
    # - Incorrect emulation
    "CPUID",
    # - Requires support of segment registers
    "XLAT", "XLATB",
    # - Requires special instrumentation to avoid #DE faults
    "IDIV", "REX IDIV",
    # - Requires complex instrumentation
    "ENTERW", "ENTER", "LEAVEW", "LEAVE",
    # - requires support of all possible interrupts
    "INT",
    # - system management instruction
    "ENCLS", "VMXON", "STGI", "SKINIT",
    # - not supported
    "LFENCE", "MFENCE", "SFENCE", "CLFLUSH", "CLFLUSHOPT",
]  # yapf: disable

# x86 executor internally uses R15, R14, RSP, RBP and, thus, they are excluded
# segment registers are also excluded as we don't support their handling so far
# same for CR* and DR*
x86_register_blocklist: List[str] = [
    # free - rax, rbx, rcx, rdx, rdi, rsi
    'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'RSP', 'RBP',
    'R8D', 'R9D', 'R10D', 'R11D', 'R12D', 'R13D', 'R14D', 'R15D', 'ESP', 'EBP',
    'R8W', 'R9W', 'R10W', 'R11W', 'R12W', 'R13W', 'R14W', 'R15W', 'SP', 'BP',
    'R8B', 'R9B', 'R10B', 'R11B', 'R12B', 'R13B', 'R14B', 'R15B', 'SPL', 'BPL',
    'ES', 'CS', 'SS', 'DS', 'FS', 'GS',
    'CR0', 'CR2', 'CR3', 'CR4', 'CR8',
    'DR0', 'DR1', 'DR2', 'DR3', 'DR4', 'DR5', 'DR6', 'DR7'
]  # yapf: disable
# RevLib
Simple and efficient RevNet-Library for PyTorch with XLA and DeepSpeed support and parameter offload
## Table of Contents
* [RevLib](#revlib)
* [Table of Contents](#table-of-contents)
* [Features](#features)
* [Getting Started](#getting-started)
* [Installation](#installation)
* [Examples](#examples)
* [Reversible Backward](#reversible-backward)
* [Parameter Offload](#parameter-offload)
* [Coupling](#coupling)
* [Models](#models)
* [iRevNet](#irevnet)
    * [Reformer](#reformer)
  * [Merged Optimizer](#merged-optimizer)
  * [Utils](#utils)
* [HuggingFace](#huggingface-integration)
* [Cast Intermediates](#Cast-Intermediates)
* [Offload Intermediates](#Offload-Intermediates)
* [Explanation](#explanation)
## Features
* Less memory than gradient checkpointing (`2 * output_size` instead of `n_layers * output_size`)
* Same speed as activation checkpointing
* Extensible
* Native HuggingFace, DeepSpeed, and XLA support
## Getting started
### Installation
```
python3 -m pip install revlib
```
### Examples
#### Reversible Backward
Invertible functions allow for huge memory savings as the input can be recovered from which the gradient computation can
be restarted. It's a bit like gradient checkpointing, but with recoverable inputs. That's why a reversible network
should use less memory than a network with gradient checkpointing, and both should use less maximum memory than a normal
network.
```PYTHON
import torch
from torch.utils.checkpoint import checkpoint as checkpoint_fn
import copy
import revlib
depth = 1024
batch_size = 4096
# Create network of multiple layers (so that checkpointing makes a difference) with weight sharing (cheaper)
base = [torch.nn.Sequential(torch.nn.Linear(1, 1, bias=False), torch.nn.ReLU(),
torch.nn.Linear(1, 1, bias=False))] * depth
baseline = torch.nn.Sequential(*base)
revnet = revlib.ReversibleSequential(*base)
checkpoint = base[0]
# Forcibly enable gradients so that checkpointing stores tensors
@torch.enable_grad()
def memory_utilization(mod: torch.nn.Module, checkpoint: bool = False, features: int = 1) -> int:
torch.cuda.empty_cache() # Empty cache, just in case PyTorch didn't do it (which is usually the case)
mod = copy.deepcopy(mod).cuda() # Copy model to avoid modifying the global copy. Deallocated after the function ran
inp = torch.randn(batch_size, features, requires_grad=True).cuda()
if not checkpoint:
_unused = mod(inp) # Compute a normal forward pass if not using gradient checkpointing
else:
for _ in range(depth):
inp = checkpoint_fn(mod, inp) # Manually iterate over all layers as torch doesn't support module wrapping
return torch.cuda.memory_allocated() # Take accurate GPU memory measurements (CPU is very inaccurate)
assert memory_utilization(baseline) > memory_utilization(baseline, True) > memory_utilization(revnet, features=2)
# Outputs: 50349056, 16794624, 99328
# 48 MiB, 16 MiB, 97 KiB
```
#### Parameter Offload
Another way to save even more memory, especially for deep networks, is to offload parameters and optimizer parameters
off the GPU onto the CPU. That way the permanent storage of the network is offloaded and only the frequently-accessed
temporary cache, used to compute the immediate gradients, is kept on the GPU.
```PYTHON
import torch
import copy
import revlib
depth = 256
width = 1024
batch_size = 1
base = [torch.nn.Linear(width, width, bias=False) for _ in range(depth)]
# Initialize network with separate weights for each layer, so that offloading has a measurable benefit
def memory_utilization(offload: bool) -> int:
torch.cuda.empty_cache() # Empty cache, just in case PyTorch didn't do it (which is usually the case)
mod = copy.deepcopy(revlib.ReversibleSequential(*base, target_device="cuda" * offload)) # Copy to dealloc model
if not offload: # If not offloading to CPU, manually move the parameters
mod = mod.cuda()
_unused = mod(torch.randn(batch_size, width * 2).cuda()) # Normal forward pass, 2x features because of RevNet
return torch.cuda.memory_allocated() # Take accurate GPU memory measurements (CPU is very inaccurate)
assert memory_utilization(False) > memory_utilization(True)
# Outputs: 1073750016, 8192
# 1 GiB, 8 KiB
```
Another way of doing parameter offload would be to manually call `revlib.offload_tensor` before accessing each parameter
of a custom model. This way, you can control when the parameters are loaded onto the GPU. Sometimes it's faster to load
everything onto the GPU in a single operation, such as before calling a TorchScript function, while other times it's
more memory-efficient to load every parameter seconds before its usage.\
Internally, RevLib has the offload_tensor functionality integrated into its reversible core, giving a faster experience
thanks to parallel `non_blocking` operations.
#### Coupling
Another major feature of RevLib is to use custom coupling functions such as the one used in
[MomentumNet](https://arxiv.org/abs/2102.07870). It's a recent paper that made significant advancements in the area of
memory-efficient networks. They propose to use a momentum stream instead of a second model output as illustrated
below: 
<p align="center">Image from <a href=https://twitter.com/PierreAblin/status/1426899071495819265>the plagiarized</a> <a href=https://arxiv.org/abs/2108.05862v2>mRevNet</a></p>
Using a custom coupling operation (the functional analogue of [MemCNN](https://github.com/silvandeleemput/memcnn)) that
merges input and output streams, MomentumNet can be implemented in RevLib as seen below:
```PYTHON
import torch
from torch import nn
import revlib
channels = 64
depth = 16
momentum_ema_beta = 0.99
# Compute y2 from x2 and f(x1) by merging x2 and f(x1) in the forward pass.
def momentum_coupling_forward(other_stream: torch.Tensor, fn_out: torch.Tensor) -> torch.Tensor:
return other_stream * momentum_ema_beta + fn_out * (1 - momentum_ema_beta)
# Calculate x2 from y2 and f(x1) by manually computing the inverse of momentum_coupling_forward.
def momentum_coupling_inverse(output: torch.Tensor, fn_out: torch.Tensor) -> torch.Tensor:
return (output - fn_out * (1 - momentum_ema_beta)) / momentum_ema_beta
# Pass in coupling functions which will be used instead of x2 + f(x1) and y2 - f(x1)
rev_model = revlib.ReversibleSequential(*[layer for _ in range(depth)
for layer in [nn.Conv2d(channels, channels, (3, 3), padding=1),
nn.Identity()]],
coupling_forward=[momentum_coupling_forward, revlib.additive_coupling_forward],
coupling_inverse=[momentum_coupling_inverse, revlib.additive_coupling_inverse])
inp = torch.randn((16, channels * 2, 224, 224))
out = rev_model(inp)
assert out.size() == (16, channels * 2, 224, 224)
```
When implementing MomentumNet like this, there is no storage for lost information in the forward pass which the
MomentumNet paper accounts for. One way to work around that issue is to avoid the coupling function
altogether. [HomebrewNLP integrated the coupling functions into f() and g()](https://github.com/HomebrewNLP/HomebrewNLP/blob/efda4b1dbc320c620ed024208f0745b82fb30ebf/src/model.py#L209-L232)
which means that there is no loss of information, no matter the depth or beta of the model. The same integrated
MomentumNet is available via the [utils module](#utils).
#### Models
##### iRevNet
[iRevNet](https://openreview.net/forum?id=HJsjkMb0Z) is not only partially reversible but instead a fully-invertible
model. The [source code](https://github.com/jhjacobsen/pytorch-i-revnet) looks complex at first glance. It also doesn't
use the memory savings it could utilize, as RevNet requires custom AutoGrad functions that are hard to maintain. An
iRevNet can be implemented like this using revlib:
```PYTHON
import torch
from torch import nn
import revlib
channels = 64
channel_multiplier = 4
depth = 3
classes = 1000
# Create a basic function that's reversibly executed multiple times. (Like f() in ResNet)
def conv(in_channels, out_channels):
return nn.Conv2d(in_channels, out_channels, (3, 3), padding=1)
def block_conv(in_channels, out_channels):
return nn.Sequential(conv(in_channels, out_channels),
nn.Dropout(0.2),
nn.BatchNorm2d(out_channels),
nn.ReLU())
def block():
return nn.Sequential(block_conv(channels, channels * channel_multiplier),
block_conv(channels * channel_multiplier, channels),
nn.Conv2d(channels, channels, (3, 3), padding=1))
# Create a reversible model. f() is invoked depth-times with different weights.
rev_model = revlib.ReversibleSequential(*[block() for _ in range(depth)])
# Wrap reversible model with non-reversible layers
model = nn.Sequential(conv(3, 2 * channels), rev_model, conv(2 * channels, classes))
# Use it like you would a regular PyTorch model
inp = torch.randn((1, 3, 224, 224))
out = model(inp)
out.mean().backward()
assert out.size() == (1, 1000, 224, 224)
```
##### Reformer
[Reformer](https://arxiv.org/abs/2001.04451) uses RevNet with chunking and LSH-attention to efficiently train a
transformer. Using revlib, standard implementations, such
as [lucidrains' Reformer](https://github.com/lucidrains/reformer-pytorch/), can be improved upon to use less memory.
Below we're still using the basic building blocks from lucidrains' code to have a comparable model.
```PYTHON
import torch
from torch import nn
from reformer_pytorch.reformer_pytorch import LSHSelfAttention, Chunk, FeedForward, AbsolutePositionalEmbedding
import revlib
class Reformer(torch.nn.Module):
def __init__(self, sequence_length: int, features: int, depth: int, heads: int, bucket_size: int = 64,
lsh_hash_count: int = 8, ff_chunks: int = 16, input_classes: int = 256, output_classes: int = 256):
super(Reformer, self).__init__()
self.token_embd = nn.Embedding(input_classes, features * 2)
self.pos_embd = AbsolutePositionalEmbedding(features * 2, sequence_length)
self.core = revlib.ReversibleSequential(*[nn.Sequential(nn.LayerNorm(features), layer) for _ in range(depth)
for layer in
[LSHSelfAttention(features, heads, bucket_size, lsh_hash_count),
Chunk(ff_chunks, FeedForward(features, activation=nn.GELU),
along_dim=-2)]],
split_dim=-1)
self.out_norm = nn.LayerNorm(features * 2)
self.out_linear = nn.Linear(features * 2, output_classes)
def forward(self, inp: torch.Tensor) -> torch.Tensor:
return self.out_linear(self.out_norm(self.core(self.token_embd(inp) + self.pos_embd(inp))))
sequence = 1024
classes = 16
model = Reformer(sequence, 256, 6, 8, output_classes=classes)
out = model(torch.ones((16, sequence), dtype=torch.long))
assert out.size() == (16, sequence, classes)
```
#### Merged Optimizer
Another optimization RevLib allows is to merge the optimizer step and backward.\
Instead of first computing a backward pass and then applying the gradients in a separate stage, RevLib can apply the
gradients immediately while calculating the backward pass. This change allows you to get speedups by taking advantage of
asynchronous computation and means that you don't have to instantiate all gradients simultaneously. So, instead of
storing all gradients simultaneously, you only keep the gradients of one layer while still arriving at the same results.
Below is a small example demonstrating just how much memory this can save:
```PYTHON
import random
import typing
import numpy as np
import torch
import revlib
def optim(params: typing.Iterable[torch.Tensor]):
return torch.optim.SGD(params, lr=1e-3)
SIZE = 2048
DEPTH = 64
BATCH_SIZE = 1
STEPS = 4
def block():
return torch.nn.Sequential(torch.nn.Linear(SIZE, SIZE),
torch.nn.ReLU(),
torch.nn.Linear(SIZE, SIZE))
def run(fused: bool):
torch.manual_seed(42069)
random.seed(42069)
np.random.seed(42069)
model = revlib.ReversibleSequential(*[block() for _ in range(DEPTH)], fused_optimizer=optim if fused else None)
model.cuda()
optimizer = None if fused else optim(model.parameters())
mean_loss = 0
max_mem = 0
for i in range(STEPS):
max_mem = max(torch.cuda.memory_allocated(), max_mem)
inp = torch.randn((BATCH_SIZE, SIZE * 2), requires_grad=True, device='cuda')
max_mem = max(torch.cuda.memory_allocated(), max_mem)
loss = (model(inp) - inp).abs().mean()
max_mem = max(torch.cuda.memory_allocated(), max_mem)
loss.backward()
max_mem = max(torch.cuda.memory_allocated(), max_mem)
if not fused:
optimizer.step()
max_mem = max(torch.cuda.memory_allocated(), max_mem)
model.zero_grad(set_to_none=True)
max_mem = max(torch.cuda.memory_allocated(), max_mem)
with torch.no_grad():
mean_loss += loss.item()
print(f"Loss: {mean_loss / STEPS:12.10f} - Memory: {max_mem * 2 ** -20:7.2f} MiB")
run(True) # Fused Optimizer. Results: Loss: 1.7444351912 - Memory: 2049.05 MiB
run(False) # Default Optimizer. Results: Loss: 1.7444351912 - Memory: 4098.03 MiB
```
As you can see, while the loss is still the exact same, the model uses half the memory at its peak. The freed-up memory
would allow you to create 504 million more parameters. Considering that the model only has 512 million parameters, this
would mean you could use ~100% more parameters!\
Of course, the absolute freed memory would stay the same if the optimizer had buffers, such as SGD with momentum.
Because of that, the relative memory advantage would decrease. That's why a memory-efficient optimizer
like [SM3](https://arxiv.org/abs/1901.11150) or
[8-bit Adam](https://github.com/facebookresearch/bitsandbytes/#using-the-8-bit-optimizers) is perfect here.
#### Utils
##### HuggingFace Integration
RevLib also has its own `utils` module which provides helpful functions as `residual_to_momentum_net`. Using RevLib, you
can trivially convert any HuggingFace transformer into a MomentumNet without significant loss of performance. Especially
during fine-tuning, this can be a life-saver, as it allows for significantly bigger models to fit into memory without
the need to manually (or [automatically](https://arxiv.org/abs/2006.09616)) create countless buffers for activation
checkpointing.\
With the addition of `MomentumNet`, there is one more hyperparameter to tune. Small values of `beta` allow the model to
continue functioning as normal:
```PYTHON
from transformers import AutoModelForCausalLM, GPT2TokenizerFast
from revlib.utils import module_list_to_momentum_net
tokenizer = GPT2TokenizerFast.from_pretrained('gpt2')
tokens = tokenizer(["The shadowy hacker group Eleuther"], return_tensors='pt')['input_ids']
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
original_layers = model.transformer.h
print(tokenizer.decode(model.generate(input_ids=tokens)[0]))
# The shadowy hacker group Eleutheria has been busy for the past few months. The group has been
model.transformer.h = module_list_to_momentum_net(original_layers, residual=True, beta=0.1)
print(tokenizer.decode(model.generate(input_ids=tokens)[0]))
# The shadowy hacker group Eleutheria has been busy for the past few months. The group has been
```
On the other hand, large values improve numerical stability of deep networks at the cost of slightly altering the
information flow.
```PYTHON
model.transformer.h = module_list_to_momentum_net(original_layers, residual=True, beta=0.5)
print(tokenizer.decode(model.generate(input_ids=tokens)[0]))
# The shadowy hacker group Eleutherian psi- psi- psi- psi psi psi psi psi psi psi
```
Either way, both can be used to train the models just the same as you're used to! While the gradients might differ
between models, there is no performance degradation after fine-tuning.
```PYTHON
model(tokens)[0].mean().backward()
print(next(iter(model.parameters())).grad.mean().item())
# -7.596428730494154e-08
```
As expected, the memory consumption for the modified model is significantly lower during training than that of a
non-checkpointed model:
```PYTHON
import time
import torch
from transformers import AutoModelForCausalLM
from memory_profiler import memory_usage
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
tokens = torch.zeros((4, 2048), dtype=torch.long)
start_time = time.time()
memory = max(memory_usage((lambda: model(tokens)[0].mean().backward(),)))
print(time.time() - start_time)
# 206.94576001167297
print(memory - max(memory_usage((lambda: None,))))
# 150272.09765625
```
```PYTHON
import time
import torch
from transformers import AutoModelForCausalLM
from revlib.utils import module_list_to_momentum_net
from memory_profiler import memory_usage
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B")
model.transformer.h = module_list_to_momentum_net(model.transformer.h, residual=True, beta=0.5) # The only difference
tokens = torch.zeros((4, 2048), dtype=torch.long)
start_time = time.time()
memory = max(memory_usage((lambda: model(tokens)[0].mean().backward(),)))
print(time.time() - start_time)
# 275.42114186286926
print(memory - max(memory_usage((lambda: None,))))
# 6187.0703125
```
##### Cast Intermediates
Another nice feature RevLib has, is that it can automatically offload intermediate values or cast them to another
datatype. Casting the intermediate tensors used during the backward pass from float32 to half-precision (float16) would
halve the memory required for intermediate values.\
To integrate it into existing code, take a look at what we do below:
```PYTHON
import torch
from revlib.utils import memory_efficient_intermediates
ITEMS = 2 ** 24
ITERATIONS = 32
def run():
# Model code here
torch.manual_seed(0)
out = a = torch.randn((ITEMS,), device='cuda', dtype=torch.float32, requires_grad=True)
for i in range(ITERATIONS):
out = out * torch.randn_like(a)
print(f'Output: {a.mean().item():} - Memory: {torch.cuda.memory_allocated() * 1e-6:.2f}MB', end='')
out.mean().backward()
print(f' - Grad: {out.mean().item()}')
run() # Output: -0.0002206185890827328 - Memory: 2281.70MB - Grad: 0.00011316053132759407
with memory_efficient_intermediates(torch.half):
run() # Output: -0.0002206185890827328 - Memory: 1207.96MB - Grad: 0.00011316053132759407
```
The peak memory consumption is over 2GB when running the function normally, as PyTorch has to allocate many intermediate
values and store them in float32. If you instead add a cast to the tensors kept for the backward pass, the memory
consumption gets halved while both output and gradient stay the same. Here, we only have to add
the `memory_efficient_intermediates` context wrapper, which handles casts automatically. Note that this only changes the
tensors that are kept for the backward pass and alters the gradients slightly but doesn't influence the forward pass in
any way. Nevertheless, doing it this way is critical to avoid casting down to float16 and back up again during the
forward pass.\
Similar to casts from float32 to float16, you could also cast float64 to float16, float64 to float32 or even mix these!\
For example, when switching the computation datatype above from float32 to float64, the program will generate the
following printout:
```
Output: -1.2457215497263025e-05 - Memory: 4563.40MB - Grad: 0.00019801305610731704
Output: -1.2457215497263025e-05 - Memory: 1342.18MB - Grad: 0.00019801305610731704
```
As you can see, the model uses almost four times less memory, giving you the memory advantages of float16 without losing
any of the precision of float64.
##### Offload Intermediates
Going one step further with the concepts from above, we can even offload the intermediate values onto the CPU.
Intermediate-Offloading is akin to [Parameter-Offloading](#parameter-offload), as we've done above, but moves the adaptive
memory onto the CPU while the GPU is free to compute whatever it wants. In practice, moving all intermediates means that
the model has the same GPU-memory consumption as if it were to run with `torch.no_grad` or in `torch.inference_mode`
while still allowing backpropagation without any loss of accuracy!
```PYTHON
import torch
from revlib.utils import memory_efficient_intermediates
ITEMS = 2 ** 24
ITERATIONS = 32
def run():
# Model code here
torch.manual_seed(0)
out = a = torch.randn((ITEMS,), device='cuda', dtype=torch.float32, requires_grad=True)
for i in range(ITERATIONS):
out = out * torch.randn_like(a)
print(f'Output: {a.mean().item():} - Memory: {torch.cuda.memory_allocated() * 1e-6:.2f}MB', end='')
out.mean().backward()
print(f' - Grad: {out.mean().item()}')
run() # Output: -0.0002206185890827328 - Memory: 2281.70MB - Grad: 0.00011316053132759407
with memory_efficient_intermediates(storage_device='cpu'): # <-- This is the only line that's modified
run() # Output: -0.0002206185890827328 - Memory: 134.22MB - Grad: 0.00011316053132759407
with torch.no_grad():
run() # Output: -0.0002206185890827328 - Memory: 134.22MB - Grad: 0.00011316053132759407
# It will error here, but that doesn't matter as the memory gets measured before the backward pass.
```
As you can see, the new memory consumption is the same as if the model were to run with `torch.no_grad`, with the minor
difference that it can still produce 100% accurate gradients. Of course, this free memory doesn't come from anywhere.
It's just that the tensors that have to be stored in the normal computation (but not with `torch.no_grad`)
are now moved to the CPU.\
However, as there is no real prefetching, the model will be slower, as PyTorch has to query a buffer for every
intermediate tensor used in the backward pass and get the tensors from there.
## Explanation
Most other RevNet libraries, such as [MemCNN](https://github.com/silvandeleemput/memcnn)
and [Revtorch](https://github.com/RobinBruegger/RevTorch) calculate both f() and g() in one go, to create one large
computation. RevLib, on the other hand, brings Mesh
TensorFlow's ["reversible half residual and swap"](https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/layers.py#L2191)
to PyTorch. `reversible_half_residual_and_swap` computes only one of f() and g() and swaps the inputs and gradients.
This way, the library only has to store one output as it can recover the other output during the backward pass.\
Following Mesh TensorFlow's example, revlib also uses separate x1 and x2 tensors instead of concatenating and splitting
at every step to reduce the cost of memory-bound operations.
RevNet's memory consumption doesn't scale with its depth, so it's significantly more memory-efficient for deep models.
One problem in most implementations was that two tensors needed to be stored in the output, quadrupling the required
memory. The high memory consumption rendered RevNet nearly useless for small networks, such as BERT, with its six
layers.\
RevLib works around this problem by storing only one output and two inputs for each forward pass, giving a model as
small as BERT a >2x improvement!
Ignoring the dual-path structure of a RevNet, it used to be much slower than gradient checkpointing. However,
RevLib uses minimal coupling functions and has no overhead between Sequence items, allowing it to train as fast as a
comparable model with gradient checkpointing.
| /revlib-1.7.0.tar.gz/revlib-1.7.0/README.md | 0.950423 | 0.916596 | README.md | pypi |
import dateutil.parser
import json
from datetime import timedelta
class Field(object):
    """Base descriptor for declaring typed attributes on a :class:`WrapperObject`.

    Subclasses override :meth:`convert` to coerce a raw JSON value into the
    desired Python type.
    """

    def __init__(self, source=None):
        # Optional JSON key to read from when it differs from the attribute name.
        self.source = source
        # Filled in on wrapper instances; a Field with no client is a prototype.
        self.client = None

    def convert(self, value, client):
        """Return *value* unchanged; subclasses implement real coercion."""
        return value
class StringField(Field):
    """Field that coerces raw values to ``str``; ``None`` passes through."""

    def convert(self, value, client):
        return None if value is None else str(value)
class BooleanField(Field):
    """Field that coerces any raw value to ``bool`` via truthiness."""

    def convert(self, value, client):
        # bool(value) is the idiomatic form of ``True if value else False``.
        return bool(value)
class IDField(StringField):
    """String field used for object identifiers; behaves like StringField."""
    pass
class FloatField(Field):
    """Field that coerces raw values to ``float``; ``None`` passes through."""

    def convert(self, value, client):
        return None if value is None else float(value)
class IntegerField(Field):
    """Field that coerces raw values to ``int``; ``None`` passes through."""

    def convert(self, value, client):
        return None if value is None else int(value)
class DateTimeField(Field):
    """Field that parses a datetime string with ``dateutil``; ``None`` passes through."""

    def convert(self, value, client):
        return None if value is None else dateutil.parser.parse(value)
class DateField(Field):
    """Field that parses a datetime string and keeps only the date part."""

    def convert(self, value, client):
        return None if value is None else dateutil.parser.parse(value).date()
class RawField(Field):
    """Field that keeps the raw JSON value without any conversion."""

    def convert(self, value, client):
        return value
class DictField(Field):
    """Field that expects a JSON object (``dict``) and returns it as-is."""

    def convert(self, value, client):
        if value is None:
            return None
        if not isinstance(value, dict):
            # Explicit exception (mirroring ListField) instead of ``assert``,
            # which is silently stripped when running under ``python -O``.
            raise ValueError("Value is not a dict, is a {}".format(
                type(value)
            ))
        return value
class DurationField(Field):
    """Field that converts a number of seconds into a :class:`timedelta`."""

    def convert(self, value, client):
        if value is None:
            return None
        seconds = float(value)
        return timedelta(seconds=seconds)
class DataframeField(Field):
    """Field holding a pandas DataFrame; the value is stored as given."""

    def convert(self, value, client):
        return value
class LinkField(Field):
    """Field for back-references (e.g. dataset -> workout).

    WrapperObject special-cases LinkField to allow cyclic references, so
    conversion here is the identity.
    """

    def convert(self, value, client):
        return value
class ListField(Field):
    """Field wrapping a JSON array whose items are converted by *subfield*."""

    def __init__(self, subfield: Field, source=None):
        super().__init__(source=source)
        assert subfield is not None
        self.subfield = subfield

    def convert(self, value, client):
        if not value:
            # None / missing / empty all normalise to an empty list.
            return []
        if isinstance(value, list):
            # Comprehension instead of map+lambda: clearer and builds the
            # list directly without the intermediate iterator conversion.
            return [self.subfield.convert(item, client) for item in value]
        raise ValueError("Value is not a list, is a {}".format(
            type(value)
        ))
class WrapperObject(Field):
    """Base class for API objects that lazily convert JSON via declared Fields.

    A WrapperObject without a client is a "prototype" (used as a class-level
    Field declaration).  Once instantiated with a client and a JSON payload,
    attribute access is resolved through the declared Field descriptors:
    optional ``resolve_<name>`` / ``post_resolve_<name>`` hooks may customise
    conversion, and each converted value is cached per instance.
    """

    def __init__(self, client=None, json=None):
        super().__init__()
        if json and not isinstance(json, dict) and not isinstance(json, list):
            # Fixed typo in the message ("should nbe" -> "should be").
            raise ValueError("json should be a dict or list, got {}".format(
                json
            ))
        self.client = client
        self._json = json
        self._cache = dict()

    def __getattribute__(self, name):
        # Read internals through object.__getattribute__ to avoid recursion.
        json = super().__getattribute__("_json")
        cls = super().__getattribute__("__class__")
        cache = super().__getattribute__("_cache")
        client = super().__getattribute__("client")
        if not client:
            # Prototype object: behave like a plain attribute lookup.
            return super().__getattribute__(name)
        if name in cache:
            # Cache already contains value
            return cache[name]
        field = getattr(cls, name, None)
        if isinstance(field, Field):
            # NOTE: a second ``name in cache`` check used to live here; it was
            # unreachable (the cache is checked above and not mutated since),
            # so it has been removed.
            json_name = field.source or name
            resolver_fun = getattr(self, "resolve_{}".format(name), None)
            if resolver_fun:
                # we have a resolve function
                orig = resolver_fun(json, field)
            elif isinstance(field, LinkField):
                # Special case to create cyclic references
                return super().__getattribute__(name)
            elif not json:
                orig = None
            else:
                orig = json.get(json_name, None)
            if isinstance(orig, WrapperObject):
                # already wrapped
                target = orig
            else:
                target = field.convert(orig, self.client)
            post_resolver_fun = getattr(
                self, "post_resolve_{}".format(name), None)
            if post_resolver_fun:
                # Hooks may veto replacement by returning None.
                new_target = post_resolver_fun(target)
                if new_target is not None:
                    target = new_target
            cache[name] = target
            return target
        else:
            return super().__getattribute__(name)

    def json(self):
        """Return the raw JSON payload backing this object."""
        return self._json

    def convert(self, value, client):
        # Duplicate object with filled client and value
        return self.__class__(client, value)

    def __repr__(self):
        if not self.client:
            return "<Prototype: {}>".format(self.__class__)
        else:
            return "<Wrapper [{}]: {}>".format(
                self.__class__,
                json.dumps(self.json(), indent=4))
json.dumps(self.json(), indent=4)) | /revo-client-0.9.0.tar.gz/revo-client-0.9.0/myotest/wrapper.py | 0.624064 | 0.284762 | wrapper.py | pypi |
import re
import pandas as pd
from uuid import uuid4
from datetime import date
from dateutil.relativedelta import relativedelta
from myotest.wrapper import (
WrapperObject, StringField, DateTimeField, ListField,
IDField, FloatField, DurationField, LinkField,
DictField, DateField, IntegerField, BooleanField,
DataframeField
)
from myotest.exception import ResourceNotFoundException
class AvroSchemaField(WrapperObject):
    """Avro schema description attached to a dataset."""

    fields = ListField(DictField())  # raw Avro field declarations
    name = StringField()
    type = StringField()
class Dataset(WrapperObject):
    """A named data series belonging to a workout (e.g. ``"mil-1"``)."""

    name = StringField()
    workout = LinkField()  # back-reference, filled by Workout.post_resolve_datasets
    describe = DictField()
    avro_schema = AvroSchemaField()

    async def load_dataframe(self):
        """Fetch the dataset's Avro payload and cache it as a pandas DataFrame."""
        if hasattr(self, "_dataframe"):
            # Already loaded
            return
        reader = await self.client.fetch_avro(self._json["cloud_url"])
        # Rows are sorted by their "time" column so slot slicing works.
        self._dataframe = DataframeField().convert(
            pd.DataFrame(list(reader)).sort_values("time"),
            self.client
        )

    @property
    def dataframe(self):
        # Helpful error instead of AttributeError when not yet loaded.
        if not hasattr(self, "_dataframe"):
            raise ValueError(
                "Dataframe not loaded, call "
                "'await workout.load_dataframe()' on workout or "
                "'await slot.load_dataframe(name)' on slot first")
        return self._dataframe

    def dataframe_for_slot(self, slot):
        """Return the rows of this dataset that fall inside *slot*'s time span."""
        start_time = slot.start_time.total_seconds()
        end_time = slot.end_time.total_seconds()
        df = self.dataframe
        # Half-open interval [start_time, end_time).
        return df[
            (df["time"] < end_time) &
            (df["time"] >= start_time)
        ]

    @property
    def type(self):
        # Dataset names look like "<type>-<index>"; keep only the type part.
        return self.name.split("-")[0]
class SlotValue(WrapperObject):
    """Target value of a slot, e.g. ``{"type": "duration", "value": 60.0}``."""

    type = StringField()
    value = FloatField()
class TrainingLoad(WrapperObject):
    """Requested and effective training load.

    In JSON the requested load is a two-element ``[min, max]`` list; the
    resolve hooks below split it into two scalar attributes.
    """

    requested_min = IntegerField()
    requested_max = IntegerField()
    effective = IntegerField()

    def resolve_requested_min(self, obj, field):
        return obj["requested"][0]

    def resolve_requested_max(self, obj, field):
        return obj["requested"][1]
class ResultData(WrapperObject):
    """Summary statistics for one measured quantity over a slot."""

    min = FloatField()
    max = FloatField()
    count = IntegerField()
    mean = FloatField()
    median = FloatField()
    std = FloatField()
class SlotResult(WrapperObject):
    """Aggregated results computed for one slot.

    Most attributes are per-metric summary statistics (:class:`ResultData`);
    the remaining fields are slot-level scalar values.
    """

    power = ResultData()
    speed = ResultData()
    cadence = ResultData()
    undulation = ResultData()
    stiffness = ResultData()
    stride_length = ResultData()
    generic_stride_length = ResultData()
    step_length = ResultData()
    generic_step_length = ResultData()
    effective_flight_time = ResultData()
    effective_contact_time = ResultData()
    distance = FloatField()
    regularity_90 = FloatField()
    step_count_ratio = FloatField()
    regularity_median = FloatField()
    gps_valid = BooleanField()
    cmj_note = FloatField()
class Slot(WrapperObject):
    """One labelled segment of a workout (e.g. warm-up, interval, recovery)."""

    id = IDField()
    tags = ListField(StringField())
    type = StringField()
    value = SlotValue()
    text = StringField()
    result = SlotResult()
    end_time = DurationField()
    start_time = DurationField()
    power_type = StringField()
    analysis = ListField(DictField())
    training_load = TrainingLoad()
    workout = LinkField()  # back-reference, filled by Workout.load_slots

    async def load_dataframe(self, dataset_name):
        # Delegates to the owning workout, which holds the datasets.
        return await self.workout.load_dataframe(dataset_name)

    def get_dataframe(self, dataset_name):
        """Return the named dataset's rows restricted to this slot's time span."""
        return self.workout.get_dataset(
            dataset_name).dataframe_for_slot(self)

    @property
    def duration(self):
        # Elapsed time covered by the slot.
        return self.end_time - self.start_time
class Asset(WrapperObject):
    """A binary file attached to a workout."""

    name = StringField()
    mime_type = StringField()
    workout = LinkField()
    size = IntegerField()

    async def load_data(self):
        """Fetch the asset's bytes and cache them on the instance."""
        if hasattr(self, "_data"):
            # Already loaded
            return
        self._data = await self.client.fetch_asset(self._json["cloud_url"])

    @property
    def data(self):
        if not hasattr(self, "_data"):
            # Fixed message: the loader is ``load_data`` (the old text pointed
            # at a non-existent ``load_dataframe`` method).
            raise ValueError(
                "Data not loaded, call 'await asset.load_data()' first")
        return self._data
class MalData(WrapperObject):
    """MAL (speed/cadence profile) data attached to a workout."""

    speeds = ListField(FloatField())
    cadences = ListField(FloatField())
class Workout(WrapperObject):
    """A recorded training session with its datasets, assets and slots."""

    id = IDField()
    title = StringField()
    start = DateTimeField()
    end = DateTimeField()
    type = StringField()
    target_duration = DurationField()
    effective_duration = DurationField()
    tags = ListField(StringField())
    custom_data = DictField()
    datasets = ListField(Dataset(), source="data")
    assets = ListField(Asset(), source="assets")
    training_load = TrainingLoad()
    mal_data = MalData()

    def _get_datasets(self, base_name):
        """Return datasets matching *base_name*.

        A name containing "-" is treated as an exact (regex-anchored) name;
        otherwise any indexed variant matches ("mil" -> "mil-0", "mil-1", ...).
        """
        if "-" in base_name:
            regexp = re.compile("^{}$".format(base_name))
        else:
            regexp = re.compile("^{}-[0-9]+$".format(base_name))
        return [x for x in self.datasets if re.match(regexp, x.name)]

    def post_resolve_datasets(self, datasets):
        # Fill the LinkField back-reference after conversion; returning None
        # keeps the converted list (see WrapperObject.__getattribute__).
        for ds in datasets:
            ds.workout = self

    def get_datasets(self, name):
        """Return every dataset whose name matches *name*."""
        return list(self._get_datasets(name))

    def get_dataset(self, name):
        """Return the first matching dataset, or None."""
        datasets = self._get_datasets(name)
        if len(datasets) > 0:
            return datasets[0]
        else:
            return None

    async def load_dataframe(self, name):
        """Load the first matching dataset's dataframe; True if one was found."""
        datasets = self._get_datasets(name)
        if len(datasets) > 0:
            await datasets[0].load_dataframe()
            return True
        return False

    async def load_slots(self):
        """Fetch and cache this workout's slots, faking one for legacy workouts."""
        if hasattr(self, "_slots"):
            # Already loaded
            return
        try:
            slots = await self.client.get_slots(self._json["id"])
        except ResourceNotFoundException:
            slots = None
        if slots is None:
            # Legacy workout without server-side slots: synthesize a single
            # slot covering the whole session.
            slots = self.create_fake_slots()
        self._slots = ListField(Slot()).convert(slots, self.client)
        for s in self._slots:
            s.workout = self

    def get_asset(self, asset_name):
        """Return the asset with the exact given name, or None."""
        asset = [x for x in self.assets if x.name == asset_name]
        if len(asset) > 0:
            return asset[0]
        else:
            return None

    @property
    def slots(self):
        if not hasattr(self, "_slots"):
            raise ValueError(
                "Slots not loaded, call 'await workout.load_slots()' first")
        return self._slots

    def create_fake_slots(self):
        """
        Create a single slot matching the full workout
        This is for legacy workout without validation and slots
        :return: Slot
        """
        # Skeleton slot with neutral/empty results; refined below if a "mil"
        # dataset with describe statistics is available.
        single_slot = {
            "id": str(uuid4()),
            "text": self.title,
            "tags": self.tags,
            "type": "unknown",
            "value": {
                "type": "duration",
                "value": self.target_duration.total_seconds(),
            },
            "end_time": self.effective_duration.total_seconds(),
            "power_type": "",
            "start_time": 0,
            "result": {
                "regularity_90": 1.0,
                "regularity_median": 1.0,
                "step_count_ratio": 0.0,
                "speed": None,
                "distance": 0,
                "power": None,
                "cadence": None,
                "undulation": None,
                "stiffness": None,
                "step_length": None,
                "generic_step_length": None,
                "effective_flight_time": None,
                "effective_contact_time": None,
                "gps_valid": None,
                "cmj_note": 0
            }
        }
        mil_ds = self.get_dataset("mil")
        if mil_ds:
            rounding = 3

            def extract_result(key):
                # Build a ResultData-shaped dict from the dataset's describe
                # statistics, zero-filled when the key is absent.
                if key not in mil_ds.describe:
                    return {
                        "max": 0,
                        "min": 0,
                        "std": 0,
                        "mean": 0,
                        "count": 0,
                        "median": 0,
                    }
                return {
                    "max": round(mil_ds.describe[key]["max"], rounding),
                    "min": round(mil_ds.describe[key]["min"], rounding),
                    "std": round(mil_ds.describe[key]["std"], rounding),
                    "mean": round(mil_ds.describe[key]["mean"], rounding),
                    "count": round(mil_ds.describe[key]["count"], rounding),
                    # We don't have median in describe
                    "median": round(mil_ds.describe[key]["mean"], rounding),
                }

            step_count_walk = extract_result("stepCountWalk")["max"]
            step_count_run = extract_result("stepCountRun")["max"]
            # Guard against division by zero when no running steps exist.
            step_count_ratio = round(
                step_count_walk / step_count_run, rounding
            ) if step_count_run > 0 else 0.0
            output_source = extract_result("outputSource")
            single_slot["result"] = {
                "speed": extract_result("gpsRecoverySpeed"),
                "distance": extract_result("gpsRecoveryDistance")["max"],
                "power": extract_result("gpsRecoveryPower"),
                "cadence": extract_result("cadence"),
                "undulation": extract_result("undulation"),
                "stiffness": extract_result("stiffness"),
                "generic_stride_length": extract_result("genericStrideLength"),
                "stride_length": extract_result("strideLength"),
                "generic_step_length": extract_result("genericStrideLength"),
                "step_length": extract_result("strideLength"),
                "effective_flight_time":
                    extract_result("effectiveFlightTime"),
                "effective_contact_time":
                    extract_result("effectiveContactTime"),
                # NOTE(review): output_source is always a dict here, so the
                # "is not None" check is redundant; kept as-is. The 4.0 value
                # presumably identifies a GPS source — confirm with the data
                # spec.
                "gps_valid": bool(
                    (output_source is not None) and
                    (output_source["min"] == 4.0) and
                    (output_source["max"] == 4.0)),
                "regularity_90": 1,
                "step_count_ratio": step_count_ratio,
                "regularity_median": 1
            }
        return [single_slot]

    def post_resolve_slots(self, slots):
        # Fill the LinkField back-reference after conversion.
        for s in slots:
            s.workout = self

    def _get_slots(self, tags):
        # A slot matches when it carries every requested tag.
        return [x for x in self.slots if
                x.tags and set(tags).issubset(set(x.tags))]

    def get_slot_with_tags(self, tags):
        """Return the first slot carrying all *tags*, or None."""
        slots = self._get_slots(tags)
        if len(slots) > 0:
            return slots[0]
        else:
            return None

    def get_all_slots_with_tags(self, tags):
        """Return every slot carrying all *tags*."""
        return self._get_slots(tags)

    def dataset_names(self):
        return [x.name for x in self.datasets]

    def dataset_types(self):
        # Distinct "<type>" prefixes across all dataset names.
        return set([x.type for x in self.datasets])

    def resolve_training_load(self, json, field):
        # Aggregate the training load across all slots.
        result = {
            'effective': 0,
            'requested': [0, 0]
        }
        for slot in self.slots:
            result['effective'] += slot.training_load.effective
            result['requested'] = [
                result['requested'][0] + slot.training_load.requested_min,
                result['requested'][1] + slot.training_load.requested_max,
            ]
        return result
class ProfileHistoryItem(WrapperObject):
    """One historical snapshot of a profile's fitness scores."""

    created = DateTimeField()
    purdy_points = FloatField()
    magic_avg_pace = FloatField()
    running_style_score = FloatField()
    running_fitness_score = FloatField()
    aerobic_capacity_score = FloatField()
    body_composition_score = FloatField()
    muscular_strength_score = FloatField()
    running_fitness_score_normed = FloatField()
    aerobic_capacity_score_normed = FloatField()
    body_composition_score_normed = FloatField()
    muscular_strength_score_normed = FloatField()
    vo2_max = FloatField()
    vma = FloatField()
    pma = FloatField()
    running_efficiency = FloatField()
class Profile(WrapperObject):
    """A user profile with physical characteristics and fitness scores."""

    id = IDField()
    full_name = StringField()
    gender = StringField()
    weight = FloatField()
    height = FloatField()
    leg_length = FloatField()
    waist = FloatField()
    vma = FloatField()
    pma = FloatField()
    vo2_max = FloatField()
    birth_date = DateField()
    age = FloatField()
    running_style_score = FloatField()
    aerobic_capacity_score = FloatField()
    muscular_strength_score = FloatField()
    body_composition_score = FloatField()
    running_fitness_score = FloatField()

    def resolve_age(self, json, field):
        # Age in whole years derived from the birth date; 0 when unknown.
        if not self.birth_date:
            return 0
        return relativedelta(date.today(), self.birth_date).years

    async def load_history(self):
        """Fetch and cache the profile's score history."""
        if hasattr(self, "_history"):
            # Already loaded
            return
        try:
            history = await self.client.get_profile_history(self.id)
        except ResourceNotFoundException:
            history = None
        self._history = ListField(ProfileHistoryItem()).convert(
            history, self.client)
        for s in self._history:
            s.profile = self

    @property
    def history(self):
        if not hasattr(self, "_history"):
            # Fixed message: the loader lives on the profile, not the workout.
            raise ValueError(
                "History not loaded, call "
                "'await profile.load_history()' first")
        return self._history
from __future__ import annotations
import mimetypes
from typing import TYPE_CHECKING
from .enums import AssetType
from .utils import Ulid
if TYPE_CHECKING:
from io import IOBase
from .state import State
from .types import File as FilePayload
__all__ = ("Asset", "PartialAsset")
class Asset(Ulid):
    """Represents a file on revolt

    Attributes
    -----------
    id: :class:`str`
        The id of the asset
    tag: :class:`str`
        The tag of the asset, this corresponds to where the asset is used
    size: :class:`int`
        Amount of bytes in the file
    filename: :class:`str`
        The name of the file
    height: Optional[:class:`int`]
        The height of the file if it is an image or video
    width: Optional[:class:`int`]
        The width of the file if it is an image or video
    content_type: :class:`str`
        The content type of the file
    type: :class:`AssetType`
        The type of asset it is
    url: :class:`str`
        The asset's url
    """
    __slots__ = ("state", "id", "tag", "size", "filename", "content_type", "width", "height", "type", "url")

    def __init__(self, data: FilePayload, state: State):
        self.state: State = state
        self.id: str = data['_id']
        self.tag: str = data['tag']
        self.size: int = data['size']
        self.filename: str = data['filename']
        metadata = data['metadata']
        self.height: int | None
        self.width: int | None
        if metadata["type"] == "Image" or metadata["type"] == "Video":  # cannot use `in` because type narrowing will not happen
            self.height = metadata["height"]
            self.width = metadata["width"]
        else:
            # Non-visual assets carry no dimensions.
            self.height = None
            self.width = None
        self.content_type: str | None = data["content_type"]
        self.type: AssetType = AssetType(metadata["type"])
        # The file server ("autumn") base url comes from the API info payload.
        base_url = self.state.api_info["features"]["autumn"]["url"]
        self.url: str = f"{base_url}/{self.tag}/{self.id}"

    async def read(self) -> bytes:
        """Reads the files content into bytes"""
        return await self.state.http.request_file(self.url)

    async def save(self, fp: IOBase) -> None:
        """Reads the files content and saves it to a file

        Parameters
        -----------
        fp: IOBase
            The file to write to
        """
        fp.write(await self.read())
class PartialAsset(Asset):
    """Partial asset for when we get limited data about the asset

    Attributes
    -----------
    id: :class:`str`
        The id of the asset, this will always be ``"0"``
    size: :class:`int`
        Amount of bytes in the file, this will always be ``0``
    filename: :class:`str`
        The name of the file, this be always be ``""``
    height: Optional[:class:`int`]
        The height of the file if it is an image or video, this will always be ``None``
    width: Optional[:class:`int`]
        The width of the file if it is an image or video, this will always be ``None``
    content_type: Optional[:class:`str`]
        The content type of the file, this is guessed from the url's file extension if it has one
    type: :class:`AssetType`
        The type of asset it is, this always be ``AssetType.file``
    """

    def __init__(self, url: str, state: State):
        self.state: State = state
        self.id: str = "0"
        self.size: int = 0
        self.filename: str = ""
        self.height: int | None = None
        self.width: int | None = None
        # Bug fix: guess_extension() maps a MIME type to an extension and
        # always returned None for a URL; guess_type() is the correct API for
        # guessing a content type from a path/url (first tuple element).
        self.content_type: str | None = mimetypes.guess_type(url)[0]
        self.type: AssetType = AssetType.file
        self.url: str = url
from __future__ import annotations
from typing import TYPE_CHECKING, AsyncGenerator, Optional
from .types.message import Component
from .enums import SortType
if TYPE_CHECKING:
from .embed import SendableEmbed
from .file import File
from .message import Masquerade, Message, MessageInteractions, MessageReply
from .state import State
__all__ = ("Messageable",)
class Messageable:
    """Base class for all channels that you can send messages in

    Attributes
    -----------
    id: :class:`str`
        The id of the channel
    """
    state: State

    __slots__ = ()

    async def _get_channel_id(self) -> str:
        # Subclasses must return the id of the channel messages go to.
        raise NotImplementedError

    async def send(
        self,
        content: Optional[str] = None,
        *,
        embeds: Optional[list[SendableEmbed]] = None,
        embed: Optional[SendableEmbed] = None,
        attachments: Optional[list[File]] = None,
        replies: Optional[list[MessageReply]] = None,
        reply: Optional[MessageReply] = None,
        masquerade: Optional[Masquerade] = None,
        interactions: Optional[MessageInteractions] = None,
        components: Optional[list[Component]] = None,
        stream_generator: Optional[AsyncGenerator[str, None]] = None,
    ) -> Message:
        """Sends a message in a channel, you must send at least one of either `content`, `embeds` or `attachments`

        Parameters
        -----------
        content: Optional[:class:`str`]
            The content of the message, this will not include system message's content
        attachments: Optional[list[:class:`File`]]
            The attachments of the message
        embed: Optional[:class:`SendableEmbed`]
            The embed to send with the message
        embeds: Optional[list[:class:`SendableEmbed`]]
            The embeds to send with the message
        replies: Optional[list[:class:`MessageReply`]]
            The list of messages to reply to.
        masquerade: Optional[:class:`Masquerade`]
            The masquerade for the message, this can overwrite the username and avatar shown
        interactions: Optional[:class:`MessageInteractions`]
            The interactions for the message
        stream_generator: Optional[AsyncGenerator[str, None]]
            The stream generator, if this param isn't None, the message will be considered as streaming message

        Returns
        --------
        :class:`Message`
            The message that was just sent
        """
        # NOTE: the singular ``embed``/``reply`` arguments take precedence and
        # silently replace ``embeds``/``replies`` when both are given.
        if embed:
            embeds = [embed]

        if reply:
            replies = [reply]

        embed_payload = [embed.to_dict() for embed in embeds] if embeds else None
        reply_payload = [reply.to_dict() for reply in replies] if replies else None
        masquerade_payload = masquerade.to_dict() if masquerade else None
        interactions_payload = interactions.to_dict() if interactions else None

        message = await self.state.http.send_message(
            await self._get_channel_id(),
            content,
            embed_payload,
            attachments,
            reply_payload,
            masquerade_payload,
            interactions_payload,
            components,
            stream_generator=stream_generator,
        )
        # Register the message in the state cache and return the model.
        return self.state.add_message(message)

    async def fetch_message(self, message_id: str) -> Message:
        """Fetches a message from the channel

        Parameters
        -----------
        message_id: :class:`str`
            The id of the message you want to fetch

        Returns
        --------
        :class:`Message`
            The message with the matching id
        """
        from .message import Message

        payload = await self.state.http.fetch_message(
            await self._get_channel_id(), message_id
        )
        return Message(payload, self.state)

    async def history(
        self,
        *,
        sort: SortType = SortType.latest,
        limit: int = 100,
        before: Optional[str] = None,
        after: Optional[str] = None,
        nearby: Optional[str] = None,
    ) -> list[Message]:
        """Fetches multiple messages from the channel's history

        Parameters
        -----------
        sort: :class:`SortType`
            The order to sort the messages in
        limit: :class:`int`
            How many messages to fetch
        before: Optional[:class:`str`]
            The id of the message which should come *before* all the messages to be fetched
        after: Optional[:class:`str`]
            The id of the message which should come *after* all the messages to be fetched
        nearby: Optional[:class:`str`]
            The id of the message which should be nearby all the messages to be fetched

        Returns
        --------
        list[:class:`Message`]
            The messages found in order of the sort parameter
        """
        from .message import Message

        payloads = await self.state.http.fetch_messages(
            await self._get_channel_id(),
            sort=sort,
            limit=limit,
            before=before,
            after=after,
            nearby=nearby,
        )
        return [Message(payload, self.state) for payload in payloads]

    async def search(
        self,
        query: str,
        *,
        sort: SortType = SortType.latest,
        limit: int = 100,
        before: Optional[str] = None,
        after: Optional[str] = None,
    ) -> list[Message]:
        """searches the channel for a query

        Parameters
        -----------
        query: :class:`str`
            The query to search for in the channel
        sort: :class:`SortType`
            The order to sort the messages in
        limit: :class:`int`
            How many messages to fetch
        before: Optional[:class:`str`]
            The id of the message which should come *before* all the messages to be fetched
        after: Optional[:class:`str`]
            The id of the message which should come *after* all the messages to be fetched

        Returns
        --------
        list[:class:`Message`]
            The messages found in order of the sort parameter
        """
        from .message import Message

        payloads = await self.state.http.search_messages(
            await self._get_channel_id(),
            query,
            sort=sort,
            limit=limit,
            before=before,
            after=after,
        )
        return [Message(payload, self.state) for payload in payloads]

    async def delete_messages(self, messages: list[Message]) -> None:
        """Bulk deletes messages from the channel

        .. note:: The messages must have been sent in the last 7 days.

        Parameters
        -----------
        messages: list[:class:`Message`]
            The messages for deletion, this can be up to 100 messages
        """
        await self.state.http.delete_messages(
            await self._get_channel_id(), [message.id for message in messages]
        )
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Coroutine, Optional, Union
from .types.message import Component
from .asset import Asset, PartialAsset
from .channel import DMChannel, GroupDMChannel, TextChannel, SavedMessageChannel
from .embed import Embed, SendableEmbed, to_embed
from .utils import Ulid, parse_timestamp
if TYPE_CHECKING:
from .server import Server
from .state import State
from .types import Embed as EmbedPayload
from .types import Interactions as InteractionsPayload
from .types import Masquerade as MasqueradePayload
from .types import Message as MessagePayload
from .types import MessageReplyPayload, SystemMessageContent
from .user import User
from .member import Member
__all__ = ("Message", "MessageReply", "Masquerade", "MessageInteractions")
class Message(Ulid):
    """Represents a message

    Attributes
    -----------
    id: :class:`str`
        The id of the message
    content: :class:`str`
        The content of the message, this will not include system message's content
    components:
        The components of the message
    attachments: list[:class:`Asset`]
        The attachments of the message
    embeds: list[Union[:class:`WebsiteEmbed`, :class:`ImageEmbed`, :class:`TextEmbed`, :class:`NoneEmbed`]]
        The embeds of the message
    channel: :class:`Messageable`
        The channel the message was sent in
    author: Union[:class:`Member`, :class:`User`]
        The author of the message, will be :class:`User` in DMs
    edited_at: Optional[:class:`datetime.datetime`]
        The time at which the message was edited, will be None if the message has not been edited
    raw_mentions: list[:class:`str`]
        A list of ids of the mentions in this message
    mentions: list[Union[:class:`Member`, :class:`User`]]
        The users or members that were mentioned in the message
    replies: list[:class:`Message`]
        The messages this message has replied to, this may not contain all the messages if they are outside the cache
    reply_ids: list[:class:`str`]
        The ids of the messages this message has replied to
    reactions: dict[str, list[:class:`User`]]
        The reactions on the message
    interactions: Optional[:class:`MessageInteractions`]
        The interactions on the message, if any
    """

    # NOTE: system_content, server_id and raw_mentions were assigned in
    # __init__ but missing from __slots__; added for consistency.
    __slots__ = (
        "state",
        "id",
        "content",
        "system_content",
        "components",
        "attachments",
        "embeds",
        "channel",
        "server_id",
        "author",
        "edited_at",
        "raw_mentions",
        "mentions",
        "replies",
        "reply_ids",
        "reactions",
        "interactions",
    )

    def __init__(self, data: MessagePayload, state: State):
        self.state: State = state
        self.id: str = data["_id"]
        self.content: str = data.get("content", "")
        # System messages (member joined, renamed, ...) carry their payload here.
        self.system_content: SystemMessageContent | None = data.get("system")
        self.attachments: list[Asset] = [
            Asset(attachment, state) for attachment in data.get("attachments", [])
        ]
        self.embeds: list[Embed] = [
            to_embed(embed, state) for embed in data.get("embeds", [])
        ]
        self.components: list[Component] = data.get("components", [])

        channel = state.get_channel(data["channel"])
        assert isinstance(
            channel, (TextChannel, GroupDMChannel, DMChannel, SavedMessageChannel)
        )
        self.channel: TextChannel | GroupDMChannel | DMChannel | SavedMessageChannel = (
            channel
        )
        self.server_id: str | None = self.channel.server_id

        self.raw_mentions: list[str] = data.get("mentions", [])
        self.mentions: list[Member | User] = []

        # System messages store the acting user's id inside the system payload.
        if self.system_content:
            author_id: str = self.system_content.get("id", data["author"])
        else:
            author_id = data["author"]

        if self.server_id:
            author = state.get_member(self.server_id, author_id)

            for mention in self.raw_mentions:
                try:
                    self.mentions.append(self.server.get_member(mention))
                except LookupError:
                    # Mentioned member is not cached; skip it.
                    pass
        else:
            author = state.get_user(author_id)

            for mention in self.raw_mentions:
                try:
                    self.mentions.append(state.get_user(mention))
                except LookupError:
                    pass

        self.author: Member | User = author

        if masquerade := data.get("masquerade"):
            if name := masquerade.get("name"):
                self.author.masquerade_name = name

            if avatar := masquerade.get("avatar"):
                self.author.masquerade_avatar = PartialAsset(avatar, state)

        # BUGFIX: always initialise edited_at; previously it was only assigned
        # when the payload contained an "edited" key, so accessing it on an
        # unedited message raised AttributeError.
        edited = data.get("edited")
        self.edited_at: Optional[datetime.datetime] = (
            parse_timestamp(edited) if edited else None
        )

        self.replies: list[Message] = []
        self.reply_ids: list[str] = []

        for reply in data.get("replies", []):
            try:
                self.replies.append(state.get_message(reply))
            except LookupError:
                # Replied-to message is not cached; it is still recorded in reply_ids.
                pass

            self.reply_ids.append(reply)

        self.reactions: dict[str, list[User]] = {}
        for emoji, users in data.get("reactions", {}).items():
            self.reactions[emoji] = [state.get_user(user_id) for user_id in users]

        self.interactions: MessageInteractions | None
        if interactions := data.get("interactions"):
            self.interactions = MessageInteractions(
                reactions=interactions.get("reactions"),
                restrict_reactions=interactions.get("restrict_reactions", False),
            )
        else:
            self.interactions = None

    def _update(
        self,
        *,
        content: Optional[str] = None,
        embeds: Optional[list[EmbedPayload]] = None,
        edited: Optional[Union[str, int]] = None,
        components: Optional[list[Component]] = None,
    ):
        # Applies a partial message-update payload from the gateway to the cache.
        if content is not None:
            self.content = content
        if embeds is not None:
            self.embeds = [to_embed(embed, self.state) for embed in embeds]
        if edited is not None:
            self.edited_at = parse_timestamp(edited)
        if components:
            self.components = components

    async def edit(
        self,
        *,
        content: Optional[str] = None,
        embeds: Optional[list[SendableEmbed]] = None,
        components: Optional[list[Component]] = None,
    ) -> None:
        """Edits the message. The bot can only edit its own message

        Parameters
        -----------
        content: :class:`str`
            The new content of the message
        embeds: list[:class:`SendableEmbed`]
            The new embeds of the message
        components: list[:class:`Component`]
            The new components of the message
        """
        new_embeds = [embed.to_dict() for embed in embeds] if embeds else None
        await self.state.http.edit_message(
            self.channel.id, self.id, content, new_embeds, components
        )

    async def delete(self) -> None:
        """Deletes the message. The bot can only delete its own messages and messages it has permission to delete"""
        await self.state.http.delete_message(self.channel.id, self.id)

    def reply(
        self, *args: Any, mention: bool = False, **kwargs: Any
    ) -> Coroutine[Any, Any, Message]:
        """Replies to this message, equivalent to:

        .. code-block:: python

            await channel.send(..., replies=[MessageReply(message, mention)])
        """
        return self.channel.send(*args, **kwargs, replies=[MessageReply(self, mention)])

    async def add_reaction(self, emoji: str) -> None:
        """Adds a reaction to the message

        Parameters
        -----------
        emoji: :class:`str`
            The emoji to add as a reaction
        """
        await self.state.http.add_reaction(self.channel.id, self.id, emoji)

    async def remove_reaction(
        self, emoji: str, user: Optional[User] = None, remove_all: bool = False
    ) -> None:
        """Removes a reaction from the message, this can remove either a specific users, the current users reaction or all of a specific emoji

        Parameters
        -----------
        emoji: :class:`str`
            The emoji to remove
        user: Optional[:class:`User`]
            The user to use for removing a reaction from
        remove_all: bool
            Whether or not to remove all reactions for that specific emoji
        """
        await self.state.http.remove_reaction(
            self.channel.id, self.id, emoji, user.id if user else None, remove_all
        )

    async def remove_all_reactions(self) -> None:
        """Removes all reactions from the message"""
        await self.state.http.remove_all_reactions(self.channel.id, self.id)

    @property
    def server(self) -> Server:
        """:class:`Server` The server this message was sent in

        Raises
        -------
        :class:`LookupError`
            Raises if the message was not sent in a server
        """
        return self.channel.server
class MessageReply:
    """Represents a reply to a message.

    Parameters
    -----------
    message: :class:`Message`
        The message being replied to.
    mention: :class:`bool`
        Whether the reply should mention the author of the message. Defaults to ``False``.
    """

    __slots__ = ("message", "mention")

    def __init__(self, message: Message, mention: bool = False):
        self.message: Message = message
        self.mention: bool = mention

    def to_dict(self) -> MessageReplyPayload:
        # The API only needs the id of the replied-to message plus the mention flag.
        payload: MessageReplyPayload = {"id": self.message.id, "mention": self.mention}
        return payload
class Masquerade:
    """Represents a message's masquerade.

    Parameters
    -----------
    name: Optional[:class:`str`]
        The name to display for the message
    avatar: Optional[:class:`str`]
        The avatar's url to display for the message
    colour: Optional[:class:`str`]
        The colour of the name, similar to role colours
    """

    __slots__ = ("name", "avatar", "colour")

    def __init__(
        self,
        name: Optional[str] = None,
        avatar: Optional[str] = None,
        colour: Optional[str] = None,
    ):
        self.name: str | None = name
        self.avatar: str | None = avatar
        self.colour: str | None = colour

    def to_dict(self) -> MasqueradePayload:
        # Only truthy fields are sent; slot order preserves the key order
        # expected by the API payload (name, avatar, colour).
        payload: MasqueradePayload = {}
        for field in self.__slots__:
            if value := getattr(self, field):
                payload[field] = value
        return payload
class MessageInteractions:
    """Represents a message's interactions, this is for allowing preset reactions and restricting adding reactions to only those.

    Parameters
    -----------
    reactions: Optional[list[:class:`str`]]
        The preset reactions on the message
    restrict_reactions: bool
        Whether or not users can only react with the preset reactions
    """

    __slots__ = ("reactions", "restrict_reactions")

    def __init__(
        self, *, reactions: Optional[list[str]] = None, restrict_reactions: bool = False
    ):
        self.reactions: list[str] | None = reactions
        self.restrict_reactions: bool = restrict_reactions

    def to_dict(self) -> InteractionsPayload:
        # Falsy fields are omitted so an empty interactions object serialises to {}.
        payload: InteractionsPayload = {}
        if self.reactions:
            payload["reactions"] = self.reactions
        if self.restrict_reactions:
            payload["restrict_reactions"] = self.restrict_reactions
        return payload
from __future__ import annotations
from typing import TYPE_CHECKING
from .asset import Asset
from .utils import Ulid
if TYPE_CHECKING:
from .state import State
from .channel import Channel
from .server import Server
from .types import Invite as InvitePayload
from .user import User
__all__ = ("Invite",)
class Invite(Ulid):
    """Represents a server invite.

    Attributes
    -----------
    code: :class:`str`
        The code for the invite
    id: :class:`str`
        Alias for :attr:`code`
    server: :class:`Server`
        The server this invite is for
    channel: :class:`Channel`
        The channel this invite is for
    user_name: :class:`str`
        The name of the user who made the invite
    user: Optional[:class:`User`]
        The user who made the invite, this is only set if this was fetched via :meth:`Server.fetch_invites`
    user_avatar: Optional[:class:`Asset`]
        The invite creator's avatar, if any
    member_count: :class:`int`
        The member count of the server this invite is for
    """

    __slots__ = ("state", "code", "id", "server", "channel", "user_name", "user_avatar", "user", "member_count")

    def __init__(self, data: InvitePayload, code: str, state: State):
        self.state: State = state
        self.code: str = code
        self.id: str = code
        self.server: Server = state.get_server(data["server_id"])
        self.channel: Channel = self.server.get_channel(data["channel_id"])
        self.user_name: str = data["user_name"]
        self.user: User | None = None

        self.user_avatar: Asset | None
        if avatar := data.get("user_avatar"):
            self.user_avatar = Asset(avatar, state)
        else:
            self.user_avatar = None

        self.member_count: int = data["member_count"]

    @staticmethod
    def _from_partial(code: str, server: str, creator: str, channel: str, state: State) -> Invite:
        # Builds an Invite from the partial data carried by gateway events,
        # bypassing __init__ because no full payload is available.
        invite = Invite.__new__(Invite)

        invite.state = state
        invite.code = code
        # BUGFIX: the `id` alias for `code` was never set on partial invites,
        # so accessing `invite.id` (or Ulid.created_at) raised AttributeError.
        invite.id = code
        invite.server = state.get_server(server)
        invite.channel = state.get_channel(channel)
        invite.user = state.get_user(creator)
        invite.user_name = invite.user.name
        invite.user_avatar = invite.user.avatar
        invite.member_count = len(invite.server.members)

        return invite

    async def delete(self) -> None:
        """Deletes the invite"""
        await self.state.http.delete_invite(self.code)
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Union
from .asset import Asset
from .enums import ChannelType
from .messageable import Messageable
from .permissions import Permissions, PermissionsOverwrite
from .utils import Missing, Ulid
from abc import abstractmethod
if TYPE_CHECKING:
from .message import Message
from .role import Role
from .server import Server
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import File as FilePayload
from .types import GroupDMChannel as GroupDMChannelPayload
from .types import Overwrite as OverwritePayload
from .types import SavedMessages as SavedMessagesPayload
from .types import ServerChannel as ServerChannelPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = (
"DMChannel",
"GroupDMChannel",
"SavedMessageChannel",
"TextChannel",
"VoiceChannel",
"Channel",
"ServerChannel",
)
class EditableChannel:
    """Mixin providing the shared ``edit`` method for channels that can be edited."""

    __slots__ = ()

    # Provided by the concrete channel classes this mixin is combined with.
    state: State
    id: str

    async def edit(self, **kwargs: Any) -> None:
        """Edits the channel

        Passing ``None`` to the parameters that accept it will remove them.

        Parameters
        -----------
        name: str
            The new name for the channel
        description: Optional[str]
            The new description for the channel
        owner: User
            The new owner for the group dm channel
        icon: Optional[File]
            The new icon for the channel
        nsfw: bool
            Sets whether the channel is nsfw or not
        """
        remove: list[str] = []

        # BUGFIX: these were previously an if/elif chain, so passing both
        # icon=None and description=None only removed the icon. They are
        # independent removals. `is None` (not `== None`) distinguishes an
        # explicit None from the Missing sentinel.
        if kwargs.get("icon", Missing) is None:
            remove.append("Icon")
        if kwargs.get("description", Missing) is None:
            remove.append("Description")

        if icon := kwargs.get("icon"):
            # Upload the raw file first; the API expects an attachment id.
            asset = await self.state.http.upload_file(icon, "icons")
            kwargs["icon"] = asset["id"]

        if owner := kwargs.get("owner"):
            kwargs["owner"] = owner.id

        await self.state.http.edit_channel(self.id, remove, kwargs)
class Channel(Ulid):
    """Base class for all channels

    Attributes
    -----------
    id: :class:`str`
        The id of the channel
    channel_type: ChannelType
        The type of the channel
    server_id: Optional[:class:`str`]
        The server id of the channel, if any
    """

    __slots__ = ("state", "id", "channel_type", "server_id")

    def __init__(self, data: ChannelPayload, state: State):
        self.state: State = state
        self.id: str = data["_id"]
        self.channel_type: ChannelType = ChannelType(data["channel_type"])
        # Subclasses that belong to a server overwrite this in their __init__.
        self.server_id: Optional[str] = None

    async def _get_channel_id(self) -> str:
        # Messageable resolves its destination through this hook.
        return self.id

    def _update(self, **_: Any) -> None:
        # Base channels carry no mutable state; subclasses override as needed.
        pass

    async def delete(self) -> None:
        """Deletes or closes the channel"""
        await self.state.http.close_channel(self.id)

    @abstractmethod
    def get_users_in_channel(self) -> list[str]:
        return []

    @property
    def server(self) -> Server:
        """:class:`Server` The server this channel belongs to

        Raises
        -------
        :class:`LookupError`
            Raises if the channel is not part of a server
        """
        server_id = self.server_id
        if not server_id:
            raise LookupError
        return self.state.get_server(server_id)

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given channel."""
        return "<#{}>".format(self.id)
class SavedMessageChannel(Channel, Messageable):
    """The Saved Message Channel"""

    def __init__(self, data: SavedMessagesPayload, state: State):
        super().__init__(data, state)
        # The only participant is the owner of the saved-messages channel.
        self.users: list[str] = [data["user"]]

    def get_users_in_channel(self) -> list[str]:
        return self.users
class DMChannel(Channel, Messageable):
    """A DM channel

    Attributes
    -----------
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    """

    __slots__ = ("last_message_id", "recipient_ids")

    def __init__(self, data: DMChannelPayload, state: State):
        super().__init__(data, state)
        self.recipient_ids: tuple[str, str] = tuple(data["recipients"])
        self.last_message_id: str | None = data.get("last_message_id")
        self.users: list[str] = data.get("recipients", [])

    def get_users_in_channel(self) -> list[str]:
        return self.users

    @property
    def recipients(self) -> tuple[User, User]:
        """tuple[:class:`User`, :class:`User`]: Both participants of the DM, resolved from the cache."""
        first_id, second_id = self.recipient_ids
        return (self.state.get_user(first_id), self.state.get_user(second_id))

    @property
    def recipient(self) -> User:
        """:class:`User`: The participant that is not the current user."""
        first_id, second_id = self.recipient_ids
        other_id = second_id if first_id == self.state.user_id else first_id
        return self.state.get_user(other_id)

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel
        """
        last_id = self.last_message_id
        if not last_id:
            raise LookupError
        return self.state.get_message(last_id)
class GroupDMChannel(Channel, Messageable, EditableChannel):
    """A group DM channel

    Attributes
    -----------
    recipients: list[:class:`User`]
        The recipients of the group dm channel
    name: :class:`str`
        The name of the group dm channel
    owner: :class:`User`
        The user who created the group dm channel
    icon: Optional[:class:`Asset`]
        The icon of the group dm channel
    permissions: :class:`Permissions`
        The permissions of the users inside the group dm channel
    description: Optional[:class:`str`]
        The description of the channel, if any
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    """

    __slots__ = (
        "recipient_ids",
        "name",
        "owner_id",
        "permissions",
        "icon",
        "description",
        "last_message_id",
    )

    def __init__(self, data: GroupDMChannelPayload, state: State):
        super().__init__(data, state)
        self.recipient_ids: list[str] = data["recipients"]
        self.name: str = data["name"]
        self.owner_id: str = data["owner"]
        self.description: str | None = data.get("description")
        self.last_message_id: str | None = data.get("last_message_id")

        self.users: list[str] = data.get("recipients", [])
        if data["owner"]:
            self.users.append(data["owner"])

        icon_data = data.get("icon")
        self.icon: Asset | None = Asset(icon_data, state) if icon_data else None

        self.permissions: Permissions = Permissions(data.get("permissions", 0))

    def _update(
        self,
        *,
        name: Optional[str] = None,
        recipients: Optional[list[str]] = None,
        description: Optional[str] = None,
    ) -> None:
        # Applies a partial channel-update payload from the gateway to the cache.
        if name is not None:
            self.name = name
        if recipients is not None:
            self.recipient_ids = recipients
        if description is not None:
            self.description = description

    def get_users_in_channel(self) -> list[str]:
        return self.users

    @property
    def recipients(self) -> list[User]:
        """list[:class:`User`]: The members of the group, resolved from the cache."""
        return [self.state.get_user(recipient_id) for recipient_id in self.recipient_ids]

    @property
    def owner(self) -> User:
        """:class:`User`: The creator of the group, resolved from the cache."""
        return self.state.get_user(self.owner_id)

    async def set_default_permissions(self, permissions: Permissions) -> None:
        """Sets the default permissions for a group.

        Parameters
        -----------
        permissions: :class:`Permissions`
            The new default group permissions
        """
        await self.state.http.set_group_channel_default_permissions(
            self.id, permissions.value
        )

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel
        """
        last_id = self.last_message_id
        if not last_id:
            raise LookupError
        return self.state.get_message(last_id)
class ServerChannel(Channel):
    """Base class for all guild channels

    Attributes
    -----------
    server_id: :class:`str`
        The id of the server this channel belongs to
    name: :class:`str`
        The name of the channel
    description: Optional[:class:`str`]
        The description of the channel, if any
    nsfw: bool
        Whether the channel is marked nsfw or not
    default_permissions: :class:`PermissionsOverwrite`
        The default permission overwrite for all users in the channel
    """

    def __init__(self, data: ServerChannelPayload, state: State):
        super().__init__(data, state)
        self.server_id: str = data["server"]
        self.name: str = data["name"]
        self.description: Optional[str] = data.get("description")
        self.nsfw: bool = data.get("nsfw", False)
        self.active: bool = False

        # {"a": 0, "d": 0} is an empty allow/deny pair when none is provided.
        self.default_permissions: PermissionsOverwrite = (
            PermissionsOverwrite._from_overwrite(
                data.get("default_permissions", {"a": 0, "d": 0})
            )
        )

        self.permissions: dict[str, PermissionsOverwrite] = {
            role_id: PermissionsOverwrite._from_overwrite(raw_overwrite)
            for role_id, raw_overwrite in data.get("role_permissions", {}).items()
        }

        icon_data = data.get("icon")
        self.icon: Asset | None = Asset(icon_data, state) if icon_data else None

    async def set_default_permissions(self, permissions: PermissionsOverwrite) -> None:
        """Sets the default permissions for the channel.

        Parameters
        -----------
        permissions: :class:`PermissionsOverwrite`
            The new default channel permissions
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_guild_channel_default_permissions(
            self.id, allow.value, deny.value
        )

    async def set_role_permissions(
        self, role: Role, permissions: PermissionsOverwrite
    ) -> None:
        """Sets the permissions for a role in the channel.

        Parameters
        -----------
        permissions: :class:`PermissionsOverwrite`
            The new channel permissions for the role
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_guild_channel_role_permissions(
            self.id, role.id, allow.value, deny.value
        )

    def _update(
        self,
        *,
        name: Optional[str] = None,
        description: Optional[str] = None,
        icon: Optional[FilePayload] = None,
        nsfw: Optional[bool] = None,
        active: Optional[bool] = None,
        role_permissions: Optional[dict[str, OverwritePayload]] = None,
        default_permissions: Optional[OverwritePayload] = None,
    ):
        # Applies a partial channel-update payload from the gateway to the cache.
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if icon is not None:
            self.icon = Asset(icon, self.state)
        if nsfw is not None:
            self.nsfw = nsfw
        if active is not None:
            self.active = active
        if role_permissions is not None:
            self.permissions = {
                role_id: PermissionsOverwrite._from_overwrite(raw_overwrite)
                for role_id, raw_overwrite in role_permissions.items()
            }
        if default_permissions is not None:
            self.default_permissions = PermissionsOverwrite._from_overwrite(
                default_permissions
            )
class TextChannel(ServerChannel, Messageable, EditableChannel):
    """A text channel

    Subclasses :class:`ServerChannel` and :class:`Messageable`

    Attributes
    -----------
    name: :class:`str`
        The name of the text channel
    server_id: :class:`str`
        The id of the server this text channel belongs to
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    default_permissions: :class:`PermissionsOverwrite`
        The default permissions for all users in the text channel
    icon: Optional[:class:`Asset`]
        The icon of the text channel, if any
    description: Optional[:class:`str`]
        The description of the channel, if any
    """

    __slots__ = (
        "name",
        "description",
        "last_message_id",
        "default_permissions",
        "icon",
        "overwrites",
    )

    def __init__(self, data: TextChannelPayload, state: State):
        super().__init__(data, state)
        self.last_message_id: str | None = data.get("last_message_id")

    async def _get_channel_id(self) -> str:
        # Messageable resolves its destination through this hook.
        return self.id

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel
        """
        last_id = self.last_message_id
        if not last_id:
            raise LookupError
        return self.state.get_message(last_id)
class VoiceChannel(ServerChannel, EditableChannel):
    """A voice channel

    Subclasses :class:`ServerChannel`

    Attributes
    -----------
    name: :class:`str`
        The name of the voice channel
    server_id: :class:`str`
        The id of the server this voice channel belongs to
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    default_permissions: :class:`ChannelPermissions`
        The default permissions for all users in the voice channel
    role_permissions: dict[:class:`str`, :class:`ChannelPermissions`]
        A dictionary of role id's to the permissions of that role in the voice channel
    icon: Optional[:class:`Asset`]
        The icon of the voice channel, if any
    description: Optional[:class:`str`]
        The description of the channel, if any
    """
    # No voice-specific state: all behaviour comes from ServerChannel
    # (metadata and permissions) and EditableChannel (edit()).
def channel_factory(
    data: ChannelPayload, state: State
) -> Union[DMChannel, GroupDMChannel, SavedMessageChannel, TextChannel, VoiceChannel]:
    """Constructs the concrete channel subclass for a raw channel payload.

    Parameters
    -----------
    data: :class:`ChannelPayload`
        The raw channel payload; its ``channel_type`` selects the subclass
    state: :class:`State`
        The client state handed to the constructed channel
    """
    channel_type = data["channel_type"]
    if channel_type == "SavedMessages":
        return SavedMessageChannel(data, state)
    elif channel_type == "DirectMessage":
        return DMChannel(data, state)
    elif channel_type == "Group":
        return GroupDMChannel(data, state)
    elif channel_type == "TextChannel":
        return TextChannel(data, state)
    elif channel_type == "VoiceChannel":
        return VoiceChannel(data, state)
    else:
        # BUGFIX: previously raised a bare `Exception` with no message; keep
        # the broad type for backward compatibility but report the value.
        raise Exception(f"Unknown channel type: {channel_type!r}")
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from .permissions import Overwrite, PermissionsOverwrite
from .utils import Missing, Ulid
if TYPE_CHECKING:
from .server import Server
from .state import State
from .types import Role as RolePayload
__all__ = ("Role",)
class Role(Ulid):
    """Represents a role

    Attributes
    -----------
    id: :class:`str`
        The id of the role
    name: :class:`str`
        The name of the role
    colour: Optional[:class:`str`]
        The colour of the role
    hoist: :class:`bool`
        Whether members with the role will display separate from everyone else
    rank: :class:`int`
        The position of the role in the role hierarchy
    server: :class:`Server`
        The server the role belongs to
    permissions: :class:`PermissionsOverwrite`
        The permission overwrite for the role
    """

    __slots__: tuple[str, ...] = ("id", "name", "colour", "hoist", "rank", "state", "server", "permissions")

    def __init__(self, data: RolePayload, role_id: str, server: Server, state: State):
        self.state: State = state
        self.id: str = role_id
        self.name: str = data["name"]
        self.colour: str | None = data.get("colour", None)
        self.hoist: bool = data.get("hoist", False)
        self.rank: int = data["rank"]
        self.server: Server = server
        # {"a": 0, "d": 0} is an empty allow/deny pair when none is provided.
        self.permissions: PermissionsOverwrite = PermissionsOverwrite._from_overwrite(data.get("permissions", {"a": 0, "d": 0}))

    @property
    def color(self) -> str | None:
        """Optional[:class:`str`]: Alias for :attr:`colour`."""
        return self.colour

    async def set_permissions_overwrite(self, *, permissions: PermissionsOverwrite) -> None:
        """Sets the permission overwrite for the role in the server.

        Parameters
        -----------
        permissions: :class:`PermissionsOverwrite`
            The new permission overwrite for the role
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_server_role_permissions(self.server.id, self.id, allow.value, deny.value)

    def _update(self, *, name: Optional[str] = None, colour: Optional[str] = None, hoist: Optional[bool] = None, rank: Optional[int] = None, permissions: Optional[Overwrite] = None) -> None:
        # Applies a partial role-update payload from the gateway to the cache.
        if name is not None:
            self.name = name
        if colour is not None:
            self.colour = colour
        if hoist is not None:
            self.hoist = hoist
        if rank is not None:
            self.rank = rank
        if permissions is not None:
            self.permissions = PermissionsOverwrite._from_overwrite(permissions)

    async def delete(self) -> None:
        """Deletes the role"""
        await self.state.http.delete_role(self.server.id, self.id)

    async def edit(self, **kwargs: Any) -> None:
        """Edits the role

        Parameters
        -----------
        name: str
            The name of the role
        colour: Optional[str]
            The colour of the role; passing ``None`` removes the colour
        hoist: bool
            Whether the role should make the member display separately in the member list
        rank: int
            The position of the role
        """
        # An explicit colour=None means "remove the colour", not "leave unchanged".
        remove: list[str] | None
        if kwargs.get("colour", Missing) is None:
            remove = ["Colour"]
        else:
            remove = None

        await self.state.http.edit_role(self.server.id, self.id, remove, kwargs)
from __future__ import annotations
import datetime
import inspect
from contextlib import asynccontextmanager
from operator import attrgetter
from typing import Any, Callable, Coroutine, Iterable, Literal, TypeVar, Union
import ulid
from aiohttp import ClientSession
from typing_extensions import ParamSpec
__all__ = ("_Missing", "Missing", "copy_doc", "maybe_coroutine", "get", "client_session", "parse_timestamp")
class _Missing:
def __repr__(self) -> str:
return "<Missing>"
def __bool__(self) -> Literal[False]:
return False
Missing: _Missing = _Missing()
T = TypeVar("T")  # generic type variable shared by copy_doc and get
def copy_doc(from_t: T) -> Callable[[T], T]:
    """Decorator factory that copies ``from_t``'s docstring onto the decorated object."""

    def decorator(to_t: T) -> T:
        to_t.__doc__ = from_t.__doc__
        return to_t

    return decorator
R_T = TypeVar("R_T")  # return type produced by the wrapped callable
P = ParamSpec("P")  # parameter specification of the wrapped callable

# it is impossible to type this function correctly as typeguard does not narrow for the negative case,
# so `value` would stay being a union even after the if statement (PEP 647 - "The type is not narrowed in the negative case")
# see typing#926, typing#930, typing#996
async def maybe_coroutine(func: Callable[P, Union[R_T, Coroutine[Any, Any, R_T]]], *args: P.args, **kwargs: P.kwargs) -> R_T:
    """Calls ``func`` and awaits the result if it turned out to be awaitable."""
    result = func(*args, **kwargs)
    if inspect.isawaitable(result):
        return await result  # type: ignore
    return result  # type: ignore
class Ulid:
    # Mixin for objects identified by a ULID string; concrete classes set `id`.
    id: str

    @property
    def created_at(self) -> datetime.datetime:
        """:class:`datetime.datetime`: When this object was created, decoded from the timestamp embedded in its ULID."""
        return ulid.from_str(self.id).timestamp().datetime
def get(iterable: Iterable[T], **attrs: Any) -> T:
    """A convenience function to help get a value from an iterable with a specific attribute

    Examples
    ---------
    .. code-block:: python
        :emphasize-lines: 3

        from revolt import utils

        channel = utils.get(server.channels, name="General")
        await channel.send("Hello general chat.")

    Parameters
    -----------
    iterable: Iterable
        The values to search through
    **attrs: Any
        The attributes to check

    Returns
    --------
    Any
        The value from the iterable with the met attributes

    Raises
    -------
    LookupError
        Raises when none of the values in the iterable matches the attributes
    """
    # `a__b=value` means "match element.a.b == value": dunders map to dotted paths.
    predicates = [
        (attrgetter(name.replace("__", ".")), expected)
        for name, expected in attrs.items()
    ]

    for element in iterable:
        if all(getter(element) == expected for getter, expected in predicates):
            return element

    raise LookupError
@asynccontextmanager
async def client_session():
    """A context manager that creates a new aiohttp.ClientSession() and closes it when exiting the context.

    Examples
    ---------
    .. code-block:: python
        :emphasize-lines: 3

        async def main():
            async with client_session() as session:
                client = revolt.Client(session, "TOKEN")
                await client.start()

        asyncio.run(main())
    """
    http_session = ClientSession()
    try:
        yield http_session
    finally:
        # Always release the session's connections, even if the body raised.
        await http_session.close()
def parse_timestamp(timestamp: int | str) -> datetime.datetime:
    """Parses an API timestamp into an aware :class:`datetime.datetime`.

    Integers are unix timestamps in milliseconds; strings are ISO-8601 with
    fractional seconds and a UTC offset (e.g. ``2021-01-01T00:00:00.000+00:00``).
    """
    if isinstance(timestamp, int):
        seconds = timestamp / 1000
        return datetime.datetime.fromtimestamp(seconds, tz=datetime.timezone.utc)
    return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z")
from __future__ import annotations
from typing import TYPE_CHECKING, NamedTuple, Optional, Union
from weakref import WeakValueDictionary
from .asset import Asset, PartialAsset
from .channel import DMChannel, GroupDMChannel, SavedMessageChannel
from .enums import PresenceType, RelationshipType
from .flags import UserBadges
from .messageable import Messageable
from .permissions import UserPermissions
from .utils import Ulid
if TYPE_CHECKING:
from .member import Member
from .state import State
from .types import File, UserBot, UserRelation
from .types import Status as StatusPayload
from .types import User as UserPayload
from .types import UserProfile as UserProfileData
from .server import Server
__all__ = ("User", "Status", "Relation", "UserProfile")
class Relation(NamedTuple):
    """A namedtuple representing a relation between the bot and a user"""
    type: RelationshipType  # how the user relates to the bot (friend, blocked, ...)
    user: User  # the user on the other end of the relation
class Status(NamedTuple):
    """A namedtuple representing a users status"""
    text: Optional[str]  # custom status text, if any
    presence: Optional[PresenceType]  # online/idle/busy/etc, if known
class UserProfile(NamedTuple):
    """A namedtuple representing a users profile"""
    content: Optional[str]  # the profile's bio text, if set
    background: Optional[Asset]  # the profile's background image, if set
class User(Messageable, Ulid):
    """Represents a user

    Attributes
    -----------
    id: :class:`str`
        The user's id
    discriminator: :class:`str`
        The user's discriminator
    display_name: Optional[:class:`str`]
        The user's display name if they have one
    bot: :class:`bool`
        Whether or not the user is a bot
    owner_id: Optional[:class:`str`]
        The bot's owner id if the user is a bot
    badges: :class:`UserBadges`
        The users badges
    online: :class:`bool`
        Whether or not the user is online
    flags: :class:`int`
        The user flags
    relations: list[:class:`Relation`]
        A list of the users relations
    relationship: Optional[:class:`RelationshipType`]
        The relationship between the user and the bot
    status: Optional[:class:`Status`]
        The users status
    dm_channel: Optional[:class:`DMChannel`]
        The dm channel between the client and the user, this will only be set if the client has dm'ed the user or :meth:`User.open_dm` was run
    privileged: :class:`bool`
        Whether the user is privileged
    """

    # NOTE(review): "flattern" (sic) is read by member.py's flattern_user(),
    # so the misspelling must be kept for compatibility.
    __flattern_attributes__: tuple[str, ...] = (
        "id",
        "discriminator",
        "display_name",
        "bot",
        "owner_id",
        "badges",
        "online",
        "flags",
        "relations",
        "relationship",
        "status",
        "masquerade_avatar",
        "masquerade_name",
        "original_name",
        "original_avatar",
        "profile",
        "dm_channel",
        "privileged",
    )

    __slots__: tuple[str, ...] = (*__flattern_attributes__, "state", "_members")

    def __init__(self, data: UserPayload, state: State):
        self.state = state
        # We store all member versions of this user to avoid having to check
        # every guild when needing to update.
        self._members: WeakValueDictionary[str, Member] = WeakValueDictionary()
        self.id: str = data["_id"]
        self.discriminator: str = data["discriminator"]
        self.display_name: str | None = data.get("display_name")
        self.original_name: str = data["username"]
        self.dm_channel: DMChannel | SavedMessageChannel | None = None
        self.bot: UserBot | None = data.get("bot")
        if self.bot:
            self.owner_id = self.bot["owner"]
        else:
            self.owner_id = None
        self.badges: UserBadges = UserBadges._from_value(data.get("badges", 0))
        self.online: bool = data.get("online", False)
        self.flags: int = data.get("flags", 0)
        self.privileged: bool = data.get("privileged", False)
        avatar = data.get("avatar")
        self.original_avatar: Asset | None = Asset(avatar, state) if avatar else None
        # Only keep relations whose user is already cached; unknown ids are dropped.
        relations: list[Relation] = []
        for relation in data.get("relations", []):
            user = state.get_user(relation["_id"])
            if user:
                relations.append(Relation(RelationshipType(relation["status"]), user))
        self.relations: list[Relation] = relations
        relationship = data.get("relationship")
        self.relationship: RelationshipType | None = (
            RelationshipType(relationship) if relationship else None
        )
        status = data.get("status")
        self.status: Status | None
        if status:
            # Simplified: the original re-tested ``status`` inside this branch,
            # which was always truthy here.
            presence = status.get("presence")
            self.status = Status(status.get("text"), PresenceType(presence) if presence else None)
        else:
            self.status = None
        self.profile: Optional[UserProfile] = None
        self.masquerade_avatar: Optional[PartialAsset] = None
        self.masquerade_name: Optional[str] = None

    def get_permissions(self) -> UserPermissions:
        """Gets the permissions for the user

        Returns
        --------
        :class:`UserPermissions`
            The users permissions
        """
        permissions = UserPermissions()
        if self.relationship in [RelationshipType.friend, RelationshipType.user]:
            return UserPermissions.all()
        elif self.relationship in [
            RelationshipType.blocked,
            RelationshipType.blocked_other,
        ]:
            return UserPermissions(access=True)
        elif self.relationship in [
            RelationshipType.incoming_friend_request,
            RelationshipType.outgoing_friend_request,
        ]:
            permissions.access = True
        # Grant access/messaging when a channel or server is shared with the user.
        for channel in self.state.channels.values():
            if (
                isinstance(channel, (GroupDMChannel, DMChannel))
                and self.id in channel.recipient_ids
            ) or any(
                self.id in (m.id for m in server.members)
                for server in self.state.servers.values()
            ):
                if self.state.me.bot or self.bot:
                    permissions.send_message = True
                permissions.access = True
                permissions.view_profile = True
        return permissions

    def has_permissions(self, **permissions: bool) -> bool:
        """Computes if the user has the specified permissions

        Parameters
        -----------
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the user does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        perms = self.get_permissions()
        return all(
            [getattr(perms, key, False) == value for key, value in permissions.items()]
        )

    async def _get_channel_id(self):
        # Lazily open (and cache) the DM channel for this user.
        if not self.dm_channel:
            payload = await self.state.http.open_dm(self.id)
            if payload["channel_type"] == "SavedMessages":
                self.dm_channel = SavedMessageChannel(payload, self.state)
            else:
                self.dm_channel = DMChannel(payload, self.state)
        return self.dm_channel.id

    @property
    def owner(self) -> User:
        """:class:`User` the owner of the bot account"""
        if not self.bot:
            raise LookupError
        # NOTE(review): state.get_user may return None for an uncached owner —
        # the annotation assumes it is always cached; confirm against State.
        return self.state.get_user(self.bot["owner"])

    @property
    def name(self) -> str:
        """:class:`str` The name the user is displaying, this includes (in order) their masqueraded name, display name and orginal name"""
        return self.display_name or self.masquerade_name or self.original_name

    @property
    def avatar(self) -> Union[Asset, PartialAsset, None]:
        """Optional[:class:`Asset`] The avatar the member is displaying, this includes there orginal avatar and masqueraded avatar"""
        return self.masquerade_avatar or self.original_avatar

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given user."""
        return f"<@{self.id}>"

    def _update(
        self,
        *,
        status: Optional[StatusPayload] = None,
        profile: Optional[UserProfileData] = None,
        avatar: Optional[File] = None,
        online: Optional[bool] = None,
        display_name: Optional[str] = None,
        relations: Optional[list[UserRelation]] = None,
        badges: Optional[int] = None,
        flags: Optional[int] = None,
        discriminator: Optional[str] = None,
        privileged: Optional[bool] = None,
        username: Optional[str] = None,
        bot: Optional[UserBot] = None,
    ) -> None:
        # Apply a partial user-update payload; only fields that were sent are touched.
        if status is not None:
            presence = status.get("presence")
            self.status = Status(
                status.get("text"), PresenceType(presence) if presence else None
            )
        if profile is not None:
            if background_file := profile.get("background"):
                background = Asset(background_file, self.state)
            else:
                background = None
            self.profile = UserProfile(profile.get("content"), background)
        if avatar is not None:
            self.original_avatar = Asset(avatar, self.state)
        if online is not None:
            self.online = online
        if display_name is not None:
            self.display_name = display_name
        if relations is not None:
            new_relations: list[Relation] = []
            for relation in relations:
                user = self.state.get_user(relation["_id"])
                if user:
                    new_relations.append(
                        Relation(RelationshipType(relation["status"]), user)
                    )
            self.relations = new_relations
        if badges is not None:
            self.badges = UserBadges(badges)
        if flags is not None:
            self.flags = flags
        if discriminator is not None:
            self.discriminator = discriminator
        if privileged is not None:
            self.privileged = privileged
        if username is not None:
            self.original_name = username
        if bot:
            self.bot = bot
        # update user infomation for all members
        if self.__class__ is User:
            for member in self._members.values():
                # Call unbound so Member's own _update override is not used here.
                User._update(
                    member,
                    status=status,
                    profile=profile,
                    avatar=avatar,
                    online=online,
                    display_name=display_name,
                    relations=relations,
                    badges=badges,
                    flags=flags,
                    discriminator=discriminator,
                    privileged=privileged,
                    username=username,
                    bot=member.bot,
                )

    async def default_avatar(self) -> bytes:
        """Returns the default avatar for this user

        Returns
        --------
        :class:`bytes`
            The bytes of the image
        """
        return await self.state.http.fetch_default_avatar(self.id)

    async def fetch_profile(self) -> UserProfile:
        """Fetches the user's profile

        Returns
        --------
        :class:`UserProfile`
            The user's profile
        """
        # Served from cache when already fetched.
        if profile := self.profile:
            return profile
        payload = await self.state.http.fetch_profile(self.id)
        if file := payload.get("background"):
            background = Asset(file, self.state)
        else:
            background = None
        self.profile = UserProfile(payload.get("content"), background)
        return self.profile

    def to_member(self, server: Server) -> Member:
        """Gets the member instance for this user for a specific server.

        Roughly equivelent to:

        .. code-block:: python

            member = server.get_member(user.id)

        Parameters
        -----------
        server: :class:`Server`
            The server to get the member for

        Returns
        --------
        :class:`Member`
            The member

        Raises
        -------
        :class:`LookupError`
        """
        try:
            return self._members[server.id]
        # BUG fix: dict lookups raise KeyError, not IndexError — the original
        # handler was dead code (KeyError still *is* a LookupError, but the
        # explicit translation now works as intended, matching Server.get_member).
        except KeyError:
            raise LookupError from None

    async def open_dm(self) -> DMChannel | SavedMessageChannel:
        """Opens a dm with the user, if this user is the current user this will return :class:`SavedMessageChannel`

        .. note:: using this function is discouraged as :meth:`User.send` does this implicitally.

        Returns
        --------
        Union[:class:`DMChannel`, :class:`SavedMessageChannel`]
        """
        await self._get_channel_id()
        assert self.dm_channel
        return self.dm_channel
from __future__ import annotations
from typing import Callable, Iterator, Optional, Union, overload
from typing_extensions import Self
__all__ = ("Flag", "Flags", "UserBadges")
class Flag:
    """Descriptor representing a single bit inside a :class:`Flags` value.

    Used as a decorator: the decorated function returns the flag's bit value,
    and its docstring is carried over as the flag's documentation.
    """
    __slots__ = ("flag", "__doc__")
    def __init__(self, func: Callable[[], int]):
        # Call the function once to capture the bit value; keep the docstring
        # so documentation tools still see it on the class attribute.
        self.flag: int = func()
        self.__doc__: str | None = func.__doc__
    @overload
    def __get__(self: Self, instance: None, owner: type[Flags]) -> Self:
        ...
    @overload
    def __get__(self, instance: Flags, owner: type[Flags]) -> bool:
        ...
    def __get__(self: Self, instance: Optional[Flags], owner: type[Flags]) -> Union[Self, bool]:
        # Class access returns the descriptor itself; instance access reports
        # whether the bit is set in the instance's value.
        if instance is None:
            return self
        return instance._check_flag(self.flag)
    def __set__(self, instance: Flags, value: bool) -> None:
        # Assigning True/False sets or clears the bit on the instance.
        instance._set_flag(self.flag, value)
class Flags:
    """Base class for bitfield wrappers; individual bits are :class:`Flag` descriptors."""

    FLAG_NAMES: list[str]

    def __init_subclass__(cls) -> None:
        # Collect the names of every Flag descriptor (including inherited ones).
        cls.FLAG_NAMES = [attr for attr in dir(cls) if isinstance(getattr(cls, attr), Flag)]

    def __init__(self, value: int = 0, **flags: bool):
        self.value = value
        for flag_name, enabled in flags.items():
            setattr(self, flag_name, enabled)

    @classmethod
    def _from_value(cls, value: int) -> Self:
        # Bypass __init__ and set the raw bitfield directly.
        instance = cls.__new__(cls)
        instance.value = value
        return instance

    def _check_flag(self, flag: int) -> bool:
        return self.value & flag == flag

    def _set_flag(self, flag: int, enabled: bool) -> None:
        if enabled:
            self.value |= flag
        else:
            self.value &= ~flag

    def __eq__(self, other: Self) -> bool:
        return self.value == other.value

    def __ne__(self, other: Self) -> bool:
        return not self.__eq__(other)

    def __or__(self, other: Self) -> Self:
        return type(self)._from_value(self.value | other.value)

    def __and__(self, other: Self) -> Self:
        return type(self)._from_value(self.value & other.value)

    def __invert__(self) -> Self:
        return type(self)._from_value(~self.value)

    def __add__(self, other: Self) -> Self:
        return self | other

    def __sub__(self, other: Self) -> Self:
        return self & ~other

    def __lt__(self, other: Self) -> bool:
        return self.value < other.value

    def __gt__(self, other: Self) -> bool:
        return self.value > other.value

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} value={self.value}>"

    def __iter__(self) -> Iterator[tuple[str, bool]]:
        # Deliberately walks only this class's own __dict__ (not inherited flags),
        # matching the original behaviour.
        for attr, member in self.__class__.__dict__.items():
            if isinstance(member, Flag):
                yield attr, self._check_flag(member.flag)

    def __hash__(self) -> int:
        return hash(self.value)
class UserBadges(Flags):
    """Contains all user badges"""
    # Each badge is a single bit in the API's ``badges`` integer, assigned in
    # declaration order starting at 1 << 0.
    @Flag
    def developer():
        """:class:`bool` The developer badge."""
        return 1 << 0
    @Flag
    def translator():
        """:class:`bool` The translator badge."""
        return 1 << 1
    @Flag
    def supporter():
        """:class:`bool` The supporter badge."""
        return 1 << 2
    @Flag
    def responsible_disclosure():
        """:class:`bool` The responsible disclosure badge."""
        return 1 << 3
    @Flag
    def founder():
        """:class:`bool` The founder badge."""
        return 1 << 4
    @Flag
    def platform_moderation():
        """:class:`bool` The platform moderation badge."""
        return 1 << 5
    @Flag
    def active_supporter():
        """:class:`bool` The active supporter badge."""
        return 1 << 6
    @Flag
    def paw():
        """:class:`bool` The paw badge."""
        return 1 << 7
    @Flag
    def early_adopter():
        """:class:`bool` The early adopter badge."""
        return 1 << 8
    @Flag
    def reserved_relevant_joke_badge_1():
        """:class:`bool` The reserved relevant joke badge 1 badge."""
        return 1 << 9
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Optional
from .utils import _Missing, Missing, parse_timestamp
from .asset import Asset
from .permissions import Permissions
from .permissions_calculator import calculate_permissions
from .user import User
from .file import File
if TYPE_CHECKING:
from .channel import Channel
from .server import Server
from .state import State
from .types import File as FilePayload
from .types import Member as MemberPayload
from .role import Role
__all__ = ("Member",)
def flattern_user(member: Member, user: User) -> None:
    # Copy every flattened user attribute onto the member object, so the
    # Member exposes the same data as the underlying User without re-parsing
    # a user payload.
    for attr_name in user.__flattern_attributes__:
        value = getattr(user, attr_name)
        setattr(member, attr_name, value)
class Member(User):
    """Represents a member of a server, subclasses :class:`User`

    Attributes
    -----------
    nickname: Optional[:class:`str`]
        The nickname of the member if any
    roles: list[:class:`Role`]
        The roles of the member, ordered by the role's rank in decending order
    server: :class:`Server`
        The server the member belongs to
    guild_avatar: Optional[:class:`Asset`]
        The member's guild avatar if any
    """
    __slots__ = ("state", "nickname", "roles", "server", "guild_avatar", "joined_at", "current_timeout")

    def __init__(self, data: MemberPayload, server: Server, state: State):
        user = state.get_user(data["_id"]["user"])
        # due to not having a user payload and only a user object we have to
        # manually add all the attributes instead of calling User.__init__
        flattern_user(self, user)
        user._members[server.id] = self
        self.state: State = state
        self.guild_avatar: Asset | None
        if avatar := data.get("avatar"):
            self.guild_avatar = Asset(avatar, state)
        else:
            self.guild_avatar = None
        roles = [server.get_role(role_id) for role_id in data.get("roles", [])]
        self.roles: list[Role] = sorted(roles, key=lambda role: role.rank, reverse=True)
        self.server: Server = server
        self.nickname: str | None = data.get("nickname")
        self.joined_at: datetime.datetime = parse_timestamp(data["joined_at"])
        self.current_timeout: datetime.datetime | None
        if current_timeout := data.get("timeout"):
            self.current_timeout = parse_timestamp(current_timeout)
        else:
            self.current_timeout = None

    @property
    def avatar(self) -> Optional[Asset]:
        """Optional[:class:`Asset`] The avatar the member is displaying, this includes guild avatars and masqueraded avatar"""
        return self.masquerade_avatar or self.guild_avatar or self.original_avatar

    @property
    def name(self) -> str:
        """:class:`str` The name the user is displaying, this includes (in order) their masqueraded name, display name and orginal name"""
        return self.nickname or self.display_name or self.masquerade_name or self.original_name

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given member."""
        return f"<@{self.id}>"

    def _update(
        self,
        *,
        nickname: Optional[str] = None,
        avatar: Optional[FilePayload] = None,
        roles: Optional[list[str]] = None,
        timeout: Optional[str | int] = None
    ) -> None:
        # Apply a partial member-update payload; only sent fields are touched.
        if nickname is not None:
            self.nickname = nickname
        if avatar is not None:
            self.guild_avatar = Asset(avatar, self.state)
        if roles is not None:
            member_roles = [self.server.get_role(role_id) for role_id in roles]
            self.roles = sorted(member_roles, key=lambda role: role.rank, reverse=True)
        if timeout is not None:
            self.current_timeout = parse_timestamp(timeout)

    async def kick(self) -> None:
        """Kicks the member from the server"""
        await self.state.http.kick_member(self.server.id, self.id)

    async def ban(self, *, reason: Optional[str] = None) -> None:
        """Bans the member from the server

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for the ban
        """
        await self.state.http.ban_member(self.server.id, self.id, reason)

    async def unban(self) -> None:
        """Unbans the member from the server"""
        await self.state.http.unban_member(self.server.id, self.id)

    async def edit(
        self,
        *,
        nickname: str | None | _Missing = Missing,
        roles: list[Role] | None | _Missing = Missing,
        avatar: File | None | _Missing = Missing,
        timeout: datetime.timedelta | None | _Missing = Missing
    ) -> None:
        """Edits the member. Passing ``None`` for a field removes it from the
        member; leaving a field as ``Missing`` leaves it untouched.

        Parameters
        -----------
        nickname: Optional[:class:`str`]
            The member's new nickname
        roles: Optional[list[:class:`Role`]]
            The member's new roles
        avatar: Optional[:class:`File`]
            The member's new guild avatar
        timeout: Optional[:class:`datetime.timedelta`]
            How long to time the member out for, relative to now
        """
        remove: list[str] = []
        data: dict[str, Any] = {}
        if nickname is None:
            remove.append("Nickname")
        elif nickname is not Missing:
            data["nickname"] = nickname
        if roles is None:
            remove.append("Roles")
        elif roles is not Missing:
            assert not isinstance(roles, _Missing)
            # BUG fix: the API expects role ids here, not Role objects — the
            # original sent the (non-serializable) Role instances themselves.
            # Compare _update above, which receives roles as list[str] of ids.
            data["roles"] = [role.id for role in roles]
        if avatar is None:
            remove.append("Avatar")
        elif avatar is not Missing:
            # pyright cant understand custom singletons - it doesnt know this will never be an instance of _Missing here because Missing is the only instance
            assert not isinstance(avatar, _Missing)
            data["avatar"] = (await self.state.http.upload_file(avatar, "avatars"))["id"]
        if timeout is None:
            remove.append("Timeout")
        elif timeout is not Missing:
            assert not isinstance(timeout, _Missing)
            data["timeout"] = (datetime.datetime.now(datetime.timezone.utc) + timeout).isoformat()
        await self.state.http.edit_member(self.server.id, self.id, remove, data)

    async def timeout(self, length: datetime.timedelta) -> None:
        """Timeouts the member

        Parameters
        -----------
        length: :class:`datetime.timedelta`
            The length of the timeout
        """
        ends_at = datetime.datetime.now(tz=datetime.timezone.utc) + length
        await self.state.http.edit_member(self.server.id, self.id, None, {"timeout": ends_at.isoformat()})

    def get_permissions(self) -> Permissions:
        """Gets the permissions for the member in the server

        Returns
        --------
        :class:`Permissions`
            The members permissions
        """
        return calculate_permissions(self, self.server)

    def get_channel_permissions(self, channel: Channel) -> Permissions:
        """Gets the permissions for the member in the server taking into account the channel as well

        Parameters
        -----------
        channel: :class:`Channel`
            The channel to calculate permissions with

        Returns
        --------
        :class:`Permissions`
            The members permissions
        """
        return calculate_permissions(self, channel)

    def has_permissions(self, **permissions: bool) -> bool:
        """Computes if the member has the specified permissions

        Parameters
        -----------
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the member does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        calculated_perms = self.get_permissions()
        return all([getattr(calculated_perms, key, False) == value for key, value in permissions.items()])

    def has_channel_permissions(self, channel: Channel, **permissions: bool) -> bool:
        """Computes if the member has the specified permissions, taking into account the channel as well

        Parameters
        -----------
        channel: :class:`Channel`
            The channel to calculate permissions with
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the member does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        calculated_perms = self.get_channel_permissions(channel)
        return all([getattr(calculated_perms, key, False) == value for key, value in permissions.items()])
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from typing_extensions import Self
from .flags import Flag, Flags
from .types.permissions import Overwrite
__all__ = ("Permissions", "PermissionsOverwrite", "UserPermissions")
class UserPermissions(Flags):
    """Permissions for users"""
    @Flag
    def access() -> int:
        """:class:`bool` The access permission bit (``1 << 0``)."""
        return 1 << 0
    @Flag
    def view_profile() -> int:
        """:class:`bool` The view profile permission bit (``1 << 1``)."""
        return 1 << 1
    @Flag
    def send_message() -> int:
        """:class:`bool` The send message permission bit (``1 << 2``)."""
        return 1 << 2
    @Flag
    def invite() -> int:
        """:class:`bool` The invite permission bit (``1 << 3``)."""
        return 1 << 3
    @classmethod
    def all(cls) -> Self:
        """Return a :class:`UserPermissions` with every permission enabled."""
        return cls(access=True, view_profile=True, send_message=True, invite=True)
class Permissions(Flags):
    """Server permissions for members and roles"""
    # NOTE(review): ``asign_roles`` (sic) is part of the public interface, so
    # the misspelling is kept for compatibility.
    @Flag
    def manage_channel() -> int:
        return 1 << 0
    @Flag
    def manage_server() -> int:
        return 1 << 1
    @Flag
    def manage_permissions() -> int:
        return 1 << 2
    @Flag
    def manage_role() -> int:
        return 1 << 3
    @Flag
    def kick_members() -> int:
        return 1 << 6
    @Flag
    def ban_members() -> int:
        return 1 << 7
    @Flag
    def timeout_members() -> int:
        return 1 << 8
    @Flag
    def asign_roles() -> int:
        return 1 << 9
    @Flag
    def change_nickname() -> int:
        return 1 << 10
    @Flag
    def manage_nicknames() -> int:
        return 1 << 11
    @Flag
    def change_avatars() -> int:
        return 1 << 12
    @Flag
    def remove_avatars() -> int:
        return 1 << 13
    @Flag
    def view_channel() -> int:
        return 1 << 20
    @Flag
    def read_message_history() -> int:
        return 1 << 21
    @Flag
    def send_messages() -> int:
        return 1 << 22
    @Flag
    def manage_messages() -> int:
        return 1 << 23
    @Flag
    def manage_webhooks() -> int:
        return 1 << 24
    @Flag
    def invite_others() -> int:
        return 1 << 25
    @Flag
    def send_embeds() -> int:
        return 1 << 26
    @Flag
    def upload_files() -> int:
        return 1 << 27
    @Flag
    def masquerade() -> int:
        return 1 << 28
    # BUG fix: the React permission bit was missing, yet default_direct_message
    # below passes ``react=True`` — without this Flag the kwarg silently became
    # a plain instance attribute and never touched the bitfield. Revolt assigns
    # React to bit 29 (the gap between masquerade=28 and connect=30).
    @Flag
    def react() -> int:
        return 1 << 29
    @Flag
    def connect() -> int:
        return 1 << 30
    @Flag
    def speak() -> int:
        return 1 << 31
    @Flag
    def video() -> int:
        return 1 << 32
    @Flag
    def mute_members() -> int:
        return 1 << 33
    @Flag
    def deafen_members() -> int:
        return 1 << 34
    @Flag
    def move_members() -> int:
        return 1 << 35
    @classmethod
    def all(cls) -> Self:
        """Return a :class:`Permissions` with every permission bit set."""
        return cls(0x000F_FFFF_FFFF_FFFF)
    @classmethod
    def default_view_only(cls) -> Self:
        """The default permissions for a view-only channel."""
        return cls(view_channel=True, read_message_history=True)
    @classmethod
    def default(cls) -> Self:
        """The default permissions for a regular channel."""
        return cls.default_view_only() | cls(send_messages=True, invite_others=True, send_embeds=True, upload_files=True, connect=True, speak=True)
    @classmethod
    def default_direct_message(cls) -> Self:
        """The default permissions inside a direct message channel."""
        return cls.default_view_only() | cls(react=True, manage_channel=True)
class PermissionsOverwrite:
    """A permissions overwrite in a channel.

    Each permission is exposed as a tristate attribute: ``True`` (explicitly
    allowed), ``False`` (explicitly denied) or ``None`` (inherited).
    """
    def __init__(self, allow: Permissions, deny: Permissions):
        self._allow = allow
        self._deny = deny
        # Mirror the allow/deny pair onto tristate attributes; use the base
        # __setattr__ to avoid re-triggering the override below.
        for perm in Permissions.FLAG_NAMES:
            if getattr(allow, perm):
                value = True
            elif getattr(deny, perm):
                value = False
            else:
                value = None
            super().__setattr__(perm, value)
    def __setattr__(self, key: str, value: Any) -> None:
        if key in Permissions.FLAG_NAMES:
            # BUG fix: the original tested ``key is True`` / ``key is False``
            # — always false for a str key — so every assignment fell into the
            # "None" branch and explicit allows/denies were lost.
            if value is True:
                setattr(self._allow, key, True)
                # keep allow/deny disjoint, as __init__ assumes
                setattr(self._deny, key, False)
                super().__setattr__(key, True)
            elif value is False:
                setattr(self._allow, key, False)
                setattr(self._deny, key, True)
                super().__setattr__(key, False)
            else:
                setattr(self._allow, key, False)
                setattr(self._deny, key, False)
                super().__setattr__(key, None)
        else:
            super().__setattr__(key, value)
    if TYPE_CHECKING:
        manage_channel: Optional[bool]
        manage_server: Optional[bool]
        manage_permissions: Optional[bool]
        manage_role: Optional[bool]
        kick_members: Optional[bool]
        ban_members: Optional[bool]
        timeout_members: Optional[bool]
        asign_roles: Optional[bool]
        change_nickname: Optional[bool]
        manage_nicknames: Optional[bool]
        change_avatars: Optional[bool]
        remove_avatars: Optional[bool]
        view_channel: Optional[bool]
        read_message_history: Optional[bool]
        send_messages: Optional[bool]
        manage_messages: Optional[bool]
        manage_webhooks: Optional[bool]
        invite_others: Optional[bool]
        send_embeds: Optional[bool]
        upload_files: Optional[bool]
        masquerade: Optional[bool]
        connect: Optional[bool]
        speak: Optional[bool]
        video: Optional[bool]
        mute_members: Optional[bool]
        deafen_members: Optional[bool]
        move_members: Optional[bool]
    def to_pair(self) -> tuple[Permissions, Permissions]:
        """Return the underlying ``(allow, deny)`` :class:`Permissions` pair."""
        return self._allow, self._deny
    @classmethod
    def _from_overwrite(cls, overwrite: Overwrite) -> Self:
        # Build from a raw API overwrite payload ({"a": allow_bits, "d": deny_bits}).
        allow = Permissions(overwrite["a"])
        deny = Permissions(overwrite["d"])
        return cls(allow, deny)
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, cast
from .asset import Asset
from .category import Category
from .invite import Invite
from .permissions import Permissions
from .role import Role
from .utils import Ulid
from .channel import Channel, TextChannel, VoiceChannel
from .member import Member
if TYPE_CHECKING:
from .emoji import Emoji
from .file import File
from .state import State
from .types import Ban
from .types import Category as CategoryPayload
from .types import File as FilePayload
from .types import Server as ServerPayload
from .types import SystemMessagesConfig
__all__ = ("Server", "SystemMessages", "ServerBan")
class SystemMessages:
    """Holds all the configuration for the server's system message channels"""
    def __init__(self, data: SystemMessagesConfig, state: State):
        self.state: State = state
        self.user_joined_id: str | None = data.get("user_joined")
        self.user_left_id: str | None = data.get("user_left")
        self.user_kicked_id: str | None = data.get("user_kicked")
        self.user_banned_id: str | None = data.get("user_banned")
    def _resolve(self, channel_id: str | None) -> Optional[TextChannel]:
        # Shared lookup used by every property below; unset ids resolve to None.
        if not channel_id:
            return None
        channel = self.state.get_channel(channel_id)
        assert isinstance(channel, TextChannel)
        return channel
    @property
    def user_joined(self) -> Optional[TextChannel]:
        """The channel which user join messages get sent in

        Returns
        --------
        Optional[:class:`TextChannel`]
            The channel
        """
        return self._resolve(self.user_joined_id)
    @property
    def user_left(self) -> Optional[TextChannel]:
        """The channel which user leave messages get sent in

        Returns
        --------
        Optional[:class:`TextChannel`]
            The channel
        """
        return self._resolve(self.user_left_id)
    @property
    def user_kicked(self) -> Optional[TextChannel]:
        """The channel which user kick messages get sent in

        Returns
        --------
        Optional[:class:`TextChannel`]
            The channel
        """
        return self._resolve(self.user_kicked_id)
    @property
    def user_banned(self) -> Optional[TextChannel]:
        """The channel which user ban messages get sent in

        Returns
        --------
        Optional[:class:`TextChannel`]
            The channel
        """
        return self._resolve(self.user_banned_id)
class Server(Ulid):
    """Represents a server

    Attributes
    -----------
    id: :class:`str`
        The id of the server
    name: :class:`str`
        The name of the server
    owner_id: :class:`str`
        The owner's id of the server
    description: Optional[:class:`str`]
        The server's description
    nsfw: :class:`bool`
        Whether the server is nsfw or not
    system_messages: :class:`SystemMessages`
        The system message config for the server
    icon: Optional[:class:`Asset`]
        The server's icon
    banner: Optional[:class:`Asset`]
        The server's banner
    default_permissions: :class:`Permissions`
        The permissions for the default role
    """
    __slots__ = ("state", "id", "name", "owner_id", "default_permissions", "_members", "_roles", "_channels", "description", "icon", "banner", "nsfw", "system_messages", "_categories", "_emojis")
    def __init__(self, data: ServerPayload, state: State):
        # Build the cached server from a raw API payload.
        self.state: State = state
        self.id: str = data["_id"]
        self.name: str = data["name"]
        self.owner_id: str = data["owner"]
        # Normalise an empty-string description to None.
        self.description: str | None = data.get("description") or None
        self.nsfw: bool = data.get("nsfw", False)
        self.system_messages: SystemMessages = SystemMessages(data.get("system_messages", cast("SystemMessagesConfig", {})), state)
        self._categories: dict[str, Category] = {data["id"]: Category(data, state) for data in data.get("categories", [])}
        self.default_permissions: Permissions = Permissions(data["default_permissions"])
        self.icon: Asset | None
        if icon := data.get("icon"):
            self.icon = Asset(icon, state)
        else:
            self.icon = None
        self.banner: Asset | None
        if banner := data.get("banner"):
            self.banner = Asset(banner, state)
        else:
            self.banner = None
        # Members are populated lazily as member payloads arrive.
        self._members: dict[str, Member] = {}
        self._roles: dict[str, Role] = {role_id: Role(role, role_id, self, state) for role_id, role in data.get("roles", {}).items()}
        self._channels: dict[str, Channel] = {}
        # The api doesnt send us all the channels but sends us all the ids, this is because channels we dont have permissions to see are not sent
        # this causes get_channel to error so we have to first check ourself if its in the cache.
        for channel_id in data["channels"]:
            if channel := state.channels.get(channel_id):
                self._channels[channel_id] = channel
        self._emojis: dict[str, Emoji] = {}
def _update(self, *, owner: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, icon: Optional[FilePayload] = None, banner: Optional[FilePayload] = None, default_permissions: Optional[int] = None, nsfw: Optional[bool] = None, system_messages: Optional[SystemMessagesConfig] = None, categories: Optional[list[CategoryPayload]] = None, channels: Optional[list[str]] = None):
if owner is not None:
self.owner_id = owner
if name is not None:
self.name = name
if description is not None:
self.description = description or None
if icon is not None:
self.icon = Asset(icon, self.state)
if banner is not None:
self.banner = Asset(banner, self.state)
if default_permissions is not None:
self.default_permissions = Permissions(default_permissions)
if nsfw is not None:
self.nsfw = nsfw
if system_messages is not None:
self.system_messages = SystemMessages(system_messages, self.state)
if categories is not None:
self._categories = {data["id"]: Category(data, self.state) for data in categories}
if channels is not None:
self._channels = {channel_id: self.state.get_channel(channel_id) for channel_id in channels}
    @property
    def roles(self) -> list[Role]:
        """list[:class:`Role`] Gets all roles in the server in decending order"""
        return list(self._roles.values())
    @property
    def members(self) -> list[Member]:
        """list[:class:`Member`] Gets all members in the server"""
        return list(self._members.values())
    @property
    def channels(self) -> list[Channel]:
        """list[:class:`Channel`] Gets all channels in the server"""
        return list(self._channels.values())
    @property
    def categories(self) -> list[Category]:
        """list[:class:`Category`] Gets all categories in the server"""
        return list(self._categories.values())
    @property
    def emojis(self) -> list[Emoji]:
        """list[:class:`Emoji`] Gets all emojis in the server"""
        return list(self._emojis.values())
def get_role(self, role_id: str) -> Role:
"""Gets a role from the cache
Parameters
-----------
id: :class:`str`
The id of the role
Returns
--------
:class:`Role`
The role
"""
return self._roles[role_id]
    def get_member(self, member_id: str) -> Member:
        """Gets a member from the cache

        Parameters
        -----------
        member_id: :class:`str`
            The id of the member

        Returns
        --------
        :class:`Member`
            The member

        Raises
        -------
        :class:`LookupError`
            The member was not found in the cache
        """
        try:
            return self._members[member_id]
        except KeyError:
            raise LookupError from None
    def get_channel(self, channel_id: str) -> Channel:
        """Gets a channel from the cache

        Parameters
        -----------
        channel_id: :class:`str`
            The id of the channel

        Returns
        --------
        :class:`Channel`
            The channel

        Raises
        -------
        :class:`LookupError`
            The channel was not found in the cache
        """
        try:
            return self._channels[channel_id]
        except KeyError:
            raise LookupError from None
    def get_category(self, category_id: str) -> Category:
        """Gets a category from the cache

        Parameters
        -----------
        category_id: :class:`str`
            The id of the category

        Returns
        --------
        :class:`Category`
            The category

        Raises
        -------
        :class:`LookupError`
            The category was not found in the cache
        """
        try:
            return self._categories[category_id]
        except KeyError:
            raise LookupError from None
    def get_emoji(self, emoji_id: str) -> Emoji:
        """Gets a emoji from the cache

        Parameters
        -----------
        emoji_id: :class:`str`
            The id of the emoji

        Returns
        --------
        :class:`Emoji`
            The emoji

        Raises
        -------
        :class:`LookupError`
            The emoji was not found in the cache
        """
        try:
            return self._emojis[emoji_id]
        # NOTE(review): this one chains the cause (``from e``) while the other
        # getters suppress it (``from None``) — intentional? confirm.
        except KeyError as e:
            raise LookupError from e
    @property
    def owner(self) -> Member:
        """:class:`Member` The owner of the server"""
        return self.get_member(self.owner_id)
async def set_default_permissions(self, permissions: Permissions) -> None:
"""Sets the default server permissions.
Parameters
-----------
server_permissions: Optional[:class:`ServerPermissions`]
The new default server permissions
channel_permissions: Optional[:class:`ChannelPermissions`]
the new default channel permissions
"""
await self.state.http.set_server_default_permissions(self.id, permissions.value)
async def leave_server(self) -> None:
    """Leaves or deletes the server
    NOTE(review): the endpoint name suggests the API deletes the server when
    the current user owns it and leaves it otherwise -- confirm against the API docs.
    """
    await self.state.http.delete_leave_server(self.id)
async def delete_server(self) -> None:
    """Leaves or deletes a server, alias to :meth:`Server.leave_server`"""
    await self.leave_server()
async def create_text_channel(self, *, name: str, description: Optional[str] = None) -> TextChannel:
    """Creates a text channel in the server
    Parameters
    -----------
    name: :class:`str`
        The name of the channel
    description: Optional[:class:`str`]
        The channel's description
    Returns
    --------
    :class:`TextChannel`
        The text channel that was just created
    """
    data = await self.state.http.create_channel(self.id, "Text", name, description)
    # NOTE(review): unlike create_voice_channel this constructs the channel
    # directly instead of going through state.add_channel -- confirm the
    # state-level channel cache does not also need this entry.
    created = TextChannel(data, self.state)
    self._channels[created.id] = created
    return created
async def create_voice_channel(self, *, name: str, description: Optional[str] = None) -> VoiceChannel:
    """Creates a voice channel in the server
    Parameters
    -----------
    name: :class:`str`
        The name of the channel
    description: Optional[:class:`str`]
        The channel's description
    Returns
    --------
    :class:`VoiceChannel`
        The voice channel that was just created
    """
    data = await self.state.http.create_channel(self.id, "Voice", name, description)
    # Register through the state so the global cache stays in sync, then
    # also index it in this server's own channel map.
    created = self.state.add_channel(data)
    self._channels[created.id] = created
    return cast(VoiceChannel, created)
async def fetch_invites(self) -> list[Invite]:
    """Fetches all invites in the server
    Returns
    --------
    list[:class:`Invite`]
    """
    payloads = await self.state.http.fetch_server_invites(self.id)
    invites: list[Invite] = []
    for data in payloads:
        invites.append(Invite._from_partial(data["_id"], data["server"], data["creator"], data["channel"], self.state))
    return invites
async def fetch_member(self, member_id: str) -> Member:
    """Fetches a member from this server
    Parameters
    -----------
    member_id: :class:`str`
        The id of the member you are fetching
    Returns
    --------
    :class:`Member`
        The member with the matching id
    """
    # Always hits the API; use get_member for the cached lookup.
    data = await self.state.http.fetch_member(self.id, member_id)
    return Member(data, self, self.state)
async def fetch_bans(self) -> list[ServerBan]:
    """Fetches all bans in the server
    Returns
    --------
    list[:class:`ServerBan`]
    """
    data = await self.state.http.fetch_bans(self.id)
    return [ServerBan(ban_payload, self.state) for ban_payload in data["bans"]]
async def create_role(self, name: str) -> Role:
    """Creates a role in the server
    Parameters
    -----------
    name: :class:`str`
        The name of the role
    Returns
    --------
    :class:`Role`
        The role that was just created
    """
    data = await self.state.http.create_role(self.id, name)
    return Role(data, name, self, self.state)
async def create_emoji(self, name: str, file: File, *, nsfw: bool = False) -> Emoji:
    """Creates an emoji
    Parameters
    -----------
    name: :class:`str`
        The name for the emoji
    file: :class:`File`
        The image for the emoji
    nsfw: :class:`bool`
        Whether or not the emoji is nsfw
    Returns
    --------
    :class:`Emoji`
        The emoji that was just created
    """
    payload = await self.state.http.create_emoji(name, file, nsfw, {"type": "Server", "id": self.id})
    # add_emoji registers the emoji in the state cache and returns it.
    return self.state.add_emoji(payload)
class ServerBan:
    """Represents a server ban
    Attributes
    -----------
    reason: Optional[:class:`str`]
        The reason the user was banned
    server: :class:`Server`
        The server the user was banned in
    user_id: :class:`str`
        The id of the user who was banned
    """
    __slots__ = ("reason", "server", "user_id", "state")

    def __init__(self, ban: Ban, state: State):
        self.reason: str | None = ban.get("reason")
        # The ban payload's compound "_id" carries both the server and user ids.
        self.server: Server = state.get_server(ban["_id"]["server"])
        self.user_id: str = ban["_id"]["user"]
        self.state: State = state

    async def unban(self) -> None:
        """Unbans the user"""
        await self.state.http.unban_member(self.server.id, self.user_id)
from __future__ import annotations
from typing import TYPE_CHECKING, TypedDict, Union
from typing_extensions import NotRequired
if TYPE_CHECKING:
from .embed import Embed
from .file import File
# Names re-exported as the public API of this module.
__all__ = (
    "UserAddContent",
    "UserRemoveContent",
    "UserJoinedContent",
    "UserLeftContent",
    "UserKickedContent",
    "UserBannedContent",
    "ChannelRenameContent",
    "ChannelDescriptionChangeContent",
    "ChannelIconChangeContent",
    "Masquerade",
    "Interactions",
    "Message",
    "MessageReplyPayload",
    "SystemMessageContent",
    "Component",
)
class UserAddContent(TypedDict):
    """System-message payload for a user-added event."""
    id: str  # id of the user that was added
    by: str  # id of the user that added them

class UserRemoveContent(TypedDict):
    """System-message payload for a user-removed event."""
    id: str
    by: str

class UserJoinedContent(TypedDict):
    """System-message payload for a user-joined event."""
    id: str
    by: str

class UserLeftContent(TypedDict):
    """System-message payload for a user-left event."""
    id: str

class UserKickedContent(TypedDict):
    """System-message payload for a user-kicked event."""
    id: str

class UserBannedContent(TypedDict):
    """System-message payload for a user-banned event."""
    id: str

class ChannelRenameContent(TypedDict):
    """System-message payload for a channel rename."""
    name: str  # the new channel name
    by: str    # id of the user that renamed it

class ChannelDescriptionChangeContent(TypedDict):
    """System-message payload for a channel description change."""
    by: str

class ChannelIconChangeContent(TypedDict):
    """System-message payload for a channel icon change."""
    by: str

class Masquerade(TypedDict, total=False):
    """Per-message override of the author's displayed name/avatar/colour."""
    name: str
    avatar: str
    colour: str

class Interactions(TypedDict):
    """Reaction restrictions for a message."""
    reactions: NotRequired[list[str]]
    restrict_reactions: NotRequired[bool]

# Union of every system-message payload shape defined above.
SystemMessageContent = Union[
    UserAddContent,
    UserRemoveContent,
    UserJoinedContent,
    UserLeftContent,
    UserKickedContent,
    UserBannedContent,
    ChannelRenameContent,
    ChannelDescriptionChangeContent,
    ChannelIconChangeContent,
]
class Message(TypedDict):
    """Raw message payload as received from the API."""
    _id: str      # message id
    channel: str  # channel id
    author: str   # author id
    content: str
    components: NotRequired[list[Component]]
    system: NotRequired[SystemMessageContent]
    attachments: NotRequired[list[File]]
    embeds: NotRequired[list[Embed]]
    mentions: NotRequired[list[str]]  # ids of mentioned users
    replies: NotRequired[list[str]]   # ids of the messages being replied to
    edited: NotRequired[str | int]    # edit timestamp, if the message was edited
    masquerade: NotRequired[Masquerade]
    interactions: NotRequired[Interactions]
    reactions: dict[str, list[str]]   # presumably emoji -> reacting user ids; verify against API docs

class MessageReplyPayload(TypedDict):
    """Reply descriptor sent when creating a message."""
    id: str        # id of the message being replied to
    mention: bool  # whether the reply should ping its author

class Component(TypedDict):
    """A message component."""
    type: str
    label: str
    style: str
    enabled: bool
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, TypedDict, Union
from typing_extensions import NotRequired
if TYPE_CHECKING:
from .file import File
# Names re-exported as the public API of this module.
__all__ = ("Embed", "SendableEmbed", "WebsiteEmbed", "ImageEmbed", "TextEmbed", "NoneEmbed", "YoutubeSpecial", "TwitchSpecial", "SpotifySpecial", "SoundcloudSpecial", "BandcampSpecial", "WebsiteSpecial", "JanuaryImage", "JanuaryVideo")

class YoutubeSpecial(TypedDict):
    """Provider-specific website-embed metadata for a YouTube link."""
    type: Literal["Youtube"]
    id: str
    timestamp: NotRequired[str]

class TwitchSpecial(TypedDict):
    """Provider-specific website-embed metadata for a Twitch link."""
    type: Literal["Twitch"]
    content_type: Literal["Channel", "Video", "Clip"]
    id: str

class SpotifySpecial(TypedDict):
    """Provider-specific website-embed metadata for a Spotify link."""
    type: Literal["Spotify"]
    content_type: str
    id: str

class SoundcloudSpecial(TypedDict):
    """Provider-specific website-embed metadata for a Soundcloud link (no extra fields)."""
    type: Literal["Soundcloud"]

class BandcampSpecial(TypedDict):
    """Provider-specific website-embed metadata for a Bandcamp link."""
    type: Literal["Bandcamp"]
    content_type: Literal["Album", "Track"]
    id: str

# Union of all provider-specific website-embed metadata shapes.
WebsiteSpecial = Union[YoutubeSpecial, TwitchSpecial, SpotifySpecial, SoundcloudSpecial, BandcampSpecial]
class JanuaryImage(TypedDict):
    """A resolved image (named after Revolt's "January" media proxy -- TODO confirm)."""
    url: str
    width: int
    height: int
    size: Literal["Large", "Preview"]

class JanuaryVideo(TypedDict):
    """A resolved video."""
    url: str
    width: int
    height: int

class WebsiteEmbed(TypedDict):
    """An embed generated from a website link."""
    type: Literal["Website"]
    url: NotRequired[str]
    special: NotRequired[WebsiteSpecial]  # provider-specific metadata, if recognised
    title: NotRequired[str]
    description: NotRequired[str]
    image: NotRequired[JanuaryImage]
    video: NotRequired[JanuaryVideo]
    site_name: NotRequired[str]
    icon_url: NotRequired[str]
    colour: NotRequired[str]

class ImageEmbed(JanuaryImage):
    """An embed that is a bare image (JanuaryImage fields plus the discriminator)."""
    type: Literal["Image"]

class TextEmbed(TypedDict):
    """A rich text embed as received from the API."""
    type: Literal["Text"]
    icon_url: NotRequired[str]
    url: NotRequired[str]
    title: NotRequired[str]
    description: NotRequired[str]
    media: NotRequired[File]
    colour: NotRequired[str]

class NoneEmbed(TypedDict):
    """Placeholder shape for an absent embed."""
    type: Literal["None"]

# Union of every embed shape the API can return, discriminated by "type".
Embed = Union[WebsiteEmbed, ImageEmbed, TextEmbed, NoneEmbed]
class SendableEmbed(TypedDict):
    """A text embed as sent to the API. Unlike :class:`TextEmbed`, ``media``
    is a string (presumably an attachment id -- verify against the API docs)
    rather than a :class:`File`."""
    type: Literal["Text"]
    icon_url: NotRequired[str]
    url: NotRequired[str]
    title: NotRequired[str]
    description: NotRequired[str]
    media: NotRequired[str]
    colour: NotRequired[str]
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Generic, Optional
import revolt
from revolt.utils import maybe_coroutine
from .command import Command
from .group import Group
from .utils import ClientT_Co_D
if TYPE_CHECKING:
from .view import StringView
from revolt.state import State
# Public API of this module.
__all__ = (
    "Context",
)
class Context(revolt.Messageable, Generic[ClientT_Co_D]):
    """Stores metadata for the command's execution.
    Attributes
    -----------
    command: Optional[:class:`Command`]
        The command, this can be `None` when no command was found and the error handler is being executed
    invoked_with: :class:`str`
        The command name that was used, this can be an alias, the commands name or a command that doesnt exist
    message: :class:`Message`
        The message that was sent to invoke the command
    channel: :class:`Messageable`
        The channel the command was invoked in
    server_id: Optional[:class:`str`]
        The id of the server the command was invoked in, or `None` in DMs
    author: Union[:class:`Member`, :class:`User`]
        The user or member that invoked the command, will be :class:`User` in DMs
    args: list[:class:`str`]
        The positional arguments being passed to the command
    kwargs: dict[:class:`str`, Any]
        The keyword arguments being passed to the command
    client: :class:`CommandsClient`
        The revolt client
    """
    __slots__ = ("command", "invoked_with", "args", "message", "channel", "author", "view", "kwargs", "state", "client", "server_id")

    async def _get_channel_id(self) -> str:
        # Messageable hook: messages sent through the context go to the
        # channel the invoking message came from.
        return self.channel.id

    def __init__(self, command: Optional[Command[ClientT_Co_D]], invoked_with: str, view: StringView, message: revolt.Message, client: ClientT_Co_D):
        self.command: Command[ClientT_Co_D] | None = command
        self.invoked_with: str = invoked_with
        self.view: StringView = view  # the remaining, unparsed argument text
        self.message: revolt.Message = message
        self.client: ClientT_Co_D = client
        self.args: list[Any] = []         # filled in by Command.parse_arguments
        self.kwargs: dict[str, Any] = {}  # filled in by Command.parse_arguments
        self.server_id: str | None = message.server_id
        self.channel: revolt.TextChannel | revolt.GroupDMChannel | revolt.DMChannel | revolt.SavedMessageChannel = message.channel
        self.author: revolt.Member | revolt.User = message.author
        self.state: State = message.state

    @property
    def server(self) -> revolt.Server:
        """:class:`Server` The server this context belongs to
        Raises
        -------
        :class:`LookupError`
            Raises if the context is not from a server
        """
        if not self.server_id:
            raise LookupError
        return self.state.get_server(self.server_id)

    async def invoke(self) -> Any:
        """Invokes the command.
        .. note:: If the command is `None`, this function will do nothing.
        Returns
        --------
        Any
            Whatever the command's callback returned
        """
        if command := self.command:
            if isinstance(command, Group):
                # Try to dispatch to a subcommand: peek at the next word of input.
                try:
                    subcommand_name = self.view.get_next_word()
                except StopIteration:
                    pass
                else:
                    if subcommand := command.subcommands.get(subcommand_name):
                        self.command = command = subcommand
                        return await self.invoke()  # recurse to resolve nested groups
                    # Not a subcommand name -- push the word back so it is
                    # parsed as a regular argument of the group callback.
                    self.view.undo()
            await command.parse_arguments(self)
            return await command.invoke(self, *self.args, **self.kwargs)

    async def can_run(self, command: Optional[Command[ClientT_Co_D]] = None) -> bool:
        """Runs all of the commands checks, and returns true if all of them pass"""
        command = command or self.command
        return all([await maybe_coroutine(check, self) for check in (command.checks if command else [])])

    async def send_help(self, argument: Command[Any] | Group[Any] | ClientT_Co_D | None = None) -> None:
        """Invokes the registered help command for the given command, group or client.
        NOTE(review): assumes a command named "help" is registered --
        ``get_command`` will raise otherwise.
        """
        argument = argument or self.client
        command = self.client.get_command("help")
        await command.invoke(self, argument)
from __future__ import annotations
from typing import Any, Callable, Coroutine, Union, cast
from typing_extensions import TypeVar
import revolt
from .command import Command
from .context import Context
from .errors import (MissingPermissionsError, NotBotOwner, NotServerOwner,
ServerOnly)
from .utils import ClientT_D
__all__ = ("check", "Check", "is_bot_owner", "is_server_owner", "has_permissions", "has_channel_permissions")
# T is constrained to either a raw callback or an already-built Command (default).
T = TypeVar("T", Callable[..., Any], Command, default=Command)
# A check is a callable taking the Context and returning an (optionally awaitable) truthy/falsy value.
Check = Callable[[Context[ClientT_D]], Union[Any, Coroutine[Any, Any, Any]]]
def check(check: Check[ClientT_D]) -> Callable[[T], T]:
    """A decorator for adding command checks
    Parameters
    -----------
    check: Callable[[Context], Union[Any, Coroutine[Any, Any, Any]]]
        The function to be called, must take one parameter, context and optionally be a coroutine, the return value denoting whether the check should pass or fail
    """
    def decorator(func: T) -> T:
        if isinstance(func, Command):
            # Command already built: append to its check list.
            # (cant verify the generic at runtime so must cast)
            cast(Command[ClientT_D], func).checks.append(check)
        else:
            # Plain callback: stash the check on the function; Command.__init__
            # picks up `_checks` later.
            pending = getattr(func, "_checks", [])
            pending.append(check)
            func._checks = pending  # type: ignore
        return func
    return decorator
def is_bot_owner() -> Callable[[T], T]:
    """A command check for limiting the command to only the bot's owner"""
    @check
    def inner(context: Context[ClientT_D]) -> bool:
        if user_id := context.client.user.owner_id:
            # Bot accounts: only the owning user may run the command.
            if context.author.id == user_id:
                return True
        else:
            # No owner_id set -- presumably a user/self account, so allow the
            # account itself. TODO confirm this fallback is intended.
            if context.author.id == context.client.user.id:
                return True
        raise NotBotOwner
    return inner
def is_server_owner() -> Callable[[T], T]:
    """A command check for limiting the command to only a server's owner"""
    @check
    def inner(context: Context[ClientT_D]) -> bool:
        # Guard clauses: must be in a server, and the author must own it.
        if not context.server_id:
            raise ServerOnly
        if context.author.id != context.server.owner_id:
            raise NotServerOwner
        return True
    return inner
def has_permissions(**permissions: bool) -> Callable[[T], T]:
    """A command check requiring the author to have the given permissions.
    Parameters
    -----------
    permissions: :class:`bool`
        The permission flags, passed by name, that the author must have
    """
    @check
    def inner(context: Context[ClientT_D]) -> bool:
        author = context.author
        if not author.has_permissions(**permissions):
            raise MissingPermissionsError(permissions)
        return True
    return inner
def has_channel_permissions(**permissions: bool) -> Callable[[T], T]:
    """A command check requiring the author to have the given permissions in the current channel.
    Parameters
    -----------
    permissions: :class:`bool`
        The permission flags, passed by name, that the author must have in the channel
    """
    @check
    def inner(context: Context[ClientT_D]) -> bool:
        author = context.author
        # Channel-level permissions only exist for server members.
        if not isinstance(author, revolt.Member):
            raise ServerOnly
        if not author.has_channel_permissions(context.channel, **permissions):
            raise MissingPermissionsError(permissions)
        return True
    return inner
from __future__ import annotations
import inspect
import traceback
from contextlib import suppress
from typing import (TYPE_CHECKING, Annotated, Any, Callable, Coroutine,
Generic, Literal, Optional, Union, get_args, get_origin)
from typing_extensions import ParamSpec
from revolt.utils import maybe_coroutine
from .errors import InvalidLiteralArgument, UnionConverterError
from .utils import ClientT_Co_D, evaluate_parameters, ClientT_Co
if TYPE_CHECKING:
from .checks import Check
from .cog import Cog
from .context import Context
from .group import Group
# Public API of this module.
__all__: tuple[str, ...] = (
    "Command",
    "command"
)
# Cached type(None) -- used to special-case Optional[...] members in unions.
NoneType: type[None] = type(None)
P = ParamSpec("P")
class Command(Generic[ClientT_Co_D]):
    """Class for holding info about a command.
    Parameters
    -----------
    callback: Callable[..., Coroutine[Any, Any, Any]]
        The callback for the command
    name: :class:`str`
        The name of the command
    aliases: list[:class:`str`]
        The aliases of the command
    parent: Optional[:class:`Group`]
        The parent of the command if this command is a subcommand
    cog: Optional[:class:`Cog`]
        The cog the command is apart of.
    usage: Optional[:class:`str`]
        The usage string for the command
    checks: list[Callable]
        The list of checks the command has
    description: Optional[:class:`str`]
        The commands description if it has one
    hidden: :class:`bool`
        Whether or not the command should be hidden from the help command
    """
    __slots__ = ("callback", "name", "aliases", "signature", "checks", "parent", "_error_handler", "cog", "description", "usage", "parameters", "hidden")

    def __init__(self, callback: Callable[..., Coroutine[Any, Any, Any]], name: str, aliases: list[str], usage: Optional[str] = None):
        self.callback: Callable[..., Coroutine[Any, Any, Any]] = callback
        self.name: str = name
        self.aliases: list[str] = aliases
        self.usage: str | None = usage
        self.signature: inspect.Signature = inspect.signature(self.callback)
        self.parameters: list[inspect.Parameter] = evaluate_parameters(self.signature.parameters.values(), getattr(callback, "__globals__", {}))
        # Checks attached via @check before the Command object existed.
        self.checks: list[Check[ClientT_Co_D]] = getattr(callback, "_checks", [])
        self.parent: Optional[Group[ClientT_Co_D]] = None
        self.cog: Optional[Cog[ClientT_Co_D]] = None
        # Stored unbound; it is always called with the owner (cog or client)
        # passed explicitly, so Command.error can swap in a plain function.
        self._error_handler: Callable[[Any, Context[ClientT_Co_D], Exception], Coroutine[Any, Any, Any]] = type(self)._default_error_handler
        self.description: str | None = callback.__doc__
        self.hidden: bool = False

    async def invoke(self, context: Context[ClientT_Co_D], *args: Any, **kwargs: Any) -> Any:
        """Runs the command and calls the error handler if the command errors.
        Parameters
        -----------
        context: :class:`Context`
            The context for the command
        args: list[:class:`str`]
            The arguments for the command
        """
        try:
            return await self.callback(self.cog or context.client, context, *args, **kwargs)
        except Exception as err:
            return await self._error_handler(self.cog or context.client, context, err)

    def __call__(self, context: Context[ClientT_Co_D], *args: Any, **kwargs: Any) -> Any:
        return self.invoke(context, *args, **kwargs)

    def error(self, func: Callable[..., Coroutine[Any, Any, Any]]) -> Callable[..., Coroutine[Any, Any, Any]]:
        """Sets the error handler for the command.
        Parameters
        -----------
        func: Callable[..., Coroutine[Any, Any, Any]]
            The function for the error handler
        Example
        --------
        .. code-block:: python3
            @mycommand.error
            async def mycommand_error(self, ctx, error):
                await ctx.send(str(error))
        """
        self._error_handler = func
        return func

    async def _default_error_handler(self, ctx: Context[ClientT_Co_D], error: Exception):
        # Fallback handler: just print the traceback.
        traceback.print_exception(type(error), error, error.__traceback__)

    @classmethod
    async def handle_origin(cls, context: Context[ClientT_Co_D], origin: Any, annotation: Any, arg: str) -> Any:
        """Converts ``arg`` according to a subscripted annotation (Union/Annotated/Literal)."""
        if origin is Union:
            for converter in get_args(annotation):
                try:
                    return await cls.convert_argument(arg, converter, context)
                # Was a bare `except:` -- narrowed so KeyboardInterrupt/SystemExit
                # are not swallowed while trying union members.
                except Exception:
                    if converter is NoneType:
                        # Optional[...]: a failed parse means "argument absent";
                        # push the word back for the next parameter.
                        context.view.undo()
                        return None
            raise UnionConverterError(arg)
        elif origin is Annotated:
            annotated_args = get_args(annotation)
            if origin := get_origin(annotated_args[0]):
                # Annotated wraps another generic -- recurse with the metadata converter.
                return await cls.handle_origin(context, origin, annotated_args[1], arg)
            else:
                return await cls.convert_argument(arg, annotated_args[1], context)
        elif origin is Literal:
            if arg in get_args(annotation):
                return arg
            else:
                raise InvalidLiteralArgument(arg)
        # NOTE: any other origin (e.g. list[int]) falls through and returns None.

    @classmethod
    async def convert_argument(cls, arg: str, annotation: Any, context: Context[ClientT_Co_D]) -> Any:
        """Converts a single word using the parameter's annotation as the converter."""
        if annotation is not inspect.Signature.empty:
            if annotation is str:  # no converting is needed - its already a string
                return arg
            origin: Any
            if origin := get_origin(annotation):
                return await cls.handle_origin(context, origin, annotation, arg)
            else:
                # Plain converter: a callable (possibly async) taking (arg, context).
                return await maybe_coroutine(annotation, arg, context)
        else:
            return arg

    async def parse_arguments(self, context: Context[ClientT_Co_D]) -> None:
        """Fills ``context.args``/``context.kwargs`` from the words in ``context.view``."""
        # please pr if you can think of a better way to do this
        # parameters[0:2] are the implicit (cog-or-client, context) pair.
        for parameter in self.parameters[2:]:
            if parameter.kind == parameter.KEYWORD_ONLY:
                # Keyword-only consumes the entire rest of the input as one argument.
                try:
                    arg = await self.convert_argument(context.view.get_rest(), parameter.annotation, context)
                except StopIteration:
                    if parameter.default is not parameter.empty:
                        arg = parameter.default
                    else:
                        raise
                context.kwargs[parameter.name] = arg
            elif parameter.kind == parameter.VAR_POSITIONAL:
                # *args greedily consumes one word at a time until input runs out.
                with suppress(StopIteration):
                    while True:
                        context.args.append(await self.convert_argument(context.view.get_next_word(), parameter.annotation, context))
            elif parameter.kind == parameter.POSITIONAL_OR_KEYWORD:
                try:
                    word = context.view.get_next_word()
                    arg = await self.convert_argument(word, parameter.annotation, context)
                except StopIteration:
                    if parameter.default is not parameter.empty:
                        arg = parameter.default
                    else:
                        raise
                context.args.append(arg)

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} name=\"{self.name}\">"

    @property
    def short_description(self) -> Optional[str]:
        """Returns the first line of the description or None if there is no description."""
        if self.description:
            return self.description.split("\n")[0]

    def get_usage(self) -> str:
        """Returns the usage string for the command, generated from its signature
        unless an explicit ``usage`` was supplied."""
        if self.usage:
            return self.usage
        # Collect parent group names so the usage shows the full invocation path.
        parents: list[str] = []
        if self.parent:
            parent = self.parent
            while parent:
                parents.append(parent.name)
                parent = parent.parent
        parameters: list[str] = []
        for parameter in self.parameters[2:]:
            # <required>, [optional], <rest...> for keyword-only, [variadic...]
            if parameter.kind == parameter.POSITIONAL_OR_KEYWORD:
                if parameter.default is not parameter.empty:
                    parameters.append(f"[{parameter.name}]")
                else:
                    parameters.append(f"<{parameter.name}>")
            elif parameter.kind == parameter.KEYWORD_ONLY:
                if parameter.default is not parameter.empty:
                    parameters.append(f"[{parameter.name}]")
                else:
                    parameters.append(f"<{parameter.name}...>")
            elif parameter.kind == parameter.VAR_POSITIONAL:
                parameters.append(f"[{parameter.name}...]")
        return f"{' '.join(parents[::-1])} {self.name} {' '.join(parameters)}"
def command(
    *,
    name: Optional[str] = None,
    aliases: Optional[list[str]] = None,
    cls: type[Command[ClientT_Co]] = Command,
    usage: Optional[str] = None
) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Command[ClientT_Co]]:
    """A decorator that turns a function into a :class:`Command`.
    Parameters
    -----------
    name: Optional[:class:`str`]
        The name of the command, this defaults to the functions name
    aliases: Optional[list[:class:`str`]]
        The aliases of the command, defaults to no aliases
    cls: type[:class:`Command`]
        The class used for creating the command, this defaults to :class:`Command` but can be used to use a custom command subclass
    usage: Optional[:class:`str`]
        An explicit usage string; generated from the signature if not given
    Returns
    --------
    Callable[Callable[..., Coroutine], :class:`Command`]
        A function that takes the command callback and returns a :class:`Command`
    """
    def inner(func: Callable[..., Coroutine[Any, Any, Any]]) -> Command[ClientT_Co]:
        return cls(func, name or func.__name__, aliases or [], usage)
    return inner
from revolt import RevoltError
__all__ = (
"CommandError",
"CommandNotFound",
"NoClosingQuote",
"CheckError",
"NotBotOwner",
"NotServerOwner",
"ServerOnly",
"ConverterError",
"InvalidLiteralArgument",
"BadBoolArgument",
"CategoryConverterError",
"ChannelConverterError",
"UserConverterError",
"MemberConverterError",
"MissingSetup",
)
class CommandError(RevoltError):
    """Base error for all command-related errors"""

class CommandNotFound(CommandError):
    """Raised when a command isnt found.
    Parameters
    -----------
    command_name: :class:`str`
        The name of the command that wasnt found
    """
    __slots__ = ("command_name",)

    def __init__(self, command_name: str):
        self.command_name: str = command_name

class NoClosingQuote(CommandError):
    """Raised when there is no closing quote for a command argument"""

class CheckError(CommandError):
    """Raised when a check fails for a command"""

class NotBotOwner(CheckError):
    """Raised when the `is_bot_owner` check fails"""

class NotServerOwner(CheckError):
    """Raised when the `is_server_owner` check fails"""

class ServerOnly(CheckError):
    """Raised when a check requires the command to be ran in a server"""

class MissingPermissionsError(CheckError):
    """Raised when a check requires permissions the user does not have
    Attributes
    -----------
    permissions: :class:`dict[str, bool]`
        The permissions which the user did not have
    """
    def __init__(self, permissions: dict[str, bool]):
        self.permissions: dict[str, bool] = permissions
class ConverterError(CommandError):
    """Base class for all converter errors"""

class InvalidLiteralArgument(ConverterError):
    """Raised when the argument is not a valid literal argument"""

class BadBoolArgument(ConverterError):
    """Raised when the bool converter fails"""

class CategoryConverterError(ConverterError):
    """Raised when the Category converter fails"""
    def __init__(self, argument: str):
        self.argument: str = argument

class ChannelConverterError(ConverterError):
    """Raised when the Channel converter fails"""
    def __init__(self, argument: str):
        self.argument: str = argument

class UserConverterError(ConverterError):
    """Raised when the User converter fails"""
    def __init__(self, argument: str):
        self.argument: str = argument

class MemberConverterError(ConverterError):
    """Raised when the Member converter fails"""
    def __init__(self, argument: str):
        self.argument: str = argument

class UnionConverterError(ConverterError):
    """Raised when all converters in a union fail"""
    def __init__(self, argument: str):
        self.argument: str = argument

class MissingSetup(CommandError):
    """Raised when an extension is missing the `setup` function"""
from __future__ import annotations
from typing import Any, Callable, Coroutine, Optional
from .command import Command
from .utils import ClientT_Co_D, ClientT_D
# Public API of this module.
__all__ = (
    "Group",
    "group"
)
class Group(Command[ClientT_Co_D]):
    """Class for holding info about a group command.
    Parameters
    -----------
    callback: Callable[..., Coroutine[Any, Any, Any]]
        The callback for the group command
    name: :class:`str`
        The name of the command
    aliases: list[:class:`str`]
        The aliases of the group command
    subcommands: dict[:class:`str`, :class:`Command`]
        The group's subcommands.
    """
    __slots__: tuple[str, ...] = ("subcommands",)

    def __init__(self, callback: Callable[..., Coroutine[Any, Any, Any]], name: str, aliases: list[str]):
        # Maps subcommand names AND aliases to the same Command object.
        self.subcommands: dict[str, Command[ClientT_Co_D]] = {}
        super().__init__(callback, name, aliases)

    def command(self, *, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: type[Command[ClientT_Co_D]] = Command[ClientT_Co_D]) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Command[ClientT_Co_D]]:
        """A decorator that turns a function into a :class:`Command` and registers the command as a subcommand.
        Parameters
        -----------
        name: Optional[:class:`str`]
            The name of the command, this defaults to the functions name
        aliases: Optional[list[:class:`str`]]
            The aliases of the command, defaults to no aliases
        cls: type[:class:`Command`]
            The class used for creating the command, this defaults to :class:`Command` but can be used to use a custom command subclass
        Returns
        --------
        Callable[Callable[..., Coroutine], :class:`Command`]
            A function that takes the command callback and returns a :class:`Command`
        """
        def inner(func: Callable[..., Coroutine[Any, Any, Any]]) -> Command[ClientT_Co_D]:
            command = cls(func, name or func.__name__, aliases or [])
            command.parent = self
            # Register under the primary name and every alias.
            self.subcommands[command.name] = command
            for alias in command.aliases:
                self.subcommands[alias] = command
            return command
        return inner

    def group(self, *, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: Optional[type[Group[ClientT_Co_D]]] = None) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Group[ClientT_Co_D]]:
        """A decorator that turns a function into a :class:`Group` and registers the command as a subcommand
        Parameters
        -----------
        name: Optional[:class:`str`]
            The name of the group command, this defaults to the functions name
        aliases: Optional[list[:class:`str`]]
            The aliases of the group command, defaults to no aliases
        cls: type[:class:`Group`]
            The class used for creating the command, this defaults to :class:`Group` but can be used to use a custom group subclass
        Returns
        --------
        Callable[Callable[..., Coroutine], :class:`Group`]
            A function that takes the command callback and returns a :class:`Group`
        """
        # Default to this group's own type so subclasses nest consistently.
        cls = cls or type(self)
        def inner(func: Callable[..., Coroutine[Any, Any, Any]]) -> Group[ClientT_Co_D]:
            command = cls(func, name or func.__name__, aliases or [])
            command.parent = self
            self.subcommands[command.name] = command
            for alias in command.aliases:
                self.subcommands[alias] = command
            return command
        return inner

    def __repr__(self) -> str:
        return f"<Group name=\"{self.name}\">"

    @property
    def commands(self) -> list[Command[ClientT_Co_D]]:
        """Gets all commands registered
        Returns
        --------
        list[:class:`Command`]
            The registered commands
        """
        # De-duplicate: aliases map to the same Command objects.
        return list(set(self.subcommands.values()))

    def get_command(self, name: str) -> Command[ClientT_Co_D]:
        """Gets a command.
        Parameters
        -----------
        name: :class:`str`
            The name or alias of the command
        Raises
        -------
        :class:`KeyError`
            Raises if no subcommand has that name or alias
        Returns
        --------
        :class:`Command`
            The command with the name
        """
        return self.subcommands[name]

    def add_command(self, command: Command[ClientT_Co_D]) -> None:
        """Adds a command, this is typically only used for dynamic commands, you should use the `commands.command` decorator for most usecases.
        Parameters
        -----------
        command: :class:`Command`
            The command to be added
        """
        self.subcommands[command.name] = command
        for alias in command.aliases:
            self.subcommands[alias] = command

    def remove_command(self, name: str) -> Optional[Command[ClientT_Co_D]]:
        """Removes a command.
        Parameters
        -----------
        name: :class:`str`
            The name or alias of the command
        Returns
        --------
        Optional[:class:`Command`]
            The command that was removed, or ``None`` if no entry matched
        """
        command = self.subcommands.pop(name, None)
        if command is not None:
            # Drop every alias entry as well.
            # NOTE(review): when `name` is an alias, the entry registered under
            # the command's primary name is left in place -- confirm this is
            # the intended behaviour.
            for alias in command.aliases:
                self.subcommands.pop(alias, None)
        return command
def group(*, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: type[Group[ClientT_D]] = Group) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Group[ClientT_D]]:
    """A decorator that turns a function into a :class:`Group`
    Parameters
    -----------
    name: Optional[:class:`str`]
        The name of the group command, this defaults to the functions name
    aliases: Optional[list[:class:`str`]]
        The aliases of the group command, defaults to no aliases
    cls: type[:class:`Group`]
        The class used for creating the command, this defaults to :class:`Group` but can be used to use a custom group subclass
    Returns
    --------
    Callable[Callable[..., Coroutine], :class:`Group`]
        A function that takes the command callback and returns a :class:`Group`
    """
    def inner(func: Callable[..., Coroutine[Any, Any, Any]]) -> Group[ClientT_D]:
        return cls(func, name or func.__name__, aliases or [])
    return inner
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Generic, Optional, TypedDict, Union, cast
from typing_extensions import NotRequired
from .cog import Cog
from .command import Command
from .context import Context
from .group import Group
from .utils import ClientT_Co_D, ClientT_D
from revolt import File, Message, Messageable, MessageReply, SendableEmbed
if TYPE_CHECKING:
from .cog import Cog
# Public API of this module.
__all__ = ("MessagePayload", "HelpCommand", "DefaultHelpCommand", "help_command_impl")

class MessagePayload(TypedDict):
    """Keyword arguments for sending a message; help renderers may return this
    instead of a plain string or embed."""
    content: str
    embed: NotRequired[SendableEmbed]
    embeds: NotRequired[list[SendableEmbed]]
    attachments: NotRequired[list[File]]
    replies: NotRequired[list[MessageReply]]
class HelpCommand(ABC, Generic[ClientT_Co_D]):
    """Abstract base for help commands. Subclasses implement the ``create_*``
    renderers, each returning a string, embed or :class:`MessagePayload`."""

    @abstractmethod
    async def create_global_help(self, context: Context[ClientT_Co_D], commands: dict[Optional[Cog[ClientT_Co_D]], list[Command[ClientT_Co_D]]]) -> Union[str, SendableEmbed, MessagePayload]:
        """Renders help for every command, pre-grouped by cog."""
        raise NotImplementedError

    @abstractmethod
    async def create_command_help(self, context: Context[ClientT_Co_D], command: Command[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        """Renders help for a single command."""
        raise NotImplementedError

    @abstractmethod
    async def create_group_help(self, context: Context[ClientT_Co_D], group: Group[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        """Renders help for a group command."""
        raise NotImplementedError

    @abstractmethod
    async def create_cog_help(self, context: Context[ClientT_Co_D], cog: Cog[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        """Renders help for a cog."""
        raise NotImplementedError

    async def send_help_command(self, context: Context[ClientT_Co_D], message_payload: MessagePayload) -> Message:
        """Sends the rendered help output; override to customise delivery."""
        return await context.send(**message_payload)

    async def filter_commands(self, context: Context[ClientT_Co_D], commands: list[Command[ClientT_Co_D]]) -> list[Command[ClientT_Co_D]]:
        """Returns the commands the invoker can see: hidden commands are dropped,
        and a command whose checks fail (or raise) is treated as not runnable."""
        filtered: list[Command[ClientT_Co_D]] = []
        for command in commands:
            if command.hidden:
                continue
            try:
                if await context.can_run(command):
                    filtered.append(command)
            except Exception:
                # A raising check counts as "cannot run"; the command is skipped.
                pass
        return filtered

    async def group_commands(self, context: Context[ClientT_Co_D], commands: list[Command[ClientT_Co_D]]) -> dict[Optional[Cog[ClientT_Co_D]], list[Command[ClientT_Co_D]]]:
        """Groups commands by their cog (``None`` for cog-less commands)."""
        cogs: dict[Optional[Cog[ClientT_Co_D]], list[Command[ClientT_Co_D]]] = {}
        for command in commands:
            cogs.setdefault(command.cog, []).append(command)
        return cogs

    async def handle_message(self, context: Context[ClientT_Co_D], message: Message) -> None:
        # Hook called after the help message is sent; no-op by default.
        pass

    async def get_channel(self, context: Context[ClientT_Co_D]) -> Messageable:
        """Returns the destination for help output; defaults to the invoking context."""
        return context

    @abstractmethod
    async def handle_no_command_found(self, context: Context[ClientT_Co_D], name: str) -> Union[str, SendableEmbed, MessagePayload]:
        """Renders the response for an unknown command name."""
        raise NotImplementedError
class DefaultHelpCommand(HelpCommand[ClientT_Co_D]):
    """The help command implementation used when none is supplied.

    Renders everything as monospaced code blocks.
    """

    def __init__(self, default_cog_name: str = "No Cog"):
        # heading used for commands that do not belong to any cog
        self.default_cog_name = default_cog_name

    async def create_global_help(self, context: Context[ClientT_Co_D], commands: dict[Optional[Cog[ClientT_Co_D]], list[Command[ClientT_Co_D]]]) -> Union[str, SendableEmbed, MessagePayload]:
        blocks = ["```"]
        for cog, cog_commands in commands.items():
            heading = f"{cog.qualified_name if cog else self.default_cog_name}:"
            entries = [
                f"  {command.name} - {command.short_description or 'No description'}"
                for command in cog_commands
            ]
            blocks.append("\n".join([heading, *entries]))
        blocks.append("```")
        return "\n".join(blocks)

    async def create_cog_help(self, context: Context[ClientT_Co_D], cog: Cog[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        entries = [
            f"  {command.name} - {command.short_description or 'No description'}"
            for command in cog.commands
        ]
        return "\n".join(["```", f"{cog.qualified_name}:", *entries, "```"])

    async def create_command_help(self, context: Context[ClientT_Co_D], command: Command[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        lines = ["```", f"{command.name}:", f"  Usage: {command.get_usage()}"]
        if command.aliases:
            lines.append(f"  Aliases: {', '.join(command.aliases)}")
        if command.description:
            lines.append(command.description)
        lines.append("```")
        return "\n".join(lines)

    async def create_group_help(self, context: Context[ClientT_Co_D], group: Group[ClientT_Co_D]) -> Union[str, SendableEmbed, MessagePayload]:
        lines = ["```", f"{group.name}:", f"  Usage: {group.get_usage()}"]
        if group.aliases:
            lines.append(f"  Aliases: {', '.join(group.aliases)}")
        if group.description:
            lines.append(group.description)
        lines.extend(
            f"  {command.name} - {command.short_description or 'No description'}"
            for command in group.commands
        )
        lines.append("```")
        return "\n".join(lines)

    async def handle_no_command_found(self, context: Context[ClientT_Co_D], name: str) -> str:
        return f"Command `{name}` not found."
class HelpCommandImpl(Command[ClientT_Co_D]):
    """The built-in ``help`` command; delegates to :func:`help_command_impl`."""

    def __init__(self, client: ClientT_Co_D):
        self.client = client

        async def run_help(_: Union[ClientT_Co_D, Cog[ClientT_Co_D]], context: Context[ClientT_Co_D], *args: str) -> None:
            # forward to the shared implementation using the context's client
            await help_command_impl(context.client, context, *args)

        super().__init__(callback=run_help, name="help", aliases=[])
        self.description: str | None = "Shows help for a command, cog or the entire bot"
async def help_command_impl(client: ClientT_D, context: Context[ClientT_D], *arguments: str) -> None:
    """Resolve and send help output for ``help [command|group|subcommand ...]``.

    With no arguments the global help is sent; otherwise each argument walks
    one level deeper into a command group, falling back to cog help (or a
    "not found" message) when a name does not match a command.
    """
    help_command = client.help_command
    if not help_command:
        return  # help is disabled for this client

    filtered_commands = await help_command.filter_commands(context, client.commands)
    commands = await help_command.group_commands(context, filtered_commands)

    if not arguments:
        payload = await help_command.create_global_help(context, commands)
    else:
        # Walk the argument path, descending into groups one name at a time.
        parent: ClientT_D | Group[ClientT_D] = client
        for param in arguments:
            try:
                command = parent.get_command(param)
            except LookupError:
                # Not a command at this level — try a cog with this name.
                try:
                    cog = client.get_cog(param)
                except LookupError:
                    payload = await help_command.handle_no_command_found(context, param)
                else:
                    payload = await help_command.create_cog_help(context, cog)
                finally:
                    # NOTE(review): ``break`` inside ``finally`` also discards
                    # any exception raised by the two help builders above —
                    # confirm this is intentional.
                    break
            if isinstance(command, Group):
                command = cast(Group[ClientT_D], command)
                parent = command
            else:
                payload = await help_command.create_command_help(context, command)
                break
        else:
            # The loop ran out of arguments while still inside a group chain:
            # show help for whatever the last argument resolved to.
            if TYPE_CHECKING:
                command = cast(Command[ClientT_D], ...)
            if isinstance(command, Group):
                payload = await help_command.create_group_help(context, command)
            else:
                payload = await help_command.create_command_help(context, command)

    if TYPE_CHECKING:
        # every runtime path above binds ``payload``; this reassures the checker
        payload = cast(MessagePayload, ...)

    # Normalise the hook's return value into send() keyword arguments.
    msg_payload: MessagePayload
    if isinstance(payload, str):
        msg_payload = {"content": payload}
    elif isinstance(payload, SendableEmbed):
        msg_payload = {"embed": payload, "content": " "}
    else:
        msg_payload = payload

    message = await help_command.send_help_command(context, msg_payload)
    await help_command.handle_message(context, message)
from __future__ import annotations
import mimetypes
from typing import TYPE_CHECKING
from .enums import AssetType
from .utils import Ulid
if TYPE_CHECKING:
from io import IOBase
from .state import State
from .types import File as FilePayload
__all__ = ("Asset", "PartialAsset")
class Asset(Ulid):
    """A file stored on Revolt's CDN.

    Attributes
    -----------
    id: :class:`str`
        The id of the asset
    tag: :class:`str`
        The tag of the asset, this corresponds to where the asset is used
    size: :class:`int`
        Amount of bytes in the file
    filename: :class:`str`
        The name of the file
    height: Optional[:class:`int`]
        The height of the file if it is an image or video
    width: Optional[:class:`int`]
        The width of the file if it is an image or video
    content_type: :class:`str`
        The content type of the file
    type: :class:`AssetType`
        The type of asset it is
    url: :class:`str`
        The asset's url
    """
    __slots__ = ("state", "id", "tag", "size", "filename", "content_type", "width", "height", "type", "url")

    def __init__(self, data: FilePayload, state: State):
        self.state: State = state
        self.id: str = data['_id']
        self.tag: str = data['tag']
        self.size: int = data['size']
        self.filename: str = data['filename']

        metadata = data['metadata']
        self.height: int | None
        self.width: int | None
        # The two comparisons are spelled out (rather than ``in (...)``)
        # because only explicit ``==`` checks narrow the metadata TypedDict.
        if metadata["type"] == "Image" or metadata["type"] == "Video":
            self.height = metadata["height"]
            self.width = metadata["width"]
        else:
            self.height = None
            self.width = None

        self.content_type: str | None = data["content_type"]
        self.type: AssetType = AssetType(metadata["type"])
        self.url: str = f"{state.api_info['features']['autumn']['url']}/{self.tag}/{self.id}"

    async def read(self) -> bytes:
        """Fetch the file's content as :class:`bytes`."""
        return await self.state.http.request_file(self.url)

    async def save(self, fp: IOBase) -> None:
        """Fetch the file's content and write it into *fp*.

        Parameters
        -----------
        fp: IOBase
            The file-like object to write to.
        """
        data = await self.read()
        fp.write(data)
class PartialAsset(Asset):
    """Partial asset for when we get limited data about the asset

    Attributes
    -----------
    id: :class:`str`
        The id of the asset, this will always be ``"0"``
    size: :class:`int`
        Amount of bytes in the file, this will always be ``0``
    filename: :class:`str`
        The name of the file, this will always be ``""``
    height: Optional[:class:`int`]
        The height of the file if it is an image or video, this will always be ``None``
    width: Optional[:class:`int`]
        The width of the file if it is an image or video, this will always be ``None``
    content_type: Optional[:class:`str`]
        The content type of the file, this is guessed from the url's file extension if it has one
    type: :class:`AssetType`
        The type of asset it is, this will always be ``AssetType.file``
    """
    def __init__(self, url: str, state: State):
        self.state: State = state
        self.id: str = "0"
        self.size: int = 0
        self.filename: str = ""
        self.height: int | None = None
        self.width: int | None = None
        # BUG FIX: guess_extension() maps a MIME type to a file extension,
        # which is the reverse of what is wanted here; guess_type() derives
        # the MIME type from the url's extension (None if unknown).
        self.content_type: str | None = mimetypes.guess_type(url)[0]
        self.type: AssetType = AssetType.file
        self.url: str = url
from __future__ import annotations
from typing import TYPE_CHECKING, Optional
from .enums import SortType
if TYPE_CHECKING:
from .embed import SendableEmbed
from .file import File
from .message import Masquerade, Message, MessageInteractions, MessageReply
from .state import State
__all__ = ("Messageable",)
class Messageable:
    """Base class for all channels that you can send messages in

    Attributes
    -----------
    id: :class:`str`
        The id of the channel
    """
    # Provided by subclasses / mixin users; declared here for typing only.
    state: State

    __slots__ = ()

    async def _get_channel_id(self) -> str:
        # Implemented by subclasses; async so objects that resolve their
        # channel lazily (e.g. user DMs) can fetch it on demand.
        raise NotImplementedError

    async def send(self, content: Optional[str] = None, *, embeds: Optional[list[SendableEmbed]] = None, embed: Optional[SendableEmbed] = None, attachments: Optional[list[File]] = None, replies: Optional[list[MessageReply]] = None, reply: Optional[MessageReply] = None, masquerade: Optional[Masquerade] = None, interactions: Optional[MessageInteractions] = None) -> Message:
        """Sends a message in a channel, you must send at least one of either `content`, `embeds` or `attachments`

        Parameters
        -----------
        content: Optional[:class:`str`]
            The content of the message, this will not include system message's content
        attachments: Optional[list[:class:`File`]]
            The attachments of the message
        embed: Optional[:class:`SendableEmbed`]
            The embed to send with the message
        embeds: Optional[list[:class:`SendableEmbed`]]
            The embeds to send with the message
        replies: Optional[list[:class:`MessageReply`]]
            The list of messages to reply to.
        masquerade: Optional[:class:`Masquerade`]
            The masquerade for the message, this can overwrite the username and avatar shown
        interactions: Optional[:class:`MessageInteractions`]
            The interactions for the message

        Returns
        --------
        :class:`Message`
            The message that was just sent
        """
        # The singular ``embed``/``reply`` conveniences replace (not extend)
        # the corresponding list parameters when both are given.
        if embed:
            embeds = [embed]
        if reply:
            replies = [reply]
        embed_payload = [embed.to_dict() for embed in embeds] if embeds else None
        reply_payload = [reply.to_dict() for reply in replies] if replies else None
        masquerade_payload = masquerade.to_dict() if masquerade else None
        interactions_payload = interactions.to_dict() if interactions else None
        message = await self.state.http.send_message(await self._get_channel_id(), content, embed_payload, attachments, reply_payload, masquerade_payload, interactions_payload)
        # Route through the state so the new message lands in the cache.
        return self.state.add_message(message)

    async def fetch_message(self, message_id: str) -> Message:
        """Fetches a message from the channel

        Parameters
        -----------
        message_id: :class:`str`
            The id of the message you want to fetch

        Returns
        --------
        :class:`Message`
            The message with the matching id
        """
        from .message import Message  # local import avoids a circular import

        payload = await self.state.http.fetch_message(await self._get_channel_id(), message_id)
        return Message(payload, self.state)

    async def history(self, *, sort: SortType = SortType.latest, limit: int = 100, before: Optional[str] = None, after: Optional[str] = None, nearby: Optional[str] = None) -> list[Message]:
        """Fetches multiple messages from the channel's history

        Parameters
        -----------
        sort: :class:`SortType`
            The order to sort the messages in
        limit: :class:`int`
            How many messages to fetch
        before: Optional[:class:`str`]
            The id of the message which should come *before* all the messages to be fetched
        after: Optional[:class:`str`]
            The id of the message which should come *after* all the messages to be fetched
        nearby: Optional[:class:`str`]
            The id of the message which should be nearby all the messages to be fetched

        Returns
        --------
        list[:class:`Message`]
            The messages found in order of the sort parameter
        """
        from .message import Message  # local import avoids a circular import

        payloads = await self.state.http.fetch_messages(await self._get_channel_id(), sort=sort, limit=limit, before=before, after=after, nearby=nearby)
        return [Message(payload, self.state) for payload in payloads]

    async def search(self, query: str, *, sort: SortType = SortType.latest, limit: int = 100, before: Optional[str] = None, after: Optional[str] = None) -> list[Message]:
        """searches the channel for a query

        Parameters
        -----------
        query: :class:`str`
            The query to search for in the channel
        sort: :class:`SortType`
            The order to sort the messages in
        limit: :class:`int`
            How many messages to fetch
        before: Optional[:class:`str`]
            The id of the message which should come *before* all the messages to be fetched
        after: Optional[:class:`str`]
            The id of the message which should come *after* all the messages to be fetched

        Returns
        --------
        list[:class:`Message`]
            The messages found in order of the sort parameter
        """
        from .message import Message  # local import avoids a circular import

        payloads = await self.state.http.search_messages(await self._get_channel_id(), query, sort=sort, limit=limit, before=before, after=after)
        return [Message(payload, self.state) for payload in payloads]

    async def delete_messages(self, messages: list[Message]) -> None:
        """Bulk deletes messages from the channel

        .. note:: The messages must have been sent in the last 7 days.

        Parameters
        -----------
        messages: list[:class:`Message`]
            The messages for deletion, this can be up to 100 messages
        """
        await self.state.http.delete_messages(await self._get_channel_id(), [message.id for message in messages])
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Coroutine, Optional, Union
from revolt.types.message import SystemMessageContent
from .asset import Asset, PartialAsset
from .channel import DMChannel, GroupDMChannel, TextChannel
from .embed import Embed, SendableEmbed, to_embed
from .utils import Ulid
if TYPE_CHECKING:
from .server import Server
from .state import State
from .types import Embed as EmbedPayload
from .types import Interactions as InteractionsPayload
from .types import Masquerade as MasqueradePayload
from .types import Message as MessagePayload
from .types import MessageReplyPayload
from .user import User
from .member import Member
__all__ = (
"Message",
"MessageReply",
"Masquerade",
"MessageInteractions"
)
class Message(Ulid):
    """Represents a message

    Attributes
    -----------
    id: :class:`str`
        The id of the message
    content: :class:`str`
        The content of the message, this will not include system message's content
    attachments: list[:class:`Asset`]
        The attachments of the message
    embeds: list[Union[:class:`WebsiteEmbed`, :class:`ImageEmbed`, :class:`TextEmbed`, :class:`NoneEmbed`]]
        The embeds of the message
    channel: :class:`Messageable`
        The channel the message was sent in
    author: Union[:class:`Member`, :class:`User`]
        The author of the message, will be :class:`User` in DMs
    edited_at: Optional[:class:`datetime.datetime`]
        The time at which the message was edited, will be None if the message has not been edited
    mentions: list[Union[:class:`Member`, :class:`User`]]
        The users or members that where mentioned in the message
    replies: list[:class:`Message`]
        The message's this message has replied to, this may not contain all the messages if they are outside the cache
    reply_ids: list[:class:`str`]
        The message's ids this message has replies to
    reactions: dict[str, list[:class:`User`]]
        The reactions on the message
    interactions: Optional[:class:`MessageInteractions`]
        The interactions on the message, if any
    """
    # BUG FIX: "server_id" and "system_content" are assigned in __init__ but
    # were missing from __slots__, so they only worked via an inherited
    # instance __dict__ (and defeated the point of slots).
    __slots__ = ("state", "id", "content", "system_content", "attachments", "embeds", "channel", "server_id", "author", "edited_at", "mentions", "replies", "reply_ids", "reactions", "interactions")

    def __init__(self, data: MessagePayload, state: State):
        self.state: State = state
        self.id: str = data["_id"]

        content = data.get("content", "")
        # System messages carry a structured payload instead of a plain string.
        self.system_content: SystemMessageContent | None
        if not isinstance(content, str):
            self.system_content = content
            self.content: str = ""
        else:
            self.system_content = None
            self.content = content

        self.attachments: list[Asset] = [Asset(attachment, state) for attachment in data.get("attachments", [])]
        self.embeds: list[Embed] = [to_embed(embed, state) for embed in data.get("embeds", [])]

        channel = state.get_channel(data["channel"])
        assert isinstance(channel, Union[TextChannel, GroupDMChannel, DMChannel])
        self.channel: TextChannel | GroupDMChannel | DMChannel = channel
        self.server_id: str | None = self.channel.server_id

        # In a server the author/mentions resolve to members, in DMs to users.
        self.mentions: list[Member | User]
        if self.server_id:
            author = state.get_member(self.server_id, data["author"])
            self.mentions = [self.server.get_member(member_id) for member_id in data.get("mentions", [])]
        else:
            author = state.get_user(data["author"])
            self.mentions = [state.get_user(member_id) for member_id in data.get("mentions", [])]

        self.author: Member | User = author

        if masquerade := data.get("masquerade"):
            if name := masquerade.get("name"):
                self.author.masquerade_name = name
            if avatar := masquerade.get("avatar"):
                self.author.masquerade_avatar = PartialAsset(avatar, state)

        # BUG FIX: edited_at was previously assigned only when the payload
        # contained an "edited" key, so reading it on an unedited message
        # raised AttributeError instead of returning None as documented.
        self.edited_at: Optional[datetime.datetime]
        if edited_at := data.get("edited"):
            self.edited_at = datetime.datetime.strptime(edited_at, "%Y-%m-%dT%H:%M:%S.%f%z")
        else:
            self.edited_at = None

        # Resolve replied-to messages from the cache where possible, but
        # always keep the raw ids.
        self.replies: list[Message] = []
        self.reply_ids: list[str] = []
        for reply in data.get("replies", []):
            try:
                message = state.get_message(reply)
                self.replies.append(message)
            except LookupError:
                pass
            self.reply_ids.append(reply)

        reactions = data.get("reactions", {})
        self.reactions: dict[str, list[User]] = {}
        for emoji, users in reactions.items():
            self.reactions[emoji] = [self.state.get_user(user_id) for user_id in users]

        self.interactions: MessageInteractions | None
        if interactions := data.get("interactions"):
            self.interactions = MessageInteractions(reactions=interactions.get("reactions"), restrict_reactions=interactions.get("restrict_reactions", False))
        else:
            self.interactions = None

    def _update(self, *, content: Optional[str] = None, embeds: Optional[list[EmbedPayload]] = None, edited: Optional[Union[str, int]] = None):
        # Apply a partial gateway update; only supplied fields change.
        if content is not None:
            self.content = content
        if embeds is not None:
            self.embeds = [to_embed(embed, self.state) for embed in embeds]
        if edited is not None:
            # The API sends either a unix-millisecond timestamp or an ISO string.
            if isinstance(edited, int):
                self.edited_at = datetime.datetime.fromtimestamp(edited / 1000, tz=datetime.timezone.utc)
            else:
                self.edited_at = datetime.datetime.strptime(edited, "%Y-%m-%dT%H:%M:%S.%f%z")

    async def edit(self, *, content: Optional[str] = None, embeds: Optional[list[SendableEmbed]] = None) -> None:
        """Edits the message. The bot can only edit its own message

        Parameters
        -----------
        content: Optional[:class:`str`]
            The new content of the message
        embeds: Optional[list[:class:`SendableEmbed`]]
            The new embeds of the message
        """
        new_embeds = [embed.to_dict() for embed in embeds] if embeds else None
        await self.state.http.edit_message(self.channel.id, self.id, content, new_embeds)

    async def delete(self) -> None:
        """Deletes the message. The bot can only delete its own messages and messages it has permission to delete """
        await self.state.http.delete_message(self.channel.id, self.id)

    def reply(self, *args: Any, mention: bool = False, **kwargs: Any) -> Coroutine[Any, Any, Message]:
        """Replies to this message, equivilant to:

        .. code-block:: python

            await channel.send(..., replies=[MessageReply(message, mention)])
        """
        return self.channel.send(*args, **kwargs, replies=[MessageReply(self, mention)])

    async def add_reaction(self, emoji: str) -> None:
        """Adds a reaction to the message."""
        await self.state.http.add_reaction(self.channel.id, self.id, emoji)

    async def remove_reaction(self, emoji: str, user: Optional[User] = None, remove_all: bool = False) -> None:
        """Removes a reaction (optionally for a specific user, or all users)."""
        await self.state.http.remove_reaction(self.channel.id, self.id, emoji, user.id if user else None, remove_all)

    async def remove_all_reactions(self) -> None:
        """Removes every reaction from the message."""
        await self.state.http.remove_all_reactions(self.channel.id, self.id)

    @property
    def server(self) -> Server:
        """:class:`Server` The server this message was sent in

        Raises
        -------
        :class:`LookupError`
            Raises if the message's channel is not part of a server
        """
        return self.channel.server
class MessageReply:
    """A reply attached to an outgoing message.

    Parameters
    -----------
    message: :class:`Message`
        The message being replied to.
    mention: :class:`bool`
        Whether the reply should ping the replied-to author. Defaults to ``False``.
    """
    __slots__ = ("message", "mention")

    def __init__(self, message: Message, mention: bool = False):
        self.message: Message = message
        self.mention: bool = mention

    def to_dict(self) -> MessageReplyPayload:
        # The API only needs the target message's id and the mention flag.
        return {"id": self.message.id, "mention": self.mention}
class Masquerade:
    """Overrides how the author of a message is displayed.

    Parameters
    -----------
    name: Optional[:class:`str`]
        The name to display for the message
    avatar: Optional[:class:`str`]
        The avatar's url to display for the message
    colour: Optional[:class:`str`]
        The colour of the name, similar to role colours
    """
    __slots__ = ("name", "avatar", "colour")

    def __init__(self, name: Optional[str] = None, avatar: Optional[str] = None, colour: Optional[str] = None):
        self.name: str | None = name
        self.avatar: str | None = avatar
        self.colour: str | None = colour

    def to_dict(self) -> MasqueradePayload:
        # Only include fields that were actually provided (and non-empty).
        payload: MasqueradePayload = {}
        for key in ("name", "avatar", "colour"):
            value = getattr(self, key)
            if value:
                payload[key] = value
        return payload
class MessageInteractions:
    """Preset reactions for a message, optionally restricting users to them.

    Parameters
    -----------
    reactions: Optional[list[:class:`str`]]
        The preset reactions on the message
    restrict_reactions: bool
        Whether or not users can only react with the preset reactions
    """
    __slots__ = ("reactions", "restrict_reactions")

    def __init__(self, *, reactions: Optional[list[str]] = None, restrict_reactions: bool = False):
        self.reactions: list[str] | None = reactions
        self.restrict_reactions: bool = restrict_reactions

    def to_dict(self) -> InteractionsPayload:
        # Omit unset/falsy fields so the API gets a minimal payload.
        payload: InteractionsPayload = {}
        if self.reactions:
            payload["reactions"] = self.reactions
        if self.restrict_reactions:
            payload["restrict_reactions"] = self.restrict_reactions
        return payload
from __future__ import annotations
from typing import TYPE_CHECKING
from .asset import Asset
from .utils import Ulid
if TYPE_CHECKING:
from .state import State
from .channel import Channel
from .server import Server
from .types import Invite as InvitePayload
from .user import User
__all__ = ("Invite",)
class Invite(Ulid):
    """Represents a server invite.

    Attributes
    -----------
    code: :class:`str`
        The code for the invite
    id: :class:`str`
        Alias for :attr:`code`
    server: :class:`Server`
        The server this invite is for
    channel: :class:`Channel`
        The channel this invite is for
    user_name: :class:`str`
        The name of the user who made the invite
    user: Optional[:class:`User`]
        The user who made the invite, this is only set if this was fetched via :meth:`Server.fetch_invites`
    user_avatar: Optional[:class:`Asset`]
        The invite creator's avatar, if any
    member_count: :class:`int`
        The member count of the server this invite is for
    """
    __slots__ = ("state", "code", "id", "server", "channel", "user_name", "user_avatar", "user", "member_count")

    def __init__(self, data: InvitePayload, code: str, state: State):
        self.state: State = state
        self.code: str = code
        self.id: str = code
        self.server: Server = state.get_server(data["server_id"])
        self.channel: Channel = self.server.get_channel(data["channel_id"])
        self.user_name: str = data["user_name"]
        self.user: User | None = None
        self.user_avatar: Asset | None
        if avatar := data.get("user_avatar"):
            self.user_avatar = Asset(avatar, state)
        else:
            self.user_avatar = None
        self.member_count: int = data["member_count"]

    @staticmethod
    def _from_partial(code: str, server: str, creator: str, channel: str, state: State) -> Invite:
        """Build an :class:`Invite` from the partial data in gateway events."""
        invite = Invite.__new__(Invite)
        invite.state = state
        invite.code = code
        # BUG FIX: ``id`` (the Ulid identifier, aliased to the code in
        # __init__) was never assigned here, so partial invites raised
        # AttributeError whenever ``.id`` was accessed.
        invite.id = code
        invite.server = state.get_server(server)
        invite.channel = state.get_channel(channel)
        invite.user = state.get_user(creator)
        invite.user_name = invite.user.name
        invite.user_avatar = invite.user.avatar
        invite.member_count = len(invite.server.members)
        return invite

    async def delete(self) -> None:
        """Deletes the invite"""
        await self.state.http.delete_invite(self.code)
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional, Union
from .asset import Asset
from .enums import ChannelType
from .messageable import Messageable
from .permissions import Permissions, PermissionsOverwrite
from .utils import Missing, Ulid
if TYPE_CHECKING:
from .message import Message
from .role import Role
from .server import Server
from .state import State
from .types import Channel as ChannelPayload
from .types import DMChannel as DMChannelPayload
from .types import File as FilePayload
from .types import GroupDMChannel as GroupDMChannelPayload
from .types import Overwrite as OverwritePayload
from .types import SavedMessages as SavedMessagesPayload
from .types import ServerChannel as ServerChannelPayload
from .types import TextChannel as TextChannelPayload
from .user import User
__all__ = ("DMChannel", "GroupDMChannel", "SavedMessageChannel", "TextChannel", "VoiceChannel", "Channel", "ServerChannel")
class EditableChannel:
    """Mixin for channels that support the channel-edit endpoint."""
    __slots__ = ()

    # provided by the concrete channel class
    state: State
    id: str

    async def edit(self, **kwargs: Any) -> None:
        """Edits the channel

        Passing ``None`` to the parameters that accept it will remove them.

        Parameters
        -----------
        name: str
            The new name for the channel
        description: Optional[str]
            The new description for the channel
        owner: User
            The new owner for the group dm channel
        icon: Optional[File]
            The new icon for the channel
        nsfw: bool
            Sets whether the channel is nsfw or not
        """
        remove: list[str] = []
        # ``None`` means "clear this field"; an absent key means "leave as-is"
        # (``Missing`` is the absent-key default). BUG FIX: these checks were
        # an if/elif chain, making it impossible to remove the icon and the
        # description in the same call; they are independent. Also use
        # ``is None`` (identity) instead of ``== None``.
        if kwargs.get("icon", Missing) is None:
            remove.append("Icon")
        if kwargs.get("description", Missing) is None:
            remove.append("Description")
        if icon := kwargs.get("icon"):
            # upload the new icon first, then send the resulting asset id
            asset = await self.state.http.upload_file(icon, "icons")
            kwargs["icon"] = asset["id"]
        if owner := kwargs.get("owner"):
            kwargs["owner"] = owner.id
        await self.state.http.edit_channel(self.id, remove, kwargs)
class Channel(Ulid):
    """Base class for all channels

    Attributes
    -----------
    id: :class:`str`
        The id of the channel
    channel_type: ChannelType
        The type of the channel
    server_id: Optional[:class:`str`]
        The id of the server the channel belongs to, if any
    """
    __slots__ = ("state", "id", "channel_type", "server_id")

    def __init__(self, data: ChannelPayload, state: State):
        self.state: State = state
        self.id: str = data["_id"]
        self.channel_type: ChannelType = ChannelType(data["channel_type"])
        # server-bound subclasses overwrite this in their own __init__
        self.server_id: Optional[str] = None

    async def _get_channel_id(self) -> str:
        # Messageable hook: a channel already knows its own id.
        return self.id

    def _update(self, **_: Any) -> None:
        # base channels carry no mutable state to update
        pass

    async def delete(self) -> None:
        """Deletes or closes the channel"""
        await self.state.http.close_channel(self.id)

    @property
    def server(self) -> Server:
        """:class:`Server` The server this channel belongs to

        Raises
        -------
        :class:`LookupError`
            Raises if the channel is not part of a server
        """
        if self.server_id:
            return self.state.get_server(self.server_id)
        raise LookupError

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given channel."""
        return f"<#{self.id}>"
class SavedMessageChannel(Channel, Messageable):
    """The Saved Message Channel"""
    # No extra state: identical to Channel but message-sendable.
    def __init__(self, data: SavedMessagesPayload, state: State):
        super().__init__(data, state)
class DMChannel(Channel, Messageable):
    """A DM channel

    Attributes
    -----------
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    """
    __slots__ = ("last_message_id", "recipient_ids")

    def __init__(self, data: DMChannelPayload, state: State):
        super().__init__(data, state)
        self.recipient_ids: tuple[str, str] = tuple(data["recipients"])
        self.last_message_id: str | None = data.get("last_message_id")

    @property
    def recipients(self) -> tuple[User, User]:
        """Both participants of the DM, resolved from the cache."""
        first, second = self.recipient_ids
        return (self.state.get_user(first), self.state.get_user(second))

    @property
    def recipient(self) -> User:
        """The participant that is not the current user."""
        me = self.state.user_id
        other = self.recipient_ids[1] if self.recipient_ids[0] == me else self.recipient_ids[0]
        return self.state.get_user(other)

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel
        """
        if not self.last_message_id:
            raise LookupError
        return self.state.get_message(self.last_message_id)
class GroupDMChannel(Channel, Messageable, EditableChannel):
    """A group DM channel

    Attributes
    -----------
    recipients: list[:class:`User`]
        The recipients of the group dm channel
    name: :class:`str`
        The name of the group dm channel
    owner: :class:`User`
        The user who created the group dm channel
    icon: Optional[:class:`Asset`]
        The icon of the group dm channel
    permissions: :class:`Permissions`
        The permissions of the users inside the group dm channel
    description: Optional[:class:`str`]
        The description of the channel, if any
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    """
    __slots__ = ("recipient_ids", "name", "owner_id", "permissions", "icon", "description", "last_message_id")

    def __init__(self, data: GroupDMChannelPayload, state: State):
        super().__init__(data, state)
        self.recipient_ids: list[str] = data["recipients"]
        self.name: str = data["name"]
        self.owner_id: str = data["owner"]
        self.description: str | None = data.get("description")
        self.last_message_id: str | None = data.get("last_message_id")
        icon = data.get("icon")
        self.icon: Asset | None = Asset(icon, state) if icon else None
        self.permissions: Permissions = Permissions(data.get("permissions", 0))

    def _update(self, *, name: Optional[str] = None, recipients: Optional[list[str]] = None, description: Optional[str] = None) -> None:
        # Apply only the fields present in the gateway event.
        if name is not None:
            self.name = name
        if recipients is not None:
            self.recipient_ids = recipients
        if description is not None:
            self.description = description

    @property
    def recipients(self) -> list[User]:
        """The members of the group, resolved from the cache."""
        return [self.state.get_user(uid) for uid in self.recipient_ids]

    @property
    def owner(self) -> User:
        """The creator of the group, resolved from the cache."""
        return self.state.get_user(self.owner_id)

    async def set_default_permissions(self, permissions: Permissions) -> None:
        """Sets the default permissions for a group.

        Parameters
        -----------
        permissions: :class:`Permissions`
            The new default group permissions
        """
        await self.state.http.set_group_channel_default_permissions(self.id, permissions.value)

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel
        """
        if not self.last_message_id:
            raise LookupError
        return self.state.get_message(self.last_message_id)
class ServerChannel(Channel):
    """Base class for all server channels

    Attributes
    -----------
    server_id: :class:`str`
        The id of the server this channel belongs to
    name: :class:`str`
        The name of the channel
    description: Optional[:class:`str`]
        The description of the channel, if any
    nsfw: bool
        Whether the channel is marked nsfw
    default_permissions: :class:`PermissionsOverwrite`
        The default permission overwrite for all users in the channel
    """
    def __init__(self, data: ServerChannelPayload, state: State):
        super().__init__(data, state)
        self.server_id: str = data["server"]
        self.name: str = data["name"]
        self.description: Optional[str] = data.get("description")
        self.nsfw: bool = data.get("nsfw", False)
        self.active: bool = False
        self.default_permissions: PermissionsOverwrite = PermissionsOverwrite._from_overwrite(data.get("default_permissions", {"a": 0, "d": 0}))
        # Per-role overwrites, keyed exactly as the payload keys them.
        self.permissions: dict[str, PermissionsOverwrite] = {
            role_id: PermissionsOverwrite._from_overwrite(overwrite)
            for role_id, overwrite in data.get("role_permissions", {}).items()
        }
        icon = data.get("icon")
        self.icon: Asset | None = Asset(icon, state) if icon else None

    async def set_default_permissions(self, permissions: PermissionsOverwrite) -> None:
        """Sets the default permission overwrite for the channel.

        Parameters
        -----------
        permissions: :class:`PermissionsOverwrite`
            The new default channel permissions
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_guild_channel_default_permissions(self.id, allow.value, deny.value)

    async def set_role_permissions(self, role: Role, permissions: PermissionsOverwrite) -> None:
        """Sets the permission overwrite for a role in the channel.

        Parameters
        -----------
        role: :class:`Role`
            The role the overwrite applies to
        permissions: :class:`PermissionsOverwrite`
            The new channel permissions for that role
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_guild_channel_role_permissions(self.id, role.id, allow.value, deny.value)

    def _update(self, *, name: Optional[str] = None, description: Optional[str] = None, icon: Optional[FilePayload] = None, nsfw: Optional[bool] = None, active: Optional[bool] = None, role_permissions: Optional[dict[str, OverwritePayload]] = None, default_permissions: Optional[OverwritePayload] = None):
        # Apply only the fields present in the gateway event.
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description
        if icon is not None:
            self.icon = Asset(icon, self.state)
        if nsfw is not None:
            self.nsfw = nsfw
        if active is not None:
            self.active = active
        if role_permissions is not None:
            self.permissions = {
                role_id: PermissionsOverwrite._from_overwrite(overwrite)
                for role_id, overwrite in role_permissions.items()
            }
        if default_permissions is not None:
            self.default_permissions = PermissionsOverwrite._from_overwrite(default_permissions)
class TextChannel(ServerChannel, Messageable, EditableChannel):
    """A text channel

    Subclasses :class:`ServerChannel` and :class:`Messageable`

    Attributes
    -----------
    name: :class:`str`
        The name of the text channel
    server_id: :class:`str`
        The id of the server this text channel belongs to
    last_message_id: Optional[:class:`str`]
        The id of the last message in this channel, if any
    default_permissions: :class:`PermissionsOverwrite`
        The default permission overwrite for all users in the text channel
    permissions: dict[:class:`str`, :class:`PermissionsOverwrite`]
        A dictionary of role id's to the permission overwrite of that role in the text channel
    icon: Optional[:class:`Asset`]
        The icon of the text channel, if any
    description: Optional[:class:`str`]
        The description of the channel, if any
    """
    # NOTE(review): __slots__ lists "overwrites" but the attribute assigned by
    # ServerChannel.__init__ is "permissions"; this only works because the base
    # classes lack __slots__ themselves - confirm which name was intended.
    __slots__ = ("name", "description", "last_message_id", "default_permissions", "icon", "overwrites")

    def __init__(self, data: TextChannelPayload, state: State):
        super().__init__(data, state)
        self.last_message_id: str | None = data.get("last_message_id")

    async def _get_channel_id(self) -> str:
        # Messageable hook: messages go directly to this channel's own id
        return self.id

    @property
    def last_message(self) -> Message:
        """Gets the last message from the channel, shorthand for `client.get_message(channel.last_message_id)`

        Returns
        --------
        :class:`Message` the last message in the channel

        Raises
        -------
        LookupError
            If the channel has no last message, or it is not present in the message cache
        """
        if not self.last_message_id:
            raise LookupError
        return self.state.get_message(self.last_message_id)
class VoiceChannel(ServerChannel, EditableChannel):
    """A voice channel

    Subclasses :class:`ServerChannel`

    Attributes
    -----------
    name: :class:`str`
        The name of the voice channel
    server_id: :class:`str`
        The id of the server this voice channel belongs to
    default_permissions: :class:`PermissionsOverwrite`
        The default permission overwrite for all users in the voice channel
    permissions: dict[:class:`str`, :class:`PermissionsOverwrite`]
        A dictionary of role id's to the permission overwrite of that role in the voice channel
    icon: Optional[:class:`Asset`]
        The icon of the voice channel, if any
    description: Optional[:class:`str`]
        The description of the channel, if any
    """
def channel_factory(data: ChannelPayload, state: State) -> Union[DMChannel, GroupDMChannel, SavedMessageChannel, TextChannel, VoiceChannel]:
    """Constructs the concrete channel class for a raw channel payload.

    Parameters
    -----------
    data: :class:`ChannelPayload`
        The raw channel data; ``data["channel_type"]`` selects the class
    state: :class:`State`
        The global state passed to the constructed channel

    Returns
    --------
    Union[:class:`DMChannel`, :class:`GroupDMChannel`, :class:`SavedMessageChannel`, :class:`TextChannel`, :class:`VoiceChannel`]
        The channel instance matching the payload's ``channel_type``

    Raises
    -------
    ValueError
        If ``channel_type`` is not a recognised channel type
    """
    channel_type = data["channel_type"]
    if channel_type == "SavedMessages":
        return SavedMessageChannel(data, state)
    elif channel_type == "DirectMessage":
        return DMChannel(data, state)
    elif channel_type == "Group":
        return GroupDMChannel(data, state)
    elif channel_type == "TextChannel":
        return TextChannel(data, state)
    elif channel_type == "VoiceChannel":
        return VoiceChannel(data, state)
    else:
        # ValueError subclasses Exception, so existing `except Exception` callers still work;
        # including the offending type makes unknown-payload bugs diagnosable
        raise ValueError(f"Unknown channel type {channel_type!r}")
from __future__ import annotations
import asyncio
import logging
from typing import TYPE_CHECKING, Any, Callable, Literal, Optional, Union, cast
import aiohttp
from .channel import (DMChannel, GroupDMChannel, SavedMessageChannel,
TextChannel, VoiceChannel, channel_factory)
from .http import HttpClient
from .invite import Invite
from .message import Message
from .state import State
from .utils import Missing, Ulid
from .websocket import WebsocketHandler
try:
import ujson as json
except ImportError:
import json
if TYPE_CHECKING:
from .channel import Channel
from .emoji import Emoji
from .file import File
from .server import Server
from .types import ApiInfo
from .user import User
__all__ = ("Client",)
logger: logging.Logger = logging.getLogger("revolt")
class Client:
    """The client for interacting with revolt

    Parameters
    -----------
    session: :class:`aiohttp.ClientSession`
        The aiohttp session to use for http requests and the websocket
    token: :class:`str`
        The bot's token
    api_url: :class:`str`
        The api url for the revolt instance you are connecting to, by default it uses the official instance hosted at revolt.chat
    max_messages: :class:`int`
        The max amount of messages stored in the cache, by default this is 5k
    bot: :class:`bool`
        Whether the token belongs to a bot account (defaults to ``True``)
    """
    def __init__(self, session: aiohttp.ClientSession, token: str, *, api_url: str = "https://api.revolt.chat", max_messages: int = 5000, bot: bool = True):
        self.session: aiohttp.ClientSession = session
        self.token: str = token
        self.api_url: str = api_url
        self.max_messages: int = max_messages
        self.bot: bool = bot

        # assigned in `start`, once the api info has been fetched
        self.api_info: ApiInfo
        self.http: HttpClient
        self.state: State
        self.websocket: WebsocketHandler

        # event name -> [(check, future)] pairs registered by `wait_for`
        self.listeners: dict[str, list[tuple[Callable[..., bool], asyncio.Future[Any]]]] = {}
        super().__init__()

    def dispatch(self, event: str, *args: Any) -> None:
        """Dispatch an event, this is typically used for testing and internals.

        Parameters
        ----------
        event: :class:`str`
            The name of the event to dispatch, not including `on_`
        args: :class:`Any`
            The arguments passed to the event
        """
        # Resolve matching wait_for listeners, but keep the ones whose check
        # did not match. Previously every listener was popped and discarded,
        # so `wait_for(..., check=...)` hung forever if an unrelated event of
        # the same name fired first.
        remaining: list[tuple[Callable[..., bool], asyncio.Future[Any]]] = []
        for check, future in self.listeners.pop(event, []):
            if check(*args):
                future.set_result(args[0] if len(args) == 1 else args)
            else:
                remaining.append((check, future))
        if remaining:
            self.listeners.setdefault(event, []).extend(remaining)

        func = getattr(self, f"on_{event}", None)
        if func:
            asyncio.create_task(func(*args))

    async def get_api_info(self) -> ApiInfo:
        """Fetches the instance's api info from the root endpoint."""
        async with self.session.get(self.api_url) as resp:
            return json.loads(await resp.text())

    async def start(self) -> None:
        """Starts the client"""
        api_info = await self.get_api_info()
        self.api_info = api_info
        self.http = HttpClient(self.session, self.token, self.api_url, self.api_info, self.bot)
        self.state = State(self.http, api_info, self.max_messages)
        self.websocket = WebsocketHandler(self.session, self.token, api_info["ws"], self.dispatch, self.state)
        await self.websocket.start()

    async def stop(self) -> None:
        """Closes the websocket connection; only valid after :meth:`start`."""
        await self.websocket.websocket.close()

    def get_user(self, id: str) -> User:
        """Gets a user from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the user

        Returns
        --------
        :class:`User`
            The user
        """
        return self.state.get_user(id)

    def get_channel(self, id: str) -> Channel:
        """Gets a channel from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the channel

        Returns
        --------
        :class:`Channel`
            The channel
        """
        return self.state.get_channel(id)

    def get_server(self, id: str) -> Server:
        """Gets a server from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the server

        Returns
        --------
        :class:`Server`
            The server
        """
        return self.state.get_server(id)

    async def wait_for(self, event: str, *, check: Optional[Callable[..., bool]] = None, timeout: Optional[float] = None) -> Any:
        """Waits for an event

        Parameters
        -----------
        event: :class:`str`
            The name of the event to wait for, without the `on_`
        check: Optional[Callable[..., :class:`bool`]]
            A function that says what event to wait_for, this function takes the same parameters as the event you are waiting for and should return a bool saying if that is the event you want
        timeout: Optional[:class:`float`]
            Time in seconds to wait for the event. By default it waits forever

        Raises
        -------
        asyncio.TimeoutError
            If timeout is provided and it was reached

        Returns
        --------
        Any
            The parameters of the event
        """
        if not check:
            check = lambda *_: True

        future = asyncio.get_running_loop().create_future()
        self.listeners.setdefault(event, []).append((check, future))

        return await asyncio.wait_for(future, timeout)

    @property
    def user(self) -> User:
        """:class:`User` the user corresponding to the client"""
        user = self.websocket.user
        assert user
        return user

    @property
    def users(self) -> list[User]:
        """list[:class:`User`] All users the client can see"""
        return list(self.state.users.values())

    @property
    def servers(self) -> list[Server]:
        """list[:class:`Server`] All servers the client can see"""
        return list(self.state.servers.values())

    @property
    def global_emojis(self) -> list[Emoji]:
        """list[:class:`Emoji`] All cached global emojis"""
        return self.state.global_emojis

    async def fetch_user(self, user_id: str) -> User:
        """Fetches a user

        Parameters
        -----------
        user_id: :class:`str`
            The id of the user you are fetching

        Returns
        --------
        :class:`User`
            The user with the matching id
        """
        payload = await self.http.fetch_user(user_id)
        return User(payload, self.state)

    async def fetch_dm_channels(self) -> list[Union[DMChannel, GroupDMChannel]]:
        """Fetches all dm channels the client has made

        Returns
        --------
        list[Union[:class:`DMChannel`, :class:`GroupDMChannel`]]
            A list of :class:`DMChannel` or :class:`GroupDMChannel`
        """
        channel_payloads = await self.http.fetch_dm_channels()
        return cast(list[Union[DMChannel, GroupDMChannel]], [channel_factory(payload, self.state) for payload in channel_payloads])

    async def fetch_channel(self, channel_id: str) -> Union[DMChannel, GroupDMChannel, SavedMessageChannel, TextChannel, VoiceChannel]:
        """Fetches a channel

        Parameters
        -----------
        channel_id: :class:`str`
            The id of the channel

        Returns
        --------
        Union[:class:`DMChannel`, :class:`GroupDMChannel`, :class:`SavedMessageChannel`, :class:`TextChannel`, :class:`VoiceChannel`]
            The channel with the matching id
        """
        payload = await self.http.fetch_channel(channel_id)
        return channel_factory(payload, self.state)

    async def fetch_server(self, server_id: str) -> Server:
        """Fetches a server

        Parameters
        -----------
        server_id: :class:`str`
            The id of the server you are fetching

        Returns
        --------
        :class:`Server`
            The server with the matching id
        """
        payload = await self.http.fetch_server(server_id)
        return Server(payload, self.state)

    async def fetch_invite(self, code: str) -> Invite:
        """Fetches an invite

        Parameters
        -----------
        code: :class:`str`
            The code of the invite you are fetching

        Returns
        --------
        :class:`Invite`
            The invite with the matching code
        """
        payload = await self.http.fetch_invite(code)
        return Invite(payload, code, self.state)

    def get_message(self, message_id: str) -> Message:
        """Gets a message from the cache

        Parameters
        -----------
        message_id: :class:`str`
            The id of the message you are getting

        Returns
        --------
        :class:`Message`
            The message with the matching id

        Raises
        -------
        LookupError
            This raises if the message is not found in the cache
        """
        for message in self.state.messages:
            if message.id == message_id:
                return message

        raise LookupError

    async def edit_self(self, **kwargs: Any) -> None:
        """Edits the client's own user

        Parameters
        -----------
        avatar: Optional[:class:`File`]
            The avatar to change to, passing in ``None`` will remove the avatar
        """
        # `avatar=None` means "remove the avatar"; the api expects it in `remove`
        # rather than as a null value in the payload
        if kwargs.get("avatar", Missing) is None:
            del kwargs["avatar"]
            remove = ["Avatar"]
        else:
            remove = None

        await self.state.http.edit_self(remove, kwargs)

    async def edit_status(self, **kwargs: Any) -> None:
        """Edits the client's own status

        Parameters
        -----------
        presence: :class:`PresenceType`
            The presence to change to
        text: Optional[:class:`str`]
            The text to change the status to, passing in ``None`` will remove the status
        """
        if kwargs.get("text", Missing) is None:
            del kwargs["text"]
            remove = ["StatusText"]
        else:
            remove = None

        if presence := kwargs.get("presence"):
            kwargs["presence"] = presence.value

        await self.state.http.edit_self(remove, {"status": kwargs})

    async def edit_profile(self, **kwargs: Any) -> None:
        """Edits the client's own profile

        Parameters
        -----------
        content: Optional[:class:`str`]
            The new content for the profile, passing in ``None`` will remove the profile content
        background: Optional[:class:`File`]
            The new background for the profile, passing in ``None`` will remove the profile background
        """
        remove: list[str] = []

        if kwargs.get("content", Missing) is None:
            del kwargs["content"]
            remove.append("ProfileContent")

        if kwargs.get("background", Missing) is None:
            del kwargs["background"]
            remove.append("ProfileBackground")

        await self.state.http.edit_self(remove, {"profile": kwargs})

    async def fetch_emoji(self, emoji_id: str) -> Emoji:
        """Fetches an emoji

        Parameters
        -----------
        emoji_id: :class:`str`
            The id of the emoji

        Returns
        --------
        :class:`Emoji`
            The emoji with the corresponding id
        """
        emoji = await self.state.http.fetch_emoji(emoji_id)
        return Emoji(emoji, self.state)

    async def upload_file(self, file: File, tag: Literal['attachments', 'avatars', 'backgrounds', 'icons', 'banners', 'emojis']) -> Ulid:
        """Uploads a file to revolt

        Parameters
        -----------
        file: :class:`File`
            The file to upload
        tag: :class:`str`
            The type of file to upload, this should a string of either `'attachments'`, `'avatars'`, `'backgrounds'`, `'icons'`, `'banners'` or `'emojis'`

        Returns
        --------
        :class:`Ulid`
            The id of the file that was uploaded
        """
        asset = await self.http.upload_file(file, tag)
        ulid = Ulid()
        ulid.id = asset["id"]
        return ulid
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from .permissions import Overwrite, PermissionsOverwrite
from .utils import Missing, Ulid
if TYPE_CHECKING:
from .server import Server
from .state import State
from .types import Role as RolePayload
__all__ = ("Role",)
class Role(Ulid):
    """Represents a role

    Attributes
    -----------
    id: :class:`str`
        The id of the role
    name: :class:`str`
        The name of the role
    colour: Optional[:class:`str`]
        The colour of the role
    hoist: :class:`bool`
        Whether members with the role will display seperate from everyone else
    rank: :class:`int`
        The position of the role in the role heirarchy
    server: :class:`Server`
        The server the role belongs to
    permissions: :class:`PermissionsOverwrite`
        The permission overwrite for the role
    """
    __slots__: tuple[str, ...] = ("id", "name", "colour", "hoist", "rank", "state", "server", "permissions")

    def __init__(self, data: RolePayload, role_id: str, server: Server, state: State):
        self.state: State = state
        self.id: str = role_id
        self.name: str = data["name"]
        self.colour: str | None = data.get("colour", None)
        self.hoist: bool = data.get("hoist", False)
        self.rank: int = data["rank"]
        self.server: Server = server
        # "a"/"d" are the raw allow/deny bitfields; default to an empty overwrite
        self.permissions: PermissionsOverwrite = PermissionsOverwrite._from_overwrite(data.get("permissions", {"a": 0, "d": 0}))

    @property
    def color(self) -> str | None:
        """Optional[:class:`str`]: Alias for :attr:`colour`."""
        return self.colour

    async def set_permissions_overwrite(self, *, permissions: PermissionsOverwrite) -> None:
        """Sets the permission overwrite for the role in the server.

        Parameters
        -----------
        permissions: :class:`PermissionsOverwrite`
            The new permission overwrite for the role
        """
        allow, deny = permissions.to_pair()
        await self.state.http.set_server_role_permissions(self.server.id, self.id, allow.value, deny.value)

    def _update(self, *, name: Optional[str] = None, colour: Optional[str] = None, hoist: Optional[bool] = None, rank: Optional[int] = None, permissions: Optional[Overwrite] = None) -> None:
        # internal cache update from gateway events; only provided fields change
        if name is not None:
            self.name = name
        if colour is not None:
            self.colour = colour
        if hoist is not None:
            self.hoist = hoist
        if rank is not None:
            self.rank = rank
        if permissions is not None:
            self.permissions = PermissionsOverwrite._from_overwrite(permissions)

    async def delete(self) -> None:
        """Deletes the role"""
        await self.state.http.delete_role(self.server.id, self.id)

    async def edit(self, **kwargs: Any) -> None:
        """Edits the role

        Parameters
        -----------
        name: :class:`str`
            The name of the role
        colour: Optional[:class:`str`]
            The colour of the role; passing in ``None`` removes the colour
        hoist: :class:`bool`
            Whether the role should make the member display seperately in the member list
        rank: :class:`int`
            The position of the role
        """
        # colour=None means "remove the colour" via the api's `remove` list.
        # NOTE(review): unlike Client.edit_self, the None value is left inside
        # kwargs here - confirm the api ignores it when "Colour" is removed.
        if kwargs.get("colour", Missing) is None:
            remove = ["Colour"]
        else:
            remove = None

        await self.state.http.edit_role(self.server.id, self.id, remove, kwargs)
import datetime
import inspect
from contextlib import asynccontextmanager
from operator import attrgetter
from typing import Any, Callable, Coroutine, Iterable, Literal, TypeVar, Union
import ulid
from aiohttp import ClientSession
from typing_extensions import ParamSpec
__all__ = ("Missing", "copy_doc", "maybe_coroutine", "get", "client_session")
class _Missing:
def __repr__(self) -> str:
return "<Missing>"
def __bool__(self) -> Literal[False]:
return False
Missing: _Missing = _Missing()
T = TypeVar("T")

def copy_doc(from_t: T) -> Callable[[T], T]:
    """Decorator factory that copies ``from_t``'s docstring onto the decorated object.

    Example: ``@copy_doc(base.method)`` gives the decorated function the same
    ``__doc__`` as ``base.method``.
    """
    def decorator(target: T) -> T:
        target.__doc__ = from_t.__doc__
        return target

    return decorator
R_T = TypeVar("R_T")
P = ParamSpec("P")
# it is impossible to type this function correctly as typeguard does not narrow for the negative case,
# so `value` would stay being a union even after the if statement (PEP 647 - "The type is not narrowed in the negative case")
# see typing#926, typing#930, typing#996
async def maybe_coroutine(func: Callable[P, Union[R_T, Coroutine[Any, Any, R_T]]], *args: P.args, **kwargs: P.kwargs) -> R_T:
value = func(*args, **kwargs)
if inspect.isawaitable(value):
value = await value
return value # type: ignore
class Ulid:
    """Mixin for models identified by a ULID string stored in ``id``."""
    # the ULID string identifying this object; set by the subclass
    id: str

    def created_at(self) -> datetime.datetime:
        """Returns the creation time encoded in the ULID (millisecond precision)."""
        return ulid.from_str(self.id).timestamp().datetime
def get(iterable: Iterable[T], **attrs: Any) -> T:
    """Return the first element of *iterable* whose attributes match **attrs**.

    Examples
    ---------
    .. code-block:: python
        :emphasize-lines: 3

        from revolt import utils
        channel = utils.get(server.channels, name="General")
        await channel.send("Hello general chat.")

    Parameters
    -----------
    iterable: Iterable
        The values to search though
    **attrs: Any
        The attributes to check; use ``__`` in a keyword for nested attribute lookups

    Returns
    --------
    Any
        The first value whose attributes all match

    Raises
    -------
    LookupError
        Raises when none of the values in the iterable matches the attributes
    """
    # pre-build one attrgetter per requested attribute ("__" denotes nesting)
    predicates = [
        (attrgetter(name.replace("__", ".")), expected)
        for name, expected in attrs.items()
    ]

    for candidate in iterable:
        if all(getter(candidate) == expected for getter, expected in predicates):
            return candidate

    raise LookupError
@asynccontextmanager
async def client_session():
    """A context manager that creates a new aiohttp.ClientSession() and closes it when exiting the context.

    Examples
    ---------
    .. code-block:: python
        :emphasize-lines: 3

        async def main():
            async with client_session() as session:
                client = revolt.Client(session, "TOKEN")
                await client.start()

        asyncio.run(main())
    """
    session = ClientSession()
    try:
        yield session
    finally:
        # always close the session, even if the body raised
        await session.close()
from __future__ import annotations
from typing import TYPE_CHECKING, NamedTuple, Optional, Union
from weakref import WeakValueDictionary
from .asset import Asset, PartialAsset
from .channel import DMChannel, GroupDMChannel
from .enums import PresenceType, RelationshipType
from .flags import UserBadges
from .messageable import Messageable
from .permissions import UserPermissions
from .utils import Ulid
if TYPE_CHECKING:
from .member import Member
from .state import State
from .types import File
from .types import Status as StatusPayload
from .types import User as UserPayload
from .types import UserProfile as UserProfileData
from .server import Server
__all__ = ("User", "Status", "Relation", "UserProfile")
class Relation(NamedTuple):
    """A namedtuple representing a relation between the bot and a user"""
    # the kind of relationship (friend, blocked, ...)
    type: RelationshipType
    # the user on the other end of the relationship
    user: User
class Status(NamedTuple):
    """A namedtuple representing a users status"""
    # the custom status text, if set
    text: Optional[str]
    # the presence (online/idle/...), if set
    presence: Optional[PresenceType]
class UserProfile(NamedTuple):
    """A namedtuple representing a users profile"""
    # the profile's text content, if set
    content: Optional[str]
    # the profile's background image, if set
    background: Optional[Asset]
class User(Messageable, Ulid):
    """Represents a user

    Attributes
    -----------
    id: :class:`str`
        The user's id
    discriminator: :class:`str`
        The user's discriminator
    display_name: Optional[:class:`str`]
        The user's display name if they have one
    bot: :class:`bool`
        Whether or not the user is a bot
    owner_id: Optional[:class:`str`]
        The bot's owner id if the user is a bot
    badges: :class:`UserBadges`
        The users badges
    online: :class:`bool`
        Whether or not the user is online
    flags: :class:`int`
        The user flags
    relations: list[:class:`Relation`]
        A list of the users relations
    relationship: Optional[:class:`RelationshipType`]
        The relationship between the user and the bot
    status: Optional[:class:`Status`]
        The users status
    dm_channel: Optional[:class:`DMChannel`]
        The dm channel between the client and the user, this will only be set if the client has dm'ed the user or :meth:`User.open_dm` was run
    privileged: :class:`bool`
        Whether the user is privileged
    """
    __flattern_attributes__: tuple[str, ...] = ("id", "discriminator", "display_name", "bot", "owner_id", "badges", "online", "flags", "relations", "relationship", "status", "masquerade_avatar", "masquerade_name", "original_name", "original_avatar", "profile", "dm_channel", "privileged")
    __slots__: tuple[str, ...] = (*__flattern_attributes__, "state", "_members")

    def __init__(self, data: UserPayload, state: State):
        self.state = state
        # all member versions of this user are tracked here so user-level
        # updates can be propagated without scanning every server
        self._members: WeakValueDictionary[str, Member] = WeakValueDictionary()
        self.id: str = data["_id"]
        self.discriminator = data["discriminator"]
        self.display_name = data.get("display_name")
        self.original_name: str = data["username"]
        self.dm_channel: DMChannel | None = None

        bot = data.get("bot")
        self.bot: bool
        self.owner_id: str | None
        if bot:
            self.bot = True
            self.owner_id = bot["owner"]
        else:
            self.bot = False
            self.owner_id = None

        self.badges: UserBadges = UserBadges._from_value(data.get("badges", 0))
        self.online: bool = data.get("online", False)
        self.flags: int = data.get("flags", 0)
        self.privileged: bool = data.get("privileged", False)

        avatar = data.get("avatar")
        self.original_avatar: Asset | None = Asset(avatar, state) if avatar else None

        # only relations whose user is already cached are kept
        relations: list[Relation] = []
        for relation in data.get("relations", []):
            user = state.get_user(relation["_id"])
            if user:
                relations.append(Relation(RelationshipType(relation["status"]), user))
        self.relations: list[Relation] = relations

        relationship = data.get("relationship")
        self.relationship: RelationshipType | None = RelationshipType(relationship) if relationship else None

        status = data.get("status")
        self.status: Status | None
        if status:
            presence = status.get("presence")
            self.status = Status(status.get("text"), PresenceType(presence) if presence else None) if status else None
        else:
            self.status = None

        self.profile: Optional[UserProfile] = None
        self.masquerade_avatar: Optional[PartialAsset] = None
        self.masquerade_name: Optional[str] = None

    def get_permissions(self) -> UserPermissions:
        """Gets the permissions for the user

        Returns
        --------
        :class:`UserPermissions`
            The users permissions
        """
        permissions = UserPermissions()

        if self.relationship in [RelationshipType.friend, RelationshipType.user]:
            return UserPermissions.all()
        elif self.relationship in [RelationshipType.blocked, RelationshipType.blocked_other]:
            return UserPermissions(access=True)
        elif self.relationship in [RelationshipType.incoming_friend_request, RelationshipType.outgoing_friend_request]:
            permissions.access = True

        for channel in self.state.channels.values():
            if (isinstance(channel, (GroupDMChannel, DMChannel)) and self.id in channel.recipient_ids) or any(self.id in (m.id for m in server.members) for server in self.state.servers.values()):
                if self.state.me.bot or self.bot:
                    permissions.send_message = True

                permissions.access = True
                permissions.view_profile = True
                # flags are only ever set True here, so the first match fixes
                # the result; breaking avoids re-scanning every server per channel
                break

        return permissions

    def has_permissions(self, **permissions: bool) -> bool:
        """Computes if the user has the specified permissions

        Parameters
        -----------
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the user does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        perms = self.get_permissions()

        return all([getattr(perms, key, False) == value for key, value in permissions.items()])

    async def _get_channel_id(self):
        # NOTE(review): this returns the user's id, not the opened dm channel's
        # id (self.dm_channel.id) - confirm the api routes dms by user id
        if not self.dm_channel:
            payload = await self.state.http.open_dm(self.id)
            self.dm_channel = DMChannel(payload, self.state)

        return self.id

    @property
    def owner(self) -> User:
        """:class:`User` the owner of the bot account"""
        if not self.owner_id:
            raise LookupError

        return self.state.get_user(self.owner_id)

    @property
    def name(self) -> str:
        """:class:`str` The name the user is displaying; in priority order: display name, masqueraded name, original name"""
        return self.display_name or self.masquerade_name or self.original_name

    @property
    def avatar(self) -> Union[Asset, PartialAsset, None]:
        """Optional[:class:`Asset`] The avatar the member is displaying, this includes there orginal avatar and masqueraded avatar"""
        return self.masquerade_avatar or self.original_avatar

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given user."""
        return f"<@{self.id}>"

    def _update(self, *, status: Optional[StatusPayload] = None, profile: Optional[UserProfileData] = None, avatar: Optional[File] = None, online: Optional[bool] = None):
        # internal cache update from gateway events; only provided fields change
        if status is not None:
            presence = status.get("presence")
            self.status = Status(status.get("text"), PresenceType(presence) if presence else None)

        if profile is not None:
            if background_file := profile.get("background"):
                background = Asset(background_file, self.state)
            else:
                background = None

            self.profile = UserProfile(profile.get("content"), background)

        if avatar is not None:
            self.original_avatar = Asset(avatar, self.state)

        if online is not None:
            self.online = online

        # update user infomation for all members; guarded so Member._update
        # (which inherits this) does not recurse into sibling members
        if self.__class__ is User:
            for member in self._members.values():
                User._update(member, status=status, profile=profile, avatar=avatar, online=online)

    async def default_avatar(self) -> bytes:
        """Returns the default avatar for this user

        Returns
        --------
        :class:`bytes`
            The bytes of the image
        """
        return await self.state.http.fetch_default_avatar(self.id)

    async def fetch_profile(self) -> UserProfile:
        """Fetches the user's profile

        Returns
        --------
        :class:`UserProfile`
            The user's profile
        """
        # return the cached profile when available
        if profile := self.profile:
            return profile

        payload = await self.state.http.fetch_profile(self.id)

        if file := payload.get("background"):
            background = Asset(file, self.state)
        else:
            background = None

        self.profile = UserProfile(payload.get("content"), background)
        return self.profile

    def to_member(self, server: Server) -> Member:
        """Gets the member instance for this user for a specific server.

        Roughly equivelent to:

        .. code-block:: python

            member = server.get_member(user.id)

        Parameters
        -----------
        server: :class:`Server`
            The server to get the member for

        Returns
        --------
        :class:`Member`
            The member

        Raises
        -------
        :class:`LookupError`
            If the user is not a member of the server
        """
        try:
            return self._members[server.id]
        # a dict lookup raises KeyError, not IndexError - the old handler was
        # dead code and callers received a KeyError instead of bare LookupError
        except KeyError:
            raise LookupError from None
from __future__ import annotations
from typing import Callable, Iterator, Optional, Union, overload
from typing_extensions import Self
__all__ = ("Flag", "Flags", "UserBadges")
class Flag:
    """Descriptor representing a single bit inside a :class:`Flags` bitfield.

    Used as a decorator on a zero-argument function returning the bit value;
    the function's docstring becomes the flag's documentation.
    """
    __slots__ = ("flag", "__doc__")

    def __init__(self, func: Callable[[], int]):
        # the bit value for this flag, taken by calling the decorated function
        self.flag: int = func()
        self.__doc__: str | None = func.__doc__

    @overload
    def __get__(self: Self, instance: None, owner: type[Flags]) -> Self:
        ...

    @overload
    def __get__(self, instance: Flags, owner: type[Flags]) -> bool:
        ...

    def __get__(self: Self, instance: Optional[Flags], owner: type[Flags]) -> Union[Self, bool]:
        # accessed on the class -> the descriptor itself; on an instance -> bool
        if instance is None:
            return self

        return instance._check_flag(self.flag)

    def __set__(self, instance: Flags, value: bool) -> None:
        instance._set_flag(self.flag, value)
class Flags:
    """Base class for bitfield flag collections.

    Subclasses declare individual bits with the :class:`Flag` descriptor; the
    combined integer is stored in :attr:`value`.
    """
    # names of all Flag descriptors declared on the subclass
    FLAG_NAMES: list[str]

    def __init_subclass__(cls) -> None:
        # collect the Flag descriptor names once per subclass
        cls.FLAG_NAMES = []

        for name in dir(cls):
            value = getattr(cls, name)

            if isinstance(value, Flag):
                cls.FLAG_NAMES.append(name)

    def __init__(self, value: int = 0, **flags: bool):
        self.value = value

        for k, v in flags.items():
            setattr(self, k, v)

    @classmethod
    def _from_value(cls, value: int) -> Self:
        # fast path: bypass __init__'s per-flag setattr loop
        self = cls.__new__(cls)
        self.value = value
        return self

    def _check_flag(self, flag: int) -> bool:
        # true only when every bit of `flag` is set
        return (self.value & flag) == flag

    def _set_flag(self, flag: int, value: bool) -> None:
        if value:
            self.value |= flag
        else:
            self.value &= ~flag

    def __eq__(self, other: object) -> bool:
        # return NotImplemented for foreign types instead of raising
        # AttributeError on `other.value` (makes `flags == "x"` be False)
        if not isinstance(other, Flags):
            return NotImplemented

        return self.value == other.value

    def __ne__(self, other: object) -> bool:
        result = self.__eq__(other)

        if result is NotImplemented:
            return result

        return not result

    def __or__(self, other: Self) -> Self:
        return self.__class__._from_value(self.value | other.value)

    def __and__(self, other: Self) -> Self:
        return self.__class__._from_value(self.value & other.value)

    def __invert__(self) -> Self:
        # NOTE: python ints are unbounded, so this yields a negative value; it
        # is only meaningful when combined with & afterwards (as in __sub__)
        return self.__class__._from_value(~self.value)

    def __add__(self, other: Self) -> Self:
        return self | other

    def __sub__(self, other: Self) -> Self:
        return self & ~other

    def __lt__(self, other: Self) -> bool:
        if not isinstance(other, Flags):
            return NotImplemented

        return self.value < other.value

    def __gt__(self, other: Self) -> bool:
        if not isinstance(other, Flags):
            return NotImplemented

        return self.value > other.value

    def __repr__(self) -> str:
        return f"<{self.__class__.__name__} value={self.value}>"

    def __iter__(self) -> Iterator[tuple[str, bool]]:
        for name, value in self.__class__.__dict__.items():
            if isinstance(value, Flag):
                yield name, self._check_flag(value.flag)

    def __hash__(self) -> int:
        return hash(self.value)
class UserBadges(Flags):
    """Contains all user badges; each flag is one bit of the badge bitfield."""
    @Flag
    def developer():
        """:class:`bool` The developer badge."""
        return 1 << 0

    @Flag
    def translator():
        """:class:`bool` The translator badge."""
        return 1 << 1

    @Flag
    def supporter():
        """:class:`bool` The supporter badge."""
        return 1 << 2

    @Flag
    def responsible_disclosure():
        """:class:`bool` The responsible disclosure badge."""
        return 1 << 3

    @Flag
    def founder():
        """:class:`bool` The founder badge."""
        return 1 << 4

    @Flag
    def platform_moderation():
        """:class:`bool` The platform moderation badge."""
        return 1 << 5

    @Flag
    def active_supporter():
        """:class:`bool` The active supporter badge."""
        return 1 << 6

    @Flag
    def paw():
        """:class:`bool` The paw badge."""
        return 1 << 7

    @Flag
    def early_adopter():
        """:class:`bool` The early adopter badge."""
        return 1 << 8

    @Flag
    def reserved_relevant_joke_badge_1():
        """:class:`bool` The reserved relevant joke badge 1 badge."""
        return 1 << 9
from __future__ import annotations
import datetime
from typing import TYPE_CHECKING, Any, Optional
from .utils import _Missing, Missing
from .asset import Asset
from .permissions import Permissions
from .permissions_calculator import calculate_permissions
from .user import User
from .file import File
if TYPE_CHECKING:
from .channel import Channel
from .server import Server
from .state import State
from .types import File as FilePayload
from .types import Member as MemberPayload
from .role import Role
__all__ = ("Member",)
def flattern_user(member: Member, user: User) -> None:
    """Copy every flattenable attribute from *user* onto *member*.

    Needed because :class:`Member` is built from an already-constructed
    :class:`User` object rather than a raw user payload, so
    ``User.__init__`` cannot be called directly.
    """
    for attribute in user.__flattern_attributes__:
        value = getattr(user, attribute)
        setattr(member, attribute, value)
class Member(User):
    """Represents a member of a server, subclasses :class:`User`

    Attributes
    -----------
    nickname: Optional[:class:`str`]
        The nickname of the member if any
    roles: list[:class:`Role`]
        The roles of the member, ordered by the role's rank in decending order
    server: :class:`Server`
        The server the member belongs to
    guild_avatar: Optional[:class:`Asset`]
        The member's guild avatar if any
    joined_at: :class:`datetime.datetime`
        When the member joined the server
    current_timeout: Optional[:class:`datetime.datetime`]
        When the member's current timeout expires, if they are timed out
    """
    __slots__ = ("state", "nickname", "roles", "server", "guild_avatar", "joined_at", "current_timeout")

    def __init__(self, data: MemberPayload, server: Server, state: State):
        user = state.get_user(data["_id"]["user"])
        # We only have an already-built User object (not a raw payload), so copy
        # its attributes over instead of calling User.__init__.
        flattern_user(self, user)
        user._members[server.id] = self

        self.state: State = state

        self.guild_avatar: Asset | None
        if avatar := data.get("avatar"):
            self.guild_avatar = Asset(avatar, state)
        else:
            self.guild_avatar = None

        roles = [server.get_role(role_id) for role_id in data.get("roles", [])]
        self.roles: list[Role] = sorted(roles, key=lambda role: role.rank, reverse=True)
        self.server: Server = server
        self.nickname: str | None = data.get("nickname")

        # joined_at arrives either as a unix timestamp in milliseconds or as an
        # ISO 8601 string — accept both.
        joined_at = data["joined_at"]
        if isinstance(joined_at, int):
            self.joined_at: datetime.datetime = datetime.datetime.fromtimestamp(joined_at / 1000)
        else:
            self.joined_at: datetime.datetime = datetime.datetime.strptime(joined_at, "%Y-%m-%dT%H:%M:%S.%f%z")

        self.current_timeout: datetime.datetime | None
        if current_timeout := data.get("timeout"):
            self.current_timeout = datetime.datetime.strptime(current_timeout, "%Y-%m-%dT%H:%M:%S.%f%z")
        else:
            self.current_timeout = None

    @property
    def avatar(self) -> Optional[Asset]:
        """Optional[:class:`Asset`] The avatar the member is displaying, this includes guild avatars and masqueraded avatar"""
        return self.masquerade_avatar or self.guild_avatar or self.original_avatar

    @property
    def name(self) -> str:
        """:class:`str` The name the member is displaying, in order of precedence: their nickname, display name, masquerade name and original name"""
        return self.nickname or self.display_name or self.masquerade_name or self.original_name

    @property
    def mention(self) -> str:
        """:class:`str`: Returns a string that allows you to mention the given member."""
        return f"<@{self.id}>"

    def _update(self, *, nickname: Optional[str] = None, avatar: Optional[FilePayload] = None, roles: Optional[list[str]] = None):
        # Partial gateway update: only overwrite fields that were actually sent.
        if nickname is not None:
            self.nickname = nickname
        if avatar is not None:
            self.guild_avatar = Asset(avatar, self.state)
        if roles is not None:
            member_roles = [self.server.get_role(role_id) for role_id in roles]
            self.roles = sorted(member_roles, key=lambda role: role.rank, reverse=True)

    async def kick(self) -> None:
        """Kicks the member from the server"""
        await self.state.http.kick_member(self.server.id, self.id)

    async def ban(self, *, reason: Optional[str] = None) -> None:
        """Bans the member from the server

        Parameters
        -----------
        reason: Optional[:class:`str`]
            The reason for the ban
        """
        await self.state.http.ban_member(self.server.id, self.id, reason)

    async def unban(self) -> None:
        """Unbans the member from the server"""
        await self.state.http.unban_member(self.server.id, self.id)

    async def edit(
        self,
        *,
        nickname: str | None | _Missing = Missing,
        roles: list[Role] | None | _Missing = Missing,
        avatar: File | None | _Missing = Missing,
        timeout: datetime.timedelta | None | _Missing = Missing
    ) -> None:
        """Edits the member.

        Passing ``None`` for a parameter removes that field from the member;
        leaving it as ``Missing`` leaves it untouched.
        """
        remove: list[str] = []
        data: dict[str, Any] = {}

        if nickname is None:
            remove.append("Nickname")
        elif nickname is not Missing:
            data["nickname"] = nickname

        if roles is None:
            remove.append("Roles")
        elif roles is not Missing:
            assert not isinstance(roles, _Missing)
            # fix: the api expects role ids — previously the Role objects
            # themselves were placed into the payload
            data["roles"] = [role.id for role in roles]

        if avatar is None:
            remove.append("Avatar")
        elif avatar is not Missing:
            # pyright cant understand custom singletons - it doesnt know this will never be an instance of _Missing here because Missing is the only instance
            assert not isinstance(avatar, _Missing)
            data["avatar"] = (await self.state.http.upload_file(avatar, "avatars"))["id"]

        if timeout is None:
            remove.append("Timeout")
        elif timeout is not Missing:
            assert not isinstance(timeout, _Missing)
            data["timeout"] = (datetime.datetime.now(datetime.timezone.utc) + timeout).isoformat()

        await self.state.http.edit_member(self.server.id, self.id, remove, data)

    async def timeout(self, length: datetime.timedelta) -> None:
        """Timeouts the member

        Parameters
        -----------
        length: :class:`datetime.timedelta`
            The length of the timeout
        """
        # fix: use an aware UTC datetime for consistency with Member.edit —
        # utcnow() is naive (and deprecated in recent Pythons)
        ends_at = datetime.datetime.now(datetime.timezone.utc) + length
        await self.state.http.edit_member(self.server.id, self.id, None, {"timeout": ends_at.isoformat()})

    def get_permissions(self) -> Permissions:
        """Gets the permissions for the member in the server

        Returns
        --------
        :class:`Permissions`
            The members permissions
        """
        return calculate_permissions(self, self.server)

    def get_channel_permissions(self, channel: Channel) -> Permissions:
        """Gets the permissions for the member in the server taking into account the channel as well

        Parameters
        -----------
        channel: :class:`Channel`
            The channel to calculate permissions with

        Returns
        --------
        :class:`Permissions`
            The members permissions
        """
        return calculate_permissions(self, channel)

    def has_permissions(self, **permissions: bool) -> bool:
        """Computes if the member has the specified permissions

        Parameters
        -----------
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the member does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        calculated_perms = self.get_permissions()
        return all(getattr(calculated_perms, key, False) == value for key, value in permissions.items())

    def has_channel_permissions(self, channel: Channel, **permissions: bool) -> bool:
        """Computes if the member has the specified permissions, taking into account the channel as well

        Parameters
        -----------
        channel: :class:`Channel`
            The channel to calculate permissions with
        permissions: :class:`bool`
            The permissions to check, this also accepted `False` if you need to check if the member does not have the permission

        Returns
        --------
        :class:`bool`
            Whether or not they have the permissions
        """
        calculated_perms = self.get_channel_permissions(channel)
        return all(getattr(calculated_perms, key, False) == value for key, value in permissions.items())
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from typing_extensions import Self
from .flags import Flag, Flags
from .types.permissions import Overwrite
__all__ = ("Permissions", "PermissionsOverwrite", "UserPermissions")
class UserPermissions(Flags):
    """Permissions for users"""
    # each Flag returns the bit assigned to that permission by the Revolt API
    @Flag
    def access() -> int:
        return 1 << 0
    @Flag
    def view_profile() -> int:
        return 1 << 1
    @Flag
    def send_message() -> int:
        return 1 << 2
    @Flag
    def invite() -> int:
        return 1 << 3
    @classmethod
    def all(cls) -> Self:
        # every user permission enabled
        return cls(access=True, view_profile=True, send_message=True, invite=True)
class Permissions(Flags):
    """Server permissions for members and roles.

    Each Flag returns the bit assigned to that permission by the Revolt API;
    the gaps in the sequence mirror unassigned/reserved bits.
    """
    @Flag
    def manage_channel() -> int:
        return 1 << 0
    @Flag
    def manage_server() -> int:
        return 1 << 1
    @Flag
    def manage_permissions() -> int:
        return 1 << 2
    @Flag
    def manage_role() -> int:
        return 1 << 3
    @Flag
    def kick_members() -> int:
        return 1 << 6
    @Flag
    def ban_members() -> int:
        return 1 << 7
    @Flag
    def timeout_members() -> int:
        return 1 << 8
    # NOTE: misspelling of "assign_roles" kept for backward compatibility —
    # renaming would break existing callers
    @Flag
    def asign_roles() -> int:
        return 1 << 9
    @Flag
    def change_nickname() -> int:
        return 1 << 10
    @Flag
    def manage_nicknames() -> int:
        return 1 << 11
    @Flag
    def change_avatars() -> int:
        return 1 << 12
    @Flag
    def remove_avatars() -> int:
        return 1 << 13
    @Flag
    def view_channel() -> int:
        return 1 << 20
    @Flag
    def read_message_history() -> int:
        return 1 << 21
    @Flag
    def send_messages() -> int:
        return 1 << 22
    @Flag
    def manage_messages() -> int:
        return 1 << 23
    @Flag
    def manage_webhooks() -> int:
        return 1 << 24
    @Flag
    def invite_others() -> int:
        return 1 << 25
    @Flag
    def send_embeds() -> int:
        return 1 << 26
    @Flag
    def upload_files() -> int:
        return 1 << 27
    @Flag
    def masquerade() -> int:
        return 1 << 28
    # fix: this flag was missing, yet default_direct_message() constructs
    # ``cls(react=True, ...)`` — React is bit 29 in the Revolt API
    @Flag
    def react() -> int:
        return 1 << 29
    @Flag
    def connect() -> int:
        return 1 << 30
    @Flag
    def speak() -> int:
        return 1 << 31
    @Flag
    def video() -> int:
        return 1 << 32
    @Flag
    def mute_members() -> int:
        return 1 << 33
    @Flag
    def deafen_members() -> int:
        return 1 << 34
    @Flag
    def move_members() -> int:
        return 1 << 35
    @classmethod
    def all(cls) -> Self:
        """Every server permission enabled."""
        return cls(0x000F_FFFF_FFFF_FFFF)
    @classmethod
    def default_view_only(cls) -> Self:
        """The minimal read-only permission set."""
        return cls(view_channel=True, read_message_history=True)
    @classmethod
    def default(cls) -> Self:
        """The default permission set for a server's default role."""
        return cls.default_view_only() | cls(send_messages=True, invite_others=True, send_embeds=True, upload_files=True, connect=True, speak=True)
    @classmethod
    def default_direct_message(cls) -> Self:
        """The default permission set inside a direct-message channel."""
        return cls.default_view_only() | cls(react=True, manage_channel=True)
class PermissionsOverwrite:
    """A permissions overwrite in a channel.

    Each permission attribute is tri-state: ``True`` (explicitly allowed),
    ``False`` (explicitly denied) or ``None`` (inherited).
    """
    def __init__(self, allow: Permissions, deny: Permissions):
        self._allow = allow
        self._deny = deny

        for perm in Permissions.FLAG_NAMES:
            if getattr(allow, perm):
                value = True
            elif getattr(deny, perm):
                value = False
            else:
                value = None
            # bypass our own __setattr__ — allow/deny already encode this state
            super().__setattr__(perm, value)

    def __setattr__(self, key: str, value: Any) -> None:
        if key in Permissions.FLAG_NAMES:
            # fix: the original tested ``key is True``/``key is False`` — key is
            # a string, so both branches were unreachable and every assignment
            # fell through to the "inherit" case. Test ``value`` instead, and
            # clear the opposite flag so allow/deny never both hold a bit.
            if value is True:
                setattr(self._allow, key, True)
                setattr(self._deny, key, False)
                super().__setattr__(key, True)
            elif value is False:
                setattr(self._allow, key, False)
                setattr(self._deny, key, True)
                super().__setattr__(key, False)
            else:
                setattr(self._allow, key, False)
                setattr(self._deny, key, False)
                super().__setattr__(key, None)
        else:
            super().__setattr__(key, value)

    if TYPE_CHECKING:
        manage_channel: Optional[bool]
        manage_server: Optional[bool]
        manage_permissions: Optional[bool]
        manage_role: Optional[bool]
        kick_members: Optional[bool]
        ban_members: Optional[bool]
        timeout_members: Optional[bool]
        asign_roles: Optional[bool]
        change_nickname: Optional[bool]
        manage_nicknames: Optional[bool]
        change_avatars: Optional[bool]
        remove_avatars: Optional[bool]
        view_channel: Optional[bool]
        read_message_history: Optional[bool]
        send_messages: Optional[bool]
        manage_messages: Optional[bool]
        manage_webhooks: Optional[bool]
        invite_others: Optional[bool]
        send_embeds: Optional[bool]
        upload_files: Optional[bool]
        masquerade: Optional[bool]
        connect: Optional[bool]
        speak: Optional[bool]
        video: Optional[bool]
        mute_members: Optional[bool]
        deafen_members: Optional[bool]
        move_members: Optional[bool]

    def to_pair(self) -> tuple[Permissions, Permissions]:
        """Return the underlying ``(allow, deny)`` pair."""
        return self._allow, self._deny

    @classmethod
    def _from_overwrite(cls, overwrite: Overwrite) -> Self:
        # build from the raw api payload ({"a": allow_bits, "d": deny_bits})
        allow = Permissions(overwrite["a"])
        deny = Permissions(overwrite["d"])
        return cls(allow, deny)
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, cast
from .asset import Asset
from .category import Category
from .invite import Invite
from .permissions import Permissions
from .role import Role
from .utils import Ulid
if TYPE_CHECKING:
from .channel import Channel, TextChannel, VoiceChannel
from .emoji import Emoji
from .file import File
from .member import Member
from .state import State
from .types import Ban
from .types import Category as CategoryPayload
from .types import File as FilePayload
from .types import Server as ServerPayload
from .types import SystemMessagesConfig
__all__ = ("Server", "SystemMessages", "ServerBan")
class SystemMessages:
    """Holds the server's system-message channel configuration."""

    def __init__(self, data: SystemMessagesConfig, state: State):
        self.state: State = state
        self.user_joined_id: str | None = data.get("user_joined")
        self.user_left_id: str | None = data.get("user_left")
        self.user_kicked_id: str | None = data.get("user_kicked")
        self.user_banned_id: str | None = data.get("user_banned")

    def _resolve_channel(self, channel_id: str | None) -> Optional[TextChannel]:
        # Shared lookup for the four channel properties below.
        if not channel_id:
            return None
        channel = self.state.get_channel(channel_id)
        # fix: .channel is only imported under TYPE_CHECKING at module level
        # (circular-import avoidance), but the isinstance check needs the
        # class at runtime — import it lazily here
        from .channel import TextChannel
        assert isinstance(channel, TextChannel)
        return channel

    @property
    def user_joined(self) -> Optional[TextChannel]:
        """Optional[:class:`TextChannel`] The channel join messages are sent to, if any."""
        return self._resolve_channel(self.user_joined_id)

    @property
    def user_left(self) -> Optional[TextChannel]:
        """Optional[:class:`TextChannel`] The channel leave messages are sent to, if any."""
        return self._resolve_channel(self.user_left_id)

    @property
    def user_kicked(self) -> Optional[TextChannel]:
        """Optional[:class:`TextChannel`] The channel kick messages are sent to, if any."""
        return self._resolve_channel(self.user_kicked_id)

    @property
    def user_banned(self) -> Optional[TextChannel]:
        """Optional[:class:`TextChannel`] The channel ban messages are sent to, if any."""
        return self._resolve_channel(self.user_banned_id)
class Server(Ulid):
    """Represents a server

    Attributes
    -----------
    id: :class:`str`
        The id of the server
    name: :class:`str`
        The name of the server
    owner_id: :class:`str`
        The owner's id of the server
    description: Optional[:class:`str`]
        The servers description
    nsfw: :class:`bool`
        Whether the server is nsfw or not
    system_messages: :class:`SystemMessages`
        The system message config for the server
    icon: Optional[:class:`Asset`]
        The servers icon
    banner: Optional[:class:`Asset`]
        The servers banner
    default_permissions: :class:`Permissions`
        The permissions for the default role
    """
    __slots__ = ("state", "id", "name", "owner_id", "default_permissions", "_members", "_roles", "_channels", "description", "icon", "banner", "nsfw", "system_messages", "_categories", "_emojis")

    def __init__(self, data: ServerPayload, state: State):
        self.state: State = state
        self.id: str = data["_id"]
        self.name: str = data["name"]
        self.owner_id: str = data["owner"]
        self.description: str | None = data.get("description") or None
        self.nsfw: bool = data.get("nsfw", False)
        self.system_messages: SystemMessages = SystemMessages(data.get("system_messages", cast("SystemMessagesConfig", {})), state)
        self._categories: dict[str, Category] = {data["id"]: Category(data, state) for data in data.get("categories", [])}
        self.default_permissions: Permissions = Permissions(data["default_permissions"])

        self.icon: Asset | None
        if icon := data.get("icon"):
            self.icon = Asset(icon, state)
        else:
            self.icon = None

        self.banner: Asset | None
        if banner := data.get("banner"):
            self.banner = Asset(banner, state)
        else:
            self.banner = None

        self._members: dict[str, Member] = {}
        self._roles: dict[str, Role] = {role_id: Role(role, role_id, self, state) for role_id, role in data.get("roles", {}).items()}
        self._channels: dict[str, Channel] = {}

        # The api doesnt send us all the channels but sends us all the ids, this is because channels we dont have permissions to see are not sent
        # this causes get_channel to error so we have to first check ourself if its in the cache.
        for channel_id in data["channels"]:
            if channel := state.channels.get(channel_id):
                self._channels[channel_id] = channel

        self._emojis: dict[str, Emoji] = {}

    def _update(self, *, owner: Optional[str] = None, name: Optional[str] = None, description: Optional[str] = None, icon: Optional[FilePayload] = None, banner: Optional[FilePayload] = None, default_permissions: Optional[int] = None, nsfw: Optional[bool] = None, system_messages: Optional[SystemMessagesConfig] = None, categories: Optional[list[CategoryPayload]] = None, channels: Optional[list[str]] = None):
        # Partial gateway update: only overwrite fields that were actually sent.
        if owner is not None:
            self.owner_id = owner
        if name is not None:
            self.name = name
        if description is not None:
            self.description = description or None
        if icon is not None:
            self.icon = Asset(icon, self.state)
        if banner is not None:
            self.banner = Asset(banner, self.state)
        if default_permissions is not None:
            self.default_permissions = Permissions(default_permissions)
        if nsfw is not None:
            self.nsfw = nsfw
        if system_messages is not None:
            self.system_messages = SystemMessages(system_messages, self.state)
        if categories is not None:
            self._categories = {data["id"]: Category(data, self.state) for data in categories}
        if channels is not None:
            # fix: mirror __init__ — skip channel ids we cannot see instead of
            # letting get_channel raise on them
            self._channels = {}
            for channel_id in channels:
                if channel := self.state.channels.get(channel_id):
                    self._channels[channel_id] = channel

    @property
    def roles(self) -> list[Role]:
        """list[:class:`Role`] Gets all roles in the server in decending order"""
        return list(self._roles.values())

    @property
    def members(self) -> list[Member]:
        """list[:class:`Member`] Gets all members in the server"""
        return list(self._members.values())

    @property
    def channels(self) -> list[Channel]:
        """list[:class:`Channel`] Gets all channels in the server"""
        return list(self._channels.values())

    @property
    def categories(self) -> list[Category]:
        """list[:class:`Category`] Gets all categories in the server"""
        return list(self._categories.values())

    @property
    def emojis(self) -> list[Emoji]:
        """list[:class:`Emoji`] Gets all emojis in the server"""
        return list(self._emojis.values())

    def get_role(self, role_id: str) -> Role:
        """Gets a role from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the role

        Returns
        --------
        :class:`Role`
            The role
        """
        # raises KeyError (a LookupError subclass) when missing
        return self._roles[role_id]

    def get_member(self, member_id: str) -> Member:
        """Gets a member from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the member

        Returns
        --------
        :class:`Member`
            The member
        """
        try:
            return self._members[member_id]
        except KeyError:
            raise LookupError from None

    def get_channel(self, channel_id: str) -> Channel:
        """Gets a channel from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the channel

        Returns
        --------
        :class:`Channel`
            The channel
        """
        try:
            return self._channels[channel_id]
        except KeyError:
            raise LookupError from None

    def get_category(self, category_id: str) -> Category:
        """Gets a category from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the category

        Returns
        --------
        :class:`Category`
            The category
        """
        try:
            return self._categories[category_id]
        except KeyError:
            raise LookupError from None

    def get_emoji(self, emoji_id: str) -> Emoji:
        """Gets a emoji from the cache

        Parameters
        -----------
        id: :class:`str`
            The id of the emoji

        Returns
        --------
        :class:`Emoji`
            The emoji
        """
        try:
            return self._emojis[emoji_id]
        except KeyError as e:
            raise LookupError from e

    @property
    def owner(self) -> Member:
        """:class:`Member` The owner of the server"""
        return self.get_member(self.owner_id)

    async def set_default_permissions(self, permissions: Permissions) -> None:
        """Sets the default server permissions.

        Parameters
        -----------
        permissions: :class:`Permissions`
            The new default permissions for the server
        """
        await self.state.http.set_server_default_permissions(self.id, permissions.value)

    async def leave_server(self) -> None:
        """Leaves or deletes the server"""
        await self.state.http.delete_leave_server(self.id)

    async def delete_server(self) -> None:
        """Leaves or deletes a server, alias to :meth`Server.leave_server`"""
        await self.leave_server()

    async def create_text_channel(self, *, name: str, description: Optional[str] = None) -> TextChannel:
        """Creates a text channel in the server

        Parameters
        -----------
        name: :class:`str`
            The name of the channel
        description: Optional[:class:`str`]
            The channel's description

        Returns
        --------
        :class:`TextChannel`
            The text channel that was just created
        """
        payload = await self.state.http.create_channel(self.id, "Text", name, description)
        # fix: register through the state cache like create_voice_channel does;
        # TextChannel is only imported under TYPE_CHECKING, so constructing it
        # directly here raised NameError at runtime (and skipped the cache)
        channel = self.state.add_channel(payload)
        self._channels[channel.id] = channel
        return cast("TextChannel", channel)

    async def create_voice_channel(self, *, name: str, description: Optional[str] = None) -> VoiceChannel:
        """Creates a voice channel in the server

        Parameters
        -----------
        name: :class:`str`
            The name of the channel
        description: Optional[:class:`str`]
            The channel's description

        Returns
        --------
        :class:`VoiceChannel`
            The voice channel that was just created
        """
        payload = await self.state.http.create_channel(self.id, "Voice", name, description)
        channel = self.state.add_channel(payload)
        self._channels[channel.id] = channel
        return cast("VoiceChannel", channel)

    async def fetch_invites(self) -> list[Invite]:
        """Fetches all invites in the server

        Returns
        --------
        list[:class:`Invite`]
        """
        invite_payloads = await self.state.http.fetch_server_invites(self.id)
        return [Invite._from_partial(payload["_id"], payload["server"], payload["creator"], payload["channel"], self.state) for payload in invite_payloads]

    async def fetch_member(self, member_id: str) -> Member:
        """Fetches a member from this server

        Parameters
        -----------
        member_id: :class:`str`
            The id of the member you are fetching

        Returns
        --------
        :class:`Member`
            The member with the matching id
        """
        payload = await self.state.http.fetch_member(self.id, member_id)
        return Member(payload, self, self.state)

    async def fetch_bans(self) -> list[ServerBan]:
        """Fetches all bans in the server

        Returns
        --------
        list[:class:`ServerBan`]
        """
        payload = await self.state.http.fetch_bans(self.id)
        return [ServerBan(ban, self.state) for ban in payload["bans"]]

    async def create_role(self, name: str) -> Role:
        """Creates a role in the server

        Parameters
        -----------
        name: :class:`str`
            The name of the role

        Returns
        --------
        :class:`Role`
            The role that was just created
        """
        payload = await self.state.http.create_role(self.id, name)
        return Role(payload, name, self, self.state)

    async def create_emoji(self, name: str, file: File, *, nsfw: bool = False) -> Emoji:
        """Creates an emoji

        Parameters
        -----------
        name: :class:`str`
            The name for the emoji
        file: :class:`File`
            The image for the emoji
        nsfw: :class:`bool`
            Whether or not the emoji is nsfw

        Returns
        --------
        :class:`Emoji`
            The emoji that was just created
        """
        payload = await self.state.http.create_emoji(name, file, nsfw, {"type": "Server", "id": self.id})
        return self.state.add_emoji(payload)
class ServerBan:
    """Represents a server ban

    Attributes
    -----------
    reason: Optional[:class:`str`]
        The reason the user was banned
    server: :class:`Server`
        The server the user was banned in
    user_id: :class:`str`
        The id of the user who was banned
    """
    __slots__ = ("reason", "server", "user_id", "state")

    def __init__(self, ban: Ban, state: State):
        self.reason: str | None = ban.get("reason")
        self.server: Server = state.get_server(ban["_id"]["server"])
        self.user_id: str = ban["_id"]["user"]
        self.state: State = state

    async def unban(self) -> None:
        """Unbans the user"""
        await self.state.http.unban_member(self.server.id, self.user_id)
from __future__ import annotations
from typing import TYPE_CHECKING, Literal, TypedDict, Union
from typing_extensions import NotRequired
if TYPE_CHECKING:
from .file import File
__all__ = ("Embed", "SendableEmbed", "WebsiteEmbed", "ImageEmbed", "TextEmbed", "NoneEmbed", "YoutubeSpecial", "TwitchSpecial", "SpotifySpecial", "SoundcloudSpecial", "BandcampSpecial", "WebsiteSpecial", "JanuaryImage", "JanuaryVideo")
class YoutubeSpecial(TypedDict):
type: Literal["Youtube"]
id: str
timestamp: NotRequired[str]
class TwitchSpecial(TypedDict):
type: Literal["Twitch"]
content_type: Literal["Channel", "Video", "Clip"]
id: str
class SpotifySpecial(TypedDict):
type: Literal["Spotify"]
content_type: str
id: str
class SoundcloudSpecial(TypedDict):
type: Literal["Soundcloud"]
class BandcampSpecial(TypedDict):
type: Literal["Bandcamp"]
content_type: Literal["Album", "Track"]
id: str
WebsiteSpecial = Union[YoutubeSpecial, TwitchSpecial, SpotifySpecial, SoundcloudSpecial, BandcampSpecial]
class JanuaryImage(TypedDict):
url: str
width: int
height: int
size: Literal["Large", "Preview"]
class JanuaryVideo(TypedDict):
url: str
width: int
height: int
class WebsiteEmbed(TypedDict):
type: Literal["Website"]
url: NotRequired[str]
special: NotRequired[WebsiteSpecial]
title: NotRequired[str]
description: NotRequired[str]
image: NotRequired[JanuaryImage]
video: NotRequired[JanuaryVideo]
site_name: NotRequired[str]
icon_url: NotRequired[str]
colour: NotRequired[str]
class ImageEmbed(JanuaryImage):
type: Literal["Image"]
class TextEmbed(TypedDict):
type: Literal["Text"]
icon_url: NotRequired[str]
url: NotRequired[str]
title: NotRequired[str]
description: NotRequired[str]
media: NotRequired[File]
colour: NotRequired[str]
class NoneEmbed(TypedDict):
type: Literal["None"]
Embed = Union[WebsiteEmbed, ImageEmbed, TextEmbed, NoneEmbed]
class SendableEmbed(TypedDict):
type: Literal["Text"]
icon_url: NotRequired[str]
url: NotRequired[str]
title: NotRequired[str]
description: NotRequired[str]
media: NotRequired[str]
colour: NotRequired[str] | /revolt_py-0.1.11-py3-none-any.whl/revolt/types/embed.py | 0.72086 | 0.187021 | embed.py | pypi |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Generic, Optional
import revolt
from revolt.utils import maybe_coroutine
from .command import Command
from .group import Group
from .utils import ClientCoT
if TYPE_CHECKING:
from .view import StringView
from revolt.state import State
__all__ = (
"Context",
)
class Context(revolt.Messageable, Generic[ClientCoT]):
    """Stores metadata the commands execution.

    Attributes
    -----------
    command: Optional[:class:`Command`]
        The command, this can be `None` when no command was found and the error handler is being executed
    invoked_with: :class:`str`
        The command name that was used, this can be an alias, the commands name or a command that doesnt exist
    message: :class:`Message`
        The message that was sent to invoke the command
    channel: :class:`Messageable`
        The channel the command was invoked in
    server_id: Optional[:class:`Server`]
        The server the command was invoked in
    author: Union[:class:`Member`, :class:`User`]
        The user or member that invoked the commad, will be :class:`User` in DMs
    args: list[:class:`str`]
        The positional arguments being passed to the command
    kwargs: dict[:class:`str`, Any]
        The keyword arguments being passed to the command
    client: :class:`CommandsClient`
        The revolt client
    """
    __slots__ = ("command", "invoked_with", "args", "message", "channel", "author", "view", "kwargs", "state", "client", "server_id")

    async def _get_channel_id(self) -> str:
        # Messageable hook: replies go to the channel the invoking message came from.
        return self.channel.id

    def __init__(self, command: Optional[Command[ClientCoT]], invoked_with: str, view: StringView, message: revolt.Message, client: ClientCoT):
        self.command: Command[ClientCoT] | None = command
        self.invoked_with: str = invoked_with
        self.view: StringView = view
        self.message: revolt.Message = message
        self.client: ClientCoT = client
        self.args: list[Any] = []
        self.kwargs: dict[str, Any] = {}
        self.server_id: str | None = message.server_id
        self.channel: revolt.TextChannel | revolt.GroupDMChannel | revolt.DMChannel = message.channel
        self.author: revolt.Member | revolt.User = message.author
        self.state: State = message.state

    @property
    def server(self) -> revolt.Server:
        """:class:`Server` The server this command was invoked in

        Raises
        -------
        :class:`LookupError`
            Raises if the command was not invoked in a server
        """
        if not self.server_id:
            raise LookupError
        return self.state.get_server(self.server_id)

    async def invoke(self) -> Any:
        """Invokes the command.

        .. note:: If the command is `None`, this function will do nothing.
        """
        if command := self.command:
            if isinstance(command, Group):
                # Try to dispatch to a subcommand first; if the next word is
                # not a subcommand, rewind the view so the word is parsed as a
                # regular argument of the group itself.
                try:
                    subcommand_name = self.view.get_next_word()
                except StopIteration:
                    pass
                else:
                    if subcommand := command.subcommands.get(subcommand_name):
                        self.command = command = subcommand
                        return await self.invoke()
                    self.view.undo()

            await command.parse_arguments(self)
            return await command.invoke(self, *self.args, **self.kwargs)

    async def can_run(self, command: Optional[Command[ClientCoT]] = None) -> bool:
        """Runs all of the commands checks, and returns true if all of them pass"""
        command = command or self.command
        return all([await maybe_coroutine(check, self) for check in (command.checks if command else [])])

    async def send_help(self, argument: Command[Any] | Group[Any] | ClientCoT | None = None) -> None:
        """Invokes the help command for the given argument (defaults to the whole client)."""
        argument = argument or self.client
        command = self.client.get_command("help")
        await command.invoke(self, argument)
from __future__ import annotations
from typing import Any, Callable, Coroutine, Union, cast
from typing_extensions import TypeVar
import revolt
from .command import Command
from .context import Context
from .errors import (MissingPermissionsError, NotBotOwner, NotServerOwner,
ServerOnly)
from .utils import ClientT
__all__ = ("check", "Check", "is_bot_owner", "is_server_owner", "has_permissions", "has_channel_permissions")
T = TypeVar("T", Callable[..., Any], Command, default=Command)
Check = Callable[[Context[ClientT]], Union[Any, Coroutine[Any, Any, Any]]]
def check(check: Check[ClientT]) -> Callable[[T], T]:
    """A decorator for adding command checks

    Parameters
    -----------
    check: Callable[[Context], Union[Any, Coroutine[Any, Any, Any]]]
        The function to be called, must take one parameter, context and optionally be a coroutine, the return value denoating whether the check should pass or fail
    """
    def inner(func: T) -> T:
        if isinstance(func, Command):
            # decorating an already-built Command: attach the check directly
            command = cast(Command[ClientT], func)  # cant verify generic at runtime so must cast
            command.checks.append(check)
        else:
            # decorating a plain callback before it becomes a Command: stash
            # the check on the function for later collection
            checks = getattr(func, "_checks", [])
            checks.append(check)
            func._checks = checks  # type: ignore
        return func
    return inner
def is_bot_owner() -> Callable[[T], T]:
    """A command check for limiting the command to only the bot's owner"""
    @check
    def inner(context: Context[ClientT]):
        # compare the invoker against the owner of the bot account itself
        if context.author.id == context.client.user.owner_id:
            return True
        raise NotBotOwner
    return inner
def is_server_owner() -> Callable[[T], T]:
    """A command check for limiting the command to only a server's owner"""
    @check
    def inner(context: Context[ClientT]) -> bool:
        # owner checks only make sense inside a server, never in DMs
        if not context.server_id:
            raise ServerOnly
        if context.author.id == context.server.owner_id:
            return True
        raise NotServerOwner
    return inner
def has_permissions(**permissions: bool) -> Callable[[T], T]:
    """A command check requiring the invoker to have the given server permissions.

    Raises :class:`MissingPermissionsError` when any requested permission
    does not match the invoker's calculated permissions.
    """
    @check
    def inner(context: Context[ClientT]) -> bool:
        author = context.author
        if not author.has_permissions(**permissions):
            raise MissingPermissionsError(permissions)
        return True
    return inner
def has_channel_permissions(**permissions: bool) -> Callable[[T], T]:
    """A command check requiring the invoker to have the given channel permissions.

    Only usable in a server: raises :class:`ServerOnly` when the invoker is
    not a :class:`Member`, and :class:`MissingPermissionsError` when any
    requested permission does not match.
    """
    @check
    def inner(context: Context[ClientT]) -> bool:
        author = context.author
        if not isinstance(author, revolt.Member):
            raise ServerOnly
        if not author.has_channel_permissions(context.channel, **permissions):
            raise MissingPermissionsError(permissions)
        return True
    return inner
from revolt import RevoltError

# fix: MissingPermissionsError and UnionConverterError are public (raised by
# the permission checks and union converter) but were missing from __all__
__all__ = (
    "CommandError",
    "CommandNotFound",
    "NoClosingQuote",
    "CheckError",
    "NotBotOwner",
    "NotServerOwner",
    "ServerOnly",
    "MissingPermissionsError",
    "ConverterError",
    "InvalidLiteralArgument",
    "BadBoolArgument",
    "CategoryConverterError",
    "ChannelConverterError",
    "UserConverterError",
    "MemberConverterError",
    "UnionConverterError",
    "MissingSetup",
)
class CommandError(RevoltError):
"""base error for all command's related errors"""
class CommandNotFound(CommandError):
"""Raised when a command isnt found.
Parameters
-----------
command_name: :class:`str`
The name of the command that wasnt found
"""
__slots__ = ("command_name",)
def __init__(self, command_name: str):
self.command_name: str = command_name
class NoClosingQuote(CommandError):
"""Raised when there is no closing quote for a command argument"""
class CheckError(CommandError):
"""Raised when a check fails for a command"""
class NotBotOwner(CheckError):
"""Raised when the `is_bot_owner` check fails"""
class NotServerOwner(CheckError):
"""Raised when the `is_server_owner` check fails"""
class ServerOnly(CheckError):
"""Raised when a check requires the command to be ran in a server"""
class MissingPermissionsError(CheckError):
    """Raised when a check requires permissions the user does not have

    Attributes
    -----------
    permissions: :class:`dict[str, bool]`
        The permissions which the user did not have
    """
    def __init__(self, permissions: dict[str, bool]):
        self.permissions: dict[str, bool] = permissions
class ConverterError(CommandError):
    """Base class for all converter errors."""
class InvalidLiteralArgument(ConverterError):
    """Raised when the argument is not a valid literal argument."""
class BadBoolArgument(ConverterError):
    """Raised when the bool converter fails."""
class CategoryConverterError(ConverterError):
    """Raised when the Category converter fails."""

    def __init__(self, argument: str):
        # The raw argument string that could not be converted.
        self.argument: str = argument
class ChannelConverterError(ConverterError):
    """Raised when the Channel converter fails."""

    def __init__(self, argument: str):
        # The raw argument string that could not be converted.
        self.argument: str = argument
class UserConverterError(ConverterError):
    """Raised when the User converter fails."""

    def __init__(self, argument: str):
        # The raw argument string that could not be converted.
        self.argument: str = argument
class MemberConverterError(ConverterError):
    """Raised when the Member converter fails."""

    def __init__(self, argument: str):
        # The raw argument string that could not be converted.
        self.argument: str = argument
class UnionConverterError(ConverterError):
    """Raised when all converters in a union fail."""

    def __init__(self, argument: str):
        # The raw argument string that could not be converted.
        self.argument: str = argument
class MissingSetup(CommandError):
    """Raised when an extension is missing the ``setup`` function."""
from __future__ import annotations
from typing import Any, Callable, Coroutine, Optional
from .command import Command
from .utils import ClientCoT, ClientT
__all__ = (
"Group",
"group"
)
class Group(Command[ClientCoT]):
    """Class for holding info about a group command.

    A group is a command that can itself own subcommands (and nested
    sub-groups) registered via the :meth:`command` and :meth:`group`
    decorators.

    Parameters
    -----------
    callback: Callable[..., Coroutine[Any, Any, Any]]
        The callback for the group command
    name: :class:`str`
        The name of the command
    aliases: list[:class:`str`]
        The aliases of the group command
    subcommands: dict[:class:`str`, :class:`Command`]
        The group's subcommands.
    """
    __slots__: tuple[str, ...] = ("subcommands",)

    def __init__(self, callback: Callable[..., Coroutine[Any, Any, Any]], name: str, aliases: list[str]):
        # Mapping of subcommand name -> Command, populated by the
        # command()/group() decorators below.
        self.subcommands: dict[str, Command[ClientCoT]] = {}
        super().__init__(callback, name, aliases)

    def command(self, *, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: type[Command[ClientCoT]] = Command[ClientCoT]) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Command[ClientCoT]]:
        """A decorator that turns a function into a :class:`Command` and registers the command as a subcommand.

        Parameters
        -----------
        name: Optional[:class:`str`]
            The name of the command, this defaults to the functions name
        aliases: Optional[list[:class:`str`]]
            The aliases of the command, defaults to no aliases
        cls: type[:class:`Command`]
            The class used for creating the command, this defaults to :class:`Command` but can be used to use a custom command subclass

        Returns
        --------
        Callable[Callable[..., Coroutine], :class:`Command`]
            A function that takes the command callback and returns a :class:`Command`
        """
        def inner(func: Callable[..., Coroutine[Any, Any, Any]]):
            command = cls(func, name or func.__name__, aliases or [])
            # Link the child back to this group and register it by name.
            command.parent = self
            self.subcommands[command.name] = command
            return command
        return inner

    def group(self, *, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: Optional[type[Group[ClientCoT]]] = None) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Group[ClientCoT]]:
        """A decorator that turns a function into a :class:`Group` and registers the command as a subcommand

        Parameters
        -----------
        name: Optional[:class:`str`]
            The name of the group command, this defaults to the functions name
        aliases: Optional[list[:class:`str`]]
            The aliases of the group command, defaults to no aliases
        cls: type[:class:`Group`]
            The class used for creating the command, this defaults to :class:`Group` but can be used to use a custom group subclass

        Returns
        --------
        Callable[Callable[..., Coroutine], :class:`Group`]
            A function that takes the command callback and returns a :class:`Group`
        """
        # Default to the concrete type of this group so nested groups keep
        # the same (possibly subclassed) type.
        cls = cls or type(self)

        def inner(func: Callable[..., Coroutine[Any, Any, Any]]):
            command = cls(func, name or func.__name__, aliases or [])
            command.parent = self
            self.subcommands[command.name] = command
            return command
        return inner

    def __repr__(self) -> str:
        return f"<Group name=\"{self.name}\">"

    @property
    def commands(self) -> list[Command[ClientCoT]]:
        # Convenience view over the registered subcommands.
        return list(self.subcommands.values())
def group(*, name: Optional[str] = None, aliases: Optional[list[str]] = None, cls: type[Group[ClientT]] = Group) -> Callable[[Callable[..., Coroutine[Any, Any, Any]]], Group[ClientT]]:
    """A decorator that turns a function into a :class:`Group`

    Parameters
    -----------
    name: Optional[:class:`str`]
        The name of the group command, this defaults to the functions name
    aliases: Optional[list[:class:`str`]]
        The aliases of the group command, defaults to no aliases
    cls: type[:class:`Group`]
        The class used for creating the command, this defaults to :class:`Group` but can be used to use a custom group subclass

    Returns
    --------
    Callable[Callable[..., Coroutine], :class:`Group`]
        A function that takes the command callback and returns a :class:`Group`
    """
    def decorator(func: Callable[..., Coroutine[Any, Any, Any]]) -> Group[ClientT]:
        group_name = name or func.__name__
        group_aliases = aliases or []
        return cls(func, group_name, group_aliases)

    return decorator
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, Optional, TypedDict, Union
from typing_extensions import NotRequired
from .cog import Cog
from .command import Command
from .context import Context
from .group import Group
from .utils import ClientCoT, ClientT
if TYPE_CHECKING:
from revolt import File, Message, Messageable, MessageReply, SendableEmbed
from .cog import Cog
__all__ = ("MessagePayload", "HelpCommand", "DefaultHelpCommand", "help_command_impl")
class MessagePayload(TypedDict):
    """Keyword arguments forwarded to ``context.send`` when delivering
    help-command output."""
    content: str
    # Optional send() extras; omitted keys are simply not passed along.
    embed: NotRequired[SendableEmbed]
    embeds: NotRequired[list[SendableEmbed]]
    attachments: NotRequired[list[File]]
    replies: NotRequired[list[MessageReply]]
class HelpCommand(ABC, Generic[ClientCoT]):
    """Abstract base for help-command renderers.

    Subclasses implement the ``create_*`` methods to produce either a plain
    string, an embed, or a full :class:`MessagePayload`.
    """

    @abstractmethod
    async def create_bot_help(self, context: Context[ClientCoT], commands: dict[Optional[Cog[ClientCoT]], list[Command[ClientCoT]]]) -> Union[str, SendableEmbed, MessagePayload]:
        """Render the top-level help page for all commands, grouped by cog."""
        raise NotImplementedError

    @abstractmethod
    async def create_command_help(self, context: Context[ClientCoT], command: Command[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        """Render the help page for a single command."""
        raise NotImplementedError

    @abstractmethod
    async def create_group_help(self, context: Context[ClientCoT], group: Group[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        """Render the help page for a group command and its subcommands."""
        raise NotImplementedError

    @abstractmethod
    async def create_cog_help(self, context: Context[ClientCoT], cog: Cog[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        """Render the help page for a cog."""
        raise NotImplementedError

    async def send_help_command(self, context: Context[ClientCoT], message_payload: MessagePayload) -> Message:
        """Send the rendered payload; override to customise delivery."""
        return await context.send(**message_payload)

    async def filter_commands(self, context: Context[ClientCoT], commands: list[Command[ClientCoT]]) -> list[Command[ClientCoT]]:
        """Keep only commands the invoking user can run.

        A command whose checks raise is treated as not runnable and dropped.
        """
        filtered: list[Command[ClientCoT]] = []
        for command in commands:
            try:
                if await context.can_run(command):
                    filtered.append(command)
            except Exception:
                pass
        return filtered

    async def group_commands(self, context: Context[ClientCoT], commands: list[Command[ClientCoT]]) -> dict[Optional[Cog[ClientCoT]], list[Command[ClientCoT]]]:
        """Bucket commands by their cog (``None`` for cog-less commands)."""
        cogs: dict[Optional[Cog[ClientCoT]], list[Command[ClientCoT]]] = {}
        for command in commands:
            cogs.setdefault(command.cog, []).append(command)
        return cogs

    async def handle_message(self, context: Context[ClientCoT], message: Message) -> None:
        """Hook called with the sent help message; default does nothing."""
        pass

    async def get_channel(self, context: Context) -> Messageable:
        """Destination for help output; defaults to the invocation context."""
        return context

    @abstractmethod
    async def handle_no_command_found(self, context: Context[ClientCoT], name: str) -> Any:
        """Called when *name* matches neither a command nor a cog."""
        raise NotImplementedError

    @abstractmethod
    async def handle_no_cog_found(self, context: Context[ClientCoT], name: str) -> Any:
        """Called when a cog lookup for *name* fails."""
        raise NotImplementedError
class DefaultHelpCommand(HelpCommand[ClientCoT]):
    """Plain-text help renderer; every page is wrapped in a code block."""

    def __init__(self, default_cog_name: str = "No Cog"):
        # Heading used for commands that do not belong to any cog.
        self.default_cog_name = default_cog_name

    async def create_bot_help(self, context: Context[ClientCoT], commands: dict[Optional[Cog[ClientCoT]], list[Command[ClientCoT]]]) -> Union[str, SendableEmbed, MessagePayload]:
        sections: list[str] = ["```"]
        for cog, cog_commands in commands.items():
            entries = [f"{cog.qualified_name if cog else self.default_cog_name}:"]
            entries.extend(
                f" {cmd.name} - {cmd.short_description or 'No description'}"
                for cmd in cog_commands
            )
            sections.append("\n".join(entries))
        sections.append("```")
        return "\n".join(sections)

    async def create_cog_help(self, context: Context[ClientCoT], cog: Cog[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        entries = ["```", f"{cog.qualified_name}:"]
        entries.extend(
            f" {cmd.name} - {cmd.short_description or 'No description'}"
            for cmd in cog.commands
        )
        entries.append("```")
        return "\n".join(entries)

    async def create_command_help(self, context: Context[ClientCoT], command: Command[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        entries = ["```", f"{command.name}:", f" Usage: {command.get_usage()}"]
        if command.aliases:
            entries.append(f" Aliases: {', '.join(command.aliases)}")
        if command.description:
            entries.append(command.description)
        entries.append("```")
        return "\n".join(entries)

    async def create_group_help(self, context: Context[ClientCoT], group: Group[ClientCoT]) -> Union[str, SendableEmbed, MessagePayload]:
        entries = ["```", f"{group.name}:", f" Usage: {group.get_usage()}"]
        if group.aliases:
            entries.append(f" Aliases: {', '.join(group.aliases)}")
        if group.description:
            entries.append(group.description)
        entries.extend(
            f" {cmd.name} - {cmd.short_description or 'No description'}"
            for cmd in group.commands
        )
        entries.append("```")
        return "\n".join(entries)

    async def handle_no_command_found(self, context: Context[ClientCoT], name: str) -> None:
        destination = await self.get_channel(context)
        await destination.send(f"Command `{name}` not found.")

    async def handle_no_cog_found(self, context: Context[ClientCoT], name: str) -> None:
        destination = await self.get_channel(context)
        await destination.send(f"Cog `{name}` not found.")
class HelpCommandImpl(Command[ClientCoT]):
    """The built-in ``help`` command.

    Wraps :func:`help_command_impl` in a :class:`Command` so it can be
    registered and dispatched like any user-defined command.
    """

    def __init__(self, client: ClientCoT):
        self.client = client

        # Defined as a closure so it can be handed to Command.__init__ as the
        # callback; the first parameter mirrors the cog/client slot of normal
        # command callbacks and is unused here.
        async def callback(_: Union[ClientCoT, Cog[ClientCoT]], context: Context[ClientCoT], *args: str) -> None:
            await help_command_impl(context.client, context, *args)

        super().__init__(callback=callback, name="help", aliases=[])
        self.description: str | None = "Shows help for a command, cog or the entire bot"
self.description: str | None = "Shows help for a command, cog or the entire bot"
async def help_command_impl(self: ClientT, context: Context[ClientT], *arguments: str) -> None:
    """Shared implementation of the ``help`` command.

    Resolves what was asked for (nothing, a command, a group, or a cog),
    asks the configured :class:`HelpCommand` to render it, normalises the
    result into a :class:`MessagePayload` and sends it.
    """
    help_command = self.help_command
    if not help_command:
        return

    filtered_commands = await help_command.filter_commands(context, self.commands)
    commands = await help_command.group_commands(context, filtered_commands)

    if not arguments:
        payload = await help_command.create_bot_help(context, commands)
    else:
        command_name = arguments[0]
        try:
            command = self.get_command(command_name)
        except KeyError:
            # Not a command -- fall back to a cog lookup before giving up.
            cog = self.cogs.get(command_name)
            if cog:
                payload = await help_command.create_cog_help(context, cog)
            else:
                return await help_command.handle_no_command_found(context, command_name)
        else:
            if isinstance(command, Group):
                payload = await help_command.create_group_help(context, command)
            else:
                payload = await help_command.create_command_help(context, command)

    # Normalise the three allowed return shapes into a MessagePayload.
    # BUGFIX: SendableEmbed is only imported under TYPE_CHECKING, so the old
    # `isinstance(payload, SendableEmbed)` raised NameError at runtime for
    # embed results. Dispatch on str/dict instead (MessagePayload is a
    # TypedDict, hence a plain dict at runtime) and treat anything else as
    # an embed.
    msg_payload: MessagePayload
    if isinstance(payload, str):
        msg_payload = {"content": payload}
    elif isinstance(payload, dict):
        msg_payload = payload
    else:
        msg_payload = {"embed": payload, "content": " "}

    message = await help_command.send_help_command(context, msg_payload)
    await help_command.handle_message(context, message)
from datetime import date, datetime
import dateutil.parser
from decimal import Decimal
import requests
from typing import Optional, Union
from . import base, utils
class Order(utils._UpdateFromKwargsMixin):
    """A Revolut Merchant API order.

    Attributes are populated from API response kwargs through
    ``_UpdateFromKwargsMixin``; timestamp strings are parsed into
    ``datetime`` objects, and monetary values are exposed as ``Decimal``
    via the properties below.
    """

    id: str = ""
    client = None
    public_id: str = ""
    merchant_order_ext_ref: str = ""
    type: str = ""
    state: str = ""
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
    capture_mode: str = ""
    # NOTE: this class attribute is shadowed by the `currency` property
    # defined below; kept so the attribute listing stays complete.
    currency: str = ""
    order_amount: Optional[dict] = None
    order_outstanding_amount: Optional[dict] = None
    refunded_amount: Optional[dict] = None
    description: Optional[str] = None
    metadata: str = ""
    customer_id: Optional[str] = None
    email: Optional[str] = None
    phone: str = ""
    completed_at: Optional[datetime] = None
    payments: Optional[list] = None
    related: Optional[list] = None
    shipping_address: Optional[dict] = None
    checkout_url: str = ""

    def __init__(self, **kwargs):
        self.client = kwargs.pop("client")
        self._update(**kwargs)

    def __repr__(self):
        return f"<Order {self.id}>"

    def _update(self, **kwargs):
        """Apply API kwargs, then coerce timestamp strings to datetimes."""
        super(Order, self)._update(**kwargs)
        self.created_at = (
            dateutil.parser.parse(self.created_at) if self.created_at else None
        )
        self.updated_at = (
            dateutil.parser.parse(self.updated_at) if self.updated_at else None
        )
        # BUGFIX: this previously fell back to "" which contradicted the
        # Optional[datetime] annotation and the None fallback used for
        # created_at/updated_at above.
        self.completed_at = (
            dateutil.parser.parse(self.completed_at) if self.completed_at else None
        )
        self.shipping_address = kwargs.get("shipping_address", {})

    @property
    def currency(self) -> Optional[str]:
        """
        Returns the currency taken from ``self.order_amount["currency"]``.
        """
        return self.order_amount.get("currency", None)

    @currency.setter
    def currency(self, currency: str):
        """
        Sets the currency into ``self.order_amount["currency"]``.
        """
        self.order_amount["currency"] = currency

    @property
    def value(self) -> Optional[Decimal]:
        """
        Returns the value of the order taken from ``self.order_amount["value"]``
        and converted to ``Decimal``.
        """
        if self.order_amount.get("value") is None:
            return None
        return utils._integertomoney(self.order_amount["value"])

    @value.setter
    def value(self, val: Decimal):
        """
        Sets proper ``self.order_amount["value"]`` from given ``Decimal``.
        """
        self.order_amount["value"] = utils._moneytointeger(val)

    @property
    def outstanding_value(self) -> Optional[Decimal]:
        """Remaining amount to be paid, as ``Decimal``; None if unknown."""
        if (
            not self.order_outstanding_amount
            or self.order_outstanding_amount.get("value") is None
        ):
            return None
        return utils._integertomoney(self.order_outstanding_amount["value"])

    @property
    def refunded_value(self) -> Optional[Decimal]:
        """Refunded amount, as ``Decimal``; None if unknown."""
        if not self.refunded_amount or self.refunded_amount.get("value") is None:
            return None
        return utils._integertomoney(self.refunded_amount["value"])

    def save(self) -> None:
        """PATCH the mutable fields of this order back to the API and
        refresh local state from the response."""
        data = {}
        for k in (
            "merchant_order_ext_ref",
            "currency",
            "description",
            "email",
            "customer_id",
            "capture_mode",
            "shipping_address",
        ):
            v = getattr(self, k)
            # Skip unset fields so the PATCH only carries real values.
            if v is not None and v != {}:
                data[k] = v
        data["amount"] = self.order_amount["value"]
        respdata = self.client._patch(f"orders/{self.id}", data)
        self._update(**respdata)
class MerchantClient(base.BaseClient):
    """Stateless client for the Revolut Merchant API, authorised with a
    secret merchant key."""

    merchant_key: Optional[str] = None
    sandbox: bool = False

    def __init__(
        self,
        merchant_key: str,
        sandbox: bool = False,
        timeout: Optional[Union[int, float]] = None,
    ):
        """
        Client to the Merchant API. The authorization is based upon the secret key
        passed as ``merchant_key`` argument. The connection is stateless.

        As there's no simple distinction between production and sandbox, the environment
        is determined upon the state of the ``sandbox`` flag.
        """
        self.sandbox = sandbox
        if sandbox:
            self.base_url = "https://sandbox-merchant.revolut.com/api/1.0/"
        else:
            self.base_url = "https://merchant.revolut.com/api/1.0/"  # pragma: nocover
        self.merchant_key = merchant_key
        self.timeout = timeout
        self._requester = requests.Session()
        self._requester.headers.update(
            {"Authorization": "Bearer {}".format(self.merchant_key)}
        )

    def create_order(
        self, amount: Union[Decimal, int], currency: str, merchant_reference: str
    ) -> Order:
        """
        Creates an order with ``merchant_reference`` being a custom identifier.

        **WARNING:** The amount of the order has to be specified in regular currency units, even
        though Revolut uses integer denomination of 1/100th of the unit.
        """
        amount = utils._moneytointeger(amount)
        # FIX: dropped the dead "or None" -- the dict literal is never empty.
        data = self._post(
            "orders",
            data={
                "amount": amount,
                "currency": currency,
                "merchant_order_ext_ref": merchant_reference,
            },
        )
        return Order(client=self, **data)

    def get_order(self, order_id: str) -> Order:
        """
        Retrieves ``Order`` with the given ID.
        """
        data = self._get(f"orders/{order_id}")
        return Order(client=self, **data)

    def orders(
        self,
        from_date: Optional[Union[date, datetime]] = None,
        to_date: Optional[Union[date, datetime]] = None,
    ) -> "list[Order]":
        """
        Retrieves a list of ``Order``s, optionally within the given time span.
        """
        # FIX: the return annotation used to be the invalid literal [Order].
        reqdata = {}
        if from_date:
            reqdata["from_created_date"] = utils._datetime(from_date)
        if to_date:
            reqdata["to_created_date"] = utils._datetime(to_date)
        data = self._get(path="orders", data=reqdata)
        return [Order(client=self, **txdat) for txdat in data]

    def webhook(self, url, events):
        """Registers a webhook for the given ``url`` and list of ``events``."""
        reqdata = {}
        if url:
            reqdata["url"] = url
        if events:
            reqdata["events"] = events
        self._post("webhooks", data=reqdata)
import csv
from datetime import datetime
import io
from revolut import Amount, Transaction
# Column layout shared by all exchange-history CSV helpers below.
_CSV_COLUMNS = ["date", "hour", "from_amount", "from_currency",
                "to_amount", "to_currency"]
def csv_to_dict(csv_str, separator=","):
    """Parse a CSV string into a list of plain dictionaries.

    >>> csv_to_dict("a,b,c\\n1,2,3")
    [{'a': '1', 'b': '2', 'c': '3'}]
    >>> csv_to_dict("a,b,c\\n1,2,3\\n4,5,6")
    [{'a': '1', 'b': '2', 'c': '3'}, {'a': '4', 'b': '5', 'c': '6'}]
    >>> csv_to_dict("a;b;c\\n1;2;3", separator=";")
    [{'a': '1', 'b': '2', 'c': '3'}]"""
    rows = csv.DictReader(io.StringIO(csv_str), delimiter=separator)
    # DictReader yields mapping objects; normalise each row to a plain dict.
    return [dict(row) for row in rows]
def append_dict_to_csv(filename, dict_obj, separator=",",
                       col_names=_CSV_COLUMNS):
    """Append a single dict as one row to the CSV file *filename*."""
    with open(filename, 'a', newline='\n') as csvfile:
        # Explicit lineterminator avoids stray '\r' ('^M') characters.
        row_writer = csv.DictWriter(csvfile,
                                    delimiter=separator,
                                    fieldnames=col_names,
                                    lineterminator='\n')
        row_writer.writerow(dict_obj)
def convert_Transaction_to_dict(transaction_obj):
    """Flatten a Transaction into the dict layout used by the history CSV."""
    when = transaction_obj.date
    source = transaction_obj.from_amount
    target = transaction_obj.to_amount
    return {
        "date": when.strftime("%d/%m/%Y"),
        "hour": when.strftime("%H:%M:%S"),
        "from_amount": source.real_amount,
        "from_currency": source.currency,
        "to_amount": target.real_amount,
        "to_currency": target.currency,
    }
def update_historyfile(filename, exchange_transaction):
    """Append a completed exchange transaction to the CSV history file."""
    tr_dict = convert_Transaction_to_dict(transaction_obj=exchange_transaction)
    append_dict_to_csv(filename=filename, dict_obj=tr_dict)
def read_file_to_str(filename):
    """Return the whole content of *filename* as a single string."""
    with open(filename, 'r') as handle:
        return handle.read()
def get_last_transactions_from_csv(filename="exchange_history.csv",
                                   separator=","):
    """Read the history CSV and return its rows as Transaction objects."""
    raw_csv = read_file_to_str(filename=filename)
    rows = csv_to_dict(csv_str=raw_csv, separator=separator)
    return [dict_transaction_to_Transaction(row) for row in rows]
def dict_transaction_to_Transaction(tr_dict):
    """Build a Transaction object from one history-CSV row dictionary.

    Raises TypeError when the row does not carry exactly the expected
    columns (see ``_CSV_COLUMNS``).
    """
    if set(tr_dict) != set(_CSV_COLUMNS):
        raise TypeError("Columns expected : {}\n{} received".format(
            _CSV_COLUMNS, list(tr_dict)))
    timestamp = datetime.strptime(
        "{} {}".format(tr_dict["date"], tr_dict["hour"]),
        "%d/%m/%Y %H:%M:%S")
    source = Amount(real_amount=float(tr_dict["from_amount"]),
                    currency=tr_dict["from_currency"])
    target = Amount(real_amount=float(tr_dict["to_amount"]),
                    currency=tr_dict["to_currency"])
    return Transaction(from_amount=source, to_amount=target, date=timestamp)
def get_amount_with_margin(amount, percent_margin):
    """ Returns the amount with a margin

    >>> print(get_amount_with_margin(amount=Amount(real_amount=100,\
currency="EUR"), percent_margin=1))
    101.00 EUR

    Raises TypeError when *amount* is not an Amount or *percent_margin*
    is not an int/float.
    """
    # FIX: use isinstance instead of exact type() comparisons, so Amount
    # subclasses are accepted; bool is excluded explicitly to keep rejecting
    # it as a percentage, matching the previous exact-type behaviour.
    if not isinstance(amount, Amount):
        raise TypeError("amount must be an Amount, got {!r}".format(type(amount)))
    if isinstance(percent_margin, bool) or not isinstance(percent_margin, (float, int)):
        raise TypeError(
            "percent_margin must be an int or float, got {!r}".format(type(percent_margin)))
    margin = percent_margin / 100
    amount_with_margin = amount.real_amount * (1 + margin)
    return Amount(real_amount=amount_with_margin, currency=amount.currency)
import code
import re
from typing import Callable
from PySide6.QtCore import (Qt, Signal, QEvent, QSize, )
from PySide6.QtGui import (QTextCharFormat, QBrush, QColor, QFont)
from PySide6.QtWidgets import (QLineEdit, QWidget, QGridLayout, QPlainTextEdit)
class LineEdit(QLineEdit):
    """QLineEdit with a history buffer for recalling previous lines.

    Tab is accepted as input (inserted as 4 spaces) instead of moving
    focus, and Up/Down/Home/End navigate the history buffer.
    """
    newline = Signal(str)  # Emitted with the entered text when Return is pressed

    def __init__(self, history: int = 100) -> None:
        super().__init__()
        # Maximum number of lines retained in the history buffer.
        self.historymax = history
        self.clearhistory()
        # Raw string: avoids the invalid '\.' escape inside a plain literal.
        self.promptpattern = re.compile(r'^[>.]')

    def clearhistory(self) -> None:
        """Clear history buffer"""
        self.historyindex = 0
        self.historylist = []

    def event(self, ev: QEvent) -> bool:
        """Intercept tab and arrow key presses. Insert 4 spaces
        when tab pressed instead of moving to next control. When
        arrow up or down are pressed select a line from the history
        buffer. Emit newline signal when return key is pressed.
        """
        if ev.type() == QEvent.KeyPress:
            if ev.key() == int(Qt.Key_Tab):
                self.insert('    ')
                return True
            elif ev.key() == int(Qt.Key_Up):
                self.recall(self.historyindex - 1)
                return True
            elif ev.key() == int(Qt.Key_Down):
                self.recall(self.historyindex + 1)
                return True
            elif ev.key() == int(Qt.Key_Home):
                self.recall(0)
                return True
            elif ev.key() == int(Qt.Key_End):
                self.recall(len(self.historylist) - 1)
                return True
            elif ev.key() == int(Qt.Key_Return):
                self.returnkey()
                return True
        return super().event(ev)

    def returnkey(self) -> None:
        """Return key was pressed. Add line to history and emit
        the newline signal.
        """
        text = self.text().rstrip()
        self.record(text)
        self.newline.emit(text)
        self.setText('')

    def recall(self, index: int) -> None:
        """Select a line from the history list"""
        length = len(self.historylist)
        if length > 0:
            # Clamp to the valid range so Up/Down cannot run off the ends.
            index = max(0, min(index, length - 1))
            self.setText(self.historylist[index])
            self.historyindex = index

    def record(self, line: str) -> None:
        """Add *line* to the history buffer, discarding the oldest entries
        once the buffer exceeds ``historymax``."""
        self.historylist.append(line)
        # BUGFIX: the old code popped from the END of the list when full,
        # which threw away the newest entries instead of the oldest ones.
        while len(self.historylist) > self.historymax:
            self.historylist.pop(0)
        # Park the index one past the end so the next Up recalls the newest.
        self.historyindex = len(self.historylist)
class Redirect():
    """File-like adapter that forwards ``write`` calls to a function.

    Useful for pointing ``sys.stdout``/``sys.stderr`` at a GUI callback.
    """

    def __init__(self, func: Callable) -> None:
        # FIX: __init__ returns None; it was annotated as -> 'Redirect'.
        self.func = func

    def write(self, line: str) -> None:
        """Forward *line* to the wrapped callable."""
        self.func(line)
class pythonConsole(QWidget):
    """A GUI version of code.InteractiveConsole.

    An output pane (stdout in blue, stderr in red, echoed input in the
    default colour) sits above a single-line input field with history.
    """

    def __init__(self, context=None,    # namespace dict for the interpreter
                 history: int = 200,    # max lines in history buffer
                 blockcount: int = 500  # max lines in output buffer
                 ) -> None:
        super().__init__()
        # BUGFIX: the default used to be `context=locals()`, which was
        # evaluated once at class-definition time and shared one mutable
        # dict between all instances. With None, InteractiveInterpreter
        # builds a fresh default namespace per instance.
        self.setcontext(context)
        self.buffer = []
        self.content = QGridLayout(self)
        self.content.setContentsMargins(0, 0, 0, 0)
        self.content.setSpacing(0)
        # Display for output and stderr
        self.outdisplay = QPlainTextEdit(self)
        self.outdisplay.setMaximumBlockCount(blockcount)
        self.outdisplay.setReadOnly(True)
        self.content.addWidget(self.outdisplay, 0, 0, 1, 2)
        # Use color to differentiate input, output and stderr
        self.inpfmt = self.outdisplay.currentCharFormat()
        self.outfmt = QTextCharFormat(self.inpfmt)
        self.outfmt.setForeground(QBrush(QColor(0, 0, 255)))
        self.errfmt = QTextCharFormat(self.inpfmt)
        self.errfmt.setForeground(QBrush(QColor(255, 0, 0)))
        # Display input prompt left of input edit
        self.promptdisp = QLineEdit(self)
        self.promptdisp.setReadOnly(True)
        self.promptdisp.setFixedWidth(15)
        self.promptdisp.setFrame(False)
        self.content.addWidget(self.promptdisp, 1, 0)
        self.setprompt('> ')
        # Enter commands here
        self.inpedit = LineEdit(history=history)
        self.inpedit.newline.connect(self.push)
        self.inpedit.setFrame(False)
        self.content.addWidget(self.inpedit, 1, 1)

    def setcontext(self, context):
        """Set context (namespace dict) for the interpreter."""
        self.interp = code.InteractiveInterpreter(context)

    def resetbuffer(self) -> None:
        """Reset the input buffer."""
        self.buffer = []

    def setprompt(self, text: str):
        """Show *text* as the current prompt ('> ' idle, '. ' continuation)."""
        self.prompt = text
        self.promptdisp.setText(text)

    def push(self, line: str) -> None:
        """Execute entered command. Command may span multiple lines"""
        if line == 'clear':
            self.inpedit.clearhistory()
            self.outdisplay.clear()
        else:
            lines = line.split('\n')
            for line in lines:
                # Strip a pasted-in prompt ("> " or ". ") before echoing.
                # Raw string fixes the invalid '\>' / '\.' escapes.
                if re.match(r'^[>.] ', line):
                    line = line[2:]
                self.writeoutput(self.prompt + line, self.inpfmt)
                self.setprompt('. ')
                self.buffer.append(line)
            # Built a command string from lines in the buffer
            source = "\n".join(self.buffer)
            more = self.interp.runsource(source, '<input>', 'exec')
            # runsource returns True while the statement is incomplete.
            if not more:
                self.setprompt('> ')
                self.resetbuffer()

    def setfont(self, font: QFont) -> None:
        """Set font for input and display widgets. Should be monospaced"""
        self.outdisplay.setFont(font)
        self.inpedit.setFont(font)

    def write(self, line: str) -> None:
        """Capture stdout and display in outdisplay (skip bare newlines)."""
        if len(line) != 1 or ord(line[0]) != 10:
            self.writeoutput(line.rstrip(), self.outfmt)

    def errorwrite(self, line: str) -> None:
        """Capture stderr and display in outdisplay"""
        self.writeoutput(line, self.errfmt)

    def writeoutput(self, line: str, fmt: QTextCharFormat = None) -> None:
        """Set text formatting and display line in outdisplay"""
        if fmt is not None:
            self.outdisplay.setCurrentCharFormat(fmt)
        self.outdisplay.appendPlainText(line.rstrip())

    def sizeHint(self):
        return QSize(500, 200)
import pathlib
class verilogaC:
"""
This class represents an imported verilog-A module.
"""
def __init__(self, pathObj: pathlib.Path):
self._pathObj = pathObj
keyWords = ["analog", "electrical"]
self._vaModule = ''
self.instanceParams = dict()
self.modelParams = dict()
self.inPins = list()
self.inoutPins = list()
self.outPins = list()
self._netlistLine = ''
self.statementLines = list()
self._pinOrder = ''
with open(self._pathObj) as f:
self.fileLines = f.readlines()
self.stripComments()
self.oneLiners()
# splitLines=[fileline.split() for fileline in fileLines]
def stripComments(self):
comment = False
for line in self.fileLines: # concatenate the lines until it reaches a ;
stripLine = line.strip()
if stripLine.startswith('/*'):
comment = True
if not comment:
doubleSlashLoc = stripLine.find('//')
if doubleSlashLoc > -1:
stripLine = stripLine[:doubleSlashLoc]
if stripLine != '':
self.statementLines.append(stripLine)
if comment and stripLine.endswith('*/'):
comment = False
def oneLiners(self):
tempLine = ''
oneLiners = list()
for line in self.statementLines:
stripLine = line.strip()
if not stripLine.startswith('`include'):
tempLine = f'{tempLine} {stripLine}'
if stripLine.endswith(';'):
oneLiners.append(tempLine.strip())
splitLine = tempLine.strip().split()
if splitLine:
if splitLine[0] == 'module':
self._vaModule = splitLine[1].split('(')[0]
indexLow = line.index("(")
indexHigh = line.index(")")
self.pins = [pin.strip() for pin in
line[indexLow + 1: indexHigh].split(",")]
elif splitLine[0] == 'in' or splitLine[0] == 'input':
pinsList = splitLine[1].split(';')[0].split(',')
self.inPins.extend([pin.strip() for pin in pinsList])
elif splitLine[0] == 'out' or splitLine[0] == 'output':
pinsList = splitLine[1].split(';')[0].split(',')
self.outPins.extend([pin.strip() for pin in pinsList])
elif splitLine[0] == 'inout':
pinsList = splitLine[1].split(';')[0].split(',')
self.inoutPins.extend([pin.strip() for pin in pinsList])
elif splitLine[0] == "parameter":
paramDefPart = tempLine.split('*(')[0]
paramName = paramDefPart.split('=')[0].split()[-1].strip()
paramValue = paramDefPart.split('=')[1].split()[0].strip()
try:
paramAttr = tempLine.strip().split("(*")[1]
except IndexError:
paramAttr = ""
if "type" in paramAttr and '"instance"' in paramAttr:
# parameter value is between = and (*
self.instanceParams[paramName] = paramValue
if "xyceAlsoModel" in paramAttr and '"yes"' in paramAttr:
self.modelParams[paramName] = paramValue
else: # no parameter attribute statement
self.modelParams[paramName] = paramValue
tempLine = ''
@property
def pathObj(self):
return self._pathObj
@pathObj.setter
def pathObj(self, value:pathlib.Path):
assert isinstance(value,pathlib.Path)
self._pathObj = value
@property
def pinOrder(self):
return self._pinOrder
@pinOrder.setter
def pinOrder(self, value:str):
assert isinstance(value,str)
self._pinOrder = value
@property
def netlistLine(self):
self._pinOrder = ','.join(self.pins)
print(f'pinOrder : {self.pinOrder}')
instParamString = ' '.join(
[f'[@{key}:{key}=%:{key}={item}]' for key, item in
self.instanceParams.items()])
self._netlistLine = f'Y{self._vaModule} [@instName] [@pinList] ' \
f'{self._vaModule}Model {instParamString}'
return self._netlistLine
@netlistLine.setter
def netListLine(self, value:str):
assert isinstance(value,str)
self._netlistLine = value
@property
def vaModule(self):
return self._vaModule | /revolution-eda-0.5.2.tar.gz/revolution-eda-0.5.2/revedaEditor/backend/hdlBackEnd.py | 0.532911 | 0.182826 | hdlBackEnd.py | pypi |
import json
import revedaEditor.common.net as net
import revedaEditor.common.shape as shp
class symbolAttribute(object):
    """A named symbol attribute: a (name, definition) pair of strings."""

    def __init__(self, name: str, definition: str):
        self._name = name
        self._definition = definition

    def __str__(self):
        return f'{self.name}: {self.definition}'

    def __repr__(self):
        # Same rendering as __str__.
        return str(self)

    @property
    def name(self):
        """Attribute name."""
        return self._name

    @name.setter
    def name(self, value):
        assert isinstance(value, str)
        self._name = value

    @property
    def definition(self):
        """Attribute definition text."""
        return self._definition

    @definition.setter
    def definition(self, value):
        assert isinstance(value, str)
        self._definition = value
class symbolEncoder(json.JSONEncoder):
    """JSON encoder for symbol-view items (shapes, pins, labels, attributes).

    Each supported item type is serialised to a dict tagged with a "type"
    key; positions are stored relative to the scene origin.
    """

    def default(self, item):
        if isinstance(item, shp.rectangle):
            itemDict = {"type": "rect", "rect": item.rect.getCoords(),
                        "pen": item.pen.pname,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.line):
            itemDict = {"type": "line", "st": item.start.toTuple(),
                        "end": item.end.toTuple(), "pen": item.pen.pname,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.circle):
            itemDict = {"type": "circle", "cen": item.centre.toTuple(),
                        "end": item.end.toTuple(), "pen": item.pen.pname,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.arc):
            itemDict = {"type": "arc", "st": item.start.toTuple(),
                        "end": item.end.toTuple(), "pen": item.pen.pname,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.pin):
            itemDict = {"type": "pin", "st": item.start.toTuple(),
                        "pen": item.pen.pname, "nam": item.pinName,
                        "pd": item.pinDir, "pt": item.pinType,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.text):
            itemDict = {"type": "text", "st": item.start.toTuple(),
                        "pen": item.pen.pname, 'tc': item.textContent,
                        'ff': item.fontFamily, 'fs': item.fontStyle,
                        'th': item.textHeight, 'ta': item.textAlignment,
                        'to': item.textOrient,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.label):
            itemDict = {"type": "label", "st": item.start.toTuple(),
                        "pen": item.pen.pname, "nam": item.labelName,
                        "def": item.labelDefinition,  # label as entered
                        "txt": item.labelText,  # shown label
                        "val": item.labelValue,  # label value
                        "vis": item.labelVisible,  # label visibility
                        "lt": item.labelType, "ht": item.labelHeight,
                        "al": item.labelAlign, "or": item.labelOrient,
                        "use": item.labelUse,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, symbolAttribute):
            itemDict = {"type": "attr", "nam": item.name,
                        "def": item.definition, }
            return itemDict
        # BUGFIX: previously fell through and implicitly returned None, so
        # unsupported items were silently serialised as null. Delegating to
        # the base class raises TypeError as the json protocol requires.
        return super().default(item)
class schematicEncoder(json.JSONEncoder):
    """JSON encoder for schematic scene items.

    Serializes symbol instances, nets and schematic pins/texts into plain
    dicts with short keys ("st", "pen", "loc", ...) so a schematic view can
    be written out with ``json.dump``. Falls through to the JSONEncoder
    default (raising TypeError) for unknown item types.
    """

    def default(self, item):
        """Return a JSON-serializable dict for a known schematic item."""
        if isinstance(item, shp.symbolShape):
            # only value and visibility be changed in the symbol instance.
            # NOTE(review): the comprehension variable shadows the outer
            # ``item`` — harmless here because the outer object is only
            # needed again after the comprehension, but worth renaming.
            itemLabelDict = {
                item.labelName: [item.labelValue, item.labelVisible] for item in
                item.labels.values()}
            # "loc" is stored relative to the scene origin so the file is
            # independent of the absolute scene coordinates.
            itemDict = {"type": "symbolShape", "lib": item.libraryName,
                        "cell": item.cellName, "view": item.viewName,
                        "nam": item.instanceName, "ic": item.counter,
                        "ld": itemLabelDict, "loc": (
                            item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, "ign": int(item.netlistIgnore)}
            return itemDict
        elif isinstance(item, net.schematicNet):
            itemDict = {"type": "schematicNet", "st": item.start.toTuple(),
                        "end": item.end.toTuple(), "pen": item.pen.pname,
                        "loc": (item.scenePos() - item.scene().origin).toTuple(),
                        "nam": item.name, "ns": item.nameSet, }
            return itemDict
        elif isinstance(item, shp.schematicPin):
            itemDict = {"type": "schematicPin", "st": item.start.toTuple(),
                        "pen": item.pen.pname, "pn": item.pinName,
                        "pd": item.pinDir, "pt": item.pinType, "loc": (
                            item.scenePos() - item.scene().origin).toTuple(),
                        "ang": item.angle, }
            return itemDict
        elif isinstance(item, shp.text):
            itemDict = {"type": "text", "st": item.start.toTuple(),
                        "pen": item.pen.pname, 'tc': item.textContent,
                        'ff': item.fontFamily, 'fs': item.fontStyle,
                        'th': item.textHeight, 'ta': item.textAlignment,
                        'to': item.textOrient, 'loc': (
                            item.scenePos() - item.scene().origin).toTuple(),
                        'ang': item.angle, }
return itemDict | /revolution-eda-0.5.2.tar.gz/revolution-eda-0.5.2/revedaEditor/fileio/symbolEncoder.py | 0.719975 | 0.411525 | symbolEncoder.py | pypi |
import logging
"""
_logging.py
websocket - WebSocket client library for Python
Copyright 2023 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Module-wide logger; library code must not emit output unless the
# application configures handlers, hence the NullHandler below.
_logger = logging.getLogger('websocket')
try:
    from logging import NullHandler
except ImportError:
    # Fallback for very old Pythons without logging.NullHandler
    # (present since 2.7/3.1) — a handler that discards every record.
    class NullHandler(logging.Handler):
        def emit(self, record) -> None:
            pass
_logger.addHandler(NullHandler())
# Tracing is opt-in; toggled by enableTrace() below.
_traceEnabled = False
__all__ = ["enableTrace", "dump", "error", "warning", "debug", "trace",
           "isEnabledForError", "isEnabledForDebug", "isEnabledForTrace"]
def enableTrace(traceable: bool,
                handler: logging.StreamHandler = None,
                level: str = "DEBUG") -> None:
    """
    Turn on/off the traceability.

    Parameters
    ----------
    traceable: bool
        If set to True, traceability is enabled.
    handler: logging.Handler
        Handler that receives the trace output. When omitted, a fresh
        ``logging.StreamHandler`` is created per call. (Previously the
        default handler was created once at import time as a default
        argument, which bound it to the interpreter's stderr at import
        and shared one instance across all calls.)
    level: str
        Name of a ``logging`` level (e.g. "DEBUG", "INFO") applied to the
        module logger when tracing is enabled.
    """
    global _traceEnabled
    _traceEnabled = traceable
    if traceable:
        if handler is None:
            handler = logging.StreamHandler()
        # Note: Logger.addHandler de-duplicates by identity, so passing
        # the same handler object twice is safe.
        _logger.addHandler(handler)
        _logger.setLevel(getattr(logging, level))
def dump(title: str, message: str) -> None:
    """Log *message* framed by a titled banner, when tracing is enabled."""
    if not _traceEnabled:
        return
    _logger.debug("--- " + title + " ---")
    _logger.debug(message)
    _logger.debug("-----------------------")
def error(msg: str) -> None:
    """Log *msg* at ERROR level on the 'websocket' module logger."""
    _logger.error(msg)
def warning(msg: str) -> None:
    """Log *msg* at WARNING level on the 'websocket' module logger."""
    _logger.warning(msg)
def debug(msg: str) -> None:
    """Log *msg* at DEBUG level on the 'websocket' module logger."""
    _logger.debug(msg)
def info(msg: str) -> None:
    """Log *msg* at INFO level on the 'websocket' module logger."""
    _logger.info(msg)
def trace(msg: str) -> None:
    """Log *msg* at DEBUG level, but only when tracing was enabled
    via enableTrace()."""
    if _traceEnabled:
        _logger.debug(msg)
def isEnabledForError() -> bool:
    """Return True if the module logger would process ERROR records."""
    return _logger.isEnabledFor(logging.ERROR)
def isEnabledForDebug() -> bool:
    """Return True if the module logger would process DEBUG records."""
    return _logger.isEnabledFor(logging.DEBUG)
def isEnabledForTrace() -> bool:
return _traceEnabled | /revolution.py-5.0.1.4.tar.gz/revolution.py-5.0.1.4/websocket/_logging.py | 0.707 | 0.194502 | _logging.py | pypi |
import os
import socket
import struct
from urllib.parse import unquote, urlparse
"""
_url.py
websocket - WebSocket client library for Python
Copyright 2023 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__all__ = ["parse_url", "get_proxy_info"]
def parse_url(url: str) -> tuple:
    """
    Split a ws/wss URL into its connection components.

    Returns the tuple ``(hostname, port, resource, is_secure)`` where
    *resource* is the path plus any query string and *is_secure* is True
    for the "wss" scheme. Missing ports default to 80 (ws) / 443 (wss).

    Parameters
    ----------
    url: str
        url string.

    Raises
    ------
    ValueError
        If the URL has no scheme separator, no hostname, or a scheme
        other than ws/wss.
    """
    if ":" not in url:
        raise ValueError("url is invalid")

    scheme, remainder = url.split(":", 1)
    parsed = urlparse(remainder, scheme="http")
    if not parsed.hostname:
        raise ValueError("hostname is invalid")
    hostname = parsed.hostname

    if scheme == "ws":
        is_secure = False
        default_port = 80
    elif scheme == "wss":
        is_secure = True
        default_port = 443
    else:
        raise ValueError("scheme %s is invalid" % scheme)

    port = parsed.port or default_port

    resource = parsed.path or "/"
    if parsed.query:
        resource += "?" + parsed.query

    return hostname, port, resource, is_secure
DEFAULT_NO_PROXY_HOST = ["localhost", "127.0.0.1"]
def _is_ip_address(addr: str) -> bool:
try:
socket.inet_aton(addr)
except socket.error:
return False
else:
return True
def _is_subnet_address(hostname: str) -> bool:
    """Return True if *hostname* is IPv4 CIDR notation ("a.b.c.d/n").

    Accepts prefix lengths 0 through 32 inclusive. (Previously /32 was
    rejected, although it is valid CIDR denoting a single host and is
    handled correctly by _is_address_in_network.)
    """
    try:
        addr, netmask = hostname.split("/")
        return _is_ip_address(addr) and 0 <= int(netmask) <= 32
    except ValueError:
        # No "/", more than one "/", or a non-integer prefix.
        return False
def _is_address_in_network(ip: str, net: str) -> bool:
ipaddr = struct.unpack('!I', socket.inet_aton(ip))[0]
netaddr, netmask = net.split('/')
netaddr = struct.unpack('!I', socket.inet_aton(netaddr))[0]
netmask = (0xFFFFFFFF << (32 - int(netmask))) & 0xFFFFFFFF
return ipaddr & netmask == netaddr
def _is_no_proxy_host(hostname: str, no_proxy: list) -> bool:
    """Return True when *hostname* must bypass the proxy.

    The bypass list is taken from the *no_proxy* argument, falling back
    to the ``no_proxy``/``NO_PROXY`` environment variables and finally
    to DEFAULT_NO_PROXY_HOST. Entries may be exact host names, a "*"
    wildcard, IPv4 CIDR subnets, or ".domain" suffixes.
    """
    if not no_proxy:
        env_value = os.environ.get(
            "no_proxy", os.environ.get("NO_PROXY", "")).replace(" ", "")
        no_proxy = env_value.split(",") if env_value else []
    if not no_proxy:
        no_proxy = DEFAULT_NO_PROXY_HOST

    if '*' in no_proxy or hostname in no_proxy:
        return True

    if _is_ip_address(hostname):
        # Match the literal IP against any CIDR entries.
        return any(
            _is_address_in_network(hostname, entry)
            for entry in no_proxy
            if _is_subnet_address(entry)
        )

    # Match ".domain" suffix entries against the host name.
    return any(
        hostname.endswith(domain)
        for domain in no_proxy
        if domain.startswith('.')
    )
def get_proxy_info(
        hostname: str, is_secure: bool, proxy_host: str = None, proxy_port: int = 0, proxy_auth: tuple = None,
        no_proxy: list = None, proxy_type: str = 'http') -> tuple:
    """
    Try to retrieve proxy host and port from environment
    if not provided in options.
    Result is (proxy_host, proxy_port, proxy_auth).
    proxy_auth is tuple of username and password
    of proxy authentication information.

    Parameters
    ----------
    hostname: str
        Websocket server name.
    is_secure: bool
        Is the connection secure? (wss) looks for "https_proxy" in env
        before falling back to "http_proxy"
    proxy_host: str
        http proxy host name.
    proxy_port: str or int
        http proxy port.
    no_proxy: list
        Whitelisted host names that don't use the proxy.
    proxy_auth: tuple
        HTTP proxy auth information. Tuple of username and password. Default is None.
    proxy_type: str
        Specify the proxy protocol (http, socks4, socks4a, socks5, socks5h). Default is "http".
        Use socks4a or socks5h if you want to send DNS requests through the proxy.
    """
    if _is_no_proxy_host(hostname, no_proxy):
        return None, 0, None

    # Explicitly supplied proxy settings take precedence over environment.
    if proxy_host:
        return proxy_host, proxy_port, proxy_auth

    env_keys = ["http_proxy"]
    if is_secure:
        env_keys.insert(0, "https_proxy")

    for key in env_keys:
        value = os.environ.get(key, os.environ.get(key.upper(), "")).replace(" ", "")
        if value:
            proxy = urlparse(value)
            if proxy.username:
                # BUGFIX: a proxy URL may carry a username without a
                # password (e.g. "http://user@proxy:8080"); unquote(None)
                # would raise TypeError.
                auth = (unquote(proxy.username),
                        unquote(proxy.password) if proxy.password else None)
            else:
                auth = None
            return proxy.hostname, proxy.port, auth

    return None, 0, None
import array
import os
import struct
import sys
from ._exceptions import *
from ._utils import validate_utf8
from threading import Lock
"""
_abnf.py
websocket - WebSocket client library for Python
Copyright 2023 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
    # If wsaccel is available, use compiled routines to mask data.
    # wsaccel only provides around a 10% speed boost compared
    # to the websocket-client _mask() implementation.
    # Note that wsaccel is unmaintained.
    from wsaccel.xormask import XorMaskerSimple

    def _mask(_m, _d) -> bytes:
        return XorMaskerSimple(_m).process(_d)
except ImportError:
    # wsaccel is not available, use websocket-client _mask()
    native_byteorder = sys.byteorder

    def _mask(mask_value: array.array, data_value: array.array) -> bytes:
        # XOR the whole payload in one shot: tile the 4-byte mask to the
        # payload length, widen both operands to arbitrary-precision
        # ints, XOR, and convert back to bytes.
        datalen = len(data_value)
        data_value = int.from_bytes(data_value, native_byteorder)
        mask_value = int.from_bytes(mask_value * (datalen // 4) + mask_value[: datalen % 4], native_byteorder)
        return (data_value ^ mask_value).to_bytes(datalen, native_byteorder)

__all__ = [
    'ABNF', 'continuous_frame', 'frame_buffer',
    'STATUS_NORMAL',
    'STATUS_GOING_AWAY',
    'STATUS_PROTOCOL_ERROR',
    'STATUS_UNSUPPORTED_DATA_TYPE',
    'STATUS_STATUS_NOT_AVAILABLE',
    'STATUS_ABNORMAL_CLOSED',
    'STATUS_INVALID_PAYLOAD',
    'STATUS_POLICY_VIOLATION',
    'STATUS_MESSAGE_TOO_BIG',
    'STATUS_INVALID_EXTENSION',
    'STATUS_UNEXPECTED_CONDITION',
    'STATUS_BAD_GATEWAY',
    'STATUS_TLS_HANDSHAKE_ERROR',
]

# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_SERVICE_RESTART = 1012
STATUS_TRY_AGAIN_LATER = 1013
STATUS_BAD_GATEWAY = 1014
STATUS_TLS_HANDSHAKE_ERROR = 1015

# Codes accepted inside a received close frame. 1005/1006/1015 are
# deliberately absent — presumably because RFC 6455 reserves them for
# local use and they must never appear on the wire (TODO confirm).
VALID_CLOSE_STATUS = (
    STATUS_NORMAL,
    STATUS_GOING_AWAY,
    STATUS_PROTOCOL_ERROR,
    STATUS_UNSUPPORTED_DATA_TYPE,
    STATUS_INVALID_PAYLOAD,
    STATUS_POLICY_VIOLATION,
    STATUS_MESSAGE_TOO_BIG,
    STATUS_INVALID_EXTENSION,
    STATUS_UNEXPECTED_CONDITION,
    STATUS_SERVICE_RESTART,
    STATUS_TRY_AGAIN_LATER,
    STATUS_BAD_GATEWAY,
)
class ABNF:
"""
ABNF frame class.
See http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7e
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin: int = 0, rsv1: int = 0, rsv2: int = 0, rsv3: int = 0,
opcode: int = OPCODE_TEXT, mask: int = 1, data: str or bytes = "") -> None:
"""
Constructor for ABNF. Please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
if data is None:
data = ""
self.data = data
self.get_mask_key = os.urandom
def validate(self, skip_utf8_validation: bool = False) -> None:
"""
Validate the ABNF frame.
Parameters
----------
skip_utf8_validation: skip utf8 validation.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise WebSocketProtocolException("rsv is not implemented, yet")
if self.opcode not in ABNF.OPCODES:
raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
if self.opcode == ABNF.OPCODE_PING and not self.fin:
raise WebSocketProtocolException("Invalid ping frame.")
if self.opcode == ABNF.OPCODE_CLOSE:
l = len(self.data)
if not l:
return
if l == 1 or l >= 126:
raise WebSocketProtocolException("Invalid close frame.")
if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
raise WebSocketProtocolException("Invalid close frame.")
code = 256 * self.data[0] + self.data[1]
if not self._is_valid_close_status(code):
raise WebSocketProtocolException("Invalid close opcode %r", code)
@staticmethod
def _is_valid_close_status(code: int) -> bool:
return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
def __str__(self) -> str:
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data: str, opcode: int, fin: int = 1) -> 'ABNF':
"""
Create frame to send text, binary and other data.
Parameters
----------
data: str
data to send. This is string value(byte array).
If opcode is OPCODE_TEXT and this value is unicode,
data value is converted into unicode string, automatically.
opcode: int
operation code. please see OPCODE_MAP.
fin: int
fin flag. if set to 0, create continue fragmentation.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, str):
data = data.encode("utf-8")
# mask must be set if send data from client
return ABNF(fin, 0, 0, 0, opcode, 1, data)
def format(self) -> bytes:
"""
Format this object to string(byte array) to send data to server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7 |
self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4 |
self.opcode).encode('latin-1')
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length).encode('latin-1')
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e).encode('latin-1')
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f).encode('latin-1')
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key: str or bytes) -> bytes:
s = ABNF.mask(mask_key, self.data)
if isinstance(mask_key, str):
mask_key = mask_key.encode('utf-8')
return mask_key + s
@staticmethod
def mask(mask_key: str or bytes, data: str or bytes) -> bytes:
"""
Mask or unmask data. Just do xor for each byte
Parameters
----------
mask_key: bytes or str
4 byte mask.
data: bytes or str
data to mask/unmask.
"""
if data is None:
data = ""
if isinstance(mask_key, str):
mask_key = mask_key.encode('latin-1')
if isinstance(data, str):
data = data.encode('latin-1')
return _mask(array.array("B", mask_key), array.array("B", data))
class frame_buffer:
    """Incremental reader that assembles ABNF frames from a socket.

    Buffers raw bytes from ``recv_fn`` until a complete frame (header,
    extended length, mask key, payload) is available. State between
    calls lives in ``header``/``length``/``mask``; ``clear()`` resets it
    after each completed frame.
    """

    # Indices into the parsed ``header`` tuple.
    _HEADER_MASK_INDEX = 5
    _HEADER_LENGTH_INDEX = 6

    def __init__(self, recv_fn: "Callable[[int], bytes]",
                 skip_utf8_validation: bool) -> None:
        # recv_fn(n) must return up to n raw bytes (annotation corrected
        # from the original ``int``).
        self.recv = recv_fn
        self.skip_utf8_validation = skip_utf8_validation
        # Buffers over the packets from the layer beneath until desired amount
        # bytes of bytes are received.
        self.recv_buffer = []
        self.clear()
        self.lock = Lock()

    def clear(self) -> None:
        """Reset per-frame parse state (header, length, mask)."""
        self.header = None
        self.length = None
        self.mask = None

    def has_received_header(self) -> bool:
        # NOTE: inverted semantics — returns True when the header has NOT
        # yet been received (callers use it as "still needs header").
        return self.header is None

    def recv_header(self) -> None:
        """Read and parse the 2-byte fixed frame header."""
        header = self.recv_strict(2)
        b1 = header[0]
        fin = b1 >> 7 & 1
        rsv1 = b1 >> 6 & 1
        rsv2 = b1 >> 5 & 1
        rsv3 = b1 >> 4 & 1
        opcode = b1 & 0xf
        b2 = header[1]
        has_mask = b2 >> 7 & 1
        length_bits = b2 & 0x7f
        self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)

    def has_mask(self) -> "bool | int":
        if not self.header:
            return False
        return self.header[frame_buffer._HEADER_MASK_INDEX]

    def has_received_length(self) -> bool:
        # NOTE: inverted semantics, same convention as has_received_header.
        return self.length is None

    def recv_length(self) -> None:
        """Resolve the payload length, reading the extended field if any."""
        bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
        length_bits = bits & 0x7f
        if length_bits == 0x7e:
            # 126: 16-bit extended length follows.
            v = self.recv_strict(2)
            self.length = struct.unpack("!H", v)[0]
        elif length_bits == 0x7f:
            # 127: 64-bit extended length follows.
            v = self.recv_strict(8)
            self.length = struct.unpack("!Q", v)[0]
        else:
            self.length = length_bits

    def has_received_mask(self) -> bool:
        # NOTE: inverted semantics, same convention as has_received_header.
        return self.mask is None

    def recv_mask(self) -> None:
        # Empty string (not None) marks "no mask", so has_received_mask
        # stops returning True.
        self.mask = self.recv_strict(4) if self.has_mask() else ""

    def recv_frame(self) -> ABNF:
        """Read one complete frame, validate it and return it."""
        with self.lock:
            # Header
            if self.has_received_header():
                self.recv_header()
            (fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header

            # Frame length
            if self.has_received_length():
                self.recv_length()
            length = self.length

            # Mask
            if self.has_received_mask():
                self.recv_mask()
            mask = self.mask

            # Payload
            payload = self.recv_strict(length)
            if has_mask:
                payload = ABNF.mask(mask, payload)

            # Reset for next frame
            self.clear()

            frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
            frame.validate(self.skip_utf8_validation)

        return frame

    def recv_strict(self, bufsize: int) -> bytes:
        """Return exactly *bufsize* bytes, reading more if necessary."""
        shortage = bufsize - sum(map(len, self.recv_buffer))
        while shortage > 0:
            # Limit buffer size that we pass to socket.recv() to avoid
            # fragmenting the heap -- the number of bytes recv() actually
            # reads is limited by socket buffer and is relatively small,
            # yet passing large numbers repeatedly causes lots of large
            # buffers allocated and then shrunk, which results in
            # fragmentation.
            bytes_ = self.recv(min(16384, shortage))
            self.recv_buffer.append(bytes_)
            shortage -= len(bytes_)

        unified = b"".join(self.recv_buffer)
        if shortage == 0:
            self.recv_buffer = []
            return unified
        else:
            # Keep the surplus bytes for the next call.
            self.recv_buffer = [unified[bufsize:]]
            return unified[:bufsize]
class continuous_frame:
    """Reassembles a fragmented message (TEXT/BINARY + CONT frames)."""

    def __init__(self, fire_cont_frame: bool, skip_utf8_validation: bool) -> None:
        # If fire_cont_frame is True, every fragment is delivered to the
        # caller instead of buffering until the final (fin) frame.
        self.fire_cont_frame = fire_cont_frame
        self.skip_utf8_validation = skip_utf8_validation
        # [opcode, accumulated payload] of the message being assembled.
        self.cont_data = None
        # Opcode of the fragmented message in flight, or None.
        self.recving_frames = None

    def validate(self, frame: ABNF) -> None:
        """Raise if *frame* is illegal in the current fragmentation state."""
        # A CONT frame without a preceding TEXT/BINARY is a protocol error,
        # as is a new TEXT/BINARY while a fragmented message is in flight.
        if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
            raise WebSocketProtocolException("Illegal frame")
        if self.recving_frames and \
                frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
            raise WebSocketProtocolException("Illegal frame")

    def add(self, frame: ABNF) -> None:
        """Append *frame*'s payload to the message under assembly."""
        if self.cont_data:
            self.cont_data[1] += frame.data
        else:
            if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
                self.recving_frames = frame.opcode
            self.cont_data = [frame.opcode, frame.data]

        if frame.fin:
            self.recving_frames = None

    def is_fire(self, frame: ABNF) -> "bool | int":
        """Return truthy when the buffered message should be delivered."""
        return frame.fin or self.fire_cont_frame

    def extract(self, frame: ABNF) -> list:
        """Return [opcode, frame] for the completed message and reset."""
        data = self.cont_data
        self.cont_data = None
        frame.data = data[1]
        if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
            raise WebSocketPayloadException(
                "cannot decode: " + repr(frame.data))
return [data[0], frame] | /revolution.py-5.0.1.4.tar.gz/revolution.py-5.0.1.4/websocket/_abnf.py | 0.555435 | 0.169956 | _abnf.py | pypi |
import struct
import warnings
from json import load as jload
from os import access, F_OK, R_OK
from signal import signal, SIG_DFL, SIGINT, SIGTERM
from threading import Thread, Event, Lock
from timeit import default_timer
# Global values — presumably RevPi status-LED colours (confirm against
# the LED IO usage elsewhere in the package).
OFF = 0
GREEN = 1
RED = 2
# Edge selectors used when registering IO events (rising/falling/both).
RISING = 31
FALLING = 32
BOTH = 33
# Always emit warnings (e.g. cycle-time overruns) rather than once per
# location, since they signal recurring runtime conditions.
warnings.simplefilter(action="always")
class RevPiCallback(Thread):
    """Worker thread that runs a single registered event function.

    The event function receives this thread object as its only argument,
    so define it as e.g. ``def event(th):``. Inside the function,
    ``th.ioname`` is the name of the IO object that fired the event and
    ``th.iovalue`` is that IO's value at trigger time.

    ``th.exit`` is a ``threading.Event`` the function can use as an abort
    condition; calling ``stop()`` sets it. It also doubles as an
    interruptible wait — ``th.exit.wait(0.5)`` sleeps 500 ms or returns
    immediately once ``stop()`` was called::

        while not th.exit.is_set():
            # IO work
            th.exit.wait(0.5)
    """

    def __init__(self, func, name, value):
        """Set up the callback thread.

        :param func: function to invoke when the thread starts
        :param name: name of the IO object that triggered the event
        :param value: value of that IO object at trigger time
        """
        super().__init__()
        self.func = func
        self.ioname = name
        self.iovalue = value
        self.exit = Event()
        self.daemon = True

    def run(self):
        """Invoke the registered function, passing this thread object."""
        self.func(self)

    def stop(self):
        """Set the exit event so the event function can finish."""
        self.exit.set()
class RevPiCycletools():
    """Toolbox for the cycleloop function.

    Provides cycle-based clock flags, edge flags and software timers for
    cyclic program functions.

    Note that all edge flags are True during the very first cycle! The
    ``first`` flag tells whether the current cycle is the first one.

    Clock flags flag1c, flag5c, flag10c, ... are False for the given
    number of cycles and then True for the same number of cycles
    (e.g. flag5c is False for 5 cycles, then True for the next 5).

    Edge flags flank5c, flank10c, ... are True for exactly one cycle
    every N cycles (e.g. flank5c is True every 5th cycle).

    These flags can be used e.g. to let lamps attached to outputs blink
    synchronously.
    """

    def __init__(self):
        """Init RevPiCycletools class."""
        # Internal cycle counters (5-cycle base and 15-cycle sub-counter).
        self.__cycle = 0
        self.__ucycle = 0
        # Timer state: name -> remaining cycles (TOF) or
        # name -> [remaining cycles, retrigger flag] (TON / TP).
        self.__dict_ton = {}
        self.__dict_tof = {}
        self.__dict_tp = {}

        # Clock flags
        self.first = True
        self.flag1c = False
        self.flag5c = False
        self.flag10c = False
        self.flag15c = False
        self.flag20c = False

        # Edge flags (all True during the first cycle by design)
        self.flank5c = True
        self.flank10c = True
        self.flank15c = True
        self.flank20c = True

    def _docycle(self):
        """Per-cycle housekeeping: advance timers, flags and counters."""
        # Off-delay timers (TOF): count down while running.
        # (The original German comment labeled this loop as the on-delay;
        # the dict name and get_tofc show it is the off-delay.)
        for tof in self.__dict_tof:
            if self.__dict_tof[tof] > 0:
                self.__dict_tof[tof] -= 1

        # On-delay timers (TON): count down only while retriggered each
        # cycle; otherwise reset to the inactive state (-1).
        for ton in self.__dict_ton:
            if self.__dict_ton[ton][1]:
                if self.__dict_ton[ton][0] > 0:
                    self.__dict_ton[ton][0] -= 1
                self.__dict_ton[ton][1] = False
            else:
                self.__dict_ton[ton][0] = -1

        # Pulse timers (TP): run down once started; reset when expired.
        for tp in self.__dict_tp:
            if self.__dict_tp[tp][1]:
                if self.__dict_tp[tp][0] > 0:
                    self.__dict_tp[tp][0] -= 1
                else:
                    self.__dict_tp[tp][1] = False
            else:
                self.__dict_tp[tp][0] = -1

        # Edge flags are one-cycle pulses — clear them every cycle first.
        self.flank5c = False
        self.flank10c = False
        self.flank15c = False
        self.flank20c = False

        # Logical flags
        self.first = False
        self.flag1c = not self.flag1c

        # Derived flags: everything is driven from the 5-cycle base
        # counter; 15 uses its own sub-counter, 10/20 cascade off 5/10.
        self.__cycle += 1
        if self.__cycle == 5:
            self.__ucycle += 1
            if self.__ucycle == 3:
                self.flank15c = True
                self.flag15c = not self.flag15c
                self.__ucycle = 0

            if self.flag5c:
                if self.flag10c:
                    self.flank20c = True
                    self.flag20c = not self.flag20c

                self.flank10c = True
                self.flag10c = not self.flag10c

            self.flank5c = True
            self.flag5c = not self.flag5c
            self.__cycle = 0

    def get_tofc(self, name):
        """Value of the off-delay timer.

        @param name Unique name of the timer
        @return True while the off-delay is still running"""
        return self.__dict_tof.get(name, 0) > 0

    def set_tofc(self, name, cycles):
        """(Re)start an off-delay timer.

        @param name Unique name to access the timer
        @param cycles Number of cycles the delay lasts unless restarted
        """
        self.__dict_tof[name] = cycles

    def get_tonc(self, name):
        """On-delay timer.

        @param name Unique name of the timer
        @return True once the on-delay has elapsed"""
        return self.__dict_ton.get(name, [-1])[0] == 0

    def set_tonc(self, name, cycles):
        """Start (or keep retriggering) an on-delay timer.

        @param name Unique name to access the timer
        @param cycles Number of delay cycles when newly started
        """
        if self.__dict_ton.get(name, [-1])[0] == -1:
            self.__dict_ton[name] = [cycles, True]
        else:
            # Already running — just keep it alive for this cycle.
            self.__dict_ton[name][1] = True

    def get_tpc(self, name):
        """Pulse timer.

        @param name Unique name of the timer
        @return True while the pulse is active"""
        return self.__dict_tp.get(name, [-1])[0] > 0

    def set_tpc(self, name, cycles):
        """Start a pulse timer.

        @param name Unique name to access the timer
        @param cycles Number of cycles the pulse should stay active
        """
        if self.__dict_tp.get(name, [-1])[0] == -1:
            self.__dict_tp[name] = [cycles, True]
        else:
            # Pulse already active — retriggering does not extend it.
            self.__dict_tp[name][1] = True
class RevPiProcimgWriter(Thread):
    """Synchronization thread for the process image.

    This class is started as a thread when the process image is to be
    synchronized cyclically. It is mainly used for event handling.
    """

    def __init__(self, procimg, length, refreshlist, monitoring):
        """Init RevPiProcimgWriter class.

        @param procimg File name of the piControl device
        @param length Length of the used memory area
        @param refreshlist list() of devices to refresh
        @param monitoring Inputs and outputs are read, never written
        """
        super().__init__()
        self._adjwait = 0                 # adaptive wait to hit _refresh
        self._buffedwrite = False         # flush needed after writes?
        self._ioerror = 0                 # running IO error count
        self._length = length
        self._lst_refresh = refreshlist
        self._monitoring = monitoring
        self._procimg = procimg
        self._refresh = 0.05              # cycle time in seconds (50 ms)
        self._work = Event()              # set to stop the thread
        self.daemon = True
        self.lck_refresh = Lock()
        self.maxioerrors = 0              # 0 = unlimited IO errors
        self.newdata = Event()            # signaled after each cycle

    def _create_myfh(self, path):
        """Create the file object for the process image.

        @param path Path to the file
        @return FileObject"""
        # Unbuffered binary mode, so no flush is required.
        self._buffedwrite = False
        return open(path, "r+b", 0)

    def _gotioerror(self):
        """IOError bookkeeping for auto_refresh."""
        self._ioerror += 1
        if self.maxioerrors != 0 and self._ioerror >= self.maxioerrors:
            raise RuntimeError(
                "reach max io error count {} on process image".format(
                    self.maxioerrors
                )
            )
        warnings.warn(
            "count {} io errors on process image".format(self._ioerror),
            RuntimeWarning
        )

    def get_refresh(self):
        """Return the cycle time.

        @return int() cycle time in milliseconds"""
        return int(self._refresh * 1000)

    def run(self):
        """Run the automatic process image synchronization."""
        fh = self._create_myfh(self._procimg)
        self._adjwait = self._refresh

        while not self._work.is_set():
            ot = default_timer()

            # Acquire the lock; warn if it cannot be taken in time.
            if not self.lck_refresh.acquire(timeout=self._adjwait):
                warnings.warn(
                    "cycle time of {} ms exceeded on lock".format(
                        int(self._refresh * 1000)
                    ),
                    RuntimeWarning
                )
                continue

            try:
                fh.seek(0)
                bytesbuff = bytearray(fh.read(self._length))
            except IOError:
                self._gotioerror()
                self.lck_refresh.release()
                self._work.wait(self._adjwait)
                continue

            if self._monitoring:
                # Monitoring: copy inputs AND outputs into the buffer,
                # never write back.
                for dev in self._lst_refresh:
                    dev.filelock.acquire()
                    dev._ba_devdata[:] = bytesbuff[dev.slc_devoff]
                    dev.filelock.release()
            else:
                # Normal mode: inputs into the buffer, outputs into the
                # process image.
                ioerr = False
                for dev in self._lst_refresh:
                    dev.filelock.acquire()
                    dev._ba_devdata[dev.slc_inp] = bytesbuff[dev.slc_inpoff]
                    try:
                        fh.seek(dev.slc_outoff.start)
                        fh.write(dev._ba_devdata[dev.slc_out])
                    except IOError:
                        ioerr = True
                    finally:
                        dev.filelock.release()

                if self._buffedwrite:
                    try:
                        fh.flush()
                    except IOError:
                        ioerr = True

                if ioerr:
                    self._gotioerror()
                    self.lck_refresh.release()
                    self._work.wait(self._adjwait)
                    continue

            self.lck_refresh.release()

            # Wake up all waiters
            self.newdata.set()
            self._work.wait(self._adjwait)

            # Adjust the wait time to actually achieve self._refresh
            if default_timer() - ot >= self._refresh:
                self._adjwait -= 0.001
                if self._adjwait < 0:
                    warnings.warn(
                        "cycle time of {} ms exceeded".format(
                            int(self._refresh * 1000)
                        ),
                        RuntimeWarning
                    )
                    self._adjwait = 0
            else:
                self._adjwait += 0.001

        # Wake everyone up one last time at the end
        self.newdata.set()
        fh.close()

    def stop(self):
        """Stop the automatic process image synchronization."""
        self._work.set()

    def set_refresh(self, value):
        """Set the cycle time in milliseconds.

        @param value int() milliseconds (10 to 1999; note the upper
        bound is exclusive although the error text says "to 2000")"""
        if value >= 10 and value < 2000:
            self._refresh = value / 1000
            self._adjwait = self._refresh
        else:
            raise ValueError(
                "refresh time must be 10 to 2000 milliseconds"
            )

    refresh = property(get_refresh, set_refresh)
class RevPiModIO():
"""Klasse fuer die Verwaltung aller piCtory Informationen.
Diese Klasse uebernimmt die gesamte Konfiguration aus piCtory und bilded
die Devices und IOs ab. Sie uebernimmt die exklusive Verwaltung des
Prozessabbilds und stellt sicher, dass die Daten synchron sind.
Sollten nur einzelne Devices gesteuert werden, verwendet man
RevPiModIOSelected() und uebergibt bei Instantiierung eine Liste mit
Device Positionen oder Device Namen.
"""
def __init__(self, **kwargs):
"""Instantiiert die Grundfunktionen.
@param kwargs Weitere Parameter:
- auto_refresh: Wenn True, alle Devices zu auto_refresh hinzufuegen
- configrsc: Pfad zur piCtory Konfigurationsdatei
- procimg: Pfad zum Prozessabbild
- monitoring: In- und Outputs werden gelesen, niemals geschrieben
- simulator: Laed das Modul als Simulator und vertauscht IOs
- syncoutputs: Aktuell gesetzte Outputs vom Prozessabbild einlesen
"""
self.auto_refresh = kwargs.get("auto_refresh", False)
self.configrsc = kwargs.get("configrsc", None)
self.procimg = kwargs.get("procimg", "/dev/piControl0")
self.monitoring = kwargs.get("monitoring", False)
self.simulator = kwargs.get("simulator", False)
self.syncoutputs = kwargs.get("syncoutputs", True)
self._cleanupfunc = None
self._lst_devselect = []
# piCtory Klassen
self.app = None
self.devices = None
self.summary = None
# Nur Konfigurieren, wenn nicht vererbt
if type(self) == RevPiModIO:
self.configure()
def __del__(self):
"""Zerstoert alle Klassen um auzuraeumen."""
if self.devices is not None:
self.devices.exit(full=True)
def _evt_exit(self, signum, sigframe):
"""Eventhandler fuer Programmende.
@param signum Signalnummer
@param sigframe Signalframe"""
if self.devices is not None:
self.devices.exit(full=True)
if self._cleanupfunc is not None:
self.devices.readprocimg()
self._cleanupfunc()
self.devices.writeprocimg()
signal(SIGINT, SIG_DFL)
signal(SIGTERM, SIG_DFL)
def cleanup(self):
"""Beendet auto_refresh und alle Threads."""
if self.devices is not None:
self.devices.exit(full=True)
self.app = None
self.devices = None
self.summary = None
def configure(self):
"""Verarbeitet die piCtory Konfigurationsdatei."""
jconfigrsc = self.get_jconfigrsc()
# App Klasse instantiieren
self.app = RevPiApp(jconfigrsc["App"])
# Device Klasse instantiieren
if len(self._lst_devselect) > 0:
lst_found = []
if type(self) == RevPiModIODriver:
_searchtype = "VIRTUAL"
else:
_searchtype = None
# Angegebene Devices suchen
for dev in jconfigrsc["Devices"]:
if _searchtype is None or dev["type"] == _searchtype:
if dev["name"] in self._lst_devselect:
lst_found.append(dev)
elif dev["position"].isnumeric() \
and int(dev["position"]) in self._lst_devselect:
lst_found.append(dev)
self.devices = RevPiDevicelist(
self.procimg,
(
jconfigrsc["Devices"] if len(self._lst_devselect) == 0
else lst_found
),
auto_refresh=self.auto_refresh,
monitoring=self.monitoring,
simulator=self.simulator,
syncoutputs=self.syncoutputs
)
# Summary Klasse instantiieren
self.summary = RevPiSummary(jconfigrsc["Summary"])
def get_jconfigrsc(self):
"""Laed die piCotry Konfiguration und erstellt ein dict().
@return dict() der piCtory Konfiguration"""
# piCtory Konfiguration prüfen
if self.configrsc is not None:
if not access(self.configrsc, F_OK | R_OK):
raise RuntimeError(
"can not access pictory configuration at {}".format(
self.configrsc))
else:
# piCtory Konfiguration an bekannten Stellen prüfen
lst_rsc = ["/etc/revpi/config.rsc", "/opt/KUNBUS/config.rsc"]
for rscfile in lst_rsc:
if access(rscfile, F_OK | R_OK):
self.configrsc = rscfile
break
if self.configrsc is None:
raise RuntimeError(
"can not access known pictory configurations at {} - "
"use 'configrsc' parameter so specify location"
"".format(", ".join(lst_rsc))
)
with open(self.configrsc, "r") as fhconfigrsc:
return jload(fhconfigrsc)
    def handlesignalend(self, cleanupfunc=None):
        """Install signal handlers for a clean program shutdown.

        When this function is called, RevPiModIO takes over the signal
        handlers for SIGINT and SIGTERM. These are received when the
        operating system or the user wants to terminate the control
        program cleanly.

        The optional function "cleanupfunc" is executed last, after the
        final read of the inputs. Outputs set inside it are written one
        last time after the function returns. This is intended for
        cleanup work such as switching off the LEDs on the RevPi core.

        After one of the signals has been received once and the
        RevPiModIO threads / functions have been shut down, the signal
        handlers are released again.

        @param cleanupfunc Function executed after the last read of the
        inputs, followed by the last write of the outputs
        """
        # Reject anything that is neither None nor callable
        if not (cleanupfunc is None or callable(cleanupfunc)):
            raise RuntimeError(
                "registered function '{}' is not callable".format(cleanupfunc)
            )
        self._cleanupfunc = cleanupfunc
        signal(SIGINT, self._evt_exit)
        signal(SIGTERM, self._evt_exit)
def exit(self, full=True):
"""Beendet mainloop() und optional auto_refresh.
@see #RevPiDevicelist.exit RevPiDevicelist.exit(...)"""
if self.devices is not None:
self.devices.exit(full=full)
def mainloop(self, freeze=False, blocking=True):
"""Startet den Mainloop mit Eventueberwachung.
@see #RevPiDevicelist.mainloop RevPiDevicelist.mainloop(...)"""
if self.devices is not None:
self.devices.mainloop(freeze=freeze, blocking=blocking)
class RevPiModIOSelected(RevPiModIO):
    """Manage selected devices of a piCtory configuration.

    This class maps only the given devices of the piCtory configuration
    including their IOs. It takes exclusive control of the process image
    region occupied by these devices and makes sure the data is in sync.
    """

    def __init__(self, deviceselection, **kwargs):
        """Instantiate base functionality for the given devices only.

        The parameter deviceselection can be a single device position /
        single device name or a list of several positions / names.

        @param deviceselection Position number or device name
        @param kwargs Additional parameters
        @see #RevPiModIO.__init__ RevPiModIO.__init__(...)
        """
        super().__init__(**kwargs)
        # Normalize the selection into the internal device list
        if type(deviceselection) == list:
            self._lst_devselect.extend(deviceselection)
        else:
            self._lst_devselect.append(deviceselection)
        for vdev in self._lst_devselect:
            if type(vdev) != int and type(vdev) != str:
                raise ValueError(
                    "need device position as int() or device name as str()"
                )
        self.configure()
        # Make sure the configuration contained the requested devices
        found = len(self.devices._device)
        if found == 0:
            if type(self) == RevPiModIODriver:
                raise RuntimeError(
                    "could not find any given VIRTUAL devices in config"
                )
            raise RuntimeError(
                "could not find any given devices in config"
            )
        if found != len(self._lst_devselect):
            if type(self) == RevPiModIODriver:
                raise RuntimeError(
                    "could not find all given VIRTUAL devices in config"
                )
            raise RuntimeError(
                "could not find all given devices in config"
            )
class RevPiModIODriver(RevPiModIOSelected):
    """Write own drivers for the virtual devices.

    With this class only the given virtual devices are managed by
    RevPiModIO. On instantiation inputs and outputs are swapped
    automatically so that the inputs can be written. The data can then
    be fetched from the devices via logiCAD.
    """

    def __init__(self, vdev, **kwargs):
        """Instantiate the base functionality.

        @param vdev Virtual device to use / or list() of them
        @param kwargs Additional parameters (not monitoring and simulator)
        @see #RevPiModIO.__init__ RevPiModIO.__init__(...)
        """
        # Drivers must write inputs: force simulator on, monitoring off
        kwargs.update(monitoring=False, simulator=True)
        super().__init__(vdev, **kwargs)
class RevPiApp(object):
    """Represents the app section of config.rsc."""

    def __init__(self, app):
        """Instantiate the RevPiApp class.
        @param app piCtory app information as dict()"""
        # Attribute names match the piCtory keys one to one
        for attribute in ("name", "version", "language", "layout"):
            setattr(self, attribute, app[attribute])
class RevPiSummary(object):
    """Represents the summary section of config.rsc."""

    def __init__(self, summary):
        """Instantiate the RevPiSummary class.
        @param summary piCtory summary information as dict()"""
        # piCtory uses camelCase keys, attributes are all lower case
        self.inptotal, self.outtotal = \
            summary["inpTotal"], summary["outTotal"]
class RevPiDevicelist():
    """Holds all devices of the RevolutionPi bus."""

    def __init__(self, procimg, list_devices, **kwargs):
        """Instantiate the single bus devices.
        @param procimg File name of the piControl device
        @param list_devices piCtory device information
        @param kwargs Additional parameters:
            - auto_refresh: If True, add all devices to auto_refresh
            - monitoring: In- and outputs are read, never written
            - simulator: Load the module as simulator and swap the IOs
            - syncoutputs: Read currently set outputs from process image
        """
        self._buffedwrite = False
        self._exit = Event()
        self._waitexit = Event()
        self._procimg = procimg
        self.myfh = self._create_myfh(procimg)
        self.core = None
        self._device = []
        self._dict_devname = {}
        self._dict_devposition = {}
        self.imgwriter = None
        self.length = 0
        self._lst_refresh = []
        self._th_mainloop = None
        self._looprunning = False
        self.monitoring = kwargs.get("monitoring", False)
        simulator = kwargs.get("simulator", False)
        err_names = False
        for device in sorted(list_devices, key=lambda x: x["position"]):
            # Make positions of virtual devices from old piCtory
            # versions unique by assigning negative numbers
            if device["position"] == "adap.":
                device["position"] = -1
                while device["position"] in self._dict_devposition:
                    device["position"] -= 1
            if device["type"] == "BASE":
                # Core
                dev_new = RevPiCore(device, simulator=simulator)
                self.core = dev_new
                if not (self.monitoring or simulator):
                    # Load RS485 error defaults in case procimg is NULL
                    if dev_new._ioerrorlimit1 is not None:
                        dev_new._lst_io[dev_new._ioerrorlimit1].set_value(
                            dev_new._lst_io[
                                dev_new._ioerrorlimit1].defaultvalue
                        )
                    if dev_new._ioerrorlimit2 is not None:
                        dev_new._lst_io[dev_new._ioerrorlimit2].set_value(
                            dev_new._lst_io[
                                dev_new._ioerrorlimit2].defaultvalue
                        )
                    # Write the RS485 errors
                    self.writeprocimg(True, dev_new)
            elif device["type"] == "LEFT_RIGHT":
                # IOs
                dev_new = RevPiDevice(device, simulator=simulator)
            elif device["type"] == "VIRTUAL":
                # Virtuals
                dev_new = RevPiVirtual(device, simulator=simulator)
            elif device["type"] == "EDGE":
                # Gateways
                dev_new = RevPiGateway(device, simulator=simulator)
            else:
                # Unknown device type
                # FIX: the placeholder was never formatted before - the
                # warning literally printed "device type {} unknown"
                warnings.warn(
                    "device type {} unknown".format(device["type"]), Warning
                )
                dev_new = None
            if dev_new is not None:
                self._device.append(dev_new)
                # The offset has to match the accumulated length
                if self.length < dev_new.offset:
                    self.length = dev_new.offset
                self.length += dev_new.length
                # Check for duplicate names, piCtory allows them
                if dev_new.name in self._dict_devname:
                    err_names = True
                # Build dict()s for fast access
                self._dict_devname[dev_new.name] = dev_new
                self._dict_devposition[dev_new.position] = dev_new
        # Destroy dict_devname when duplicate names exist
        if err_names:
            self._dict_devname.clear()
            warnings.warn(
                "equal device names in pictory configuration. can not "
                "build name dictionary. you can access all devices by "
                "position number only!",
                Warning
            )
        if kwargs.get("syncoutputs", True):
            # Read the current output state from procimg
            self.syncoutputs(force=True)
        # Optionally add all devices to auto_refresh
        if kwargs.get("auto_refresh", False):
            for dev in self._device:
                self.auto_refresh(dev)
def __contains__(self, key):
"""Prueft ob Device existiert.
@param key DeviceName str() / Positionsnummer int()
@return True, wenn device vorhanden"""
if type(key) == str:
return key in self._dict_devname
if type(key) == int:
return key in self._dict_devposition
else:
return key in self._device
def __del__(self):
"""FileHandler und RevPiProcimgWriter beenden."""
if self.imgwriter is not None:
self.imgwriter.stop()
if not self.myfh.closed:
self.myfh.close()
def __getitem__(self, key):
"""Gibt angegebenes Device zurueck.
@param key DeviceName str() / Positionsnummer int()
@return Gefundenes RevPiDevice()-Objekt"""
if type(key) == str:
return self._dict_devname[key]
if type(key) == int:
return self._dict_devposition[key]
else:
raise KeyError(
"need device name as str() or device position as int()"
)
def __iter__(self):
"""Gibt alle Devices zurueck.
@return Iterator alle Devices"""
return iter(self._device)
def __len__(self):
"""Gibt Anzahl der Devices zurueck.
@return int() Anzalh der Devices"""
return len(self._device)
def _create_myfh(self, path):
"""Erstellt FileObject.
param path Pfad zur Datei
return FileObject"""
self._buffedwrite = False
return open(path, "r+b", 0)
    def auto_refresh(self, device, remove=False):
        """Register a device for automatic synchronization.
        @param device Device to synchronize
        @param remove bool() True removes the device from synchronization"""
        # Accept a device object directly or resolve by name / position
        dev = device if issubclass(type(device), RevPiDevice) \
            else self.__getitem__(device)
        if not remove and dev not in self._lst_refresh:
            # Read the data immediately when the device is added!
            self.readprocimg(True, dev)
            # Create a data copy for change detection
            dev.filelock.acquire()
            dev._ba_datacp = dev._ba_devdata[:]
            dev.filelock.release()
            dev._selfupdate = True
            self._lst_refresh.append(dev)
            # Start the writer thread if it is not running yet
            if self.imgwriter is None or not self.imgwriter.is_alive():
                self.imgwriter = RevPiProcimgWriter(
                    self._procimg,
                    self.length,
                    self._lst_refresh,
                    self.monitoring
                )
                self.imgwriter.start()
        elif remove and dev in self._lst_refresh:
            # Remove safely from the list while refresh is locked
            with self.imgwriter.lck_refresh:
                self._lst_refresh.remove(dev)
            dev._selfupdate = False
            # Stop the writer when no devices are left in the list
            if len(self.imgwriter._lst_refresh) == 0:
                self.imgwriter.stop()
            # Write the device data one last time on removal
            if not self.monitoring:
                self.writeprocimg(True, dev)
def auto_refresh_maxioerrors(self, value=None):
"""Maximale IO Fehler fuer auto_refresh.
@param value Setzt maximale Anzahl bis exception ausgeloest wird
@return Maximale Anzahl bis exception ausgeloest wird"""
if value is None:
return self.imgwriter.maxioerrors
elif type(value) == int and value >= 0:
self.imgwriter.maxioerrors = value
    def auto_refresh_resetioerrors(self):
        """Reset the current IO error counter back to 0."""
        # NOTE(review): this assigns to imgwriter.maxioerrors (the limit),
        # not to an error *counter* as the docstring promises - setting the
        # limit to 0 looks like a bug, but the counter attribute of the
        # writer is not visible here; confirm against RevPiProcimgWriter
        # before changing.
        if self.imgwriter is not None:
            self.imgwriter.maxioerrors = 0
    def cycleloop(self, func, cycletime=None):
        """Start the cycle loop.

        The current program thread is "captured" here until
        RevPiDevicelist.exit() is called. After every update of the
        process image it executes the given function "func". While the
        function is running, the process image is not updated further,
        so the inputs keep their current value until the end. Outputs
        set inside the function are written to the process image after
        the function pass has finished.

        The cycle loop is left when the called function returns a value
        other than None, or when revpimodio.exit() is called.

        NOTE: The refresh time plus the runtime of the function must not
        exceed the configured auto_refresh time or the given cycletime!

        The parameter cycletime sets the refresh rate of the process
        image (same effect as set_refreshtime(milliseconds)).

        @param func Function to execute every cycle
        @param cycletime Cycle time in milliseconds; if omitted, the
        current auto_refresh time is used - default 50 ms
        @return None
        """
        # Refuse to run when another loop is already active
        if self._looprunning:
            raise RuntimeError(
                "can not start multiple loops mainloop/cycleloop"
            )
        # At least one device must be in auto_refresh
        if len(self._lst_refresh) == 0:
            raise RuntimeError("no device with auto_refresh activated")
        # The cycle function must be callable
        if not callable(func):
            raise RuntimeError(
                "registered function '{}' ist not callable".format(func)
            )
        # Apply the requested cycle time
        if not (cycletime is None or cycletime == self.imgwriter.refresh):
            self.imgwriter.refresh = cycletime
        # Start the cycle loop
        self._exit.clear()
        self._looprunning = True
        cycleinfo = RevPiCycletools()
        ec = None
        try:
            while ec is None and not self._exit.is_set():
                # Wait for fresh data; only run the cycle when set()
                if not self.imgwriter.newdata.wait(2.5):
                    if not self._exit.is_set() \
                            and not self.imgwriter.is_alive():
                        raise RuntimeError("auto_refresh thread not running")
                    continue
                self.imgwriter.newdata.clear()
                # Lock auto_refresh before calling the cycle function
                self.imgwriter.lck_refresh.acquire()
                # Call the function and evaluate its return value
                ec = func(cycleinfo)
                cycleinfo._docycle()
                # Release auto_refresh again
                self.imgwriter.lck_refresh.release()
        except Exception as e:
            # Catch errors, clean up and re-raise
            # NOTE(review): lck_refresh is released unconditionally here; if
            # the exception was raised before acquire() this may release a
            # lock that is not held - confirm before changing.
            self.imgwriter.lck_refresh.release()
            self.exit(full=False)
            self._looprunning = False
            raise e
        # End of the cycle loop
        self._looprunning = False
        return ec
    def exit(self, full=True):
        """Stop mainloop() and optionally auto_refresh.

        If the program is inside mainloop(), calling exit() hands
        control back to the main program. The parameter full defaults
        to True and removes all devices from auto_refresh. The thread
        for the process image synchronization is then stopped and the
        program can shut down cleanly.

        @param full Also remove all devices from auto_refresh
        """
        self._exit.set()
        self._waitexit.set()
        if full and self.imgwriter is not None:
            self.imgwriter.stop()
            self.imgwriter.join(self.imgwriter._refresh)
            while len(self._lst_refresh) > 0:
                dev = self._lst_refresh.pop()
                dev._selfupdate = False
                if not self.monitoring:
                    # Write the device data one last time on shutdown
                    self.writeprocimg(True, dev)
def get_devbyname(self, name):
"""Gibt durch Namen angegebenes Device zurueck.
@param name Devicename aus piCtory
@return Gefundenes RevPiDevice()"""
return self._dict_devname[name]
def get_devbyposition(self, position):
"""Gibt durch Position angegebenes Device zurueck.
@param position Deviceposition aus piCtory
@return Gefundenes RevPiDevice()"""
return self._dict_devposition[position]
def get_refreshtime(self):
"""Gibt Aktualisierungsrate in ms der Prozessabbildsynchronisierung aus.
@return Millisekunden"""
return self.imgwriter.refresh
    def readprocimg(self, force=False, device=None):
        """Read all inputs of all devices from the process image.
        @param force Include devices with autoupdate=False
        @param device Apply to a single device only
        @return True if the work on all devices succeeded
        """
        if device is None:
            mylist = self._device
        else:
            dev = device if issubclass(type(device), RevPiDevice) \
                else self.__getitem__(device)
            # Devices in auto_refresh are updated by the writer thread
            if dev._selfupdate:
                raise RuntimeError(
                    "can not read process image, while device '{}|{}'"
                    "is in auto_refresh mode".format(dev.position, dev.name)
                )
            mylist = [dev]
        # Read the complete process image in a single pass
        try:
            self.myfh.seek(0)
            bytesbuff = self.myfh.read(self.length)
        except IOError:
            warnings.warn(
                "read error on process image '{}'".format(self.myfh.name),
                RuntimeWarning
            )
            return False
        for dev in mylist:
            if (force or dev.autoupdate) and not dev._selfupdate:
                # Lock the device buffer while copying
                dev.filelock.acquire()
                if self.monitoring:
                    # Monitoring mode: read everything from the bus
                    dev._ba_devdata[:] = bytesbuff[dev.slc_devoff]
                else:
                    # Read the inputs from the bus
                    dev._ba_devdata[dev.slc_inp] = bytesbuff[dev.slc_inpoff]
                    # Read the mems from the bus
                    dev._ba_devdata[dev.slc_mem] = bytesbuff[dev.slc_memoff]
                dev.filelock.release()
        return True
    def mainloop(self, freeze=False, blocking=True):
        """Start the mainloop with event monitoring.

        The current program thread is "captured" here until
        RevPiDevicelist.exit() is called (unless blocking=False). It
        runs the event monitoring and checks for changes of IOs that
        were registered with an event. When a change is detected, the
        program executes the attached functions one after another.

        If the parameter "freeze" is True, the process image
        synchronization is halted until all event functions have been
        executed. Inputs keep their current value for the whole duration
        and outputs are written to the process image only after all
        functions have run.

        If the parameter "blocking" is False, the event monitoring is
        activated and the program is NOT blocked at the call site. This
        is useful for GUI programming when events from the RevPi are
        needed but the program has to continue.

        @param freeze If True, halt the process image synchronization
        @param blocking If False, the program is NOT blocked
        @return None
        """
        # Refuse to run when another loop is already active
        if self._looprunning:
            raise RuntimeError(
                "can not start multiple loops mainloop/cycleloop"
            )
        # At least one device must be in auto_refresh
        if len(self._lst_refresh) == 0:
            raise RuntimeError("no device with auto_refresh activated")
        # Spawn a worker thread when the call should not block
        if not blocking:
            self._th_mainloop = Thread(
                target=self.mainloop,
                kwargs={"freeze": freeze, "blocking": True}
            )
            self._th_mainloop.start()
            return
        # Clean the exit event before entering the mainloop
        self._exit.clear()
        self._looprunning = True
        # Create a byte copy of each device on mainloop entry
        for dev in self._lst_refresh:
            dev.filelock.acquire()
            dev._ba_datacp = dev._ba_devdata[:]
            dev.filelock.release()
        lst_fire = []
        while not self._exit.is_set():
            # Wait for new data; only evaluate events when set()
            if not self.imgwriter.newdata.wait(2.5):
                if not self._exit.is_set() and not self.imgwriter.is_alive():
                    self.exit(full=False)
                    self._looprunning = False
                    raise RuntimeError("auto_refresh thread not running")
                continue
            self.imgwriter.newdata.clear()
            # Lock refresh during the evaluation
            self.imgwriter.lck_refresh.acquire()
            for dev in self._lst_refresh:
                # Skip devices without events or without changed bytes
                if len(dev._dict_events) == 0 \
                        or dev._ba_datacp == dev._ba_devdata:
                    continue
                for io_event in dev._dict_events:
                    # Skip IOs whose bytes did not change
                    if dev._ba_datacp[io_event.slc_address] == \
                            dev._ba_devdata[io_event.slc_address]:
                        continue
                    if io_event._bitaddress >= 0:
                        # Bit IO: extract the old and the new bit value
                        boolcp = bool(int.from_bytes(
                            dev._ba_datacp[io_event.slc_address],
                            byteorder=io_event._byteorder
                        ) & 1 << io_event._bitaddress)
                        boolor = bool(int.from_bytes(
                            dev._ba_devdata[io_event.slc_address],
                            byteorder=io_event._byteorder
                        ) & 1 << io_event._bitaddress)
                        if boolor == boolcp:
                            continue
                        # Queue functions matching the edge direction
                        for regfunc in dev._dict_events[io_event]:
                            if regfunc[1] == BOTH \
                                    or regfunc[1] == RISING and boolor \
                                    or regfunc[1] == FALLING and not boolor:
                                lst_fire.append(
                                    (regfunc, io_event.name, io_event.value)
                                )
                    else:
                        # Byte IO: fire all registered functions
                        for regfunc in dev._dict_events[io_event]:
                            lst_fire.append(
                                (regfunc, io_event.name, io_event.value)
                            )
                # Copy the bytes after all IOs have been processed
                dev.filelock.acquire()
                dev._ba_datacp = dev._ba_devdata[:]
                dev.filelock.release()
            # Release the refresh lock unless freeze was requested
            if not freeze:
                self.imgwriter.lck_refresh.release()
            # Fire all events only after the data has been taken over
            try:
                while len(lst_fire) > 0:
                    tup_fire = lst_fire.pop()
                    event_func = tup_fire[0][0]
                    passname = tup_fire[1]
                    passvalue = tup_fire[2]
                    if tup_fire[0][2]:
                        # Run this callback in its own thread
                        th = RevPiCallback(
                            event_func, passname, passvalue
                        )
                        th.start()
                    else:
                        # Call directly; checked in RevPiDevice.reg_event
                        event_func(passname, passvalue)
            except Exception as e:
                # Catch errors, clean up and re-raise
                if freeze:
                    self.imgwriter.lck_refresh.release()
                self.exit(full=False)
                self._looprunning = False
                raise e
            # Release the refresh lock now when freeze was requested
            if freeze:
                self.imgwriter.lck_refresh.release()
        # Leave the mainloop
        self._looprunning = False
def set_refreshtime(self, milliseconds):
"""Setzt Aktualisierungsrate der Prozessabbild-Synchronisierung.
@param milliseconds int() in Millisekunden"""
self.imgwriter.refresh = milliseconds
def setdefaultvalues(self, force=False, device=None):
"""Alle Outputbuffer werden auf die piCtory default Werte gesetzt.
@param force auch Devices mit autoupdate=False
@param device nur auf einzelnes Device anwenden"""
if self.monitoring:
raise RuntimeError(
"can not set default values, while system is in monitoring "
"mode"
)
if device is None:
mylist = self._device
else:
dev = device if issubclass(type(device), RevPiDevice) \
else self.__getitem__(device)
mylist = [dev]
for dev in mylist:
if (force or dev.autoupdate):
for io in dev._lst_io:
if not io._readonly:
io.set_value(io.defaultvalue)
    def syncoutputs(self, force=False, device=None):
        """Read the currently set outputs from the process image.
        @param force Include devices with autoupdate=False
        @param device Apply to a single device only
        @return True if the work on all devices succeeded
        """
        if device is None:
            mylist = self._device
        else:
            dev = device if issubclass(type(device), RevPiDevice) \
                else self.__getitem__(device)
            # Devices in auto_refresh are updated by the writer thread
            if dev._selfupdate:
                raise RuntimeError(
                    "can not sync process image, while device '{}|{}'"
                    "is in auto_refresh mode".format(dev.position, dev.name)
                )
            mylist = [dev]
        # Read the complete process image in a single pass
        try:
            self.myfh.seek(0)
            bytesbuff = self.myfh.read(self.length)
        except IOError:
            warnings.warn(
                "read error on process image '{}'".format(self.myfh.name),
                RuntimeWarning
            )
            return False
        for dev in mylist:
            if (force or dev.autoupdate) and not dev._selfupdate:
                dev.filelock.acquire()
                # Read the output bytes from the bus
                dev._ba_devdata[dev.slc_out] = bytesbuff[dev.slc_outoff]
                dev.filelock.release()
        return True
def updateprocimg(self, force=False, device=None):
"""Schreiben/Lesen aller Outputs/Inputs aller Devices im Prozessab.
@param force auch Devices mit autoupdate=False
@param device nur auf einzelnes Device anwenden
@return True, wenn Arbeiten an allen Devices erfolgreich waren
"""
return self.readprocimg(force=force, device=device) and \
self.writeprocimg(force=force, device=device)
    def wait(self, device, io, **kwargs):
        """Wait for a value change of an IO.

        The value change is checked every time new data has been read
        for devices in RevPiDevicelist.auto_refresh(). On a value
        change, the waiting ends with 0 as return value.

        NOTE: If RevPiProcimgWriter() does not deliver new data, this
        waits forever (not when "timeout" is given).

        If edge is given as RISING or FALLING, that edge has to be
        triggered. If the value is 1 on entry with edge RISING, waiting
        only ends after a change from 0 to 1.

        A threading.Event() object can be passed as exitevent, which
        ends the waiting immediately with 1 as return value once it
        is_set().

        If the value okvalue is present on the IO, the waiting ends
        immediately with -1 as return value.

        The timeout value aborts the waiting immediately with return
        value 2 when reached. (The timeout is computed from the cycle
        time of the auto_refresh function, so it does not match the
        given milliseconds exactly! It is always rounded up!)

        @param device Device the IO belongs to
        @param io Name of the IO whose change is waited for
        @param kwargs Additional parameters:
            - edge: Edge RISING, FALLING, BOTH that ends the waiting
            - exitevent: threading.Event() for an early exit
            - okvalue: IO value that ends the waiting immediately
            - timeout: Time in ms after which the waiting is aborted
        @return int() successful values are <= 0
            - Waited successfully
            Value 0: the IO changed its value
            Value -1: okvalue matched the IO value
            - Waiting failed
            Value 1: exitevent was set
            Value 2: timeout expired
            Value 100: RevPiDevicelist.exit() was called
        """
        # Accept a device object directly or resolve by name / position
        dev = device if issubclass(type(device), RevPiDevice) \
            else self.__getitem__(device)
        # The device must be in auto_refresh, otherwise no data arrives
        if not dev._selfupdate:
            raise RuntimeError(
                "auto_refresh is not activated for device '{}|{}' - there "
                "will never be new data".format(dev.position, dev.name)
            )
        io_watch = dev[io]
        if type(io_watch) == list:
            # Byte access returns a list; only unambiguous bytes work
            if len(io_watch) == 1:
                io_watch = io_watch[0]
            else:
                raise KeyError(
                    "byte '{}' contains more than one bit-input".format(io)
                )
        val_start = io_watch.value
        # Evaluate kwargs
        edge = kwargs.get("edge", None)
        evt_exit = kwargs.get("exitevent", Event())
        val_ok = kwargs.get("okvalue", None)
        flt_timeout = kwargs.get("timeout", 0) / 1000
        bool_timecount = flt_timeout > 0
        if edge is not None and io_watch._bitaddress < 0:
            raise AttributeError(
                "parameter 'edge' can be used with bit Inputs only"
            )
        # Resolve edge to BOTH when not given
        if edge is None:
            edge = BOTH
        # Check the immediate-success value
        if val_ok == io_watch.value:
            return -1
        # Clean the wait-exit event
        self._waitexit.clear()
        # A count of -1 disables the timeout (-1 < 0.0 is always true)
        flt_timecount = 0 if bool_timecount else -1
        while not self._waitexit.is_set() and not evt_exit.is_set() \
                and flt_timecount < flt_timeout:
            if self.imgwriter.newdata.wait(2.5):
                self.imgwriter.newdata.clear()
                if val_start != io_watch.value:
                    if edge == BOTH \
                            or edge == RISING and not val_start \
                            or edge == FALLING and val_start:
                        return 0
                    else:
                        # Wrong edge direction: track the new value
                        val_start = not val_start
                if bool_timecount:
                    flt_timecount += self.imgwriter._refresh
            elif bool_timecount:
                # No new data within 2.5 s still advances the timeout
                flt_timecount += 1
        # The exit event was set
        if evt_exit.is_set():
            return 1
        # RevPiModIO mainloop was left
        if self._waitexit.is_set():
            return 100
        # Timeout expired
        return 2
    def writedefaultinputs(self, virtual_device):
        """Write the piCtory default input values of a virtual device.

        If piCtory defines default values for inputs of a virtual
        device, they are only set at system start or at a piControl
        reset. If the process image is overwritten with NULL afterwards,
        these values are lost. This function can only be applied to
        virtual devices!

        @param virtual_device Virtual device to restore
        @return True if the work on the virtual device succeeded
        """
        if self.monitoring:
            raise RuntimeError(
                "can not write process image, while system is in monitoring "
                "mode"
            )
        # Find the device
        dev = virtual_device if issubclass(type(virtual_device), RevPiDevice) \
            else self.__getitem__(virtual_device)
        # Check that it really is a virtual device
        if not issubclass(type(dev), RevPiVirtual):
            raise RuntimeError(
                "this function can be used for virtual devices only"
            )
        workokay = True
        dev.filelock.acquire()
        # Inputs are the readonly IOs of the device
        for io in dev._lst_io:
            if io._readonly:
                dev._ba_devdata[io.slc_address] = io.defaultvalue
        # Write the input area to the bus
        try:
            self.myfh.seek(dev.slc_inpoff.start)
            self.myfh.write(dev._ba_devdata[dev.slc_inp])
            if self._buffedwrite:
                self.myfh.flush()
        except IOError:
            warnings.warn(
                "write error on process image '{}'"
                "".format(self.myfh.name),
                RuntimeWarning
            )
            workokay = False
        dev.filelock.release()
        return workokay
    def writeprocimg(self, force=False, device=None):
        """Write all outputs of all devices to the process image.
        @param force Include devices with autoupdate=False
        @param device Apply to a single device only
        @return True if the work on all devices succeeded
        """
        if self.monitoring:
            raise RuntimeError(
                "can not write process image, while system is in monitoring "
                "mode"
            )
        if device is None:
            mylist = self._device
        else:
            dev = device if issubclass(type(device), RevPiDevice) \
                else self.__getitem__(device)
            # Devices in auto_refresh are written by the writer thread
            if dev._selfupdate:
                raise RuntimeError(
                    "can not write process image, while device '{}|{}'"
                    "is in auto_refresh mode".format(dev.position, dev.name)
                )
            mylist = [dev]
        workokay = True
        for dev in mylist:
            if (force or dev.autoupdate) and not dev._selfupdate:
                dev.filelock.acquire()
                # Write the output area to the bus
                try:
                    self.myfh.seek(dev.slc_outoff.start)
                    self.myfh.write(dev._ba_devdata[dev.slc_out])
                except IOError:
                    workokay = False
                dev.filelock.release()
        # Flush once for all devices when writes are buffered
        if self._buffedwrite:
            try:
                self.myfh.flush()
            except IOError:
                workokay = False
        if not workokay:
            warnings.warn(
                "write error on process image '{}'"
                "".format(self.myfh.name),
                RuntimeWarning
            )
        return workokay
class RevPiDevice(object):
    """Base class for all device objects of the RevPiDevicelist() class.

    On instantiation the base functionality creates all IOs and extends
    the process image buffer by the required bytes. All IOs are accessed
    through this class or classes derived from it. It manages its
    process image buffer and keeps the IO values up to date.
    """

    def __init__(self, dict_device, **kwargs):
        """Instantiate the RevPiDevice() class.
        @param dict_device dict() for this device from the piCtory config
        @param kwargs Additional parameters:
            - autoupdate: If True this device performs work on the
              process image when RevPiDevicelist functions are called
            - simulator: Load the module as simulator and swap the IOs
        """
        self._selfupdate = False
        self.autoupdate = kwargs.get("autoupdate", True)
        self._dict_ioname = {}
        self._dict_events = {}
        self.filelock = Lock()
        self._lst_io = []
        self.length = 0
        # Copy the values out of dict_device
        self.name = dict_device.pop("name")
        self.offset = int(dict_device.pop("offset"))
        self.position = int(dict_device.pop("position"))
        self.producttype = int(dict_device.pop("productType"))
        # Create a new bytearray and a copy for the mainloop
        self._ba_devdata = bytearray()
        self._ba_datacp = bytearray()
        # Pop inp/out/mem first, then attach the rest to the class
        if kwargs.get("simulator", False):
            # Simulator mode swaps inputs and outputs
            self.slc_inp = self._buildio(dict_device.pop("out"), True)
            self.slc_out = self._buildio(dict_device.pop("inp"), False)
        else:
            self.slc_inp = self._buildio(dict_device.pop("inp"), True)
            self.slc_out = self._buildio(dict_device.pop("out"), False)
        self.slc_mem = self._buildio(dict_device.pop("mem"), True)
        # Sort all IOs by address
        self._lst_io.sort(key=lambda x: x.slc_address.start)
        # Compute the slices including the device offset
        self.slc_devoff = slice(self.offset, self.offset + self.length)
        self.slc_inpoff = slice(
            self.slc_inp.start + self.offset, self.slc_inp.stop + self.offset
        )
        self.slc_outoff = slice(
            self.slc_out.start + self.offset, self.slc_out.stop + self.offset
        )
        self.slc_memoff = slice(
            self.slc_mem.start + self.offset, self.slc_mem.stop + self.offset
        )
        # Keep the byte addresses in a dict for fast access
        self._dict_iobyte = {k: [] for k in range(self.length)}
        for io in self._lst_io:
            if io._bitaddress < 0:
                self._dict_iobyte[io.slc_address.start].append(io)
            else:
                if len(self._dict_iobyte[io.slc_address.start]) != 8:
                    # "Quickly" create 8 entries since these are bit IOs
                    self._dict_iobyte[io.slc_address.start] += [
                        None, None, None, None, None, None, None, None
                    ]
                self._dict_iobyte[io.slc_address.start][io._bitaddress] = io
        # Attach all remaining attributes to the class
        self.__dict__.update(dict_device)
        # Run the special configuration of derived classes
        self._devconfigure()
def __bytes__(self):
"""Gibt alle Daten des Devices als bytes() zurueck.
@return Devicedaten als bytes()"""
return bytes(self._ba_devdata)
def __contains__(self, key):
"""Prueft ob IO existiert.
@param key IO-Name str() / Positionsnummer int()
@return True, wenn device vorhanden"""
if type(key) == str:
return key in self._dict_ioname
if type(key) == int:
return key in self._dict_iobyte \
and len(self._dict_iobyte[key]) > 0
else:
return key in self._lst_io
def __getitem__(self, key):
"""Gibt angegebenes IO-Objekt zurueck.
@param key Name order Byteadresse des IOs
@return IO-Objekt wenn Name, sonst list() mit IO-Objekt"""
if type(key) == int:
if key in self._dict_iobyte:
return self._dict_iobyte[key]
else:
raise KeyError("byte '{}' does not exist".format(key))
else:
if key in self._dict_ioname:
return self._dict_ioname[key]
else:
raise KeyError("'{}' does not exist".format(key))
def __int__(self):
"""Gibt die Positon im RevPi Bus zurueck.
@return Positionsnummer"""
return self.position
def __iter__(self):
"""Gibt Iterator aller IOs zurueck.
@return iter() alle IOs"""
return iter(self._lst_io)
def __str__(self):
"""Gibt den Namen des Devices zurueck.
@return Devicename"""
return self.name
def __setitem__(self, key, value):
"""Setzt den Wert des angegebenen Inputs.
@param key Name oder Byte des Inputs
@param value Wert der gesetzt werden soll"""
if type(key) == int:
if key in self._dict_iobyte:
if len(self._dict_iobyte[key]) == 1:
self._dict_iobyte[key][0].value = value
elif len(self._dict_iobyte[key]) == 0:
raise KeyError("byte '{}' contains no input".format(key))
else:
raise KeyError(
"byte '{}' contains more than one bit-input"
"".format(key)
)
else:
raise KeyError("byte '{}' does not exist".format(key))
else:
self._dict_ioname[key].value = value
def _buildio(self, dict_io, readonly):
    """Create the IO objects of this device from the piCtory list.
    @param dict_io dict() from the piCtory configuration
    @param readonly True for inp and mem, False for out
    @return slice() with start and stop position of these IOs
    """
    if len(dict_io) > 0:
        int_min, int_max = 4096, 0
        for key in sorted(dict_io, key=lambda x: int(x)):
            # Create new IO
            if bool(dict_io[key][7]) or self.producttype == 95:
                # Use RevPiIOBase for bit values or the core (type 95)
                io_new = RevPiIOBase(
                    self.offset,
                    dict_io[key],
                    readonly,
                    self._ba_devdata,
                    byteorder="little"
                )
            else:
                # producttype 103 presumably needs signed ints - confirm
                io_new = RevPiIO(
                    self.offset,
                    dict_io[key],
                    readonly,
                    self._ba_devdata,
                    byteorder="little",
                    signed=self.producttype == 103
                )
            # Register IO
            if io_new.name in self._dict_ioname:
                raise NameError(
                    "name '{}' already exists on device '{}'".format(
                        io_new._name, self.name
                    )
                )
            else:
                # Build up the name register
                self._dict_ioname[io_new._name] = io_new
                # Allocate memory (io_new.length may be 0 for bit IOs
                # sharing a byte, so the byte is only extended once)
                self._ba_devdata.extend(bytes(io_new.length))
                # Append the IO
                self._lst_io.append(io_new)
                self.length += io_new.length
            # Track smallest and largest memory address
            if io_new.slc_address.start < int_min:
                int_min = io_new.slc_address.start
            if io_new.slc_address.stop > int_max:
                int_max = io_new.slc_address.stop
        return slice(int_min, int_max)
    else:
        return slice(0, 0)
def _devconfigure(self):
    """Hook for device-specific setup; overridden by derived classes."""
    pass
def get_inps(self):
    """Return a list of all inputs of this device.
    @return list() of input IOs"""
    rng = self.slc_inp
    return [
        io for io in self._lst_io
        if rng.start <= io.slc_address.start < rng.stop
    ]
def get_outs(self):
    """Return a list of all outputs of this device.
    @return list() of output IOs"""
    rng = self.slc_out
    return [
        io for io in self._lst_io
        if rng.start <= io.slc_address.start < rng.stop
    ]
def get_mems(self):
    """Return a list of all memory IOs of this device.
    @return list() of mem IOs"""
    rng = self.slc_mem
    return [
        io for io in self._lst_io
        if rng.start <= io.slc_address.start < rng.stop
    ]
def get_iobyabsaddress(self, address):
    """Return the IO object(s) at the given process-image byte.
    @param address byte address in the process image
    @return list() with IO object(s)"""
    local_address = address - self.offset
    return self._dict_iobyte[local_address]
def get_iobyaddress(self, address):
    """Return the IO object(s) at the given device byte.
    @param address byte address within the device image
    @return list() with IO object(s)"""
    return self._dict_iobyte[address]
def get_iobyname(self, name):
    """Return the IO object with the given name.
    @param name name of the IO object
    @return IO object"""
    return self._dict_ioname[name]
def reg_event(self, io_name, func, **kwargs):
    """Register an event with the event monitoring.
    @param io_name name of the input or output that is monitored
    @param func function to call on a value change
    @param kwargs additional parameters:
        - as_thread: if True, run the function as a RevPiCallback thread
        - edge: fire on RISING, FALLING or BOTH value changes
    """
    as_thread = kwargs.get("as_thread", False)
    edge = kwargs.get("edge", None)
    # Resolve the IO; byte addresses yield a list that must contain
    # exactly one IO to be unambiguous
    io_event = self.__getitem__(io_name)
    if type(io_event) == list:
        if len(io_event) == 1:
            io_event = io_event[0]
        elif len(io_event) == 0:
            raise KeyError(
                "byte '{}' contains no io object".format(io_name))
        else:
            raise KeyError(
                "byte '{}' contains more than one bit io object".format(
                    io_name
                )
            )
    # Check that the function is callable
    if not callable(func):
        raise RuntimeError(
            "registered function '{}' ist not callable".format(func)
        )
    # Edge filtering only makes sense for bit IOs
    if edge is not None and io_event._bitaddress < 0:
        raise AttributeError(
            "parameter 'edge' can be used with bit io objects only"
        )
    # Resolve edge if not given
    if edge is None:
        edge = BOTH
    if io_event not in self._dict_events:
        self._dict_events[io_event] = [(func, edge, as_thread)]
    else:
        # Check whether the function is already registered; each
        # registration is a (func, edge, as_thread) tuple
        for regfunc in self._dict_events[io_event]:
            if regfunc[0] != func:
                # Test the next entry
                continue
            if edge == BOTH or regfunc[1] == BOTH:
                if io_event._bitaddress < 0:
                    raise AttributeError(
                        "io '{}' with function '{}' already in list."
                        "".format(io_name, func)
                    )
                else:
                    raise AttributeError(
                        "io '{}' with function '{}' already in list with "
                        "edge 'BOTH' or RISING/FALLING - you can use BOTH "
                        "or RISING and FALLING only".format(
                            io_name, func
                        )
                    )
            elif regfunc[1] == edge:
                raise AttributeError(
                    "io '{}' with function '{}' for given edge "
                    "already in list".format(io_name, func)
                )
        # Append the event function
        self._dict_events[io_event].append((func, edge, as_thread))
def unreg_event(self, io_name, func=None, edge=None):
    """Remove an event from the event monitoring.
    @param io_name name of the input whose events are removed
    @param func only events with the given function
    @param edge only events with the given function and the given edge
    """
    # Resolve the IO; byte addresses yield a list that must contain
    # exactly one IO to be unambiguous
    io_event = self.__getitem__(io_name)
    if type(io_event) == list:
        if len(io_event) == 1:
            io_event = io_event[0]
        elif len(io_event) == 0:
            raise KeyError(
                "byte '{}' contains no io object".format(io_name))
        else:
            raise KeyError(
                "byte '{}' contains more than one bit io object".format(
                    io_name
                )
            )
    if io_event in self._dict_events:
        if func is None:
            # No function given: drop all events of this IO
            del self._dict_events[io_event]
        else:
            # Keep entries whose function differs, or - when an edge is
            # given - whose edge differs ('and' binds tighter than 'or')
            newlist = []
            for regfunc in self._dict_events[io_event]:
                if regfunc[0] != func or edge is not None \
                        and regfunc[1] != edge:
                    newlist.append(regfunc)
            # Keep remaining functions, otherwise remove the IO entry
            if len(newlist) > 0:
                self._dict_events[io_event] = newlist
            else:
                del self._dict_events[io_event]
class RevPiCore(RevPiDevice):
    """Class for the RevPi Core device.
    Provides access to the LEDs and the status of the core.
    """

    def _devconfigure(self):
        """Prepare the core class.

        Maps the special core IOs to their index in self._lst_io; the
        firmware variant is detected from the number of configured IOs.
        """
        # Index of each special IO in self._lst_io (None = not available)
        self._iocycle = None
        self._iotemperature = None
        self._iofrequency = None
        self._ioerrorcnt = None
        # Presumably the LED byte index of the original core layout,
        # overwritten below for the 1.1/1.2 layouts - TODO confirm
        self._ioled = 1
        self._ioerrorlimit1 = None
        self._ioerrorlimit2 = None
        int_lenio = len(self._lst_io)
        if int_lenio == 6:
            # Core 1.1
            self._iocycle = 1
            self._ioerrorcnt = 2
            self._ioled = 3
            self._ioerrorlimit1 = 4
            self._ioerrorlimit2 = 5
        elif int_lenio == 8:
            # core 1.2
            self._iocycle = 1
            self._ioerrorcnt = 2
            self._iotemperature = 3
            self._iofrequency = 4
            self._ioled = 5
            self._ioerrorlimit1 = 6
            self._ioerrorlimit2 = 7

    def _errorlimit(self, io_id, errorlimit):
        """Handle reading and writing of the RS485 error limits.
        @param io_id index of the error limit IO (None = not available)
        @param errorlimit new value to write, or None to read
        @return current error limit or None if not available"""
        if errorlimit is None:
            # Read access
            return None if io_id is None else int.from_bytes(
                self._lst_io[io_id].get_value(),
                byteorder=self._lst_io[io_id]._byteorder
            )
        else:
            # Write access; NOTE(review): io_id is not checked for None
            # here - writing on a core without this IO would raise
            # TypeError; confirm whether callers guarantee io_id
            if 0 <= errorlimit <= 65535:
                self._lst_io[io_id].set_value(errorlimit.to_bytes(
                    2, byteorder=self._lst_io[io_id]._byteorder
                ))
            else:
                raise ValueError(
                    "errorlimit value int() must be between 0 and 65535"
                )

    def get_status(self):
        """Return the RevPi core status byte (IO 0).
        @return status as int()"""
        return int.from_bytes(
            self._lst_io[0].get_value(), byteorder=self._lst_io[0]._byteorder
        )

    def get_leda1(self):
        """Return the state of LED A1 of the core.
        @return 0=off, 1=green, 2=red"""
        int_led = int.from_bytes(
            self._lst_io[self._ioled].get_value(),
            byteorder=self._lst_io[self._ioled]._byteorder
        )
        # LED A1 occupies bits 0-1 of the LED byte
        led = int_led & 1
        led += int_led & 2
        return led

    def get_leda2(self):
        """Return the state of LED A2 of the core.
        @return 0=off, 1=green, 2=red"""
        int_led = int.from_bytes(
            self._lst_io[self._ioled].get_value(),
            byteorder=self._lst_io[self._ioled]._byteorder
        )
        # LED A2 occupies bits 2-3 of the LED byte
        led = 1 if bool(int_led & 4) else 0
        led = led + 2 if bool(int_led & 8) else led
        return led

    def set_leda1(self, value):
        """Set the state of LED A1 of the core.
        @param value 0=off, 1=green, 2=red"""
        if 0 <= value <= 3:
            # Preserve A2 (bits 2-3) and place the new A1 in bits 0-1
            int_led = (self.get_leda2() << 2) + value
            self._lst_io[self._ioled].set_value(int_led.to_bytes(
                length=1, byteorder=self._lst_io[self._ioled]._byteorder
            ))
        else:
            raise ValueError("led status int() must be between 0 and 3")

    def set_leda2(self, value):
        """Set the state of LED A2 of the core.
        @param value 0=off, 1=green, 2=red"""
        if 0 <= value <= 3:
            # Preserve A1 (bits 0-1) and place the new A2 in bits 2-3
            int_led = (value << 2) + self.get_leda1()
            self._lst_io[self._ioled].set_value(int_led.to_bytes(
                length=1, byteorder=self._lst_io[self._ioled]._byteorder
            ))
        else:
            raise ValueError("led status int() must be between 0 and 3")

    A1 = property(get_leda1, set_leda1)
    A2 = property(get_leda2, set_leda2)
    status = property(get_status)

    @property
    def picontrolrunning(self):
        """Status bit 0: piControl driver is running.
        @return True if the driver is running"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 1)

    @property
    def unconfdevice(self):
        """Status bit 1: an IO module is not configured in piCtory.
        @return True if an IO module is not configured"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 2)

    @property
    def missingdeviceorgate(self):
        """Status bit 2: an IO module is missing or piGate configured.
        @return True if an IO module is missing or piGate configured"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 4)

    @property
    def overunderflow(self):
        """Status bit 3: a module uses more or less memory than configured.
        @return True if the wrong amount of memory is used"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 8)

    @property
    def leftgate(self):
        """Status bit 4: a piGate module is connected left of the RevPi.
        @return True if a piGate exists on the left"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 16)

    @property
    def rightgate(self):
        """Status bit 5: a piGate module is connected right of the RevPi.
        @return True if a piGate exists on the right"""
        return bool(int.from_bytes(
            self._lst_io[0].get_value(),
            byteorder=self._lst_io[0]._byteorder
        ) & 32)

    @property
    def iocycle(self):
        """Return the cycle time of the process image synchronisation.
        @return cycle time in ms"""
        return None if self._iocycle is None else int.from_bytes(
            self._lst_io[self._iocycle].get_value(),
            byteorder=self._lst_io[self._iocycle]._byteorder
        )

    @property
    def temperature(self):
        """Return the CPU temperature.
        @return CPU temperature in Celsius"""
        return None if self._iotemperature is None else int.from_bytes(
            self._lst_io[self._iotemperature].get_value(),
            byteorder=self._lst_io[self._iotemperature]._byteorder
        )

    @property
    def frequency(self):
        """Return the CPU clock frequency.
        @return CPU clock frequency in MHz"""
        # Raw value appears to be stored in 10 MHz steps (hence * 10)
        # - TODO confirm against firmware docs
        return None if self._iofrequency is None else int.from_bytes(
            self._lst_io[self._iofrequency].get_value(),
            byteorder=self._lst_io[self._iofrequency]._byteorder
        ) * 10

    @property
    def ioerrorcount(self):
        """Return the error count on the RS485 piBridge bus.
        @return piBridge error count"""
        return None if self._ioerrorcnt is None else int.from_bytes(
            self._lst_io[self._ioerrorcnt].get_value(),
            byteorder=self._lst_io[self._ioerrorcnt]._byteorder
        )

    @property
    def errorlimit1(self):
        """Return the RS485 ErrorLimit1 value.
        @return current ErrorLimit1 value"""
        return self._errorlimit(self._ioerrorlimit1, None)

    @errorlimit1.setter
    def errorlimit1(self, value):
        """Set RS485 ErrorLimit1 to a new value.
        @param value new ErrorLimit1 value"""
        self._errorlimit(self._ioerrorlimit1, value)

    @property
    def errorlimit2(self):
        """Return the RS485 ErrorLimit2 value.
        @return current ErrorLimit2 value"""
        return self._errorlimit(self._ioerrorlimit2, None)

    @errorlimit2.setter
    def errorlimit2(self, value):
        """Set RS485 ErrorLimit2 to a new value.
        @param value new ErrorLimit2 value"""
        self._errorlimit(self._ioerrorlimit2, value)
class RevPiGateway(RevPiDevice):
    """Class for the RevPi gateway devices.

    In addition to the RevPiDevice functions it provides the reg_*
    functions with which own IOs can be defined; these are represented
    by RevPiStructIO objects. This IO type can process and return
    values spanning several bytes.
    """

    def __init__(self, dict_device, **kwargs):
        """Extend RevPiDevice with the reg_* bookkeeping.
        @see #RevPiDevice.__init__ RevPiDevice.__init__(...)"""
        super().__init__(dict_device, **kwargs)
        # Remember replaced piCtory IOs (byte -> name and name -> byte)
        # so they can still be used as start positions later on
        self._dict_iorefbyte = {}
        self._dict_iorefname = {}
        self._dict_slc = {
            "inp": self.slc_inp, "out": self.slc_out, "mem": self.slc_mem
        }

    def _create_io(self, name, startio, frm, io_type, **kwargs):
        """Create a new IO and replace the existing one(s).
        @param name name of the new IO
        @param startio IO at which the new one is inserted
        @param frm struct() format (single character)
        @param io_type IO type "inp", "out", "mem"
        @param kwargs additional parameters:
            - bmk: description for the IO
            - bit: register IO as bool() at the given bit of the byte
            - byteorder: byteorder for this IO, default is little
            - defaultvalue: default value for the IO, default is 0
        """
        if len(frm) == 1:
            # Check and apply byteorder
            byteorder = kwargs.get("byteorder", "little")
            if not (byteorder == "little" or byteorder == "big"):
                raise ValueError("byteorder must be 'little' or 'big'")
            bofrm = "<" if byteorder == "little" else ">"

            # A bit address is only used for the bool format '?'
            bitaddress = "" if frm != "?" else str(kwargs.get("bit", 0))
            if bitaddress == "" or \
                    (int(bitaddress) >= 0 and int(bitaddress) < 8):
                bitlength = "1" if bitaddress.isnumeric() else \
                    struct.calcsize(bofrm + frm) * 8

                if startio in self._dict_iorefname:
                    # The start IO was already replaced earlier
                    startaddress = self._dict_iorefname[startio]
                else:
                    startaddress = self.__getitem__(startio).slc_address.start

                # [name,default,anzbits,adressbyte,export,adressid,bmk,bitaddress]
                list_value = [
                    name,
                    kwargs.get("defaultvalue", 0),
                    bitlength,
                    startaddress,
                    False,
                    str(startaddress).rjust(4, "0"),
                    kwargs.get("bmk", ""),
                    bitaddress
                ]

                # Instantiate the new IO
                io_new = RevPiStructIO(
                    self.offset,
                    list_value,
                    io_type != "out",
                    self._ba_devdata,
                    byteorder,
                    bofrm + frm
                )
                io_new._byteorder = byteorder

                # Check that the new IO fits into the process image scope
                if (io_new.slc_address.start >=
                        self._dict_slc[io_type].start and
                        io_new.slc_address.stop <=
                        self._dict_slc[io_type].stop):
                    self._replace_io(io_new)
                else:
                    raise BufferError(
                        "registered value does not fit process image {} "
                        "scope".format(io_type)
                    )
            else:
                raise AttributeError(
                    "bitaddress must be a value between 0 and 7"
                )
        else:
            raise AttributeError("parameter frm has to be a single sign")

    def _getbytename(self, iobyte):
        """Resolve the name of the IO at the given byte address.
        @param iobyte byte number
        @return IO name"""
        # The IO was already replaced earlier
        if iobyte in self._dict_iorefbyte:
            return self._dict_iorefbyte[iobyte]

        # The IO is being replaced now
        if iobyte in self._dict_iobyte:
            intlen = len(self._dict_iobyte[iobyte])
            if intlen == 1:
                return self._dict_iobyte[iobyte][0].name
            elif intlen == 0:
                # BUGFIX: was 'len == 0', comparing the builtin len to 0
                # (always False), which sent empty bytes into the
                # "more than one bit-input" branch below
                raise KeyError("byte '{}' contains no input".format(iobyte))
            else:
                raise KeyError(
                    "byte '{}' contains more than one bit-input".format(iobyte)
                )
        else:
            raise KeyError("byte '{}' does not exist".format(iobyte))

    def _replace_io(self, io):
        """Replace the existing IOs with the newly registered one.
        @param io IO at which the new one is inserted"""
        if io.name in self._dict_ioname:
            raise NameError(
                "name '{}' already exists on device '{}'".format(
                    io._name, self.name
                )
            )
        else:
            dict_oldio = {}
            for oldio in self._lst_io:
                # Check all IOs whether they lie in the new memory range
                errstart = oldio.slc_address.start >= io.slc_address.start \
                    and oldio.slc_address.start < io.slc_address.stop
                errstop = oldio.slc_address.stop > io.slc_address.start \
                    and oldio.slc_address.stop <= io.slc_address.stop
                if errstart or errstop:
                    if type(oldio) == RevPiStructIO:
                        # There is already a registered IO here
                        if oldio._bitaddress >= 0:
                            if io._bitaddress == oldio._bitaddress:
                                raise MemoryError(
                                    "bit {} already assigned to '{}'".format(
                                        io._bitaddress, oldio._name
                                    )
                                )
                        else:
                            # Already replaced bytes() are invalid
                            raise MemoryError(
                                "new io '{}' overlaps memory of '{}'".format(
                                    io._name, oldio._name
                                )
                            )
                    else:
                        # Remember IOs inside the new IO's memory range
                        dict_oldio[oldio.name] = oldio

            for oldio in dict_oldio.values():
                if io._bitaddress >= 0:
                    # Keep refs of replaced IOs for bit addressing
                    self._dict_iorefbyte[oldio.slc_address.start] = oldio.name
                    self._dict_iorefname[oldio.name] = oldio.slc_address.start
                # Remove the old IO from all registers
                self._dict_iobyte[oldio.slc_address.start].remove(oldio)
                del self._dict_ioname[oldio.name]
                self._lst_io.remove(oldio)

            # Extend the byte register
            if io._bitaddress >= 0:
                if len(self._dict_iobyte[io.slc_address.start]) != 8:
                    # If there are no 8 bit slots yet, create them quickly
                    self._dict_iobyte[io.slc_address.start] = [
                        None, None, None, None, None, None, None, None
                    ]
                self._dict_iobyte[io.slc_address.start][io._bitaddress] = io
            else:
                self._dict_iobyte[io.slc_address.start].append(io)

            # Extend the name register
            self._dict_ioname[io.name] = io
            # Insert the IO (position may be off because of bit offsets)
            self._lst_io.insert(io.slc_address.start, io)
            # Re-sort the list by start address
            self._lst_io.sort(key=lambda x: x.slc_address.start)

    def get_rawbytes(self):
        """Return the bytes used by this device.
        @return bytes() of the device"""
        return bytes(self._ba_devdata)

    def reg_inp(self, name, startinp, frm, **kwargs):
        """Register a new input.
        @param name name of the new input
        @param startinp input name at which the new one is inserted
        @param frm struct() format (single character)
        @param kwargs additional parameters:
            - bmk: description for the input
            - bit: register input as bool() at the given bit of the byte
            - byteorder: byteorder for the input, default is little
            - defaultvalue: default value for the input, default is 0
            - event: register a function for event handling
            - as_thread: run the event function as a RevPiCallback thread
            - edge: fire event on RISING, FALLING or BOTH value changes
        @see <a target="_blank"
        href="https://docs.python.org/3/library/struct.html#format-characters"
        >Python3 struct()</a>
        """
        if type(startinp) == int:
            # Convert byte int() into a name
            startinp = self._getbytename(startinp)
        if type(startinp) == str:
            self._create_io(name, startinp, frm, "inp", **kwargs)
        else:
            raise TypeError(
                "start input must be str() or int() not {}".format(
                    type(startinp)
                )
            )
        # Optionally register an event
        reg_event = kwargs.get("event", None)
        if reg_event is not None:
            as_thread = kwargs.get("as_thread", False)
            edge = kwargs.get("edge", None)
            self.reg_event(name, reg_event, as_thread=as_thread, edge=edge)

    def reg_out(self, name, startout, frm, **kwargs):
        """Register a new output.
        @param name name of the new output
        @param startout output name at which the new one is inserted
        @param frm struct() format (single character)
        @param kwargs additional parameters:
            - bmk: description for the output
            - bit: register output as bool() at the given bit of the byte
            - byteorder: byteorder for the output, default is little
            - defaultvalue: default value for the output, default is 0
            - event: register a function for event handling
            - as_thread: run the event function as a RevPiCallback thread
            - edge: fire event on RISING, FALLING or BOTH value changes
        @see <a target="_blank"
        href="https://docs.python.org/3/library/struct.html#format-characters"
        >Python3 struct()</a>
        """
        if type(startout) == int:
            # Convert byte int() into a name
            startout = self._getbytename(startout)
        if type(startout) == str:
            self._create_io(name, startout, frm, "out", **kwargs)
        else:
            raise TypeError(
                "start output must be str() or int() not {}".format(
                    type(startout)
                )
            )
        # Optionally register an event
        reg_event = kwargs.get("event", None)
        if reg_event is not None:
            as_thread = kwargs.get("as_thread", False)
            edge = kwargs.get("edge", None)
            self.reg_event(name, reg_event, as_thread=as_thread, edge=edge)

    def reg_mem(self, name, startmem, frm, **kwargs):
        """Register a new memory IO.
        @param name name of the new memory IO
        @param startmem mem name at which the new one is inserted
        @param frm struct() format (single character)
        @param kwargs additional parameters:
            - bmk: description for the memory IO
            - bit: register memory as bool() at the given bit of the byte
            - byteorder: byteorder for the memory IO, default is little
            - defaultvalue: default value for the memory IO, default is 0
        @see <a target="_blank"
        href="https://docs.python.org/3/library/struct.html#format-characters"
        >Python3 struct()</a>
        """
        if type(startmem) == int:
            # Convert byte int() into a name
            startmem = self._getbytename(startmem)
        if type(startmem) == str:
            self._create_io(name, startmem, frm, "mem", **kwargs)
        else:
            raise TypeError(
                "start mem must be str() or int() not {}".format(
                    type(startmem)
                )
            )
class RevPiVirtual(RevPiGateway):
    """Class for the RevPi virtual devices.

    Offers the same functionality as RevPiGateway: own IOs represented
    by RevPiStructIO objects can be defined via the reg_* functions.
    This IO type can process and return values spanning several bytes.
    @see #RevPiGateway RevPiGateway
    """
class RevPiIOBase(object):
    """Base class for all IO objects of the RevPiDevice() class.
    The basic functionality allows reading and writing the values as
    bytes() or bool(); this is decided at instantiation. If a bit address
    is given, bool() values are expected and returned, otherwise bytes().
    This class serves as base for other IO classes with which the values
    can also be used as int().
    """

    def __init__(
            self, offset, valuelist, readonly, byteproc,
            byteorder, signed=False):
        """Instantiate the RevPiIOBase() class.
        @param offset device offset
        @param valuelist data list for instantiation
        @param readonly True for inp and mem, False for out
        @param byteproc bytearray() data of the device
        @param byteorder byteorder 'little' / 'big' for int() conversion
        @param signed consider sign in int() conversion
        """
        # Break the bit address down to a position within its byte
        # (-1 marks a plain byte IO without a bit address)
        self._bitaddress = -1 if valuelist[7] == "" else int(valuelist[7]) % 8
        # Length calculation; NOTE(review): only bitaddress 0 claims one
        # byte of device memory, all other bit addresses yield length 0 -
        # presumably so the device buffer is extended only once per shared
        # byte in _buildio(); confirm
        self._bitlength = int(valuelist[2])
        self.length = 1 if self._bitaddress == 0 else int(self._bitlength / 8)
        self._byteproc = byteproc
        self._byteorder = byteorder
        self._devoffset = offset
        self._name = valuelist[0]
        self._readonly = readonly
        self._signed = signed
        self.bmk = valuelist[6]
        int_startaddress = int(valuelist[3])
        if self._bitaddress == -1:
            self.slc_address = slice(
                int_startaddress, int_startaddress + self.length
            )
            # Convert a numeric default value into bytes
            if str(valuelist[1]).isnumeric():
                self.defaultvalue = int(valuelist[1]).to_bytes(
                    self.length, byteorder=self._byteorder
                )
            else:
                # Take the default value directly from bytes
                if type(valuelist[1]) == bytes:
                    if len(valuelist[1]) != self.length:
                        raise ValueError(
                            "given bytes for default value must have a length "
                            "of {}".format(self.length)
                        )
                    else:
                        self.defaultvalue = valuelist[1]
                else:
                    self.defaultvalue = bytes(self.length)
        else:
            # Bits higher than 7 wrap onto the following byte
            int_startaddress += int((int(valuelist[7]) % 16) / 8)
            self.slc_address = slice(
                int_startaddress, int_startaddress + 1
            )
            self.defaultvalue = bool(int(valuelist[1]))

    def __bool__(self):
        """bool() value of the class.
        @return IO value as bool(); only False for False or 0, else True"""
        return bool(self.get_value())

    def __bytes__(self):
        """bytes() value of the class.
        @return IO value as bytes()"""
        if self._bitaddress >= 0:
            # Bit IOs map their single bit to b'\x01' / b'\x00'
            int_byte = int.from_bytes(
                self._byteproc[self.slc_address], byteorder=self._byteorder
            )
            if bool(int_byte & 1 << self._bitaddress):
                return b'\x01'
            else:
                return b'\x00'
        else:
            return bytes(self._byteproc[self.slc_address])

    def __str__(self):
        """str() value of the class.
        @return name of the IO"""
        return self._name

    def _get_byteorder(self):
        """Return the configured byteorder.
        @return str() byteorder"""
        return self._byteorder

    def get_name(self):
        """Return the name of the IO.
        @return IO name"""
        return self._name

    def get_absaddress(self):
        """Return the absolute byte address in the process image.
        @return absolute byte address"""
        return self._devoffset + self.slc_address.start

    def get_address(self):
        """Return the byte address on the device.
        @return byte address on the device"""
        return self.slc_address.start

    def get_value(self):
        """Return the value of the IO as bytes() or bool().
        @return IO value"""
        if self._bitaddress >= 0:
            # Extract the single bit from the device byte
            int_byte = int.from_bytes(
                self._byteproc[self.slc_address], byteorder=self._byteorder
            )
            return bool(int_byte & 1 << self._bitaddress)
        else:
            return bytes(self._byteproc[self.slc_address])

    def set_value(self, value):
        """Set the value of the IO with bytes() or bool().
        @param value IO value as bytes() or bool()"""
        if self._readonly:
            raise AttributeError("can not write to input")
        else:
            if self._bitaddress >= 0:
                # Try to convert any type to bool
                value = bool(value)
                # Load the whole byte
                byte_buff = self._byteproc[self.slc_address]
                # Convert bytes to integer
                int_len = len(byte_buff)
                int_byte = int.from_bytes(byte_buff, byteorder=self._byteorder)
                int_bit = 1 << self._bitaddress
                # Compare current value and set the bit if it differs
                if not bool(int_byte & int_bit) == value:
                    if value:
                        int_byte += int_bit
                    else:
                        int_byte -= int_bit
                    # Write back only when changed
                    self._byteproc[self.slc_address] = int_byte.to_bytes(
                        int_len, byteorder=self._byteorder
                    )
            else:
                if type(value) == bytes:
                    if self.length == len(value):
                        self._byteproc[self.slc_address] = value
                    else:
                        raise ValueError(
                            "requires a bytes() object of length {}, but"
                            " {} was given".format(self.length, len(value))
                        )
                else:
                    raise ValueError(
                        "requires a bytes() object, not {}".format(type(value))
                    )

    name = property(get_name)
    value = property(get_value, set_value)
class RevPiIO(RevPiIOBase):
    """Access to the data with conversion to int().

    Extends RevPiIOBase() with functions for working with int() values.
    The conversion is controlled by the parameters 'byteorder' (default
    'little') and 'signed' (default False).
    @see #RevPiIOBase RevPiIOBase
    """

    def __int__(self):
        """Return the IO as int(), honouring byteorder/signed.
        @return int() value"""
        return self.get_int()

    def _get_signed(self):
        """Return whether the value is treated as signed.
        @return True if signed"""
        return self._signed

    def _set_byteorder(self, value):
        """Set the byteorder for int() conversion.
        @param value str() 'little' or 'big'"""
        if not (value == "little" or value == "big"):
            raise ValueError("byteorder must be 'little' or 'big'")
        if self._byteorder == value:
            return
        self._byteorder = value
        # Keep the stored default value consistent with the new order
        self.defaultvalue = self.defaultvalue[::-1]

    def _set_signed(self, value):
        """Define whether the value is treated as signed.
        @param value True to treat the value as signed"""
        if type(value) != bool:
            raise ValueError("signed must be bool() True or False")
        self._signed = value

    def get_int(self):
        """Return the IO as int(), honouring byteorder/signed.
        @return int() value"""
        raw = self._byteproc[self.slc_address]
        return int.from_bytes(
            raw, byteorder=self._byteorder, signed=self._signed
        )

    def set_int(self, value):
        """Set the IO, honouring byteorder/signed.
        @param value int()"""
        if type(value) != int:
            raise ValueError(
                "need an int() value, but {} was given".format(type(value))
            )
        raw = value.to_bytes(
            self.length, byteorder=self._byteorder, signed=self._signed
        )
        self.set_value(raw)

    byteorder = property(RevPiIOBase._get_byteorder, _set_byteorder)
    signed = property(_get_signed, _set_signed)
    value = property(get_int, set_int)
class RevPiStructIO(RevPiIOBase):
    """Access to data via a defined struct() format.

    Overrides get_value() and set_value() of the RevPiIOBase() class and
    exposes the value in the desired struct formatting. The struct format
    string is fixed at instantiation.
    @see #RevPiIOBase RevPiIOBase
    """

    def __init__(self, offset, valuelist, readonly, byteproc, byteorder, frm):
        """Extend RevPiIOBase with a struct format string.
        @see #RevPiIOBase.__init__ RevPiIOBase.__init__(...)"""
        super().__init__(offset, valuelist, readonly, byteproc, byteorder)
        self.frm = frm

    def get_structvalue(self):
        """Return the value in struct formatting.
        @return value of the type given by the struct format"""
        if self._bitaddress >= 0:
            # Bit IOs bypass struct and behave like plain bool IOs
            return self.get_value()
        return struct.unpack(self.frm, self.get_value())[0]

    def set_structvalue(self, value):
        """Set the value using struct formatting.
        @param value value of the type given by the struct format"""
        if self._bitaddress >= 0:
            # Bit IOs bypass struct and behave like plain bool IOs
            self.set_value(value)
        else:
            self.set_value(struct.pack(self.frm, value))

    byteorder = property(RevPiIOBase._get_byteorder)
    value = property(get_structvalue, set_structvalue)
=======
revrand
=======
.. image:: https://travis-ci.org/NICTA/revrand.svg?branch=master
:target: https://travis-ci.org/NICTA/revrand
.. image:: https://codecov.io/github/NICTA/revrand/coverage.svg?branch=master
:target: https://codecov.io/github/NICTA/revrand?branch=master
------------------------------------------------------------------------------
A library of scalable Bayesian generalized linear models with *fancy* features
------------------------------------------------------------------------------
*revrand* is a python (2 and 3) **supervised machine learning** library that
contains implementations of various Bayesian linear and generalized linear
models (i.e. Bayesian linear regression and Bayesian generalized linear
regression).
*revrand* can be used for **large scale approximate Gaussian process
regression**, like `GPflow <https://github.com/GPflow/GPflow>`_ and `GPy
<https://github.com/SheffieldML/GPy>`_, but it uses random basis kernel
approximations (see [1]_, [2]_, [3]_) as opposed to inducing point
approximations.
A few features of this library are:
- Random Basis functions that can be used to approximate Gaussian processes
with shift invariant covariance functions (e.g. Matern) when used with linear
models [1]_, [2]_, [3]_.
- A fancy basis functions/feature composition framework for combining basis
functions like those above and radial basis functions, sigmoidal basis
functions, polynomial basis functions etc *with basis function parameter
learning*.
- Non-Gaussian likelihoods with Bayesian generalized linear models (GLMs). We
infer all of the parameters in the GLMs using stochastic variational
inference [4]_, and we approximate the posterior over the weights with a
mixture of Gaussians, like [5]_.
- Large scale learning using stochastic gradients (Adam, AdaDelta and more).
- Scikit Learn compatibility, i.e. usable with `pipelines
<http://scikit-learn.org/stable/modules/pipeline.html>`_.
- A host of decorators for `scipy.optimize.minimize
<https://docs.scipy.org/doc/scipy/reference/optimize.html>`_ and stochastic
gradients that enhance the functionality of these optimisers.
Here is an example of approximating a Matern 3/2 kernel with some of our basis
functions,
.. image:: docs/matern32.png
here is an example of the algorithms in *revrand* approximating a Gaussian
Process,
.. image:: docs/glm_sgd_demo.png
and here is an example of running using our Bayesian GLM with a Poisson
likelihood and integer observations,
.. image:: docs/glm_demo.png
Have a look at some of the demo `notebooks <demos/>`_ for how we generated
these plots, and more!
Quickstart
----------
To install, you can use ``pip``:
.. code:: console
$ pip install revrand
or simply run ``setup.py`` in the location where you have cloned or
downloaded this repository:
.. code:: console
$ python setup.py install
Now have a look at our `quickstart guide
<http://nicta.github.io/revrand/quickstart.html>`_ to get up and running
quickly!
Useful Links
------------
Home Page
http://github.com/nicta/revrand
Documentation
http://nicta.github.io/revrand
Report on the algorithms in *revrand*
https://github.com/NICTA/revrand/blob/master/docs/report/report.pdf
Issue tracking
https://github.com/nicta/revrand/issues
Bugs & Feedback
---------------
For bugs, questions and discussions, please use
`Github Issues <https://github.com/NICTA/revrand/issues>`_.
Authors
-------
- `Daniel Steinberg <https://github.com/dsteinberg>`_
- `Louis Tiao <https://github.com/ltiao>`_
- `Alistair Reid <https://github.com/AlistaiReid>`_
- `Lachlan McCalman <https://github.com/lmccalman>`_
- `Simon O'Callaghan <https://github.com/socallaghan>`_
References
----------
.. [1] Yang, Z., Smola, A. J., Song, L., & Wilson, A. G. "A la Carte --
Learning Fast Kernels". Proceedings of the Eighteenth International
Conference on Artificial Intelligence and Statistics, pp. 1098-1106,
2015.
.. [2] Le, Q., Sarlos, T., & Smola, A. "Fastfood-approximating kernel
expansions in loglinear time." Proceedings of the international conference
on machine learning. 2013.
.. [3] Rahimi, A., & Recht, B. "Random features for large-scale kernel
machines". Advances in neural information processing systems. 2007.
.. [4] Kingma, D. P., & Welling, M. "Auto-encoding variational Bayes".
Proceedings of the 2nd International Conference on Learning Representations
(ICLR). 2014.
.. [5] Gershman, S., Hoffman, M., & Blei, D. "Nonparametric variational
inference". Proceedings of the international conference on machine learning.
2012.
Copyright & License
-------------------
Copyright 2015 National ICT Australia.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| /revrand-1.0.0.tar.gz/revrand-1.0.0/README.rst | 0.95165 | 0.857291 | README.rst | pypi |
[](https://travis-ci.org/wikimedia/revscoring)
[](https://codecov.io/gh/wikimedia/revscoring)
[](./LICENSE)
[](https://badge.fury.io/py/revscoring)
# Revision Scoring
A generic, machine learning-based revision scoring system designed to help automate critical wiki-work — for example, vandalism detection and removal. This library powers [ORES](https://ores.wikimedia.org).
## Example
Using a scorer_model to score a revision::
```python
import mwapi
from revscoring import Model
from revscoring.extractors.api.extractor import Extractor
with open("models/enwiki.damaging.linear_svc.model") as f:
scorer_model = Model.load(f)
extractor = Extractor(mwapi.Session(host="https://en.wikipedia.org",
user_agent="revscoring demo"))
feature_values = list(extractor.extract(123456789, scorer_model.features))
print(scorer_model.score(feature_values))
{'prediction': True, 'probability': {False: 0.4694409344514984, True: 0.5305590655485017}}
```
# Installation
The easiest way to install is via the Python package installer
(pip).
``pip install revscoring``
You may find that some of the dependencies fail to compile (namely
`scipy`, `numpy` and `sklearn`). In that case, you'll need to install some
dependencies in your operating system.
### Ubuntu & Debian:
* Run ``sudo apt-get install python3-dev g++ gfortran liblapack-dev libopenblas-dev enchant``
* Run ``sudo apt-get install aspell-ar aspell-bn aspell-el aspell-id aspell-is aspell-pl aspell-ro aspell-sv aspell-ta aspell-uk myspell-cs myspell-de-at myspell-de-ch myspell-de-de myspell-es myspell-et myspell-fa myspell-fr myspell-he myspell-hr myspell-hu myspell-lv myspell-nb myspell-nl myspell-pt-pt myspell-pt-br myspell-ru myspell-hr hunspell-bs hunspell-ca hunspell-en-au hunspell-en-us hunspell-en-gb hunspell-eu hunspell-gl hunspell-it hunspell-hi hunspell-sr hunspell-vi voikko-fi``
<!-- ### Windows:
<i>TODO</i>
-->
### MacOS:
Using Homebrew and pip, installing `revscoring` and `enchant` can be accomplished
as follows::
```bash
brew install aspell --with-all-languages
brew install enchant
pip install --no-binary pyenchant revscoring
```
#### Adding languages in aspell (MacOS only)
```bash
cd /tmp
wget http://ftp.gnu.org/gnu/aspell/dict/pt/aspell-pt-0.50-2.tar.bz2
bzip2 -dc aspell-pt-0.50-2.tar.bz2 | tar xvf -
cd aspell-pt-0.50-2
./configure
make
sudo make install
```
Caveats: <br>
<b><u>The differences between the `aspell` and `myspell` dictionaries can cause
some of the tests to fail.</u></b>
Finally, in order to make use of language features, you'll need to download
some NLTK data. The following command will get the necessary corpora.
``python -m nltk.downloader omw sentiwordnet stopwords wordnet``
You'll also need to install [enchant](https://en.wikipedia.org/wiki/Enchant_(software))-compatible
dictionaries of the languages you'd like to use. We recommend the following:
* languages.arabic: aspell-ar
* languages.basque: hunspell-eu
* languages.bengali: aspell-bn
* languages.bosnian: hunspell-bs
* languages.catalan: myspell-ca
* languages.czech: myspell-cs
* languages.croatian: myspell-hr
* languages.dutch: myspell-nl
* languages.english: myspell-en-us myspell-en-gb myspell-en-au
* languages.estonian: myspell-et
* languages.finnish: voikko-fi
* languages.french: myspell-fr
* languages.galician: hunspell-gl
* languages.german: myspell-de-at myspell-de-ch myspell-de-de
* languages.greek: aspell-el
* languages.hebrew: myspell-he
* languages.hindi: aspell-hi
* languages.hungarian: myspell-hu
* languages.icelandic: aspell-is
* languages.indonesian: aspell-id
* languages.italian: myspell-it
* languages.latvian: myspell-lv
* languages.norwegian: myspell-nb
* languages.persian: myspell-fa
* languages.polish: aspell-pl
* languages.portuguese: myspell-pt-pt myspell-pt-br
* languages.serbian: hunspell-sr
* languages.spanish: myspell-es
* languages.swedish: aspell-sv
* languages.tamil: aspell-ta
* languages.russian: myspell-ru
* languages.ukrainian: aspell-uk
* languages.vietnamese: hunspell-vi
# Development
To contribute, ensure to install the dependencies:
```bash
$ pip install -r requirements.txt
```
Install necessary NLTK data:
``python -m nltk.downloader omw sentiwordnet stopwords wordnet``
## Running tests
Make sure you install test dependencies:
```bash
$ pip install -r test-requirements.txt
```
Then run:
```bash
$ pytest . -vv
```
# Reporting bugs
To report a bug, please use [Phabricator](https://phabricator.wikimedia.org/maniphest/task/edit/form/1/?projects=revscoring)
# Authors
* [Aaron Halfaker](http://halfaker.info)
* [Helder](https://github.com/he7d3r)
* [Adam Roses Wight](https://mediawiki.org/wiki/User:Adamw)
* [Amir Sarabadani](https://github.com/Ladsgroup)
| /revscoring-2.11.10.tar.gz/revscoring-2.11.10/README.md | 0.522933 | 0.928084 | README.md | pypi |
from abc import ABC, abstractmethod
import torch
from torch.optim import Adam
import torchrl.utils as U
class BaseOpt(ABC):
    """
    Base optimizer wrapper: trains a model over mini-batches of experience.

    Inherits from ``ABC`` so the ``@abstractmethod`` declarations below are
    actually enforced (without an ABC base they are silently inert).

    Parameters
    ----------
    model: torchrl model
        The model whose parameters will be optimized.
    num_epochs: int
        How many times to train over the entire batch (Default is 1).
    num_mini_batches: int
        How many mini-batches to split the batch into (Default is 1).
    shuffle: bool
        Whether to shuffle the dataset when creating mini-batches (Default is True).
    opt_fn: torch.optim
        The optimizer reference function (the constructor, not the instance)
        (Default is Adam).
    opt_params: dict
        Parameters for the optimizer (Default is an empty dict).
    lr_schedule: float or callable
        Learning rate, possibly as a function of the current step
        (Default is the optimizer's own default).
    clip_grad_norm: float
        Max norm of the gradients; if float('inf') no clipping is done
        (Default is float('inf')).
    loss_coef: float or callable
        Used when sharing networks, should balance the contribution
        of the grads of each model.
    """

    def __init__(
        self,
        model,
        *,
        num_epochs=1,
        num_mini_batches=1,
        shuffle=True,
        opt_fn=None,
        opt_params=None,
        lr_schedule=None,
        clip_grad_norm=float("inf"),
        loss_coef=None
    ):
        self.model = model
        self.num_epochs = num_epochs
        self.num_mini_batches = num_mini_batches
        self.shuffle = shuffle
        self.clip_grad_norm = clip_grad_norm
        self.num_steps = 0
        self.num_updates = 0
        self.memory = U.memories.DefaultMemory()
        opt_fn = opt_fn or Adam
        opt_params = opt_params or dict()
        self.opt = self._create_opt(opt_fn=opt_fn, opt_params=opt_params)
        # Fall back to the optimizer's own default LR when no schedule is given.
        self.lr_schedule = U.make_callable(lr_schedule or self.opt.defaults["lr"])
        self.loss_coef_sched = U.make_callable(loss_coef)

    @property
    @abstractmethod
    def name(self):
        """Name used to namespace this optimizer's logs."""

    @property
    @abstractmethod
    def batch_keys(self):
        """Keys that must be sampled from the batch for this optimizer."""

    @property
    @abstractmethod
    def callbacks(self):
        """The callback registry consulted during :meth:`learn_from_batch`."""

    @property
    @abstractmethod
    def loss_coef(self):
        """Coefficient balancing this optimizer's loss against others."""

    @abstractmethod
    def model_parameters(self):
        """Return the parameters to be optimized."""

    @abstractmethod
    def calculate_loss(self, batch):
        """Compute and return the total loss for ``batch``."""

    @property
    def lr(self):
        """Current learning rate given by the schedule at the current step."""
        return self.lr_schedule(self.num_steps)

    def _create_opt(self, opt_fn, opt_params):
        return opt_fn(self.model_parameters(), **opt_params)

    def set_lr(self, value):
        """
        Change the learning rate of the optimizer.

        Parameters
        ----------
        value: float
            The new learning rate.
        """
        for param_group in self.opt.param_groups:
            param_group["lr"] = value

    def update_lr(self):
        """Refresh the optimizer's learning rate from the schedule."""
        self.set_lr(self.lr)

    def optimizer_step(self, batch):
        """
        Compute the loss on ``batch`` and apply one gradient step.

        Gradients are clipped to ``self.clip_grad_norm`` before the step.
        """
        self.update_lr()
        self.opt.zero_grad()
        loss = self.calculate_loss(batch)
        loss.backward()
        norm = torch.nn.utils.clip_grad_norm_(
            self.model_parameters(), self.clip_grad_norm
        )
        self.opt.step()
        self.memory.loss.append(loss)
        self.memory.grad_norm.append(norm)
        self.num_updates += 1

    def learn_from_batch(self, batch, step):
        """
        Train over ``batch`` for ``self.num_epochs`` epochs, split into
        ``self.num_mini_batches`` mini-batches, honoring the registered
        callbacks at every stage (a truthy callback return short-circuits).

        Parameters
        ----------
        batch: torchrl.utils.Batch
            Should contain all the information necessary
            to compute the gradients.
        step: int
            The current training step, consumed by the LR / loss-coef schedules.
        """
        # TODO: Currently always CUDA if possible (no choice)
        self.num_steps = step
        self.memory.clear()
        batch = batch.apply_to_all(U.to_tensor)
        if self.callbacks.on_train_start(batch):
            return
        for i_epoch in range(self.num_epochs):
            if self.callbacks.on_epoch_start(batch):
                break
            for mini_batch in batch.sample_keys(
                keys=self.batch_keys,
                num_mini_batches=self.num_mini_batches,
                shuffle=self.shuffle,
            ):
                if self.callbacks.on_mini_batch_start(mini_batch):
                    break
                self.optimizer_step(mini_batch)
                if self.callbacks.on_mini_batch_end(mini_batch):
                    break
            if self.callbacks.on_epoch_end(batch):
                break
        if self.callbacks.on_train_end(batch):
            return
        if self.callbacks.cleanups(batch):
            return

    def wrap_name(self, name):
        return "/".join([self.name, name])

    def write_logs(self, logger):
        logger.add_tf_only_log(self.wrap_name("LR"), self.lr, precision=4)
        logger.add_tf_only_log(self.wrap_name("Loss"), self.memory.loss, precision=4)
        logger.add_tf_only_log(
            self.wrap_name("Grad Norm"), self.memory.grad_norm, precision=4
        )
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torchrl.utils import get_obj, to_tensor
class ModuleExtended(nn.Module):
    """
    ``nn.Module`` subclass with convenience helpers shared by torchrl nets.
    """

    @property
    def is_cuda(self):
        # The module lives wherever its parameters live; peek at the first one.
        first_param = next(self.parameters())
        return first_param.is_cuda

    def maybe_cuda(self, x):
        """
        Move ``x`` to the GPU when this module is already there.

        Parameters
        ----------
        x: torch.Tensor
            A pytorch tensor.

        Returns
        -------
        torch.Tensor
            The same tensor, possibly moved to CUDA.
        """
        if self.is_cuda:
            return x.cuda()
        return x

    def to_tensor(self, x):
        """Convert ``x`` to a tensor placed on the same device as this module."""
        return to_tensor(x, cuda_default=self.is_cuda)

    def get_output_shape(self, input_shape):
        """
        Discover the output shape by pushing a dummy input through ``self.layers``.

        Parameters
        ----------
        input_shape: list
            The input dimensions (without the batch dimension).

        Returns
        -------
        torch.IntTensor
            The dimensions of the output (without the batch dimension).
        """
        self.maybe_cuda(self.layers)
        dummy = self.maybe_cuda(torch.zeros(input_shape)[None])
        output = self.layers(dummy)
        trailing_dims = output.shape[1:]
        return torch.IntTensor(list(trailing_dims))
class SequentialExtended(ModuleExtended):
    """
    Thin wrapper around ``nn.Sequential`` with the extended-module helpers.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.layers = nn.Sequential(*args, **kwargs)

    def append(self, module, name=None):
        """Add ``module`` at the end, auto-naming it by position when unnamed."""
        label = str(name or len(self.layers))
        self.layers.add_module(name=label, module=module)

    def forward(self, x):
        # An empty container acts as the identity function.
        if not len(self.layers):
            return x
        return self.layers(x)

    @classmethod
    def from_config(cls, config, kwargs):
        """Instantiate every layer described in ``config`` (via ``get_obj``)."""
        layer_dict = OrderedDict(
            (key, get_obj(value)) for key, value in config.items()
        )
        return cls(layer_dict, **kwargs)
class Flatten(nn.Module):
    """
    Reshape a batched tensor to rank 2, keeping the batch dimension intact.
    """

    def forward(self, x):
        batch_size = x.shape[0]
        return x.view(batch_size, -1)
class FlattenLinear(nn.Linear):
    """
    Flatten the input to rank 2 and apply a linear layer.

    Parameters
    ----------
    in_features: list or torch.IntTensor
        Size of each input sample (multi-dimensional sizes are multiplied out).
    out_features: int
        Size of each output sample.
    """

    def __init__(self, in_features, out_features, **kwargs):
        # Accept either a shape tensor (from get_output_shape) or a plain shape.
        if isinstance(in_features, torch.IntTensor):
            flat_features = in_features.prod()
        else:
            flat_features = int(np.prod(in_features))
        super().__init__(in_features=flat_features, out_features=out_features, **kwargs)

    def forward(self, x):
        flattened = x.view(x.shape[0], -1)
        return super().forward(flattened)
# TODO: This can be only an action layer, no need Linear
class ActionLinear(nn.Module):
    """
    Output layer whose size is inferred from the environment's action space.

    For continuous actions a learnable ``log_std`` row is kept alongside the
    mean, and the weights are initialized small to encourage exploration.

    Parameters
    ----------
    in_features: list
        Size of each input sample.
    action_info: dict
        Dict containing information about the environment actions (e.g. shape).
    """

    def __init__(self, in_features, action_info, **kwargs):
        super().__init__()
        self.action_info = action_info
        num_outputs = int(np.prod(action_info["shape"]))
        self.linear = FlattenLinear(
            in_features=in_features, out_features=num_outputs, **kwargs
        )
        if action_info.space == "continuous":
            self.log_std = nn.Parameter(torch.zeros(1, num_outputs))
        # Tiny layer for maximizing exploration
        self.linear.weight.data.normal_(std=0.01)

    def forward(self, x):
        space = self.action_info.space
        if space == "discrete":
            # Raw logits; the caller builds a Categorical from them.
            return self.linear(x)
        if space == "continuous":
            # Stack (mean, log_std) along a trailing axis for the Normal dist.
            mean = self.linear(x)
            log_std = self.log_std.expand_as(mean)
            return torch.stack((mean, log_std), dim=-1)
        raise ValueError(
            "Action space {} not implemented".format(space)
        )
import torchrl.utils as U
class BaseBatcher:
    """
    Collects experience from a runner and exposes it as training batches.

    The returned batch will have the shape (num_steps, num_envs, *shape).
    """

    def __init__(self, runner, batch_size, transforms=None):
        self.runner = runner
        self.batch_size = batch_size
        self.transforms = transforms if transforms else []
        self.batch = None
        self._state_t = None
        self._state_shape = None

    def __str__(self):
        return f"<{type(self).__name__}>"

    @property
    def unwrapped(self):
        return self

    @property
    def num_steps(self):
        return self.runner.num_steps

    @property
    def num_episodes(self):
        return self.runner.num_episodes

    def get_batch(self, select_action_fn):
        # Lazily reset the runner and cache the first (transformed) state.
        if self._state_t is None:
            self._state_t = self.transform_state(self.runner.reset())

    def transform_state(self, state, training=True):
        """
        Run ``state`` through every transform, in order, before acting on it.
        """
        for transform in self.transforms:
            state = transform.transform_state(state, training=training)
        return state

    def transform_batch(self, batch, training=True):
        """
        Run ``batch`` through every transform, in order.
        """
        for transform in self.transforms:
            batch = transform.transform_batch(batch, training=training)
        return batch

    def evaluate(self, env, select_action_fn, logger):
        self.runner.evaluate(
            env=env,
            select_action_fn=select_action_fn,
            state_transform=self.transform_state,
            logger=logger,
        )

    def get_state_info(self):
        info = self.runner.get_state_info()
        # Transforms may reshape the state; probe the shape once and cache it.
        if self._state_shape is None:
            initial_state = self.runner.reset()
            self._state_shape = tuple(self.transform_state(initial_state).shape)
        info.shape = self._state_shape[1:]
        return info

    def get_action_info(self):
        return self.runner.get_action_info()

    def write_logs(self, logger):
        self.runner.write_logs(logger)

    def close(self):
        self.runner.close()
import torchrl.utils as U
from .base_transform import BaseTransform
class StackStates(BaseTransform):
    """
    Stack the last ``n`` states along ``dim``, emulating frame-stacking.

    Keeps two ring buffers: one for training (batched over num_envs) and one
    for evaluation (always a single env).
    """

    def __init__(self, n, dim=1):
        super().__init__()
        self.n = n
        self.dim = dim
        # Created lazily on the first call, once the state shape is known.
        self.ring_buffer = None
        self.eval_ring_buffer = None

    def transform(self, state):
        """
        Collapse the stacking axis; applied lazily by the LazyArray returned
        from :meth:`transform_state`.
        """
        state = U.to_np(state)
        # ``dim + 1`` because the ring buffer prepends a stacking dimension.
        assert (
            state.shape[self.dim + 1] == 1
        ), "Dimension to stack must be 1 but it is {}".format(
            state.shape[self.dim + 1]
        )
        return state.swapaxes(0, self.dim + 1)[0]

    def transform_state(self, state, training=True):
        """
        Append ``state`` to the appropriate ring buffer and return a lazy
        view of the stacked history.
        """
        if self.ring_buffer is None:
            self.ring_buffer = U.buffers.RingBuffer(
                input_shape=state.shape, maxlen=self.n
            )
        if self.eval_ring_buffer is None:
            # First dimension (num_envs) for evaluation is always 1
            eval_shape = (1,) + state.shape[1:]
            self.eval_ring_buffer = U.buffers.RingBuffer(
                input_shape=eval_shape, maxlen=self.n
            )
        if training:
            self.ring_buffer.append(state)
            state = self.ring_buffer.get_data()
        else:
            self.eval_ring_buffer.append(state)
            state = self.eval_ring_buffer.get_data()
        return U.LazyArray(state, transform=self.transform)
class StateRunNorm(BaseTransform):
    """
    Normalize states with a running mean/std filter, clipped to ``clip_range``.
    """

    def __init__(self, clip_range=5, use_latest_filter_update=False):
        super().__init__()
        # Created lazily once the number of state features is known.
        self.filt = None
        self.clip_range = clip_range
        # If True, normalize with the most recent filter statistics instead of
        # the ones frozen at the last update().
        self.use_latest_filter_update = use_latest_filter_update

    def transform_state(self, state, training=True):
        """Normalize ``state``; new samples are accumulated only while training."""
        if self.filt is None:
            shape = state.shape
            if len(shape) != 2:
                raise ValueError(
                    "state shape must be in the form (num_envs, num_features), got {}".format(
                        shape
                    )
                )
            self.filt = U.filters.MeanStdFilter(
                num_features=state.shape[-1], clip_range=self.clip_range
            )
        state = self.filt.normalize(
            state, add_sample=training, use_latest_update=self.use_latest_filter_update
        )
        return state

    def transform_batch(self, batch, training=True):
        """Refresh the filter statistics once per training batch."""
        if training:
            self.filt.update()
        return batch

    def write_logs(self, logger):
        logger.add_tf_only_log("Env/State/mean", self.filt.mean.mean())
        logger.add_tf_only_log("Env/State/std", self.filt.std.mean())
class Frame2Float(BaseTransform):
    """
    Lazily convert integer frames (e.g. uint8 pixels) to floats in [0, 1].
    """

    def transform_state(self, state, training=True):
        def to_float(frame):
            return frame.astype("float") / 255.

        return U.LazyArray(data=state, transform=to_float)
import torch
import torch.nn.functional as F
from torch.distributions.kl import kl_divergence
from torchrl.models import BasePGModel
class SurrogatePGModel(BasePGModel):
    r"""
    The Surrogate Policy Gradient algorithm instead maximizes a "surrogate" objective, given by:
    .. math::
        L^{CPI}({\theta}) = \hat{E}_t \left[\frac{\pi_{\theta}(a|s)}
        {\pi_{\theta_{old}}(a|s)} \hat{A} \right ]
    """

    @property
    def kl_div(self):
        # Mean KL between the snapshotted (old) and current policy distributions.
        return (
            kl_divergence(self.memory.old_dists, self.memory.new_dists).sum(-1).mean()
        )

    @property
    def entropy(self):
        return self.memory.new_dists.entropy().mean()

    @property
    def batch_keys(self):
        return ["state_t", "action", "advantage", "log_prob"]

    def register_losses(self):
        self.register_loss(self.surrogate_pg_loss)
        self.register_loss(self.entropy_loss)

    def register_callbacks(self):
        super().register_callbacks()
        # Snapshot the old policy once per training cycle; refresh the new
        # policy before every mini-batch (and after training, for logging).
        self.callbacks.register_on_train_start(self.add_old_dist)
        self.callbacks.register_on_mini_batch_start(self.add_new_dist)
        self.callbacks.register_on_train_end(self.add_new_dist)

    def add_new_dist(self, batch):
        """
        Recompute the current-policy distribution for ``batch`` and cache the
        probability ratio w.r.t. the snapshotted old policy.
        """
        parameters = self.forward(batch.state_t)
        self.memory.new_dists = self.create_dist(parameters)
        batch.new_log_prob = self.memory.new_dists.log_prob(batch.action).sum(-1)
        self.memory.prob_ratio = self.calculate_prob_ratio(
            batch.new_log_prob, batch.log_prob
        )

    def add_old_dist(self, batch):
        """Snapshot the behavior policy (no grads) before updates begin."""
        with torch.no_grad():
            parameters = self.forward(batch.state_t)
            self.memory.old_dists = self.create_dist(parameters)
            batch.log_prob = self.memory.old_dists.log_prob(batch.action).sum(-1)

    def surrogate_pg_loss(self, batch):
        """
        The surrogate pg loss, as described before.

        Parameters
        ----------
        batch: Batch
        """
        prob_ratio = self.calculate_prob_ratio(batch.new_log_prob, batch.log_prob)
        surrogate = prob_ratio * batch.advantage
        # Surrogate must be per-sample (rank 1) before averaging.
        assert len(surrogate.shape) == 1
        loss = -surrogate.mean()
        return loss

    def calculate_prob_ratio(self, new_log_probs, old_log_probs):
        """
        Calculates the probability ratio between two policies.

        Parameters
        ----------
        new_log_probs: torch.Tensor
        old_log_probs: torch.Tensor
        """
        prob_ratio = (new_log_probs - old_log_probs).exp()
        return prob_ratio

    def write_logs(self, batch):
        super().write_logs(batch)
        self.add_log("KL Divergence", self.kl_div, precision=4)
from abc import abstractproperty
import torch
from torchrl.distributions import Categorical, Normal
import torchrl.utils as U
from torchrl.models import BaseModel
from torchrl.nn import ActionLinear
class BasePGModel(BaseModel):
    """
    Base class for all Policy Gradient Models.
    """

    def __init__(self, model, batcher, *, entropy_coef=0, **kwargs):
        super().__init__(model=model, batcher=batcher, **kwargs)
        # The entropy bonus may be scheduled as a function of the current step.
        self.entropy_coef_fn = U.make_callable(entropy_coef)

    @abstractproperty
    def entropy(self):
        pass

    @property
    def entropy_coef(self):
        return self.entropy_coef_fn(self.num_steps)

    def entropy_loss(self, batch):
        """
        Entropy bonus (negated, so minimizing the loss maximizes entropy),
        with the intent of encouraging exploration.

        Parameters
        ----------
        batch: Batch
            The batch should contain all the information necessary
            to compute the gradients.
        """
        return -self.entropy * self.entropy_coef

    def create_dist(self, parameters):
        """
        Build the action distribution matching the environment's action space.

        Parameters
        ----------
        parameters: np.array
            Logits for discrete spaces, stacked (mean, log_std) pairs for
            continuous ones.
        """
        space = self.batcher.get_action_info().space
        if space == "discrete":
            return Categorical(logits=parameters)
        elif space == "continuous":
            loc = parameters[..., 0]
            scale = parameters[..., 1].exp()
            return Normal(loc=loc, scale=scale)
        raise ValueError(
            "No distribution is defined for {} actions".format(space)
        )

    def write_logs(self, batch):
        super().write_logs(batch)
        self.add_log("Entropy", self.entropy)
        self.add_log("Policy/log_prob", batch.log_prob)

    @staticmethod
    def output_layer(input_shape, action_info):
        return ActionLinear(in_features=input_shape, action_info=action_info)

    @staticmethod
    def select_action(model, state, step):
        """
        Sample an action from the distribution parameterized by the network.

        Parameters
        ----------
        model: BasePGModel
            The model providing ``forward`` and ``create_dist``.
        state: np.array
            The state of the environment (can be a batch of states).
        step: int
            The current step (unused here; kept for a uniform interface).
        """
        dist = model.create_dist(model.forward(state))
        return U.to_np(dist.sample())
import torch
from torchrl.models import SurrogatePGModel
class PPOModel(SurrogatePGModel):
    """
    Proximal Policy Optimization (deprecated implementation).

    Provides both the clipped-surrogate loss and an adaptive-KL variant;
    only the clipped loss is registered in :meth:`add_losses`.
    """

    def __init__(
        self,
        policy_nn_config,
        value_nn_config=None,
        share_body=False,
        ppo_clip_range=0.2,
        kl_penalty_coef=1,
        kl_target=0.01,
        **kwargs
    ):
        """
        Parameters
        ----------
        policy_nn_config: Config
            Network definition for the policy.
        value_nn_config: Config
            Network definition for the value function (Default is None).
        share_body: bool
            Whether the policy and value nets share a body (Default is False).
        ppo_clip_range: float
            Epsilon of the clipped surrogate objective (Default is 0.2).
        kl_penalty_coef: float
            Initial coefficient of the adaptive KL penalty (Default is 1).
        kl_target: float
            Desired KL divergence for the adaptive variant (Default is 0.01).
        """
        self.ppo_clip_range = ppo_clip_range
        self.kl_penalty_coef = kl_penalty_coef
        self.kl_target = kl_target
        super().__init__(
            policy_nn_config=policy_nn_config,
            value_nn_config=value_nn_config,
            share_body=share_body,
            **kwargs
        )

    def add_ppo_clip(self, batch, new_dists):
        """Clipped-surrogate PPO loss (the L^CLIP objective)."""
        prob_ratio = self.calculate_prob_ratio(batch, new_dists)
        surrogate = prob_ratio * batch["advantages"]
        clipped_prob_ratio = prob_ratio.clamp(
            min=1 - self.ppo_clip_range, max=1 + self.ppo_clip_range
        )
        clipped_surrogate = clipped_prob_ratio * batch["advantages"]
        # Pessimistic bound: per-sample minimum of clipped and unclipped terms.
        losses = torch.min(surrogate, clipped_surrogate)
        loss = -losses.mean()
        self.losses.append(loss)
        # Add logs
        self.logger.add_log("Loss/policy/ppo_clip", loss.item())
        self.logger.add_histogram("Policy/clipped_prob_ratio", clipped_prob_ratio.data)
        clip_frac = torch.mean(
            (torch.abs(prob_ratio - 1) > self.ppo_clip_range).float()
        )
        self.logger.add_log("Policy/clip_fraction", clip_frac.item())

    def add_ppo_adaptive_kl(self, batch, new_dists):
        """Adaptive-KL-penalty PPO loss (currently not registered)."""
        prob_ratio = self.calculate_prob_ratio(batch, new_dists)
        surrogate = prob_ratio * batch["advantages"]
        kl_div = self.kl_divergence(new_dists)
        kl_loss = self.kl_penalty_coef * kl_div
        # Quadratic hinge that activates once KL exceeds twice the target.
        hinge_loss = 1000 * torch.clamp(kl_div - 2 * self.kl_target, min=0) ** 2
        losses = surrogate - kl_loss - hinge_loss
        loss = -losses.sum()
        self.losses.append(loss)

    def add_losses(self, batch):
        new_parameters = self.forward(batch["state_ts"])
        new_dists = [self.create_dist(p) for p in new_parameters]
        self.add_ppo_clip(batch, new_dists)
        # self.add_ppo_adaptive_kl(batch, new_dists)
        self.add_value_nn_loss(batch)

    def train(self, batch, num_epochs=10):
        # Same as the base class, but defaults to 10 epochs per batch.
        super().train(batch=batch, num_epochs=num_epochs)
from abc import ABC, abstractmethod, abstractproperty
from collections import ChainMap
import os
import numpy as np
import torch
import torch.nn as nn
import torchrl.utils as U
from torchrl.nn import ModuleExtended
from multiprocessing import Process
class BaseModel(ModuleExtended, ABC):
    """
    Basic TorchRL model: a pytorch network plus the machinery (losses,
    callbacks, logger) needed to train it from batches of experience.

    Parameters
    ----------
    model: nn.Module
        A pytorch model.
    batcher: torchrl.batcher
        A torchrl batcher.
    cuda_default: bool
        If True and cuda is supported, use it (Default is True).
    """

    def __init__(self, model, batcher, *, cuda_default=True):
        super().__init__()
        self.model = model
        self.batcher = batcher
        self.memory = U.memories.DefaultMemory()
        self.losses = []
        self.register_losses()
        self.callbacks = U.Callback()
        self.register_callbacks()
        self.logger = None
        # Enable cuda if wanted
        self.cuda_enabled = cuda_default and torch.cuda.is_available()
        if self.cuda_enabled:
            self.model.cuda()

    @property
    @abstractmethod
    def batch_keys(self):
        """
        The batch keys needed for computing all losses.
        This is done to reduce overhead when sampling a dataloader,
        it makes sure only the requested keys are being sampled.
        """

    # NOTE: this is an ordinary method (``__init__`` calls it as
    # ``self.register_losses()`` and subclasses implement it as a plain
    # method), so it must not be declared a property.
    @abstractmethod
    def register_losses(self):
        """
        Register loss functions via :meth:`register_loss`; they are summed
        by :meth:`calculate_loss` when computing the gradients.
        """

    @staticmethod
    @abstractmethod
    def output_layer(input_shape, action_info):
        """
        The final layer of the model, will be appended to the model head.

        Parameters
        ----------
        input_shape: int or tuple
            The shape of the input to this layer.
        action_info: dict
            Dictionary containing information about the action space.

        Examples
        --------
        The output of most PG models have the same dimension as the action,
        but the output of the Value models is rank 1. This is where this is defined.
        """

    @property
    def body(self):
        """The first (feature-extracting) section of the network."""
        return self.model.layers[0]

    @property
    def head(self):
        """The final (task-specific) section of the network."""
        return self.model.layers[1]

    @property
    def name(self):
        """Name used to namespace this model's logs."""
        return self.__class__.__name__

    @property
    def num_steps(self):
        # A property, not a method: schedules (e.g. entropy_coef_fn in
        # BasePGModel) consume ``self.num_steps`` directly as a value.
        return self.batcher.num_steps

    def register_loss(self, func):
        """Add ``func`` to the losses evaluated by :meth:`calculate_loss`."""
        self.losses.append(func)

    def register_callbacks(self):
        self.callbacks.register_cleanup(self.write_logs)
        self.callbacks.register_cleanup(self.clear_memory)

    def clear_memory(self, batch):
        self.memory.clear()

    def calculate_loss(self, batch):
        """Evaluate every registered loss on ``batch`` and return their sum."""
        losses = {f.__name__: f(batch) for f in self.losses}
        self.memory.losses.append(losses)
        return sum(losses.values())

    def forward(self, x):
        """
        Defines the computation performed at every call.

        Parameters
        ----------
        x: numpy.ndarray
            The environment state.
        """
        return self.model(x)

    def attach_logger(self, logger):
        """
        Register a logger to this model.

        Parameters
        ----------
        logger: torchrl.utils.logger
        """
        self.logger = logger

    def wrap_name(self, name):
        return "/".join([self.name, name])

    def add_log(self, name, value, **kwargs):
        self.logger.add_log(name=self.wrap_name(name), value=value, **kwargs)

    def add_tf_only_log(self, name, value, **kwargs):
        self.logger.add_tf_only_log(name=self.wrap_name(name), value=value, **kwargs)

    def add_debug_log(self, name, value, **kwargs):
        self.logger.add_debug(name=self.wrap_name(name), value=value, **kwargs)

    def add_histogram_log(self, name, values, **kwargs):
        self.logger.add_histogram(name=self.wrap_name(name), values=values, **kwargs)

    def write_logs(self, batch):
        """
        Write logs to the terminal and to a tf log file.

        Parameters
        ----------
        batch: Batch
            Some logs might need the batch for calculation.
        """
        total_loss = 0
        # Average each named loss over the updates recorded this cycle.
        for k in self.memory.losses[0]:
            partial_loss = 0
            for loss in self.memory.losses:
                partial_loss += loss[k]
            partial_loss = partial_loss / len(self.memory.losses)
            total_loss += partial_loss
            self.add_tf_only_log("/".join(["Loss", k]), partial_loss, precision=4)
        self.add_log("Loss/Total", total_loss, precision=4)

    @classmethod
    def from_config(cls, config, batcher=None, body=None, head=None, **kwargs):
        """
        Creates a model from a configuration file.

        Parameters
        ----------
        config: Config
            Should contain at least a network definition (``nn_config`` section).
        batcher: torchrl.batcher
            The batcher providing state/action info for the network.
        body: optional
            A pre-built body to share (Default is None).
        head: optional
            A pre-built head to share (Default is None).
        kwargs: key-word arguments
            Extra arguments that will be passed to the class constructor.

        Returns
        -------
        torchrl.models
            A TorchRL model.
        """
        if "body" not in config.nn_config:
            config.nn_config.body = []
        if "head" not in config.nn_config:
            config.nn_config.head = []
        nn_config = config.pop("nn_config")
        model = U.nn_from_config(
            config=nn_config,
            state_info=batcher.get_state_info(),
            action_info=batcher.get_action_info(),
            body=body,
            head=head,
        )
        output_layer = cls.output_layer(
            input_shape=model.get_output_shape(batcher.get_state_info().shape),
            action_info=batcher.get_action_info(),
        )
        model.layers.head.append(output_layer)
        return cls(model=model, batcher=batcher, **config.as_dict(), **kwargs)

    @classmethod
    def from_file(cls, file_path, *args, **kwargs):
        """Load a Config from ``file_path`` and build the model from it."""
        config = U.Config.load(file_path)
        return cls.from_config(config, *args, **kwargs)

    @classmethod
    def from_arch(cls, arch, *args, **kwargs):
        """Build the model from a named architecture file shipped in ``archs/``."""
        module_path = os.path.abspath(os.path.dirname(__file__))
        path = os.path.join(module_path, "archs", arch)
        return cls.from_file(file_path=path, *args, **kwargs)
import torch
import torch.nn.functional as F
from torch.distributions import kl_divergence
from torchrl.models import BasePGModel
class SurrogatePGModel(BasePGModel):
    """
    Surrogate policy-gradient model (deprecated implementation).

    NOTE(review): assumes ``self.saved_dists`` was filled elsewhere (one
    distribution per sampled action) before :meth:`train` runs — verify
    against the caller.
    """

    def train(self, batch, num_epochs=1):
        batch["actions"] = self._to_tensor(batch["actions"])
        batch["advantages"] = self._to_tensor(batch["advantages"]).view(-1, 1)
        # Log-probs under the behavior (old) policy, frozen without grads.
        with torch.no_grad():
            batch["old_log_probs"] = torch.stack(
                [
                    dist.log_prob(action).sum()
                    for dist, action in zip(self.saved_dists, batch["actions"])
                ]
            )
        super().train(batch=batch, num_epochs=num_epochs)
        # The saved distributions are stale after the update; drop them.
        self.saved_dists = []

    def calculate_prob_ratio(self, batch, new_dists):
        """Ratio pi_new(a|s) / pi_old(a|s), computed in log space."""
        new_log_probs = torch.stack(
            [
                new_dist.log_prob(action).sum()
                for new_dist, action in zip(new_dists, batch["actions"])
            ]
        )
        prob_ratio = (new_log_probs - batch["old_log_probs"]).exp()
        return prob_ratio

    def add_surrogate_pg_loss(self, batch, new_dists):
        """Plain (unclipped) surrogate objective, negated for minimization."""
        prob_ratio = self.calculate_prob_ratio(batch, new_dists)
        surrogate = prob_ratio * batch["advantages"]
        loss = -surrogate.sum()
        self.losses.append(loss)

    def add_losses(self, batch):
        new_parameters = self.forward(batch["state_ts"])
        new_dists = [self.create_dist(p) for p in new_parameters]
        self.add_surrogate_pg_loss(batch, new_dists)
        self.add_value_nn_loss(batch)

    def kl_divergence(self, new_dists):
        """Mean KL between the saved (old) dists and ``new_dists``."""
        kl_divs = [
            kl_divergence(old_dist, new_dist).sum()
            for old_dist, new_dist in zip(self.saved_dists, new_dists)
        ]
        return torch.stack(kl_divs).mean()

    def write_logs(self, batch):
        new_parameters = self.forward(batch["state_ts"])
        new_dists = [self.create_dist(p) for p in new_parameters]
        self.logger.add_log("Policy/Entropy", self.entropy(new_dists).item())
        self.logger.add_log(
            "Policy/KL_div", self.kl_divergence(new_dists).item(), precision=5
        )
import torch
import torchrl.utils as U
from torchrl.models import SurrogatePGModel
from torch.distributions.kl import kl_divergence
class PPOAdaptiveModel(SurrogatePGModel):
    """
    Proximal Policy Optimization as described in https://arxiv.org/pdf/1707.06347.pdf.

    This is the adaptive-KL-penalty variant: the surrogate objective is
    penalized by a KL term whose coefficient is doubled/halved after each
    training cycle to keep the measured KL close to ``kl_target``.
    """

    def __init__(self, model, batcher, *, kl_target=0.01, kl_penalty=1., **kwargs):
        """
        Parameters
        ----------
        kl_target: float or callable
            Desired KL divergence; may be scheduled over training steps.
        kl_penalty: float
            Initial coefficient of the KL penalty term.
        """
        super().__init__(model=model, batcher=batcher, **kwargs)
        self.kl_target_fn = U.make_callable(kl_target)
        self.kl_penalty = kl_penalty

    @property
    def kl_target(self):
        return self.kl_target_fn(self.num_steps)

    def register_losses(self):
        self.register_loss(self.surrogate_pg_loss)
        self.register_loss(self.kl_penalty_loss)
        self.register_loss(self.hinge_loss)
        self.register_loss(self.entropy_loss)

    def register_callbacks(self):
        super().register_callbacks()
        self.callbacks.register_on_mini_batch_start(self.add_kl_div)
        self.callbacks.register_on_epoch_end(self.add_new_dist)
        self.callbacks.register_on_epoch_end(self.kl_early_stopping)
        self.callbacks.register_on_train_end(self.kl_penalty_adjust)

    def kl_penalty_loss(self, batch):
        # Adaptive KL penalty term (coefficient adjusted after each cycle).
        loss = self.kl_penalty * batch.kl_div
        return loss

    def hinge_loss(self, batch):
        # Quadratic hinge that activates once KL exceeds twice the target.
        loss = 50 * max(0, batch.kl_div - 2. * self.kl_target) ** 2
        return loss

    def add_kl_div(self, batch):
        # KL between the snapshotted old policy (restricted to this
        # mini-batch's indices) and the current policy.
        batch.kl_div = (
            kl_divergence(self.memory.old_dists[batch.idxs], self.memory.new_dists)
            .sum(-1)
            .mean()
        )

    def kl_penalty_adjust(self, batch):
        # Adjust KL penalty
        if self.kl_div < self.kl_target / 1.5:
            self.kl_penalty /= 2
        if self.kl_div > self.kl_target * 1.5:
            self.kl_penalty *= 2

    def kl_early_stopping(self, batch):
        # Returning True aborts the remaining epochs for this batch.
        if self.kl_div > 4 * self.kl_target:
            print("Early stopping")
            return True

    def write_logs(self, batch):
        super().write_logs(batch)
        self.add_log("KL Target", self.kl_target, precision=4)
        self.add_log("KL Penalty", self.kl_penalty, precision=4)
import torch
import torch.nn.functional as F
from torch.distributions import Categorical, Normal
import torchrl.utils as U
from torchrl.models import BaseModel
class BasePGModel(BaseModel):
    """Common machinery shared by all Policy Gradient models.

    Parameters
    ----------
    policy_nn_config: Config
        Config object specifying the policy network structure.
    value_nn_config: Config
        Config object specifying the value network structure
        (Default is None, meaning no value network is used).
    share_body: bool
        If True, the policy and value networks use the same body
        (Default is False).
    """

    def __init__(
        self, policy_nn_config, value_nn_config=None, share_body=False, **kwargs
    ):
        self.policy_nn_config = policy_nn_config
        self.value_nn_config = value_nn_config
        self.share_body = share_body
        self.saved_dists = []
        super().__init__(**kwargs)

    def create_networks(self):
        """Instantiate the policy (and optionally the value) networks.

        Called by the base class, using the configs passed to ``__init__``.
        """
        self.policy_nn = self.net_from_config(self.policy_nn_config)
        if self.value_nn_config is None:
            self.value_nn = None
        else:
            shared_body = self.policy_nn.body if self.share_body else None
            self.value_nn = self.net_from_config(self.value_nn_config, body=shared_body)

    def create_dist(self, parameters):
        """Build the action distribution appropriate for the environment.

        Parameters
        ----------
        parameters
            Raw network outputs: logits for discrete actions, or a
            (mean, log-std) column pair for continuous ones.
        """
        dtype = self.action_info["dtype"]
        if dtype == "discrete":
            return Categorical(logits=parameters)
        if dtype == "continuous":
            means = parameters[:, 0]
            std_devs = parameters[:, 1].exp()
            return Normal(loc=means, scale=std_devs)
        raise ValueError(
            "No distribution is defined for {} actions".format(dtype)
        )

    def forward(self, x):
        """Run the policy network on a state.

        Parameters
        ----------
        x: numpy.ndarray
            The environment state.

        Returns
        -------
        numpy.ndarray
            Raw policy outputs (action probabilities / distribution params).
        """
        return self.policy_nn.head(self.policy_nn.body(x))

    def select_action(self, state):
        """Sample an action from the policy for the given state.

        Discrete action spaces use a categorical distribution over the
        outputs; continuous ones interpret them as mean and (log) std of a
        normal distribution. The distribution is stored for later use by
        the losses and logs.

        Parameters
        ----------
        state: numpy.ndarray
            The environment state.

        Returns
        -------
        action: int or numpy.ndarray
        """
        dist = self.create_dist(self.forward(state)[0])
        sampled_action = dist.sample()
        self.saved_dists.append(dist)
        return U.to_numpy(sampled_action)

    def add_value_nn_loss(self, batch):
        """Append the value-network MSE loss to the internal loss list.

        Parameters
        ----------
        batch: dict
            Should contain all the necessary information for computing the loss.
        """
        preds = self.value_nn.head(self.value_nn.body(batch["state_ts"])).view(-1)
        vtarget = self._to_tensor(batch["vtarget"])
        loss = F.mse_loss(input=preds, target=vtarget)
        self.losses.append(loss)
        # Logs for monitoring how well the value function fits its targets.
        self.logger.add_log("Loss/value_nn/mse", loss.item())
        ev = 1 - torch.var(vtarget - preds) / torch.var(vtarget)
        self.logger.add_log("Value_NN/explained_variance", ev.item())
        self.logger.add_log("vtarget_var", torch.var(vtarget).item())
        self.logger.add_log("value_nn_var", torch.var(preds).item())
        self.logger.add_log("vtarget_mean", vtarget.mean().item())
        self.logger.add_log("value_nn_mean", preds.mean().item())

    def add_state_values(self, traj):
        """Store value-network estimates for each state of the trajectory.

        Parameters
        ----------
        traj: dict
            A dictionary of transitions; its ``state_ts`` entry feeds the
            value network. No-op when no value network is configured.
        """
        if self.value_nn is None:
            return
        values = self.value_nn.head(self.value_nn.body(traj["state_ts"]))
        traj["state_values"] = values.data.view(-1).cpu().numpy()

    def entropy(self, dists):
        """Mean entropy over a list of distributions.

        Parameters
        ----------
        dists: list
            List of distributions.
        """
        return torch.stack([d.entropy().sum() for d in dists]).mean()

    def write_logs(self, batch):
        """Write the policy entropy through the current logger."""
        self.logger.add_log("Policy/Entropy", self.entropy(self.saved_dists).item())
from collections import OrderedDict
from torchrl.utils import get_obj, Config
from torchrl.nn import SequentialExtended
def auto_input_shape(obj_config, input_shape):
    """Fill in the input-size parameter appropriate for the layer type.

    Mutates ``obj_config`` in place, adding ``in_features`` (linear-style
    layers) or ``in_channels`` (conv layers) derived from ``input_shape``.

    Parameters
    ----------
    obj_config: dict
        A dict containing the function and the parameters for creating the object.
    input_shape: list
        The input dimensions.

    Raises
    ------
    ValueError
        If the layer type is not supported.
    """
    name = obj_config["func"].__name__
    if "FlattenLinear" in name or "ActionLinear" in name:
        # These layers flatten internally, so they take the full shape.
        obj_config["in_features"] = input_shape
    elif "Linear" in name:
        # BUG FIX: the second half of this message used to be a separate,
        # no-op string statement, so it never appeared in the assert error.
        assert len(input_shape) == 1, (
            "Input rank for Linear must be one, "
            "for higher ranks inputs consider using FlattenLinear"
        )
        obj_config["in_features"] = input_shape[0]
    elif "Conv2d" in name:
        obj_config["in_channels"] = input_shape[0]
    else:
        raise ValueError("Auto input for {} not supported".format(name))
def get_module_list(config, input_shape, action_info):
    """Instantiate every layer described by ``config``.

    Parameters
    ----------
    config: Config
        The configuration object that should contain the basic network structure.
    input_shape: list
        The input dimensions.
    action_info: dict
        Dict containing information about the environment actions (e.g. shape).

    Returns
    -------
    list of layers
        A list containing all the instantiated layers.
    """
    layers = []
    for idx, layer_cfg in enumerate(config):
        # Only the first layer needs its input size inferred.
        if idx == 0:
            auto_input_shape(layer_cfg, input_shape)
        # An `Action` layer's output shape equals the action shape.
        if "ActionLinear" in layer_cfg["func"].__name__:
            layer_cfg["action_info"] = action_info
        layers.append(get_obj(layer_cfg))
    return layers
def nn_from_config(config, state_info, action_info, body=None, head=None):
    """Create a pytorch model following the instructions of ``config``.

    Parameters
    ----------
    config: Config
        The configuration object that should contain the basic network structure.
    state_info: dict
        Dict containing information about the environment states (e.g. shape).
    action_info: dict
        Dict containing information about the environment actions (e.g. shape).
    body: Module
        If given, used instead of creating a new body (Default is None).
    head: Module
        If given, used instead of creating a new head (Default is None).

    Returns
    -------
    torchrl.SequentialExtended
        A torchrl NN (a pytorch NN with extended functionalities).
    """
    if body is None:
        body = SequentialExtended(
            *get_module_list(
                config=config.body,
                input_shape=state_info.shape,
                action_info=action_info,
            )
        )
    if head is None:
        # The head's input shape is whatever the body produces.
        head = SequentialExtended(
            *get_module_list(
                config=config.head,
                input_shape=body.get_output_shape(state_info.shape),
                action_info=action_info,
            )
        )
    return SequentialExtended(OrderedDict([("body", body), ("head", head)]))
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torchrl.utils.memories import SimpleMemory
from torchrl.utils import to_tensor, join_first_dims
class Batch(SimpleMemory):
    """A dict-like container of transitions with batching utilities."""

    def __len__(self):
        return len(self["state_t"])

    def apply_to_all(self, func):
        """Return a new Batch with ``func`` applied to every value."""
        return Batch((key, func(value)) for key, value in self.items())

    def apply_to_keys(self, func, keys):
        """Return a new Batch with ``func`` applied to the selected keys."""
        return Batch((key, func(self[key])) for key in keys)

    def sample(self, num_mini_batches, shuffle):
        """Yield mini-batches built from every key in the batch."""
        return self.sample_keys(
            keys=list(self.keys()), num_mini_batches=num_mini_batches, shuffle=shuffle
        )

    def sample_keys(self, keys, num_mini_batches, shuffle):
        """Yield ``num_mini_batches`` mini-batches over the selected keys.

        An ``idxs`` entry is added so each mini-batch can be traced back to
        its rows in the full batch. With one mini-batch, the full batch is
        yielded unchanged.
        """
        self["idxs"] = torch.arange(len(self)).long()
        keys = keys + ["idxs"]
        if num_mini_batches <= 1:
            yield self
        else:
            tensors = [self[key] for key in keys]
            loader = DataLoader(
                TensorDataset(*tensors),
                batch_size=len(self) // num_mini_batches,
                shuffle=shuffle,
            )
            for values in loader:
                yield Batch(zip(keys, values))

    def concat_batch(self):
        """Collapse the first two dimensions of every array/tensor value."""
        def merge(value):
            if isinstance(value, (np.ndarray, torch.Tensor)):
                return join_first_dims(value, num_dims=2)
            return value

        return self.apply_to_all(merge)

    def to_array_or_tensor(self):
        """Stack list values into numpy arrays or torch tensors."""
        stacked = Batch()
        for key, value in self.items():
            if isinstance(value[0], np.ndarray):
                stacked[key] = np.stack(value)
            elif isinstance(value[0], torch.Tensor):
                stacked[key] = torch.stack(value)
            else:
                stacked[key] = value
        return stacked

    @classmethod
    def from_trajs(cls, trajs):
        """Build a Batch by concatenating a list of trajectories."""
        return cls(**Batch.concat_trajectories(trajs))

    @staticmethod
    def concat_trajectories(trajs):
        """Concatenate each key across all trajectories into one array."""
        return {key: np.concatenate([traj[key] for traj in trajs]) for key in trajs[0]}
import numpy as np
import torch
from numbers import Number
from collections import OrderedDict
from torch.autograd import Variable
from torchrl.utils import EPSILON
import cv2
from functools import wraps
def get_obj(config):
    """Instantiate the object described by ``config``.

    The ``"func"`` entry is the callable; every other entry is passed to it
    as a keyword argument. The dict is left unchanged on return.

    Parameters
    ----------
    config: dict
        A dict containing the function and the parameters for creating the object.

    Returns
    -------
    obj
        The created object.
    """
    factory = config.pop("func")
    instance = factory(**config)
    # Put the entry back so the same config can be reused later.
    config["func"] = factory
    return instance
def env_from_config(config):
    """Create an environment from a configuration object.

    Parameters
    ----------
    config: Config
        Configuration object containing the environment function.

    Returns
    -------
    env: torchrl.envs
        A torchrl environment.

    Raises
    ------
    ValueError
        If no env is defined in the config obj.
    """
    try:
        return get_obj(config.env.as_dict())
    except AttributeError:
        raise ValueError(
            "The env must be defined in the config or passed as an argument"
        )
def to_np(value):
    """Convert ``value`` to a numpy array.

    Handles numbers, numpy arrays, torch tensors, LazyArray wrappers, and
    generic iterables (converted element-wise when possible).
    """
    if isinstance(value, Number):
        return np.array(value)
    if isinstance(value, np.ndarray):
        return value
    if isinstance(value, torch.Tensor):
        return value.detach().cpu().numpy()
    if isinstance(value, LazyArray):
        return np.array(value)
    # Generic iterable: convert each element; non-iterables fall through to
    # a plain np.array call. (A trailing `raise ValueError(...)` used to
    # follow this try/except but was unreachable — both branches return —
    # so it has been removed.)
    try:
        return np.array([to_np(v) for v in value])
    except TypeError:
        return np.array(value)
def to_tensor(x, cuda_default=True):
    """Convert a numpy array to a torch tensor.

    Bools are promoted to ints (pytorch doesn't support bool here) and
    doubles demoted to single precision. The result is moved to the GPU
    when one is available, unless ``cuda_default`` is False. Non-array
    inputs pass through unchanged (tensors may still be moved to GPU).
    """
    if isinstance(x, np.ndarray):
        converted = x
        if converted.dtype == "bool":
            converted = converted.astype("int")
        # Keep everything single precision.
        if converted.dtype == "float64":
            converted = converted.astype("float32")
        x = torch.from_numpy(converted)
    if cuda_default and isinstance(x, torch.Tensor) and torch.cuda.is_available():
        x = x.cuda()
    return x
def explained_var(target, preds):
    """Fraction of the target's variance explained by the predictions.

    Useful for estimating the quality of the value function.

    Parameters
    ----------
    target: torch.Tensor
        Target values.
    preds: torch.Tensor
        Predicted values.

    Returns
    -------
    float
        The explained variance (1 means a perfect fit).
    """
    residual_var = (target.squeeze() - preds.squeeze()).var()
    return 1 - residual_var / target.view(-1).var()
def normalize(array):
    """Standardize ``array`` to zero mean and unit variance.

    EPSILON guards against division by zero for constant arrays.
    """
    centered = array - np.mean(array)
    return centered / (np.std(array) + EPSILON)
def one_hot(array, num_classes):
    """One-hot encode an integer array into ``num_classes`` columns."""
    identity = np.eye(num_classes)
    return identity[array]
def make_callable(x):
    """Wrap ``x`` so it can always be called.

    Callables are returned unchanged; iterables are converted element-wise;
    anything else becomes a function that ignores its arguments and returns
    ``x``.
    """
    if callable(x):
        return x
    try:
        return [make_callable(item) for item in x]
    except TypeError:
        return lambda *args, **kwargs: x
def join_first_dims(x, num_dims):
    """Merge the first ``num_dims`` dimensions of ``x`` into a single one."""
    merged_shape = (-1, *x.shape[num_dims:])
    return x.reshape(merged_shape)
class LazyArray:
    """Lazily-evaluated array wrapper.

    Inspired by OpenAI `LazyFrames <https://goo.gl/nTmVW8>`_: data is kept
    as a list so arrays pointing to the same memory are not duplicated — a
    memory-optimization trick for the `ReplayBuffer`. An optional
    ``transform`` function is applied only when the object is actually
    converted to a numpy array.

    Parameters
    ----------
    data: list
        A list of numpy arrays.
    transform: function
        A function that is applied lazily to the array.
    """

    def __init__(self, data, transform=None, **kwargs):
        self.data = data
        self.transform = transform
        self.kwargs = kwargs

    def __array__(self):
        result = to_np(self.data, **self.kwargs)
        if self.transform is not None:
            result = self.transform(result)
        return result

    def __iter__(self):
        return (LazyArray(item, **self.kwargs) for item in self.data)

    @property
    def shape(self):
        # Materializes the array; only use when the cost is acceptable.
        return self.__array__().shape
from abc import ABCMeta
from collections import OrderedDict
import yaml
from yaml.representer import Representer
# Register a PyYAML representer so abstract classes (whose metaclass is
# ABCMeta) are serialized by their qualified name instead of failing.
Representer.add_representer(ABCMeta, Representer.represent_name)
class Config:
    """
    Configuration object used for initializing an Agent.

    It maintains the order from which the attributes have been set.

    Parameters
    ----------
    configs: Keyword arguments
        Additional parameters that will be stored.

    Returns
    -------
    Config object
        An object containing all configuration details (with possibly nested Config).
    """

    def __init__(self, *args, **kwargs):
        # We want to maintain the order of the attributes,
        # this is especially necessary when defining NNs architectures
        self.__dict__["_attrs"] = OrderedDict()
        for i, value in enumerate(args):
            self._nested_loader("attr{}".format(i), value)
        for key, value in kwargs.items():
            self._nested_loader(key, value)

    def __getattr__(self, value):
        try:
            return self.__dict__["_attrs"][value]
        except KeyError:
            # BUG FIX: this was a bare `except:`, which also swallowed
            # unrelated errors; only a missing key should become an
            # AttributeError.
            raise AttributeError(value)

    def __setattr__(self, key, value):
        self.__dict__["_attrs"][key] = value

    def __repr__(self):
        return yaml.dump(self.as_dict(), default_flow_style=False)

    def __iter__(self):
        # Iterates over the attribute names, in insertion order.
        yield from self.as_dict()

    def _nested_loader(self, key, value):
        # Nested OrderedDicts become nested Config sections.
        if isinstance(value, OrderedDict):
            return self.new_section(key, **value)
        else:
            setattr(self, key, value)

    def items(self):
        return self.as_dict().items()

    def values(self):
        return self.as_dict().values()

    def pop(self, *args, **kwargs):
        return self.as_dict().pop(*args, **kwargs)

    def get(self, *args, **kwargs):
        return self.as_dict().get(*args, **kwargs)

    def update(self, config):
        self.as_dict().update(config.as_dict())

    def as_dict(self):
        """
        Returns all object attributes as a nested OrderedDict.

        Returns
        -------
        dict
            Nested OrderedDict containing all object attributes.
        """
        return self.__dict__["_attrs"]

    def as_list(self):
        return list(self.as_dict().values())

    def new_section(self, name, **configs):
        """
        Creates a new Config object and adds it as an attribute of this instance.

        Parameters
        ----------
        name: str
            Name of the new section.
        configs: Keyword arguments
            Parameters that will be stored in this section, accepts nested parameters.

        Examples
        --------
        Simple use case::

            config.new_section('new_section_name', attr1=value1, attr2=value2, ...)

        Nested parameters::

            config.new_section('new_section_name', attr1=Config(attr1=value1, attr2=value2))

        It's possible to access the variable like so::

            config.new_section_name.attr1
        """
        self._nested_loader(name, Config(**configs))

    def save(self, file_path):
        """
        Saves the current configuration to a YAML file.

        The configuration is stored as a nested dictionary (maintaining the order).

        Parameters
        ----------
        file_path: str
            Path to write the file (the ``.yaml`` suffix is appended).
        """
        with open(file_path + ".yaml", "w") as f:
            yaml.dump(self, f, default_flow_style=False)

    @classmethod
    def from_default(cls, name):
        """
        Loads configuration from a default agent.

        Parameters
        ----------
        name: str
            Name of the desired config file ('VanillaPG', add_more)

        Returns
        -------
        Config
            A configuration object loaded from a YAML file
        """
        if name == "PPO":
            return cls.load("CHANGE")

    @staticmethod
    def load(file_path):
        """
        Loads configuration from a YAML file.

        Parameters
        ----------
        file_path: str
            Path of the file to be loaded (the ``.yaml`` suffix is appended).

        Returns
        -------
        Config
            A configuration object loaded from a YAML file
        """
        with open(file_path + ".yaml", "r") as f:
            # BUG FIX: yaml.load without an explicit Loader raises a
            # TypeError on PyYAML >= 6; FullLoader matches the historical
            # default behavior.
            return yaml.load(f, Loader=yaml.FullLoader)
import gym
from torchrl.envs.base_env import BaseEnv
import torchrl.utils as U
class GymEnv(BaseEnv):
    """
    Creates and wraps a gym environment.

    Parameters
    ----------
    env_name: str
        The Gym ID of the env. For a list of available envs check
        `this <https://gym.openai.com/envs/>`_ page.
    wrappers: list
        List of wrappers to be applied on the env.
        Each wrapper should be a function that receives and returns the env.
    """

    def __init__(self, env_name, **kwargs):
        super().__init__(env_name, **kwargs)

    def _create_env(self):
        """Instantiate the underlying gym environment from its ID."""
        return gym.make(self.env_name)

    @property
    def simulator(self):
        return GymEnv

    def reset(self):
        """
        Calls the reset method on the gym environment.

        Returns
        -------
        state: numpy.ndarray
            A numpy array with the state information.
        """
        return self.env.reset()

    def step(self, action):
        """
        Calls the step method on the gym environment.

        Parameters
        ----------
        action: int or float or numpy.ndarray
            The action to be executed in the environment, it should be an int for
            discrete enviroments and float for continuous. There's also the possibility
            of executing multiple actions (if the environment supports so),
            in this case it should be a numpy.ndarray.

        Returns
        -------
        next_state: numpy.ndarray
            A numpy array with the state information.
        reward: float
            The reward.
        done: bool
            Flag indicating the termination of the episode.
        """
        # Gym's discrete envs expect a plain python int, not a numpy scalar.
        if self.get_action_info().space == "discrete":
            action = int(action)
        next_state, reward, done, info = self.env.step(action)
        return next_state, reward, done, info

    def record(self, path):
        """Wrap the env in a Monitor that records every episode to ``path``."""
        # BUG FIX: Monitor was referenced without being imported, so this
        # method always raised NameError.
        from gym.wrappers import Monitor

        self.env = Monitor(env=self.env, directory=path, video_callable=lambda x: True)

    def get_state_info(self):
        """
        Dictionary containing the shape and type of the state space.
        If it is continuous, also contains the minimum and maximum value.
        """
        return GymEnv.get_space_info(self.env.observation_space)

    def get_action_info(self):
        """
        Dictionary containing the shape and type of the action space.
        If it is continuous, also contains the minimum and maximum value.
        """
        return GymEnv.get_space_info(self.env.action_space)

    def sample_random_action(self):
        """Draw a random action from the env's action space."""
        return self.env.action_space.sample()

    def seed(self, value):
        self.env.seed(value)

    def update_config(self, config):
        """
        Updates a Config object to include information about the environment.

        Parameters
        ----------
        config: Config
            Object used for storing configuration.
        """
        super().update_config(config)
        config.env.obj.update(dict(wrappers=self.wrappers))

    def close(self):
        self.env.close()

    @staticmethod
    def get_space_info(space):
        """
        Gets the shape of the possible types of states in gym.

        Parameters
        ----------
        space: gym.spaces
            Space object that describes the valid actions and observations

        Returns
        -------
        dict
            Dictionary containing the space shape and type
        """
        if isinstance(space, gym.spaces.Box):
            return U.memories.SimpleMemory(
                shape=space.shape,
                low_bound=space.low,
                high_bound=space.high,
                space="continuous",
                dtype=space.dtype,
            )
        if isinstance(space, gym.spaces.Discrete):
            return U.memories.SimpleMemory(
                shape=space.n, space="discrete", dtype=space.dtype
            )
        if isinstance(space, gym.spaces.MultiDiscrete):
            return U.memories.SimpleMemory(
                shape=space.shape, space="multi_discrete", dtype=space.dtype
            )
from abc import ABC, abstractmethod
class BaseEnv(ABC):
    """
    Abstract base class for implementing new environments.

    Subclasses must describe their state and action spaces and provide
    creation, reset, and step logic; optional hooks (recording, seeding,
    closing, random actions) raise NotImplementedError by default.

    Parameters
    ----------
    env_name: str
        The environment name.
    """

    def __init__(self, env_name):
        self.env_name = env_name
        self.env = self._create_env()

    def __str__(self):
        return "<{}>".format(type(self).__name__)

    @abstractmethod
    def get_state_info(self):
        """
        Returns a dict describing the state space.

        The dict should contain two keys: ``shape`` indicating the state
        shape, and ``dtype`` indicating the state type.

        Example
        -------
        State space containing 4 continuous actions::

            return dict(shape=(4,), dtype='continuous')
        """

    @abstractmethod
    def get_action_info(self):
        """
        Returns a dict describing the action space.

        The dict should contain two keys: ``shape`` indicating the action
        shape, and ``dtype`` indicating the action type. If dtype is
        ``int`` a discrete action space is assumed.

        Example
        -------
        Action space containing 4 float numbers::

            return dict(shape=(4,), dtype='float')
        """

    @property
    @abstractmethod
    def simulator(self):
        """
        Returns the name of the simulator being used as a string.
        """

    @abstractmethod
    def _create_env(self):
        """
        Creates and returns an environment.

        Returns
        -------
        Environment object.
        """

    @abstractmethod
    def reset(self):
        """
        Resets the environment to an initial state.

        Returns
        -------
        numpy.ndarray
            A numpy array with the state information.
        """

    @abstractmethod
    def step(self, action):
        """
        Executes ``action`` on the environment.

        Parameters
        ----------
        action: int or float or numpy.ndarray
            ``int`` for discrete environments, ``float`` for continuous;
            a ``numpy.ndarray`` may carry multiple actions when the
            environment supports it.

        Returns
        -------
        next_state: numpy.ndarray
            A numpy array with the state information.
        reward: float
            The reward.
        done: bool
            Flag indicating the termination of the episode.
        info: dict
            Dict containing additional information about the state.
        """

    @property
    def num_lives(self):
        raise NotImplementedError

    @property
    def unwrapped(self):
        return self

    def sample_random_action(self):
        raise NotImplementedError

    def record(self, path):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def update_config(self, config):
        """
        Updates a Config object to include information about the environment.

        Parameters
        ----------
        config: Config
            Object used for storing configuration.
        """
        # The low/high bounds are excluded from the stored state info.
        state_info = {
            key: value
            for key, value in self.get_state_info().items()
            if key not in ("low_bound", "high_bound")
        }
        config.new_section(
            "env",
            obj=dict(func=self.simulator, env_name=self.env_name),
            state_info=state_info,
            action_info=self.get_action_info(),
        )
from typing import Generic, TypeVar, Any, SupportsFloat
# Type variables parameterizing RewardsEnv: the observation type, the
# action type, and the type of a single rendered frame.
ObsType = TypeVar("ObsType")
ActType = TypeVar("ActType")
RenderFrame = TypeVar("RenderFrame")
class RewardsEnv(Generic[ObsType, ActType]):
    """Generic base class for rewards.ai agent environments.

    Follows open-source standards and patterns similar to
    `Farama-Foundation/Gymnasium <https://github.com/Farama-Foundation/Gymnasium/>`_.
    An environment can be partially or fully observed by an agent; a single
    RewardsEnv supports one agent (multi-agent support is planned for
    future releases).

    Main API methods:

    - :meth:`reset` - resets the environment to its initial state and
      returns the initial observation plus environment info; called before
      :meth:`step` and after every completed episode.
    - :meth:`step` - applies an action and returns the next observation,
      the reward, the terminal flag, and per-step info.
    - :meth:`render` - renders the environment to visualize what the agent
      sees, e.g. "human" or "rgb_array" modes.
    - :meth:`close` - releases external resources (pygame windows,
      databases, ...).

    Useful attributes:

    - :attr:`current_observation` - the environment's current observation.
    - :attr:`current_action` - the action most recently taken.
    - :attr:`default_action_space` / :attr:`default_state_space` -
      defaults provided by the environment.
    - :attr:`mode` - "training" or "evaluation".
    """

    def reset(self) -> tuple[ObsType, dict[str, Any]]:
        """Reset the environment to its initial state.

        Returns
        -------
        observation: ObsType
            Observation of the initial state (typically a numpy.ndarray,
            an image, or another observation type).
        info: dict
            Information about the environment.
        """
        raise NotImplementedError

    def close(self) -> None:
        """Clean up any resources once the environment is no longer used."""
        pass

    def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, dict[str, Any]]:
        """Run one timestep of the environment using the agent's action.

        When the end of an episode is reached, :meth:`reset` must be
        called before stepping again.

        Parameters
        ----------
        action: ActType
            The action used to update the environment state.

        Returns
        -------
        observation: ObsType
            The next observation resulting from the action.
        reward: SupportsFloat
            The reward obtained by taking the action.
        terminated: bool
            Whether a terminal state was reached; if True, call
            :meth:`reset`.
        info: dict
            Auxiliary diagnostic information (metrics, debug data,
            hidden state variables, individual reward terms, ...).
        """
        raise NotImplementedError

    def render(self) -> RenderFrame | list[RenderFrame] | None:
        """Compute render frames for the configured render mode.

        The current version supports two modes:

        - ``rgb_array``: a numpy array image of the current state.
        - ``human``: a pygame window.
        """
        raise NotImplementedError

    @property
    def current_observation(self) -> ObsType:
        """The observation at the current timestep."""
        raise NotImplementedError

    @property
    def current_action(self) -> ActType:
        """The action most recently taken by the agent."""
        raise NotImplementedError

    @property
    def default_action_space(self) -> ActType:
        """The default action space provided by the environment."""
        raise NotImplementedError

    @property
    def default_state_space(self) -> ObsType:
        """The default state/observation space provided by the environment."""
        raise NotImplementedError

    @property
    def mode(self) -> str:
        """Current run mode.

        Options:

        - ``training``: uses the training environments and triggers the
          optimization process.
        - ``evaluation``: uses the evaluation environments; no
          optimization is triggered.
        """
        raise NotImplementedError
import os
import math
import pygame
from pathlib import Path
import matplotlib.pyplot as plt
from pydantic import BaseModel, Field
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, SupportsFloat
class CarConfig(BaseModel):
    """Tunable parameters for the car environment (pydantic model)."""

    car_scale: int = Field(default=500)  # sprite scaling factor
    drive_factor: int = Field(default=12)
    car_fps: int = Field(default=15)
    car_angle: int = Field(default=0)  # initial heading, in degrees
    car_rect_size: Tuple[int, int] = Field(default=(200, 50))
    car_velocity_vector: Tuple[float, float] = Field(default=(0.8, 0.0))
    car_rotational_velocity: Union[int, float] = Field(default=15)
    car_direction: Union[int, float] = Field(default=0)
    car_is_alive: bool = Field(default=True)
    # BUG FIX: the default used to be the tuple ``(0,)`` for an ``int``
    # field, which does not match the declared type; the initial reward is 0.
    car_reward: int = Field(default=0)
    car_radar: List[Union[int, float]] = Field(default=[0, 0, 0, 0, 0])
    render_mode: str = Field(default="human", title="Frame rendering mode")
    render_fps: int = Field(
        default=30, title="Frames per second for rendering")
    screen_size: Tuple[int, int] = Field(default=(800, 700))
class CarGame:
def __init__(self, mode: str, track_num: int, reward_function: Optional[Callable] = None, config: Optional[CarConfig] = None):
"""TBD
Args:
mode (str): This indicates in which mode the car environment is in.
It is required for track selection
config (Optional[CarConfig], optional): All the car configurations. Defaults to None.
reward_function (Optional[Callable], optional): The reward function for car's evaluation
"""
self.mode = mode
self.parent_path = str(Path(__file__).parent)
self.metadata = {
'render_modes': ['human', 'rgb_array'],
'modes' : ['training', 'evaluation'],
'render_fps': 30,
'training_car_tracks': {
1: "track-1.png",
2: "track-2.png",
3: "track-3.png",
},
'evaluation_car_tracks': {
1: "track-1.png"
},
'car_image' : 'car.png',
'assets_path': str(os.path.join(self.parent_path, "assets"))
}
self.config = CarConfig() if config is None else config
self.reward_function = self._default_reward_function if reward_function is None else reward_function
assert self.config.render_mode in self.metadata['render_modes']
assert self.mode in self.metadata['modes']
assert (self.mode == "training" and (track_num >= 1 or track_num <= 3)) or (self.mode == "evaluation" and track_num == 1)
# Load car and track path
self.track_options = self.metadata['training_car_tracks'] if mode == "training" else self.metadata['evaluation_car_tracks']
self.track_image_path = os.path.join(
self.metadata['assets_path'], mode, self.track_options[track_num]
).replace(os.sep, '/')
self.car_image_path = os.path.join(self.metadata['assets_path'], self.metadata['car_image'])
# Building Car and Track Paths
self.track_image = pygame.image.load(self.track_image_path)
self.car_image = pygame.transform.scale(
pygame.image.load(
self.car_image_path), (self.config.car_scale, self.config.car_scale),
)
# Building PyGame Screen only if the choosen rendering mode is "human"
self.screen = pygame.display.set_mode(
self.config.screen_size) if self.config.render_mode == "human" else pygame.Surface(self.config.screen_size)
self.screen.fill((0, 0, 0))
# All the basic car configurations
self.angle = self.config.car_angle
self.original_image = self.car_image
self.image = pygame.transform.rotozoom(self.original_image, self.angle, 0.1)
self.rect = self.image.get_rect(center=self.config.car_rect_size)
self.vel_vector = pygame.math.Vector2(self.config.car_velocity_vector)
self.rotation_vel = self.config.car_rotational_velocity
self.direction = self.config.car_direction # current action
self.drive_factor = self.config.drive_factor
self.alive = self.config.car_is_alive # current terminal
self.radars = self.config.car_radar # current state space
self.reward = 0 # current reward
# Additional configuration
self.clock = pygame.time.Clock()
self.track = self.track_image
self.iterations = 0
self.FPS = self.config.car_fps
# current observation space
self.params = {
"is_alive": None,
"observation": None,
"direction": None,
"rotational_velocity": None,
}
self.info = {
'direction' : self.direction,
'rotational_velocity' : self.rotation_vel
}
print("=> Game initialization finished")
def _default_reward_function(self, props: Dict[str, Any]) -> Union[int, float]:
if props["is_alive"]:
return 1
return 0
    def initialize(self) -> Union[List[float], Dict[str, Any]]:
        """
        Reset the car to its configured starting pose and clear per-episode state.

        Mirrors the defaults applied in ``__init__`` so an episode can be
        restarted without rebuilding the pygame assets.

        Returns:
            Tuple of (radars, info): the initial observation (radar readings)
            and the info dict holding direction and rotational velocity.
        """
        self.angle = self.config.car_angle
        self.original_image = self.car_image
        # Re-render the sprite at the starting angle (0.1 is the sprite scale).
        self.image = pygame.transform.rotozoom(self.original_image, self.angle, 0.1)
        self.rect = self.image.get_rect(center=self.config.car_rect_size)
        self.vel_vector = pygame.math.Vector2(self.config.car_velocity_vector)
        self.rotation_vel = self.config.car_rotational_velocity
        self.direction = self.config.car_direction
        self.drive_factor = self.config.drive_factor
        self.alive = self.config.car_is_alive
        self.radars = self.config.car_radar
        self.reward = 0
        self.info['direction'] = self.direction
        self.info['rotational_velocity'] = self.rotation_vel
        return self.radars, self.info
    def _did_quit(self):
        """
        Drain the pygame event queue and shut pygame down if the user
        pressed the window close button.
        """
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
def _did_collide(self):
"""
Checks the status whether the car collied or not. If the car collides,
then `isAlive` is False and game terminates.
TODO:
-----
- This function needs to be checked
"""
length = 20 # parameter to be know n
collision_point_right = [
int(
self.rect.center[0]
+ math.cos(math.radians(self.angle + 18)) * length
),
int(
self.rect.center[1]
- math.sin(math.radians(self.angle + 18)) * length
),
]
collision_point_left = [
int(
self.rect.center[0]
+ math.cos(math.radians(self.angle - 18)) * length
),
int(
self.rect.center[1]
- math.sin(math.radians(self.angle - 18)) * length
),
]
try:
if self.screen.get_at(collision_point_right) == pygame.Color(
173, 255, 133, 255
) or self.screen.get_at(collision_point_left) == pygame.Color(
173, 255, 133, 255
):
self.alive = False
pygame.draw.circle(
self.screen, (0, 255, 255, 0), collision_point_right, 4
)
pygame.draw.circle(
self.screen, (0, 255, 255, 0), collision_point_left, 4
)
except:
self.alive = False
    def _did_rotate(self):
        """
        Apply the current steering direction to the car.

        ``direction == 1`` decreases the angle, ``direction == -1`` increases
        it; the velocity vector is rotated by the opposite sign so motion
        stays consistent with the sprite. The sprite is then re-rendered at
        the new angle around the unchanged centre.
        """
        if self.direction == 1:
            self.angle -= self.rotation_vel
            self.vel_vector.rotate_ip(self.rotation_vel)
        if self.direction == -1:
            self.angle += self.rotation_vel
            self.vel_vector.rotate_ip(-self.rotation_vel)
        # Re-render the rotated sprite; 0.1 is the sprite scale factor.
        self.image = pygame.transform.rotozoom(
            self.original_image, self.angle, 0.1
        )
        self.rect = self.image.get_rect(center=self.rect.center)
def _update_radar(self, i: int, radar_angle: Union[int, float]) -> None:
"""
The Car is made up of 6 radars. At every step this functions updates the radar to get
the current direction and also updates the overall current status of the car.
Args:
i (int): The current index number
radar_angle (Union[int, float]): The current angles in the radar.
"""
length = 0
x = int(self.rect.center[0])
y = int(self.rect.center[1])
try:
while (
not self.screen.get_at(
(x, y)) == pygame.Color(173, 255, 133, 255)
and length < 200
):
length += 1
x = int(
self.rect.center[0]
+ math.cos(math.radians(self.angle + radar_angle)) * length
)
y = int(
self.rect.center[1]
- math.sin(math.radians(self.angle + radar_angle)) * length
)
pygame.draw.line(
self.screen, (255, 255, 255, 255), self.rect.center, (x, y), 1
)
pygame.draw.circle(self.screen, (0, 255, 0, 0), (x, y), 3)
dist = int(
math.sqrt(
math.pow(self.rect.center[0] - x, 2)
+ math.pow(self.rect.center[1] - y, 2)
)
)
self.radars[i] = dist
except:
self.alive = False
    def _drive(self):
        """
        Advance the car's centre along its velocity vector, scaled by the
        configured ``drive_factor`` (pixels moved per step).
        """
        self.rect.center += self.vel_vector * self.drive_factor
    def draw(self) -> None:
        """Blit the track background, then the car sprite, onto the screen."""
        self.screen.blit(self.track, (0, 0))
        self.screen.blit(self.image, self.rect.topleft)
    def timeTicking(self):
        """Cap the frame rate at ``self.FPS`` using the pygame clock."""
        self.clock.tick(self.FPS)
    def get_current_state(self, action: List[int]) -> Dict[str, Any]:
        """
        Apply a one-hot action to the car and return its resulting state.

        Action slots: 0/1 set the steering direction, 2 keeps the heading
        and drives forward, 3/4/5 set speed/turn-rate presets; anything
        else leaves the heading neutral.

        The returned dict has the keys:
            - ``is_alive``: whether the game continues
            - ``observation``: the radar distance readings
            - ``direction``: the car's current steering direction
            - ``rotational_velocity``: the car's current turn rate

        Args:
            action (List[int]): One-hot encoded action of the agent.

        Returns:
            Dict[str, Any]: The current state of the car.
        """
        if action[0] == 1:
            self.direction = -1
        elif action[1] == 1:
            self.direction = 1
        elif action[2] == 1:
            self.direction = 0
            self._drive()
        elif action[3] == 1:
            self.vel_vector.scale_to_length(0.8)
            self.rotation_vel = 15
        elif action[4] == 1:
            self.vel_vector.scale_to_length(1.2)
            self.rotation_vel = 10
        elif action[5] == 1:
            self.vel_vector.scale_to_length(1.6)
            self.rotation_vel = 7
        else:
            self.direction = 0
        self._did_rotate()
        self._did_collide()
        # Cast the five radars fanned across the car's heading.
        for i, radar_angle in enumerate((-60, -30, 0, 30, 60)):
            self._update_radar(i, radar_angle)
        # NOTE(review): the else-branch below resets alive to True, which can
        # overwrite a collision flagged by _did_collide() this frame — confirm.
        if (
            self.radars[0] < 15
            and self.radars[4] < 15
            and self.radars[1] < 25
            and self.radars[2] < 25
            and self.radars[3] < 25
        ):
            self.alive = False
        else:
            self.alive = True
        self.params = {
            "is_alive": self.alive,
            "observation": self.radars,
            "direction": self.direction,
            "rotational_velocity": self.rotation_vel,
        }
        return self.params
    def step(self, action: List[int]) -> List[Any]:
        """
        Play a single step of the game (gym-style interface).

        Args:
            action (List[int]): One-hot encoded action of the agent.

        Returns:
            List[Any]: [observation, cumulative reward, terminated, info]
        """
        self.iterations += 1
        if self.config.render_mode == "human":
            self._did_quit()
        self.draw()
        # Reset radar readings before they are recomputed this frame.
        self.radars = [0, 0, 0, 0, 0]
        self._drive()
        current_state_params = self.get_current_state(action=action)
        current_reward = self.reward_function(current_state_params)
        self.reward += current_reward
        observation = current_state_params['observation']
        # NOTE(review): 'terminated' is assigned is_alive directly (True while
        # the car lives), the opposite of play_step's `not self.alive`
        # convention — confirm which polarity callers expect.
        terminated = current_state_params['is_alive']
        return observation, self.reward, terminated, self.info
    # TODO: To be deprecated in the next major release (use step() instead).
    def play_step(self, action: List[int]) -> List[Any]:
        """
        Play a single step of the game (legacy interface used by the trainer).

        Args:
            action (List[int]): One-hot encoded action of the agent.

        Returns:
            List[Any]: [current_reward, done, cumulative reward], plus the
            rendered pixel array when render_mode is "rgb_array".
        """
        self.iterations += 1
        if self.config.render_mode == "human":
            self._did_quit()
        self.draw()
        # Reset radar readings before they are recomputed this frame.
        self.radars = [0, 0, 0, 0, 0]
        self._drive()
        current_state_params = self.get_current_state(action=action)
        current_reward = self.reward_function(current_state_params)
        self.reward += current_reward
        if self.config.render_mode == "rgb_array":
            pixel_data = pygame.surfarray.array3d(self.screen)
            return current_reward, not self.alive, self.reward, pixel_data
        else:
            return current_reward, not self.alive, self.reward
if __name__ == '__main__':
    # Manual smoke test: spin up a training game on track 1.
    # (Dataset-extraction junk that was fused onto this line has been removed.)
    car_game = CarGame(mode="training", track_num=1)
import os
import json
import torch
import shutil
from .config import CONFIG
from typing import Optional, Union, Any, Dict
class RootMeanSquaredError(torch.nn.Module):
    """Root-mean-squared-error criterion: sqrt(mean((x - y) ** 2))."""

    def __init__(self):
        super(RootMeanSquaredError, self).__init__()

    def forward(self, x, y):
        squared_error = (x - y) ** 2
        return squared_error.mean().sqrt()
class MeanAbsoluteError(torch.nn.Module):
    """Mean-absolute-error criterion: mean(|x - y|)."""

    def __init__(self):
        super(MeanAbsoluteError, self).__init__()

    def forward(self, x, y):
        absolute_error = (x - y).abs()
        return absolute_error.mean()
def get_home_path():
    """Return the absolute path of the current user's home directory."""
    home = os.path.expanduser("~")
    return home
def update_graphing_file(session_id: str, data: dict, dir: Optional[str] = None, name: Optional[str] = None) -> None:
    """Persist live-graphing data for a session as ``temp.json``.

    The previous implementation wrote to a hard-coded developer path
    (``D:/Prototypes/...``), leaked the file handle, and ignored every
    argument; it now writes inside the session's own config folder.

    Args:
        session_id (str): Session sub-folder the data belongs to.
        data (dict): JSON-serializable payload to write.
        dir (Optional[str]): Root directory; defaults to the user's home.
        name (Optional[str]): Config folder name; defaults to
            ``CONFIG['REWARDS_PARENT_CONFIG_DIR']``.
    """
    dir = get_home_path() if dir is None else dir
    name = CONFIG['REWARDS_PARENT_CONFIG_DIR'] if name is None else name
    save_path = os.path.join(dir, name, session_id, "temp.json")
    with open(save_path, "w") as graph_file:
        json.dump(data, graph_file)
def create_folder_struct(dir: Optional[str] = None, name: Optional[str] = None) -> str:
    """Create (if needed) the root folder that stores all session configs.

    Args:
        dir (Optional[str]): Parent directory; defaults to the user's home.
        name (Optional[str]): Folder name; defaults to
            ``CONFIG['REWARDS_PARENT_CONFIG_DIR']``.

    Returns:
        str: Path of the (possibly pre-existing) root folder. The old
        ``-> None`` annotation was wrong: the path was always returned.
    """
    dir = get_home_path() if dir is None else dir
    name = CONFIG['REWARDS_PARENT_CONFIG_DIR'] if name is None else name
    root_path = os.path.join(dir, name)
    if not os.path.exists(root_path):
        os.mkdir(root_path)
    else:
        print("=> Folder already exists")
    return root_path
def create_session(session_name: str, session_root_dir: Optional[str] = None) -> Union[str, bool]:
    """Create a session folder with its model subfolder and empty metrics file.

    Args:
        session_name (str): Name of the session sub-folder to create.
        session_root_dir (Optional[str]): Root config directory; defaults to
            ``~/CONFIG['REWARDS_PARENT_CONFIG_DIR']``.

    Returns:
        Union[str, bool]: The session directory path on success, ``False`` if
        the session already exists. (The old body always returned ``None``
        despite the annotation.)
    """
    session_root_dir = (
        os.path.join(get_home_path(), CONFIG["REWARDS_PARENT_CONFIG_DIR"])
        if session_root_dir is None
        else session_root_dir
    )
    session_dir = os.path.join(session_root_dir, session_name)
    if os.path.exists(session_dir):
        # Previously an existing session was silently ignored; signal it.
        return False
    os.mkdir(session_dir)
    os.mkdir(os.path.join(session_dir, CONFIG["REWARDS_CONFIG_MODEL_FOLDER_NAME"]))
    # Seed an empty metrics file so later readers never hit a missing file.
    with open(os.path.join(session_dir, "metrics.json"), "w") as metrics_file:
        json.dump([], metrics_file)
    return session_dir
def add_inside_session(
    session_id: str, config_name: str, rewrite: bool = False, multi_config: bool = False, **kwargs
):
    """Write keyword arguments as ``<config_name>.json`` inside a session folder.

    Args:
        session_id (str): Session sub-folder inside the root config directory.
        config_name (str): Base name of the JSON file to write.
        rewrite (bool): If True and the file exists, merge into it instead of
            replacing it.
        multi_config (bool): If True the file holds a list of config dicts
            (``[{...}, {...}, ...]``) and new configs are appended; otherwise
            it is a single dict updated in place.
        **kwargs: The configuration key/value pairs to store.
    """
    session_root_dir = os.path.join(get_home_path(), CONFIG['REWARDS_PARENT_CONFIG_DIR'])
    configuration = [dict(kwargs)] if multi_config else dict(kwargs)
    save_path = os.path.join(session_root_dir, session_id, f"{config_name}.json")
    if rewrite and os.path.exists(save_path):
        # ``with`` closes the handle; the old ``json.load(open(...))`` leaked it.
        with open(save_path, "r") as json_file:
            existing_config = json.load(json_file)
        if multi_config:
            existing_config.append(configuration)
        else:
            # NOTE(review): only the first kwarg is merged on rewrite — confirm
            # whether multiple kwargs should all be merged here.
            existing_config[list(configuration.keys())[0]] = list(configuration.values())[0]
        configuration = existing_config
    with open(save_path, "w") as json_file:
        json.dump(configuration, json_file)
def add_dict_inside_session(
    session_id : str, configuration : dict, config_name : str,
    session_root_folder : Optional[str] = None, rewrite : bool = False, multi_config : bool = False
):
    """Write an explicit configuration dict as ``<config_name>.json`` in a session.

    Like :func:`add_inside_session`, but the configuration is passed as a
    dict and the root folder can be overridden.

    Args:
        session_id (str): Session sub-folder inside the root config directory.
        configuration (dict): The configuration payload to store.
        config_name (str): Base name of the JSON file to write.
        session_root_folder (Optional[str]): Parent of the config dir;
            defaults to the user's home.
        rewrite (bool): If True and the file exists, merge into it.
        multi_config (bool): If True the file holds a list of config dicts.
    """
    session_root_dir = os.path.join(
        get_home_path(),
        CONFIG['REWARDS_PARENT_CONFIG_DIR']) if session_root_folder is None else os.path.join(
        session_root_folder, CONFIG['REWARDS_PARENT_CONFIG_DIR']
    )
    save_path = os.path.join(session_root_dir, session_id, f'{config_name}.json')
    if rewrite and os.path.exists(save_path):
        # ``with`` closes the handle; the old ``json.load(open(...))`` leaked it.
        with open(save_path, "r") as json_file:
            existing_config = json.load(json_file)
        if multi_config:
            existing_config.append(configuration)
        else:
            existing_config[list(configuration.keys())[0]] = list(configuration.values())[0]
        configuration = existing_config
    with open(save_path, "w") as json_file:
        json.dump(configuration, json_file)
def get_session_files(session_id: str, session_root_dir: Optional[str] = None) -> Dict[str, Any]:
    """Load every non-metrics JSON config inside a session folder.

    Args:
        session_id (str): The session sub-folder inside the root directory.
        session_root_dir (Optional[str]): Root config directory; defaults to
            ``~/CONFIG['REWARDS_PARENT_CONFIG_DIR']``.

    Returns:
        Dict[str, Any]: Mapping of config-file stem -> parsed JSON contents.
    """
    session_root_dir = (
        os.path.join(get_home_path(), CONFIG['REWARDS_PARENT_CONFIG_DIR'])
        if session_root_dir is None
        else session_root_dir
    )
    session_dir = os.path.join(session_root_dir, session_id)
    all_configs = {}
    for json_file_name in os.listdir(session_dir):
        if json_file_name.endswith(".json") and not json_file_name.startswith("metrics"):
            # ``with`` closes the handle; ``json.load(open(...))`` leaked it.
            with open(os.path.join(session_dir, json_file_name), "r") as json_file:
                all_configs[json_file_name.split(".json")[0]] = json.load(json_file)
    # TODO: Also surface metrics and model configurations for the frontend.
    return all_configs
def get_all_sessions_info(session_root_dir : Optional[str] = None) -> Dict[str, Dict[str, Any]]:
    """
    Collect the configurations of every session under the root directory.

    Args:
        session_root_dir (Optional[str]): Root directory holding all session
            folders; defaults to ``~/CONFIG['REWARDS_PARENT_CONFIG_DIR']``.

    Returns:
        Dict[str, Dict[str, Any]]: Mapping of session id -> its configs.
    """
    session_root_dir = (
        os.path.join(get_home_path(), CONFIG['REWARDS_PARENT_CONFIG_DIR'])
        if session_root_dir is None
        else session_root_dir
    )
    # The old code also built a parallel list of full session paths and
    # zipped it in, but never used the paths; one loop over the ids suffices.
    all_session_infos = {}
    for session_id in os.listdir(session_root_dir):
        all_session_infos[session_id] = get_session_files(
            session_id=session_id, session_root_dir=session_root_dir
        )
    return all_session_infos
def delete_session(session_id : str, session_root_dir : Optional[str] = None):
    """Delete a particular session folder and everything inside it.

    (Dataset-extraction junk fused onto the closing line has been removed.)

    Args:
        session_id (str): Session sub-folder name inside the root directory.
        session_root_dir (Optional[str]): Root config directory; defaults to
            ``~/CONFIG['REWARDS_PARENT_CONFIG_DIR']``.

    Returns:
        dict: ``{'status': 200, ...}`` on success, ``{'status': 500, ...}``
        with the error message otherwise.
    """
    session_root_dir = (
        os.path.join(get_home_path(), CONFIG['REWARDS_PARENT_CONFIG_DIR'])
        if session_root_dir is None
        else session_root_dir
    )
    session_dir = os.path.join(session_root_dir, session_id)
    try:
        shutil.rmtree(session_dir)
        return {
            'status' : 200,
            'message': f'Session {session_dir} deleted succesfully'
        }
    except Exception as e:
        return {
            'status' : 500,
            'message': f'Internal server error {e}'
        }
import os
import torch
import random
import numpy as np
import torch.nn as nn
import torch.optim as optim
from typing import Any, List, Union
from .agent import Agent
from .utils import RootMeanSquaredError, MeanAbsoluteError
class QTrainer(Agent):
    """Deep Q-learning trainer built on top of :class:`Agent`.

    Wires a model, loss function and optimizer together and implements the
    Bellman update plus the short/long (replay) memory training loops.
    """

    def __init__(self, **training_params):
        """Build the trainer from keyword configuration.

        Required keys: ``lr``, ``gamma``, ``epsilon``, ``model``, ``loss``
        ("mse"/"rmse"/"mae"), ``optimizer`` ("adam"/"rmsprop"/"adagrad"),
        ``checkpoint_folder_path`` and ``model_name``.
        """
        self.lr = training_params["lr"]
        self.gamma = training_params["gamma"]
        self.epsilon = training_params["epsilon"]
        # (A leftover debug print of epsilon was removed here.)
        self.model = training_params["model"]
        loss_fn, optimizer_cls = self._get_loss_optimizer_info(
            training_params["loss"], training_params["optimizer"]
        )
        self.criterion = loss_fn()
        self.optimizer = optimizer_cls(self.model.parameters(), lr=self.lr)
        super(QTrainer, self).__init__(
            model=self.model,
            lr=self.lr,
            epsilon=self.epsilon,
            gamma=self.gamma,
            checkpoint_folder_path=training_params['checkpoint_folder_path'],
            model_name=training_params['model_name']
        )

    def _get_loss_optimizer_info(
        self, loss: str, optimizer: str
    ) -> List[Union[int, str, float]]:
        """Resolve loss/optimizer names to their classes.

        Args:
            loss (str): One of "mse", "rmse", "mae".
            optimizer (str): One of "adam", "rmsprop", "adagrad".

        Returns:
            Tuple of (loss class, optimizer class); unknown names raise
            ``KeyError``.
        """
        loss_info = {
            "mse": torch.nn.MSELoss,
            "rmse": RootMeanSquaredError,
            "mae": MeanAbsoluteError,
        }
        optimizer_info = {
            "adam": optim.Adam,
            "rmsprop": optim.RMSprop,
            "adagrad": optim.Adagrad,
        }
        return loss_info[loss], optimizer_info[optimizer]

    def step(
        self,
        state: Any,
        action: Union[np.ndarray, List[Union[float, int]]],
        reward: Union[float, int],
        next_state: Any,
        done: bool,
    ) -> None:
        """Run one Q-learning update over a transition (or batch of them).

        Args:
            state (Any): Current state(s) of the environment.
            action (Union[np.ndarray, List[Union[float, int]]]): One-hot
                action(s) taken by the agent.
            reward (Union[float, int]): Reward(s) the agent received.
            next_state (Any): State(s) after the action was taken.
            done (bool): Whether the episode terminated.
        """
        state = torch.tensor(state, dtype=torch.float32)
        next_state = torch.tensor(next_state, dtype=torch.float32)
        action = torch.tensor(action, dtype=torch.float32)
        reward = torch.tensor(reward, dtype=torch.float32)
        # Promote a single transition to a batch of size 1.
        if len(state.shape) == 1:
            state = torch.unsqueeze(state, dim=0)
            next_state = torch.unsqueeze(next_state, dim=0)
            action = torch.unsqueeze(action, dim=0)
            reward = torch.unsqueeze(reward, dim=0)
            done = (done,)
        state_prediction = self.model(state)
        # Target starts as a copy of the prediction so that only the Q-value
        # of the taken action contributes to the loss.
        state_target = state_prediction.clone()
        for idx in range(len(done)):
            Q_new = reward[idx]
            if not done[idx]:
                # Bellman update: r + gamma * max_a' Q(s', a')
                Q_new = reward[idx] + self.gamma * torch.max(
                    self.model(next_state[idx])
                )
            state_target[idx][torch.argmax(action[idx]).item()] = Q_new
        self.optimizer.zero_grad()
        loss = self.criterion(state_target, state_prediction)
        loss.backward()
        self.optimizer.step()

    def train_long_memory(self) -> None:
        """Replay training: sample up to ``BATCH_SIZE`` stored transitions
        from memory and run one batched :meth:`step` over them."""
        if len(self.memory) > self.BATCH_SIZE:
            mini_sample = random.sample(self.memory, self.BATCH_SIZE)
        else:
            mini_sample = self.memory
        # Transpose the list of transition tuples into parallel batches.
        states, actions, rewards, next_states, dones = zip(*mini_sample)
        self.step(states, actions, rewards, next_states, dones)

    def train_short_memory(self, state, action, reward, next_state, done):
        """Train on a single fresh transition (no replay sampling).

        Arguments have the same meaning as in :meth:`step`, to which this
        simply delegates.
        """
        return self.step(state, action, reward, next_state, done)

    def train_step(self, game):
        """Play one game frame in training mode and learn from it.

        Returns ``[reward, done, score]``, plus the rendered pixel array
        when the game renders to "rgb_array".
        """
        state_old = self.get_state(game)
        final_move = self.get_action(state_old, mode="training")
        if game.config.render_mode == "rgb_array":
            reward, done, score, pixel_data = game.play_step(final_move)
        else:
            reward, done, score = game.play_step(final_move)
        state_new = self.get_state(game)
        self.train_short_memory(state_old, final_move, reward, state_new, done)
        self.remember(state_old, final_move, reward, state_new, done)
        return [reward, done, score, pixel_data] if game.config.render_mode == "rgb_array" else [reward, done, score]

    def evaluate(self, game):
        """Play one game frame greedily (no learning, no memory writes).

        Returns the same structure as :meth:`train_step`.
        """
        state_old = self.get_state(game)
        final_move = self.get_action(state_old, mode="evaluation")
        if game.config.render_mode == "rgb_array":
            reward, done, score, pixel_data = game.play_step(final_move)
        else:
            reward, done, score = game.play_step(final_move)
        return [reward, done, score, pixel_data] if game.config.render_mode == "rgb_array" else [reward, done, score]
# **rewards**
### A low-code SDK for creating custom environments and deep RL agents.
<br>
### **Installation**
**`[linux]`**
Installing `rewards` is easy in linux. First clone the repository by running
```bash
git clone https://github.com/rewards/rewards.git
```
One cloned go to the repository and make sure `make` is installed. If not installed just run:
```bash
sudo apt install make
```
Once done, now create a new virtual environment and install dependencies. You can achieve
this by running the following:
```bash
make virtualenv
make install
```
This should install all the dependencies and our sdk `rewards:v1.0.0`.
<br>
**`[windows]`**
For installation in windows, it's also simple. All you have to do is just clone the repository same as before. Then create a new virtual environment.
```bash
virtualenv .venv
```
Load the virtual environment
```
.\.venv\Scripts\Activate
```
Now go to the repository and install all the dependencies
```bash
python setup.py install
```
<br>
### **Getting started**
**`rewards`** is mainly made for two important reasons.
- First, we want to make learning reinforcement learning easy by introducing this low-code framework, so that folks do not need to spend time building environments and other boilerplate. They can focus on creating different agents and models and experimenting with them.
- We want to make it as interactive and beginner-friendly as possible. That is why we are also introducing **`rewards-platform`**, where we gamify the experience of learning RL.
- If playing games can be fun and competitive then why not RL? Hence with **`rewards-platform`** and **`rewards`** you can host and join ongoing competition and learn RL with your friends.
**NOTE**: Our coming enterprise version is mainly focussed to build the same but for RL/Robotics based
companies where we want to ensure that their focus lies more on the research rather creating environments and other configurations.
**Take a look on how to get started with a sample experiment**
Currently this version of **`rewards`** only supports a single game and environment. That is `car-race`. We will be adding support for more environments (including gym, unity, and custom environments) very soon.
So let's go ahead and see how to get started with a sample experiment.
```python
from rewards import workflow
configs = workflow.WorkFlowConfigurations(
EXPERIMENT_NAME="Exp 3",
MODE="training",
LAYER_CONFIG=[[5, 64], [64, 3]]
)
flow = workflow.RLWorkFlow(configs)
flow.run_single_episode()
```
First you call our sdk's `workflow` module. The workflow module helps us to
- Create Environments and configure environments
- Create models and configure them
- Run the whole experiment and log all the results
All at one place. We first get started with writing our own configuration using
```python
configs = workflow.WorkFlowConfigurations(
EXPERIMENT_NAME="Exp 3",
MODE="training",
LAYER_CONFIG=[[5, 64], [64, 3]]
)
```
**Here is the table of configuration and what they means**
| Configuration Name | TYPE | What it does | Default value | Options |
| ------------------ | --------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| EXPERIMENT_NAME | str | It tells what is the name of the experiment. The name of the experiment will be logged inside user's weights and biases projects dashbord. | sample RL experiment | any string |
| ENVIRONMENT_NAME | str | It states the name of the environment. `rewards:v1.0.0` only supports one environment for now and that is `car-race`. | car-race | NULL |
| ENVIRONMENT_WORLD | int | According to our convention we keep some environments for training and some for testing (which are unseen). At one point of time, you can only train your agent on one single train environment. | 1 | 0/1/2 |
| MODE | str | This tells us which mode the agent is been running i.e. either in train or test mode. | training | training/testing |
| CONTROL SPEED | float | For our car environment user can set the control speed of the car environment. | 0.05 | (0 - 1] |
| TRAIN_SPEED | int | For our car environment user can set the control speed of the car environment. | 100 | 1 - 100 |
| SCREEN_SIZE | Tuple | The size of the pygame window. | (800, 700) | User' choice |
| LR | float | Learning rate | 0.01 | User' choice |
| LOSS | str | Loss function name | mse | mse , rmse, mae |
| OPTIMIZER | str | Optimizer name | adam |adam, rmsprop, adagrad |
| GAMMA | float | Hyper parameter `gamme` value | 0.99 | 0 - 1 |
| EPSILON | float | Hyper parameter `epsilon` value | 0.99 | 0 - 1 |
| LAYER_CONFIG | List[List[int]] | This expects a list of list. Where the inner list will have only two values [input neurons, output neurons]. This configuration will help us to build the neural network for our agent. The first value for the current environment must be 3. | [[5, 64], [64, 3]] | Here user can add more values but the values `5` in the first and `3` in the last must be fixed for this current environment that we are supporting. Example: <br> `[[5, ...], [..., ...], ...., [..., 3]]`, <br> Where `...` can be any value. We recommend to keep it between (1 - 256) |
| CHECKPOINT_PATH | str | The model checkpoint path from where it should be loaded. | `./models` | User's choice |
| REWARD_FUNCTION | Callable | Users are expected to write some reward function (Callable) and then have to use this reward function for agent's training.| ```def default_reward_function(props): if props["isAlive"]: return 1 return 0 ```| User's choice <br> **some important parameters** <br><br> `isAlive` represents whether the car is alive or not. So on that basis we can penalize our agent. <br><br> `obs` The car's radar's oberservations values. (more on documentation) <br> <br>`rotationVel` Car's rotational velocity value (more on documentation) |
So above is a quick overview of how to use different reward configurations. Now once configuration part is done, load those configuration to `RLWorkFlow()` and run for a single episodes.
**NOTE:** Make sure you have `weights and biases` installed. You can install that using:
```bash
pip install wandb
```
After this log in / create a new account. Then authorize it inside the command line by typing
```bash
wandb auth
```
After this you are ready to run the above code:
```python
from rewards import workflow
configs = workflow.WorkFlowConfigurations(
EXPERIMENT_NAME="Exp 3",
MODE="training",
LAYER_CONFIG=[[5, 64], [64, 3]]
)
flow = workflow.RLWorkFlow(configs)
flow.run_single_episode()
```
Here you will be able to see the game, and a very nice dashboard with all the runs, configurations, and graphs. Stay tuned with `rewards.ai` for further updates, documentation and examples.
import inspect
import time
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Union
import pandas as pd
import pygame
import torch
import wandb
from .envs.car import CarGame
from .models import LinearQNet
from .trainer import QTrainer
# TODO: Make a video recording feature (that will record and upload to W&B dashboard once training is complete)
# TODO: Things that is to be tracked in wandb
# - Live metrics of the plots
# - CPU usages (default)
# - All the configurations
# - Once experiment is complete then upload the recorded pygame environment
def default_reward_function(props):
    """Default reward: 1 for every step the car stays alive, 0 otherwise."""
    return 1 if props["isAlive"] else 0
@dataclass(kw_only=True)
class WorkFlowConfigurations:
    """All knobs for one RL experiment run (wandb, env, game, training, model).

    ``LAYER_CONFIG`` and ``REWARD_FUNCTION`` are logically required; they
    default to ``None`` and ``RLWorkFlow`` substitutes built-in fallbacks.
    """

    # wandb experiment
    EXPERIMENT_NAME: str = "sample RL experiment"
    # Environment configuration
    ENVIRONMENT_NAME: str = "car-race"  # only supported env in this version
    ENVIRONMENT_WORLD: Union[str, int] = 1  # which track/world to play
    # Game configuration
    MODE: str = "training"  # "training" or "testing"
    CONTROL_SPEED: float = 0.05
    TRAIN_SPEED: int = 100
    SCREEN_SIZE: Optional[Tuple] = (800, 700)  # pygame window size
    # Training configuration
    LR: float = 0.01
    LOSS: str = "mse"  # "mse" | "rmse" | "mae"
    OPTIMIZER: str = "adam"  # "adam" | "rmsprop" | "adagrad"
    # RL Configuration
    GAMMA: float = 0.99  # discount factor
    EPSILON: float = 0.99  # exploration rate
    # Model configuration
    LAYER_CONFIG: Optional[Union[List[List[int]], torch.nn.Module]] = None  # required in practice
    CHECKPOINT_PATH: Optional[str] = None
    REWARD_FUNCTION: Optional[Callable] = None  # required in practice
class RLWorkFlow:
    """End-to-end RL experiment runner.

    Builds the model, agent, wandb tracking and the game environment from a
    :class:`WorkFlowConfigurations`, then drives the training loop.
    """

    def __init__(
        self, experiment_configuration: Optional[WorkFlowConfigurations] = None
    ) -> None:
        """Set up model, agent, experiment tracking and the game.

        Args:
            experiment_configuration: Experiment settings; defaults to a
                fresh ``WorkFlowConfigurations()``.
        """
        self.config = (
            WorkFlowConfigurations()
            if experiment_configuration is None
            else experiment_configuration
        )
        # Build model: either a user-provided nn.Module or a LinearQNet
        # described by LAYER_CONFIG (fallback: 5 -> 64 -> 3).
        if isinstance(self.config.LAYER_CONFIG, torch.nn.Module):
            self.model = self.config.LAYER_CONFIG
        else:
            self.model = (
                LinearQNet(self.config.LAYER_CONFIG)
                if self.config.LAYER_CONFIG is not None
                else LinearQNet([[5, 64], [64, 3]])
            )
        # Build agent
        self.agent = QTrainer(
            lr=self.config.LR,
            gamma=self.config.GAMMA,
            epsilon=self.config.EPSILON,
            model=self.model,
            loss=self.config.LOSS,
            optimizer=self.config.OPTIMIZER,
            checkpoint_path=self.config.CHECKPOINT_PATH,
        )
        # Resolve the reward function once; it is both logged to wandb (as
        # source text) and passed to the game (the old code repeated the
        # fallback expression in both places).
        reward_func = (
            self.config.REWARD_FUNCTION
            if self.config.REWARD_FUNCTION is not None
            else default_reward_function
        )
        # Upload all configuration values to wandb.
        wandb_config = self.config.__dict__.copy()
        wandb_config["REWARD_FUNCTION"] = inspect.getsource(reward_func)
        if isinstance(self.model, torch.nn.Module):
            # An nn.Module is neither JSON-serializable nor meaningful here.
            wandb_config.pop("LAYER_CONFIG")
            wandb_config.pop("CHECKPOINT_PATH")
        self.run = wandb.init(
            project=self.config.EXPERIMENT_NAME, config=wandb_config
        )
        config_dataframe = pd.DataFrame(
            data={
                "configuration name": list(wandb_config.keys()),
                "configuration": [
                    str(ele) for ele in list(wandb_config.values())
                ],
            }
        )
        config_table = wandb.Table(dataframe=config_dataframe)
        config_table_artifact = wandb.Artifact(
            "configuration_artifact", type="dataset"
        )
        config_table_artifact.add(config_table, "configuration_table")
        self.run.log({"Configuration": config_table})
        self.run.log_artifact(config_table_artifact)
        # Build game.
        # TODO: only the car-race env is supported for now; env/game config
        # will be split out once more (gym/unity/custom) envs are added.
        self.screen = pygame.display.set_mode(
            self.config.SCREEN_SIZE, pygame.HIDDEN
        )
        self.game = CarGame(
            frame=self.screen,
            track_num=self.config.ENVIRONMENT_WORLD,
            reward_func=reward_func,
        )

    def run_single_episode(self):
        """Run the training loop until the pygame window is closed.

        Despite the name this loops over many games: after each crash the
        environment is re-initialised, replay memory is trained, and the
        model checkpoint is saved whenever the score beats the record.
        Per-episode score, mean score and game count are logged to wandb.
        """
        total_score, record = 0, 0
        try:
            while True:
                time.sleep(0.01)
                pygame.display.update()
                self.game.FPS = self.config.TRAIN_SPEED
                reward, done, score = self.agent.train_step(self.game)
                self.game.timeTicking()
                if done:
                    self.game.initialize()
                    self.agent.n_games += 1
                    self.agent.train_long_memory()
                    if score > record:
                        self.agent.model.save()
                        record = score
                    total_score += score
                    if self.agent.n_games != 0:
                        self.run.log(
                            {
                                "episode score": score,
                                "mean score": total_score / self.agent.n_games,
                            }
                        )
                    self.run.log({"Num Games": self.agent.n_games})
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        pygame.quit()
                        break
        except pygame.error:
            print("pygame error")
import os
import random
from collections import deque
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Optional, Union
import numpy as np
import torch
# TODO:
# - Use Hydra for configuration management
@dataclass
class AgentConf:
    """Shared constants for RL agents."""

    MAX_MEMORY: int = 100000  # replay-memory capacity (deque maxlen)
    BATCH_SIZE: int = 1000  # mini-batch size for long-memory training
    PARENT_PATH: str = str(Path(__file__).parent.parent)  # package root dir
    DEVICE: str = "cpu"  # torch device used when loading checkpoints
class Agent(AgentConf):
    """RL agent with replay memory and epsilon-greedy action selection,
    similar in spirit to an OpenAI gym agent."""

    def __init__(
        self,
        model: torch.nn.Module,
        checkpoint_path: Optional[str] = None,
        lr: float = 0.01,
        epsilon: float = 0.25,
        gamma: float = 0.9,
    ) -> None:
        """
        Args:
            model (torch.nn.Module): The agent's policy/value network.
            checkpoint_path (Optional[str]): Checkpoint file to load weights
                from; ``None`` (default) keeps the model's current weights.
            lr (float): Learning rate. Defaults to 0.01.
            epsilon (float): Exploration rate. Defaults to 0.25.
            gamma (float): Discount factor. Defaults to 0.9.
        """
        super(Agent, self).__init__()
        self.n_games = 0
        self.epsilon = epsilon
        self.lr = lr
        self.gamma = gamma
        self.memory = deque(maxlen=self.MAX_MEMORY)
        self.model = model
        if checkpoint_path:
            # (The old single-argument os.path.join() around the path was a no-op.)
            self.model.load_state_dict(
                torch.load(checkpoint_path, map_location=self.DEVICE)
            )
            self.model.eval()

    def get_state(self, game: Any) -> np.ndarray:
        """Return the current state of the game.

        NOTE: Some Assumptions:
        - We assume that the game environment is made using pygame.
        - We also assume that the agent inside the game uses ``radars`` that
          keep track of its position and other parameters.

        Args:
            game (rewards.env.car.CarGame): The current pygame environment.

        Returns:
            np.ndarray: The state of the game — the agent's radar readings.
        """
        # TODO: Check the type of game
        state = game.radars
        return np.array(state, dtype=int)

    def remember(
        self,
        state: np.ndarray,
        action: Union[np.ndarray, List[int]],
        reward: Union[int, float],
        next_state: np.ndarray,
        done: bool,
    ) -> None:
        """Store one (state, action, reward, next_state, done) transition
        in replay memory for later long-memory training.

        (The old ``-> List[...]`` annotation was wrong: nothing is returned.)

        Args:
            state (np.ndarray): The current state of the agent.
            action (Union[np.ndarray, List[int]]): Action taken by the agent.
            reward (Union[int, float]): Reward the agent received.
            next_state (np.ndarray): State reached after taking the action.
            done (bool): Whether the game finished.
        """
        self.memory.append((state, action, reward, next_state, done))

    def get_action(self, state, mode: str = "training"):
        """Pick a one-hot action for ``state`` (epsilon-greedy).

        Args:
            state: Current observation fed to the model.
            mode (str): "training" (default) enables random exploration; any
                other value (e.g. "evaluation") always exploits the model.
                The trainer passes this keyword, which the old signature
                rejected with a TypeError.

        Returns:
            List[int]: One-hot encoded action.
        """
        # NOTE(review): epsilon is hard-coded to 25 here, overriding the
        # value passed to the constructor — confirm this is intended.
        self.epsilon = 25
        # TODO: derive the length from the game's action space.
        final_move = [0, 0, 0]
        if mode == "training" and random.randint(0, 100) < self.epsilon:
            move = random.randint(0, 2)
        else:
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            move = torch.argmax(prediction).item()
        final_move[move] = 1
        return final_move
import os
import random
from typing import Any, List, Union
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from .agent import Agent
# TODO:
# - Move RMSE, MAE to utils module
class RootMeanSquaredError(torch.nn.Module):
    """Root mean squared error loss implemented in PyTorch."""

    def __init__(self):
        super(RootMeanSquaredError, self).__init__()

    def forward(self, x, y):
        # sqrt of the mean squared difference between prediction and target.
        squared_diff = (x - y) ** 2
        return torch.sqrt(squared_diff.mean())
class MeanAbsoluteError(torch.nn.Module):
    """Mean absolute error loss implemented in PyTorch."""

    def __init__(self):
        super(MeanAbsoluteError, self).__init__()

    def forward(self, x, y):
        # Average of the element-wise absolute differences.
        absolute_diff = torch.abs(x - y)
        return absolute_diff.mean()
class QTrainer(Agent):
    """Deep Q-learning trainer built on top of :class:`Agent`.

    Expects ``training_params`` to provide: ``lr``, ``gamma``, ``epsilon``,
    ``model``, ``loss`` ("mse"/"rmse"/"mae"), ``optimizer``
    ("adam"/"rmsprop"/"adagrad") and ``checkpoint_path``.
    """

    def __init__(self, **training_params):
        self.lr = training_params["lr"]
        self.gamma = training_params["gamma"]
        self.epsilon = training_params["epsilon"]
        self.model = training_params["model"]
        # Resolve the textual loss/optimizer names to their classes.
        loss_fn, optimizer_info = self._get_loss_optimizer_info(
            training_params["loss"], training_params["optimizer"]
        )
        self.criterion = loss_fn()
        self.optimizer = optimizer_info(self.model.parameters(), lr=self.lr)
        if training_params["checkpoint_path"]:
            self.model.load_state_dict(
                torch.load(
                    training_params["checkpoint_path"], map_location="cpu"
                )
            )
        super(QTrainer, self).__init__(
            model=self.model,
            lr=self.lr,
            epsilon=self.epsilon,
            gamma=self.gamma,
        )

    def _get_loss_optimizer_info(
        self, loss: str, optimizer: str
    ) -> tuple:
        """Map textual loss/optimizer names to their classes.

        Args:
            loss (str): one of "mse", "rmse" or "mae".
            optimizer (str): one of "adam", "rmsprop" or "adagrad".

        Returns:
            tuple: ``(loss_class, optimizer_class)`` — classes, not instances.

        Raises:
            KeyError: if either name is unknown.
        """
        loss_info = {
            "mse": torch.nn.MSELoss,
            "rmse": RootMeanSquaredError,
            "mae": MeanAbsoluteError,
        }
        optimizer_info = {
            "adam": optim.Adam,
            "rmsprop": optim.RMSprop,
            "adagrad": optim.Adagrad,
        }
        return loss_info[loss], optimizer_info[optimizer]

    def step(
        self,
        state: Any,
        action: Union[np.ndarray, List[Union[float, int]]],
        reward: Union[float, int],
        next_state: Any,
        done: bool,
    ) -> None:
        """Perform one Q-learning update for a transition (or batch of them).

        Args:
            state (Any): the current state of the environment.
            action (Union[np.ndarray, List[Union[float, int]]]): the action
                taken by the agent (one-hot per transition).
            reward (Union[float, int]): the reward that the agent gets.
            next_state (Any): next state after the action taken by the agent.
            done (bool): whether the game terminates or not (a sequence when
                a batch of transitions is given).
        """
        state = torch.tensor(state, dtype=torch.float32)
        next_state = torch.tensor(next_state, dtype=torch.float32)
        action = torch.tensor(action, dtype=torch.float32)
        reward = torch.tensor(reward, dtype=torch.float32)
        if len(state.shape) == 1:
            # Single transition: add a batch dimension of size one so the
            # loop below can treat both cases uniformly.
            state = torch.unsqueeze(state, dim=0)
            next_state = torch.unsqueeze(next_state, dim=0)
            action = torch.unsqueeze(action, dim=0)
            reward = torch.unsqueeze(reward, dim=0)
            done = (done,)
        state_prediction = self.model(state)
        state_target = (
            state_prediction.clone()
        )  # TODO: Why target is the same as prediction?
        # NOTE(review): the clone stays in the autograd graph; most DQN
        # implementations .detach() the target — verify this is intended.
        for idx in range(len(done)):
            # Bellman update: Q_new = r (+ gamma * max_a' Q(s', a') when
            # the transition is not terminal).
            Q_new = reward[idx]
            if not done[idx]:
                Q_new = reward[idx] + self.gamma * torch.max(
                    self.model(next_state[idx])
                )
            # Only the entry of the action actually taken is changed.
            state_target[idx][torch.argmax(action[idx]).item()] = Q_new
        self.optimizer.zero_grad()
        loss = self.criterion(state_target, state_prediction)
        loss.backward()
        self.optimizer.step()

    def train_long_memory(self) -> None:
        """Train on a random batch (up to BATCH_SIZE) from the replay memory.

        Returns:
            None
        """
        if len(self.memory) > self.BATCH_SIZE:
            mini_sample = random.sample(self.memory, self.BATCH_SIZE)
        else:
            mini_sample = self.memory
        # Transpose the list of transitions into per-field batches.
        states, actions, rewards, next_states, dones = zip(*mini_sample)
        self.step(states, actions, rewards, next_states, dones)

    def train_short_memory(self, state, action, reward, next_state, done):
        """Train the agent on a single transition, without memory sampling.

        Args:
            state: the current state of the environment.
            action: the action taken by the agent.
            reward: the reward received for the action.
            next_state: the state reached after taking the action.
            done (bool): whether the episode has finished.

        Returns:
            None: ``step`` performs the update in place.
        """
        return self.step(state, action, reward, next_state, done)

    def train_step(self, game: Any) -> List[Union[int, float, bool]]:
        """
        Defines a single train step for an agent where the agent performs
        some action in a given state to get next state, current rewards, and
        its status of completion

        Args:
            game (Any): The game environment

        Returns:
            ``(reward, done, score)``: the step reward, termination flag and
            current score as reported by ``game.play_step``.
        """
        state_old = self.get_state(game)
        final_move = self.get_action(state_old)
        reward, done, score = game.play_step(final_move)
        state_new = self.get_state(game)
        # Learn from this transition immediately, then store it for replay.
        self.train_short_memory(state_old, final_move, reward, state_new, done)
        self.remember(state_old, final_move, reward, state_new, done)
        return reward, done, score
import os
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class DeepNet(nn.Module):
    """Base class for all models added here; provides checkpoint helpers."""

    def __init__(self) -> None:
        super(DeepNet, self).__init__()

    def save(
        self, filename: str = "model.pth", folder_path: Optional[str] = None
    ) -> None:
        """
        Save the model's state dict to a file.

        Args:
            filename (str, optional): The file name. Defaults to "model.pth".
            folder_path (Optional[str], optional): The folder to save into;
                created if it does not exist. Defaults to "./models".

        Returns:
            None
        """
        if folder_path is None:
            folder_path = "./models"
        # Create the target folder if needed, then save. This is a single
        # code path instead of the previous duplicated if/else branches,
        # and it also creates an explicitly-given folder that is missing.
        os.makedirs(folder_path, exist_ok=True)
        filepath = os.path.join(folder_path, filename)
        torch.save(self.state_dict(), filepath)
        # Previously printed a literal "(unknown)"; report the real path.
        print(f"=> model saved as: {filepath}")

    def load(self, filename: str, folder_path: Optional[str] = None) -> None:
        """
        Load the model's state dict from a file and switch to eval mode.

        Args:
            filename (str): The file name (full path when folder_path is None).
            folder_path (Optional[str], optional): The folder containing the
                file. Defaults to None.

        Returns:
            None
        """
        # Resolve the path before the try block so the error message below
        # can always reference it (previously a NameError was possible if
        # path resolution itself raised).
        model_path = (
            filename
            if folder_path is None
            else os.path.join(folder_path, filename)
        )
        try:
            self.load_state_dict(torch.load(model_path, map_location="cpu"))
            self.eval()
        except Exception as e:
            print(e)
            print(f"=> model not found at: {model_path}")
class LinearQNet(DeepNet):
    """A simple stack of fully connected layers for the agent model."""

    def __init__(self, layers_conf: List[List[int]]):
        """Build the network.

        Args:
            layers_conf (List[List[int]]): one ``[in_dim, out_dim]`` pair per
                layer, e.g. ``[[in_dim, hidden], [hidden, out_dim]]``.
        """
        super(LinearQNet, self).__init__()
        self.layers_conf = layers_conf
        self.num_layers = len(layers_conf)
        self.layers = nn.ModuleList(
            nn.Linear(in_dim, out_dim) for in_dim, out_dim in layers_conf
        )

    def forward(self, x):
        # ReLU between hidden layers; the last layer stays linear (Q-values).
        for layer in self.layers[:-1]:
            x = F.relu(layer(x))
        return self.layers[-1](x)
"""Network clients used to communicate with the Rewind server."""
import logging
import zmq
logger = logging.getLogger(__name__)
class QueryException(Exception):
    """Raised when the rewind server returns an error.

    Usually this means a non-existing query key was used.
    """
def query_events(socket, from_=None, to=None):
    """Yield a queried range of events.

    Parameters:
        socket -- ZeroMQ socket to use. It must be previously connected to
                  a Rewind instance and of type REQ.
        from_  -- the (optional) event id for the (chronologically) earliest
                  end of the range. It is exclusive. If not specified, or
                  None, all events from beginning of time are queried for.
        to     -- the (optional) event id for the (chronologically) latest
                  end of the range. It is exclusive. If not specified, or
                  None, all events up to the latest event seen are queried
                  for.

    Raises `QueryException` if a query failed. Usually this is raised
    because a given `from_` or `to` does not exist in the event store.

    Yields `(eventid, eventdata)` tuples.
    """
    assert from_ is None or isinstance(from_, bytes)
    assert to is None or isinstance(to, bytes)
    seen_first = False
    while True:
        # _real_query(...) hands events over in small batches.
        done, events = _real_query(socket, from_, to)
        for eventid, eventdata in events:
            if not seen_first:
                assert eventid != from_, "First message ID wrong"
                seen_first = True
            # Continue the next batch right after the last event seen.
            from_ = eventid
            yield (eventid, eventdata)
        if done:
            break
def _real_query(socket, from_, to):
    """Make the actual query for events.

    Since the Rewind streams events in batches, this method might not
    receive all requested events.

    Parameters:
        socket -- ZeroMQ REQ socket connected to a Rewind instance.
        from_  -- optional exclusive lower-bound event id, or None.
        to     -- optional upper-bound event id, or None.

    Returns the tuple `(done, events)` where
     * `done` is a boolean whether the limited query result reached the
       end, or whether there's more events that need to be collected.
     * `events` is a list of `(eventid, eventdata)` event tuples where
      * `eventid` is a unique string the signifies the event; and
      * `eventdata` is a byte string containing the serialized event.
    """
    assert from_ is None or isinstance(from_, bytes), type(from_)
    assert to is None or isinstance(to, bytes), type(to)
    # Request frames, in order: b'QUERY', <from_ or empty>, <to or empty>.
    socket.send(b'QUERY', zmq.SNDMORE)
    socket.send(from_ if from_ else b'', zmq.SNDMORE)
    socket.send(to if to else b'')
    more = True
    done = False
    events = []
    while more:
        data = socket.recv()
        if data == b"END":
            # Server signals the full query result has been delivered.
            assert not socket.getsockopt(zmq.RCVMORE)
            done = True
        elif data.startswith(b"ERROR"):
            # An error frame is always the final frame of the response.
            assert not socket.getsockopt(zmq.RCVMORE)
            raise QueryException("Could not query: {0}".format(data))
        else:
            # Event frames arrive in (id, data) pairs.
            eventid = data
            assert isinstance(eventid, bytes), type(eventid)
            assert socket.getsockopt(zmq.RCVMORE)
            eventdata = socket.recv()
            eventtuple = (eventid, eventdata)
            events.append(eventtuple)
        # Stop once the multipart response has no further frames.
        if not socket.getsockopt(zmq.RCVMORE):
            more = False
    return done, events
def _get_single_streamed_event(streamsock):
    """Retrieve a streamed event off a socket.

    Parameters:
        streamsock -- the stream socket to be reading from.

    Returns a tuple consisting of:
        eventid -- the ID of the streamed event
        lasteventid -- the ID of the previous streamed event. Can be empty for
                       the first event (which pretty much never happens)
        eventdata -- the (serialized) data for the event.
    """
    # The stream protocol sends three frames per event, in this exact
    # order: event id, previous event id, event payload.
    eventid = streamsock.recv()
    assert streamsock.getsockopt(zmq.RCVMORE)
    lasteventid = streamsock.recv()
    assert streamsock.getsockopt(zmq.RCVMORE)
    eventdata = streamsock.recv()
    # The payload must be the final frame of the multipart message.
    assert not streamsock.getsockopt(zmq.RCVMORE)
    return eventid, lasteventid, eventdata
def yield_events_after(streamsock, reqsock, lasteventid=None):
    """Generator that yields all the missed out events.

    Parameters:
        streamsock  -- stream socket to read live events from.
        reqsock     -- ZeroMQ REQ socket connected to a Rewind instance,
                       used to query for events missed on the stream.
        lasteventid -- the event id of the last seen event.

    TODO: Handle when there is no lasteventid.
    """
    assert lasteventid is None or isinstance(lasteventid, bytes)
    funclogger = logger.getChild('yield_events_after')
    cureventid, preveventid, evdata = _get_single_streamed_event(streamsock)
    # If the streamed event's predecessor is not the last event we saw,
    # events were dropped in between; query the gap explicitly.
    if preveventid != lasteventid and preveventid != b'':
        # Making sure we did not reach high watermark inbetween here.
        msg = ('Seem to have reached high watermark. Doing manually querying'
               ' to catch up.')
        funclogger.info(msg)
        for qeventid, qeventdata in query_events(reqsock, lasteventid,
                                                 preveventid):
            # Note that this for loop's last event will be preveventid since
            # its last element is inclusive.
            yield qeventid, qeventdata
    # Finally emit the event that arrived on the stream itself.
    yield cureventid, evdata
def publish_event(socket, event):
    """Publish a new event to Rewind.

    Parameters:
        socket -- a ZeroMQ REQ socket connected to a Rewind instance.
        event -- event to be published. Is instance of bytes.
    """
    assert isinstance(event, bytes), type(event)
    # Request frames, in order: b'PUBLISH', then the serialized event.
    socket.send(b'PUBLISH', zmq.SNDMORE)
    socket.send(event)
    # Rewind acknowledges with a single-frame b'PUBLISHED' response.
    response = socket.recv()
    assert response == b'PUBLISHED'
    assert not socket.getsockopt(zmq.RCVMORE)
=======
Rewind
=======
.. image:: https://secure.travis-ci.org/JensRantil/rewind.png?branch=develop
:target: http://travis-ci.org/#!/JensRantil/rewind
Have you ever been nervous of all those DBMSs schema changes when you
are deploying your applications? They are gonna take too long, or break
backward compatibility? Have you ever thought "Crap, I wish I had stored
that information since earlier"? Have you ever felt your writing
patterns and your reading patterns differ a lot, making things harder to
scale?
CQRS (Command-Query Response Segregation) is an architectural pattern
that aims to solve these issues by splitting up your architectural
system into two parts:
* A *write side* that takes care of validating input and optimizes for
fast writes. The write side takes commands and outputs corresponding
events if the command validates correctly.
* A *read side* that listens to incoming events from the write side. The
read side is optimized for fast reads.
A core concept in CQRS is the *event store* which sits inbetween the
write and the read side. The event store takes care of three things:
* persisting all events to disk.
* being a hub/broker replicating all events from the write to the read
side of things.
* it allows fast querying of events so that different parts of the system
can be synced back on track and new components can be brought back in
play.
*Rewind* is an event store application that talks ZeroMQ. It is written
in Python and supports multiple storage backends.
Installing
==========
PyPi
----
Rewind `exists on PyPi`_ and can be downloaded by issuing::
$ pip install rewind
.. _exists on PyPi: http://pypi.python.org/pypi/rewind/
Manual install
--------------
Rewind uses basic ``setuptools``. Installation can be used done as
follows::
$ git clone https://github.com/JensRantil/rewind.git
$ cd rewind
$ python setup.py install
However **NOTE**, that this will install Rewind globally in your Python
environment and is NOT recommended. Please refer to virtualenv_ on how to
create a virtual environment.
.. _virtualenv: http://www.virtualenv.org
Talking to `rewind`
===================
There is a preexisting Python client package, `rewind-client`_. If you
are not writing a new client you might want to skip the following
section below.
.. _rewind-client: https://github.com/JensRantil/rewind-client
Rewind has two different wire protocols. Each is using ZeroMQ as low
level transport protocol. Each wire protocol has one single ZeroMQ
endpoint in Rewind:
* **A request/response socket for Rewind.** It is used for publishing
new events and querying chronological slices of all events throughout
time.
* **A streaming socket.** It is used by all clients that are interested
in all new incoming events.
Each endpoint is configurable through command line when starting
``rewind``. Issue ``rewind --help`` to get a list of the specific
command line arguments ``rewind`` can handle.
*Note that the wire protocol is still under heavy development. Pull
requests and improvement proposals are welcome!*
Request/response socket
-----------------------
The socket for querying Rewind is the one which has the most advanced
wire protocol. The socket is of type REP and takes commands. A typical
conversation between a client (C) and Rewind (R) looks like this::
C: Request
R: Response
C: Request
R: Response
...
Request types
`````````````
Each request is a multipart message. The first part is a string that
specifies the type of request. There are multiple request types:
PUBLISH
'''''''
Used for publishing an event. The next message part is a blob of bytes
that is supposed to be a serialized event of some form. Rewind does not
know anything about the serialization format. It always simply stores
the bytes.
Each new incoming/published event triggers that it is to be streamed out
to all listening clients.
On successful reception of an event, Rewind responds with the ASCII
bytes ``PUBLISHED``. See "Error response" below for error response.
QUERY
'''''
Used for querying for older events. For the ``QUERY`` request type the
next two message parts must be:
* Contains an optional event id, or an empty part. Restricts the
earliest (chronologically) incoming message that we are interested in
to all messages received after the event with the specified event id.
Note that this does not include the message with the specified event
id. If this part of the message is empty, no lower restriction is made
and messages will be returned starting from the first event ever seen.
* Contains an optional event id, or an empty part. Restricts the latest
(chronologically) incoming message that we are interested in to all
messages received before, or including, the event with the specified
event id. If this part of the message is empty, no upper restriction
is made and messages will be returned up to the latest event ever
seen.
If you are a data structure type-of-guy you could view Rewind as a
distributed insert-ordered map (event id => event) that allows querying
of ranges of events based on event ids.
There are two types of responses that can be given upon a query:
* An error. See "Error response" below.
* The response of an event query is a resultset containing events. It's
a multipart message containing frames like so; eventid #1, event #1,
eventid #2, event #2, eventid #3, event #3, ... where eventid #X is
the event id for event X. At most 100 messages will be returned. If
Rewind did not cap the number of events, the result will be appended with a
last frame containing the ASCII content ``END``. It is up to the
client to make requests repeatedly if the result set is capped.
Error response
``````````````
If anything goes wrong, a single message starting with the ASCII text
``ERROR`` will be sent with the response. This means an error occured.
The rest of message contains a human readable description of the actual
error that occured. This information can be highly useful for remote
clients to debug any problems that might arise.
Event stream
------------
Every incoming event gets broadcast to all sockets connected to the
streaming socket. The streaming socket a ZeroMQ socket of type PUB.
Every message received automatically gets assigned a unique event id
(UUID, type 1) by Rewind. This event id is used for querying events (see
below). Each sent message from the streaming is a multipart message that
consists of two parts:
1. The event ID. The client should view this as a series of bytes.
2. The previous event ID. This information is useful to know whether
ZeroMQ high-water mark kicked in while syncing up a client while
querying for older events. If streaming has just begun, this message
part can be empty and can thus be ignored.
3. The event content. This is the exact same bytes that were
correspondingly sent to the receiving socket.
Developing
==========
Getting started developing `rewind` is quite straightforward. The
library uses `setuptools` and standard Python project layout for tests
etcetera.
Checking out
------------
To start developing you need to install the ZeroMQ library on your system
beforehand.
This is how you check out the `rewind` library into a virtual environment::
cd <your development directory>
virtualenv --no-site-packages rewind
cd rewind
git clone http://<rewind GIT URL> src
Workin' the code
----------------
Every time you want to work on `rewind` you want to change directory
into the source folder and activate the virtual environment scope (so
that you don't touch the global Python environment)::
cd src
source ../bin/activate
The first time you've checked the project out, you want to initialize
development mode::
python setup.py develop
Runnin' them tests
------------------
Running the test suite is done by issuing::
python setup.py nosetests
Nose is configured to automatically print test coverage information
after the whole test suite has been executed.
As always, try to run the test suite *before* starting to mess with the
code. That way you know nothing was broken beforehand.
`The Rewind central github repository`_ also has `Travis CI`_
integration that can be accessed at
http://travis-ci.org/#!/JensRantil/rewind. Every time a pull request is
made to https://github.com/JensRantil/rewind, Travis CI will make
a comment about whether the pull request breaks the test suite or not.
.. _The Rewind central github repository: https://github.com/JensRantil/rewind
.. _Travis CI: http://travis-ci.org
Helping out
===========
Spelling mistakes, bad grammar, new storage backends, wire format
improvements, test improvements and other feature additions are all
welcome. Please issue pull requests or create an issue if you'd like to
discuss it on Github.
Why the name "Rewind"?
======================
Pick and choose:
* Rewind can look at what happened in the past and replay the events
since then.
* It's time to rewind and rethink the way we are overusing DBMS's and
the way we are storing our data.
Author
======
This package has been developed by Jens Rantil <jens.rantil@gmail.com>.
You can also reach me through snailmail at::
Jens Rantil
Lilla Södergatan 6A
22353 Lund
SWEDEN
| /rewind-0.3.1.tar.gz/rewind-0.3.1/README.rst | 0.806319 | 0.818156 | README.rst | pypi |
from warnings import warn
from werkzeug.datastructures import ImmutableMultiDict
class dictobj(dict):
    """ a dict-like object:
    * whose values can also be get/set using the `obj.key` notation
    * object[key] returns None if the key is not known
    """

    def __getattr__(self, name):
        # Delegates to __getitem__, so unknown attributes resolve to None.
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __getitem__(self, name):
        # Missing keys yield None instead of raising KeyError.
        if name not in self:
            return None
        return super(dictobj, self).__getitem__(name)

    def copy(self):
        # Rebuild through the subclass so copies keep the dictobj behaviour.
        return self.__class__({key: self[key] for key in self})
class argsdict(dictobj):
    # Subclasses may override these class-level mappings.
    types = {}      # key -> type or factory used to coerce values
    defaults = {}   # key -> scalar or callable producing the default

    def __init__(self, reqargs=None, defaults=None, types=None):
        """ transforms the request args (or any such dict) for convenience :
        * be a malleable dictobj (whose missing attrs/keys default to None)
        * set the default values (if any, defaults is a mapping from keys
          to a scalar or a callable)
        * coerce to the wanted types (if any, types is a mapping from keys
          to a type or factory function)
        """
        super(argsdict, self).__init__()
        if reqargs is None:  # copy constructor
            return
        if not isinstance(reqargs, ImmutableMultiDict):
            # Plain mapping: copy entries verbatim, then apply defaults.
            for k, v in reqargs.items():
                self[k] = v
            self._set_defaults(defaults)
            return
        defaults = defaults or self.defaults
        types = types or self.types
        for key, val in reqargs.to_dict(flat=False).items():
            # when sending json, sequence attributes land as `<attribute>[]`
            islist = key.endswith('[]')
            if islist:
                # Strip the exact '[]' suffix. The previous rstrip('[]')
                # removed *any* trailing '[' / ']' characters, which mangled
                # keys such as 'a[0]' or keys merely ending in a bracket.
                key = key[:-2]
            targettype = types.get(key)
            # signal if there is any discrepancy and force to tuple
            if islist and targettype not in (list, tuple):
                warn('element %r is a sequence but its expected type is %r' %
                     (key, targettype))
                targettype = tuple
            # val can be an str or a sequence of strs
            # hence `not filter(None, val)` gets us all
            # the falsy values ('', [''])
            if not list(filter(None, val)):  # py3k: force to list
                # no value -> default value
                default = defaults.get(key)
                self[key] = default() if callable(default) else default
            else:
                self[key] = val if targettype in (list, tuple) else val[0]
                # type coercion
                if targettype:
                    self[key] = targettype(self[key])
        self._set_defaults(defaults)

    def _set_defaults(self, defaults=None):
        """Fill in entries for keys not already present, using ``defaults``
        (or the class-level defaults); callables are invoked lazily."""
        defaults = defaults or self.defaults
        # complete entries with mandatory defaults
        for key, val in defaults.items():
            if key not in self:
                self[key] = val() if callable(val) else val

    def copy(self):
        # Copy key-by-key to avoid re-running the parsing constructor.
        new = self.__class__()
        for k in self:
            new[k] = self[k]
        return new
import analyzing_data_slice_win_design
import global_data
import numpy as np
import ctypes
from PyQt5 import QtWidgets
class AnalyzingDataSliceWin(QtWidgets.QMainWindow, analyzing_data_slice_win_design.Ui_MainWindow):
    def __init__(self):
        """Set up the ADC data-analysis window: build the UI, freeze the
        window size to the layout's size hint and wire up the run button."""
        super().__init__()
        self.setupUi(self)
        # Fix the window size so the laid-out dialog cannot be resized.
        self.setFixedSize(self.gridLayout_2.sizeHint())
        self.setWindowTitle("Анализ данных АЦП")
        self.__init_tools()
        self.pushButton_RunHandling.clicked.connect(self.__setting_set)
    def __init_tools(self):
        """Initialise widget limits and initial values from the loaded data."""
        self.label_DataSizeVal.setText(str(len(global_data.voltage_arr)))
        # Total measurement time in seconds, derived from the sample count.
        measure_time = (len(global_data.voltage_arr) - 1) / global_data.sample_freq
        # At most 40% of the points of one channel may be thrown away.
        max_thrown_points = round(global_data.points_on_channel * 0.4)
        self.spinBox_ThrownPoints.setMaximum(max_thrown_points)
        self.spinBox_ThrownPoints.setMinimum(0)
        self.spinBox_ThrownPoints.setValue(min(3, max_thrown_points))
        self.label_MeasureTimeVal.setText(str(measure_time))
        # Analysis start/end selectors span the full [0, measure_time] range.
        self.spinBox_AnalyzingTStart.setDecimals(3)
        self.spinBox_AnalyzingTStart.setMinimum(0)
        self.spinBox_AnalyzingTStart.setMaximum(measure_time)
        self.spinBox_AnalyzingTStart.setValue(0)
        self.spinBox_AnalyzingTEnd.setDecimals(3)
        self.spinBox_AnalyzingTEnd.setMinimum(0)
        self.spinBox_AnalyzingTEnd.setMaximum(measure_time)
        self.spinBox_AnalyzingTEnd.setValue(measure_time)
        # self.sliderAnalyzingTime.setMaximum(measure_time * 1000)
        # self.sliderAnalyzingTime.setSliderPosition((0.0, measure_time * 1000))
    def __setting_set(self):
        """Store the selected analysis interval in global_data, run the
        binary-file processing and close the window."""
        # Normalise so that start <= end regardless of how the user filled
        # the two spin boxes, then reflect the normalised values back.
        global_data.analyzing_time_start = min(self.spinBox_AnalyzingTStart.value(), self.spinBox_AnalyzingTEnd.value())
        global_data.analyzing_time_end = max(self.spinBox_AnalyzingTStart.value(), self.spinBox_AnalyzingTEnd.value())
        self.spinBox_AnalyzingTStart.setValue(global_data.analyzing_time_start)
        self.spinBox_AnalyzingTEnd.setValue(global_data.analyzing_time_end)
        # global_data.analyzing_time_start, global_data.analyzing_time_end = self.sliderAnalyzingTime.sliderPosition()
        # global_data.analyzing_time_start /= 1000
        # global_data.analyzing_time_end /= 1000
        global_data.data_handle_success = self.__handle_bin_file()
        self.close()
def __tools_in_handle_mode(self):
self.pushButton_RunHandling.setEnabled(False)
self.spinBox_ThrownPoints.setEnabled(False)
self.spinBox_AnalyzingTStart.setEnabled(False)
self.spinBox_AnalyzingTEnd.setEnabled(False)
#self.sliderAnalyzingTime.setEnabled(False)
QtWidgets.QApplication.processEvents()
def __handle_bin_file(self):
self.__tools_in_handle_mode()
handle_success = False
sample_freq_Hz = global_data.sample_freq
one_support_signal_time_duration = global_data.one_channel_signal_duration
points_to_throw_from_start_end = self.spinBox_ThrownPoints.value()
measure_time = (len(global_data.voltage_arr) - 1) / global_data.sample_freq
voltage_arr_start_ind = max(0, round(global_data.analyzing_time_start / measure_time * len(global_data.voltage_arr)))
voltage_arr_end_ind = min(len(global_data.voltage_arr), round(global_data.analyzing_time_end / measure_time * len(global_data.voltage_arr)))
global_data.voltage_arr = global_data.voltage_arr[voltage_arr_start_ind:voltage_arr_end_ind]
global_data.analyzing_time_start = voltage_arr_start_ind / global_data.sample_freq
global_data.analyzing_time_end = voltage_arr_end_ind / global_data.sample_freq
# Step 1.0 - create ideal support signal
data_size = len(global_data.voltage_arr)
global_data.channels_arr = np.zeros((data_size), dtype=np.int8) # 1,2,3,4 - support channels; 5,6,7,8 - lamels channels; 0 - thrown
global_data.channel_average_val_arr = np.full((data_size), -1.0, dtype=global_data.voltage_arr.dtype)
channel_size = round(sample_freq_Hz * one_support_signal_time_duration)
ideal_support_signal = np.zeros((4 * channel_size), dtype=global_data.voltage_arr.dtype)
for i in range(len(ideal_support_signal)):
if 0 <= i < len(ideal_support_signal) / 4:
ideal_support_signal[i] = 0.0
elif len(ideal_support_signal) / 4 <= i < len(ideal_support_signal) / 2:
ideal_support_signal[i] = 2.5 / 4
elif len(ideal_support_signal) / 2 <= i < 3 * len(ideal_support_signal) / 4:
ideal_support_signal[i] = 2.5 / 2
else:
ideal_support_signal[i] = 3 * 2.5 / 4
# Step2 - cross-correlation search to find support signal areas
self.label_status.setText("Статус: поиск опорных каналов в сигнале")
QtWidgets.QApplication.processEvents()
support_signal_areas = []
try:
ideal_support_signal_mean = np.mean(ideal_support_signal)
shifted_ideal_support_signal = ideal_support_signal - ideal_support_signal_mean
shifted_ideal_support_signal = np.flip(shifted_ideal_support_signal)
shifted_voltage_arr = global_data.voltage_arr - ideal_support_signal_mean
cross_corell_res = np.convolve(shifted_voltage_arr, shifted_ideal_support_signal, mode="valid")
max_cross_corell_val = np.max(cross_corell_res)
support_signal_areas = np.where(cross_corell_res >= 0.8 * max_cross_corell_val, 1.0, 0.0)
support_signal_areas_tail = np.full((data_size - len(support_signal_areas)), 1.0)
support_signal_areas = np.concatenate((support_signal_areas, support_signal_areas_tail))
except:
ctypes.windll.user32.MessageBoxW(0, "Ошибка в процессе применения кросс-корелляции для поиска опорного сигнала",
"Program error", 1)
return handle_success
# Step3 - evaluate channels count
self.label_status.setText("Статус: оценка кол-ва каналов")
QtWidgets.QApplication.processEvents()
channels_count = 8
try:
support_signals_starts = []
is_zero_seq = False
for i in range(30 * len(ideal_support_signal)):
if support_signal_areas[i] == 0:
is_zero_seq = True
else:
if is_zero_seq:
support_signals_starts.append(i)
is_zero_seq = False
signal_periods = [abs(support_signals_starts[i - 1] - support_signals_starts[i]) for i in
range(1, len(support_signals_starts))]
signal_period = np.median(signal_periods)
channels_count = round(4.0 * signal_period / len(ideal_support_signal))
if channels_count < 4:
ctypes.windll.user32.MessageBoxW(0, "Слишком мало каналов данных: " + str(channels_count),
"Program error", 1)
return handle_success
except:
return handle_success
if channels_count != global_data.channels_count:
ctypes.windll.user32.MessageBoxW(0, "Заявлено " + str(global_data.channels_count) +
" канала. Определено при анализе сигнала: " + str(channels_count),
"Program error", 1)
return handle_success
# Step4 - find first best support signal match
self.label_status.setText("Статус: поиск первого опорного сигнала")
QtWidgets.QApplication.processEvents()
best_match = 10 ** 10
best_match_ind = -1
for i in range(round(3 * len(ideal_support_signal) * channels_count / 4)):
if i + len(ideal_support_signal) > data_size:
break
cur_analyzing_subsignal = global_data.voltage_arr[i: i + len(ideal_support_signal)]
cur_diff_with_ideal_signal = (ideal_support_signal - cur_analyzing_subsignal)
cur_match = np.sum(cur_diff_with_ideal_signal * cur_diff_with_ideal_signal)
if best_match_ind < 0 or cur_match < best_match:
best_match = cur_match
best_match_ind = i
best_match_ind = max(0, best_match_ind)
# Step5 - data separation by channels
expect_average_data_size = round(1.5 * data_size / (channels_count * global_data.sample_freq * global_data.one_channel_signal_duration))
global_data.all_channels_average_vals = np.zeros((channels_count, expect_average_data_size), dtype=global_data.voltage_arr.dtype)
global_data.all_channels_average_time = np.zeros((expect_average_data_size))
self.label_status.setText("Статус: распределение по каналам (0 %)")
QtWidgets.QApplication.processEvents()
handled_data_percents_shown = np.full((100), False, dtype = np.bool_)
try:
first_periods_count = int((len(list(range(best_match_ind, -1, -1 * channel_size))) - 1) / channels_count)
counter = first_periods_count
cur_channel = channels_count
for i in range(best_match_ind, -1, -1 * channel_size):
ch_start = max(0, i - channel_size + points_to_throw_from_start_end)
ch_end = i - points_to_throw_from_start_end
if ch_end > ch_start:
global_data.channels_arr[ch_start: ch_end] = cur_channel
ch_mean = np.mean(global_data.voltage_arr[ch_start: ch_end])
global_data.channel_average_val_arr[ch_start: ch_end] = ch_mean
if counter > 0:
global_data.all_channels_average_vals[cur_channel - 1][counter - 1] = ch_mean
if cur_channel == 1:
global_data.all_channels_average_time[counter - 1] = global_data.analyzing_time_start + max(0, i - channel_size) / sample_freq_Hz
counter -= 1
cur_channel = channels_count if cur_channel == 1 else (cur_channel - 1)
real_average_data_size = first_periods_count
start_period_ind = best_match_ind
while True:
if start_period_ind == -1 or start_period_ind >= data_size:
break
expected_next_start_period_ind = (start_period_ind + channels_count * channel_size)
cur_handled_data_percent = np.clip(round(100.0 * expected_next_start_period_ind / data_size), 0, 99)
if not handled_data_percents_shown[cur_handled_data_percent]:
handled_data_percents_shown[cur_handled_data_percent] = True
self.label_status.setText("Статус: распределение по каналам (" + str(cur_handled_data_percent) + " %)")
QtWidgets.QApplication.processEvents()
next_start_period_ind = -1 if expected_next_start_period_ind >= data_size else expected_next_start_period_ind
if next_start_period_ind != -1:
cur_best_match_ind = next_start_period_ind
for i in range(max(0, next_start_period_ind - 2), min(next_start_period_ind + 3, data_size)):
if support_signal_areas[i] > 0.5 and abs(global_data.voltage_arr[cur_best_match_ind]) > abs(global_data.voltage_arr[i]):
cur_best_match_ind = i
next_start_period_ind = cur_best_match_ind
if real_average_data_size < expect_average_data_size:
global_data.all_channels_average_time[real_average_data_size] = global_data.analyzing_time_start + start_period_ind / sample_freq_Hz
for i in range(channels_count - 1):
ch_start = min(start_period_ind + i * channel_size + points_to_throw_from_start_end, data_size)
ch_end = min(start_period_ind + (i + 1) * channel_size - points_to_throw_from_start_end, data_size)
if ch_end > ch_start:
global_data.channels_arr[ch_start: ch_end] = i + 1
ch_mean = np.mean(global_data.voltage_arr[ch_start: ch_end])
global_data.channel_average_val_arr[ch_start: ch_end] = ch_mean
if real_average_data_size < expect_average_data_size:
global_data.all_channels_average_vals[i][real_average_data_size] = ch_mean
last_ch_start = min(start_period_ind + (channels_count - 1) * channel_size + points_to_throw_from_start_end, data_size) if next_start_period_ind > 0 else min(start_period_ind + (channels_count - 1) * channel_size + points_to_throw_from_start_end, data_size)
last_ch_end = next_start_period_ind - points_to_throw_from_start_end if next_start_period_ind > 0 else min(start_period_ind + channels_count * channel_size - points_to_throw_from_start_end, data_size)
if last_ch_end > last_ch_start:
global_data.channels_arr[last_ch_start: last_ch_end] = channels_count
last_ch_mean = np.mean(global_data.voltage_arr[last_ch_start: last_ch_end])
global_data.channel_average_val_arr[last_ch_start: last_ch_end] = last_ch_mean
if real_average_data_size < expect_average_data_size:
global_data.all_channels_average_vals[channels_count - 1][real_average_data_size] = last_ch_mean
real_average_data_size += 1
start_period_ind = next_start_period_ind
global_data.all_channels_average_time.resize(real_average_data_size, refcheck=False)
global_data.all_channels_average_vals = global_data.all_channels_average_vals[:, 0:real_average_data_size]
except:
ctypes.windll.user32.MessageBoxW(0, "Ошибка в процессе распределения данных по " + str(channels_count) + " каналам",
"Program error", 1)
return handle_success
handle_success = True
return handle_success | /rewrite_for_file-0.1.0.tar.gz/rewrite_for_file-0.1.0/rewrite_for_file/AnalyzingDataSliceWin.py | 0.41478 | 0.223398 | AnalyzingDataSliceWin.py | pypi |
# Rex: an open-source quadruped robot

The goal of this project is to train an open-source 3D printed quadruped robot exploring
`Reinforcement Learning` and `OpenAI Gym`. The aim is to let the robot learn domestic and generic tasks in simulation and then
successfully transfer the knowledge (`Control Policies`) to the real robot without any further manual tuning.
This project is mostly inspired by the incredible works done by Boston Dynamics.
## Related repositories
[rexctl](https://github.com/nicrusso7/rexctl) - A CLI application to bootstrap and control Rex running the trained `Control Policies`.
[rex-cloud](https://github.com/nicrusso7/rex-cloud) - A CLI application to train Rex on the cloud.
# Rex-gym: OpenAI Gym environments and tools
This repository contains a collection of `OpenAI Gym Environments` used to train Rex, the Rex URDF model,
the learning agent implementation (PPO) and some scripts to start the training session and visualise the learned `Control Policies`.
This CLI application allows batch training, policy reproduction and single training rendered sessions.
# Installation
Create a `Python 3.7` virtual environment, e.g. using `Anaconda`
```
conda create -n rex python=3.7 anaconda
conda activate rex
```
## PyPI package
Install the public `rex-gym` package:
```
pip install rex_gym
```
## Install from source
Clone this repository and run from the root of the project:
```
pip install .
```
# CLI usage
Run ``` rex-gym --help ``` to display the available commands and ``` rex-gym COMMAND_NAME --help ``` to show the help
message for a specific command.
Use the `--arg` flag to eventually set the simulation arguments. For a full list check out the [environments parameters](#environments-parameters).
To switch between the `Open Loop` and the `Bezier controller (inverse kinematics)` modes, just append either the `--open-loop` or `--inverse-kinematics` flags.
```
rex-gym COMMAND_NAME -ik
rex-gym COMMAND_NAME -ol
```
For more info about the modes check out [the learning approach](#learning-approach).
## Policy player: run a pre-trained agent
To start a pre-trained agent (play a learned `Control Policy`):
```
rex-gym policy --env ENV_NAME
```
## Train: Run a single training simulation
To start a single agent rendered session (`agents=1`, `render=True`):
```
rex-gym train --playground True --env ENV_NAME --log-dir LOG_DIR_PATH
```
## Train: Start a new batch training simulation
To start a new batch training session:
```
rex-gym train --env ENV_NAME --log-dir LOG_DIR_PATH
```
# Robot platform
## Mark 1
The robot used for this first version is the [Spotmicro](https://www.thingiverse.com/thing:3445283) made by [Deok-yeon Kim](https://www.thingiverse.com/KDY0523/about).
I've printed the components using a Creality Ender3 3D printer, with PLA and TPU+.

The hardware used is listed in this [wiki](https://github.com/nicrusso7/rexctl/wiki/Mark-I).
The idea is to extend the robot adding components like a robotic arm on the top of the rack and a LiDAR sensor in the next versions alongside
fixing some design issue to support a better (and easier) calibration and more reliable servo motors.
# Simulation model
## Base model
Rex is a 12 joints robot with 3 motors (`Shoulder`, `Leg` and `Foot`) for each leg.
The robot `base` model is imported in `pyBullet` using an [URDF file](rex_gym/util/pybullet_data/assets/urdf/rex.urdf).
The servo motors are modelled in the `model/motor.py` class.

## Robotic arm
The `arm` model has the open source 6DOF robotic arm [Poppy Ergo Jr](https://github.com/poppy-project/poppy-ergo-jr) equipped on the top of the
rack.

To switch between `base` and `arm` models use the `--mark` flag.
# Learning approach
This library uses the `Proximal Policy Optimization (PPO)` algorithm with a hybrid policy defined as:
```a(t, o) = a(t) + π(o)```
It can be varied continuously from fully user-specified to entirely learned from scratch.
If we want to use a user-specified policy, we can set both the lower and the upper bounds of `π(o)` to be zero.
If we want a policy that is learned from scratch, we can set `a(t) = 0` and give the feedback component `π(o)` a wide output range.
By varying the open loop signal and the output bound of the feedback component, we can decide how much user control is applied to the system.
A twofold approach is used to implement the Rex `Gym Environments`: `Bezier controller` and `Open Loop`.
The `Bezier controller` implements a fully user-specified policy. The controller uses the `Inverse Kinematics` model (see `model/kinematics.py`)
to generate the gait.
The `Open Loop` mode consists, in some cases, of letting the system learn from scratch (setting the open loop component `a(t) = 0`), while in others
it just provides a simple trajectory reference (e.g. `a(t) = sin(t)`).
The purpose is to compare the learned policies and scores using those two different approaches.
# Tasks
This is the list of tasks this experiment wants to cover:
1. Basic controls:
1. **Static poses - Frame a point standing on the spot.**
- [x] Bezier controller
- [ ] Open Loop signal
2. **Gallop**
- forward
- [x] Bezier controller
- [x] Open Loop signal
- backward
- [ ] Bezier controller
- [ ] Open Loop signal
3. **Walk**
- forward
- [x] Bezier controller
- [x] Open Loop signal
- backward
- [x] Bezier controller
- [ ] Open Loop signal
4. **Turn - on the spot**
- [x] Bezier controller
- [x] Open Loop signal
5. **Stand up - from the floor**
- [ ] Bezier controller
- [x] Open Loop signal
2. Navigate uneven terrains:
- [x] Random heightfield, hill, mount
- [ ] Maze
- [ ] Stairs
3. **Open a door**
4. **Grab an object**
5. **Fall recovery**
6. **Reach a specific point in a map**
7. **Map an open space**
# Terrains
To set a specific terrain, use the `--terrain` flag. The default terrain is the standard `plane`. This feature is quite useful to
test the policy robustness.
## Random heightfield
Use the `--terrain random` flag to generate a random heightfield pattern. This pattern is updated at every 'Reset' step.

## Hills
Use the `--terrain hills` flag to generate an uneven terrain.

## Mounts
Use the `--terrain mounts` flag to generate this scenario.

## Maze
Use the `--terrain maze` flag to generate this scenario.

# Environments
## Basic Controls: Static poses
Goal: Move Rex base to assume static poses standing on the spot.
### Inverse kinematic
The gym environment is used to learn how to gracefully assume a pose avoiding too fast transactions.
It uses a one-dimensional `action space` with a feedback component `π(o)` with bounds `[-0.1, 0.1]`.
The feedback is applied to a sigmoid function to orchestrate the movement.
When the `--playground` flag is used, it's possible to use the pyBullet UI to manually set a specific pose altering the robot base position
(`x`,`y`,`z`) and orientation (`roll`, `pitch`, `yaw`).

## Basic Controls: Gallop
Goal: Gallop straight on and stop at a desired position.
In order to make the learning more robust, the Rex target position is randomly chosen at every 'Reset' step.
### Bezier controller
This gym environment is used to learn how to gracefully start the gait and then stop it after reaching the target position (on the `x` axis).
It uses two-dimensional `action space` with a feedback component `π(o)` with bounds `[-0.3, 0.3]`. The feedback component is applied to two ramp functions
used to orchestrate the gait. A correct start helps to avoid the drift effect generated by the gait in the resulting learned policy.

### Open Loop signal
This gym environment is used to let the system learn the gait from scratch. The `action space` has 4 dimensions, two for the front legs and feet
and two for the rear legs and feet, with the feedback component output bounds `[−0.3, 0.3]`.

## Basic Controls: Walk
Goal: Walk straight on and stop at a desired position.
In order to make the learning more robust, the Rex target position is randomly chosen at every 'Reset' step.
### Bezier controller
This gym environment is used to learn how to gracefully start the gait and then stop it after reaching the target position (on the `x` axis).
It uses two-dimensional `action space` with a feedback component `π(o)` with bounds `[-0.4, 0.4]`. The feedback component is applied to two ramp functions
used to orchestrate the gait. A correct start helps to avoid the drift effect generated by the gait in the resulting learned policy.
#### Forward

#### Backwards

### Open Loop signal
This gym environment uses a sinusoidal trajectory reference to alternate the Rex legs during the gait.
```
leg(t) = 0.1 cos(2π/T*t)
foot(t) = 0.2 cos(2π/T*t)
```
The feedback component has very small bounds: `[-0.01, 0.01]`. A ramp function is used to start and stop the gait gracefully.

## Basic Controls: Turn on the spot
Goal: Reach a target orientation turning on the spot.
In order to make the learning more robust, the Rex start orientation and target are randomly chosen at every 'Reset' step.
### Bezier controller
This gym environment is used to optimise the `step_length` and `step_rotation` arguments used by the `GaitPlanner` to implement the 'steer' gait.
It uses a two-dimensional `action space` with a feedback component `π(o)` with bounds `[-0.05, 0.05]`.

### Open loop
This environment is used to learn a 'steer-on-the-spot' gait, allowing Rex to move towards a specific orientation.
It uses a two-dimensional `action space` with a small feedback component `π(o)` with bounds `[-0.05, 0.05]` to optimise the `shoulder` and `foot` angles
during the gait.

## Basic Controls: Stand up
Goal: Stand up starting from the standby position
This environment introduces the `rest_postion`, ideally the position assumed when Rex is in standby.
### Open loop
The `action space` size is equal to 1, with a feedback component `π(o)` with bounds `[-0.1, 0.1]` used to optimise the signal timing.
The signal function applies a 'brake' forcing Rex to assume an halfway position before completing the movement.

# Environments parameters
| Environment | `env` flag | `arg` flag |
| ----------- | ---------- | ---------- |
| Galloping | gallop | `target_position` |
| Walking | walk | `target_position` |
| Turn | turn | `init_orient`, `target_orient` |
| Stand up | standup | N.A |
| `arg` | Description |
| ----- | ----------- |
| init_orient | The starting orientation in rad. |
| target_orient | The target orientation in rad. |
| target_position | The target position (`x` axis). |
| Flags | Description |
| ----- | ----------- |
| log-dir | The path where the log directory will be created. (Required) |
| playground | A boolean to start a single training rendered session |
| agents-number | Set the number of parallel agents |
## PPO Agent configuration
You may want to edit the PPO agent's default configuration, especially the number of parallel agents launched during
the simulation.
Use the `--agents-number` flag, e.g. `--agents-number 10`.
This configuration will launch 10 agents (threads) in parallel to train your model.
The default value is setup in the `agents/scripts/configs.py` script:
```
def default():
"""Default configuration for PPO."""
# General
...
num_agents = 20
```
# Credits
## Papers
[Sim-to-Real: Learning Agile Locomotion For Quadruped Robots](https://arxiv.org/pdf/1804.10332.pdf) and all the related papers. Google Brain, Google X, Google DeepMind - Minitaur Ghost Robotics.
[Inverse Kinematic Analysis Of A Quadruped Robot](https://www.researchgate.net/publication/320307716_Inverse_Kinematic_Analysis_Of_A_Quadruped_Robot)
[Leg Trajectory Planning for Quadruped Robots with High-Speed Trot Gait](https://www.researchgate.net/publication/332374021_Leg_Trajectory_Planning_for_Quadruped_Robots_with_High-Speed_Trot_Gait)
## Robot platform v1
[Deok-yeon Kim](https://www.thingiverse.com/KDY0523/about) creator of SpotMini.
The awesome [Poppy Project](https://github.com/poppy-project).
SpotMicro CAD files: [SpotMicroAI](https://github.com/FlorianWilk/SpotMicroAI) community.
## Inspiring projects
The kinematics model was inspired by the great work done by [Miguel Ayuso](https://hackaday.io/project/171456-diy-hobby-servos-quadruped-robot).
| /rex_gym-0.2.7.tar.gz/rex_gym-0.2.7/README.md | 0.855851 | 0.989491 | README.md | pypi |
r"""Running a pre-trained ppo agent on rex environments"""
import logging
import os
import site
import time
import tensorflow.compat.v1 as tf
from rex_gym.agents.scripts import utility
from rex_gym.agents.ppo import simple_ppo_agent
from rex_gym.util import flag_mapper
class PolicyPlayer:
    """Restores a pre-trained PPO policy and replays it on a rendered environment."""

    def __init__(self, env_id: str, args: dict, signal_type: str):
        self.gym_dir_path = str(site.getsitepackages()[-1])
        self.env_id = env_id
        self.args = args
        self.signal_type = signal_type
        # Playback always runs with debug information enabled.
        self.args['debug'] = True

    def play(self):
        """Load the checkpoint matching this env/signal pair and run one episode."""
        if self.signal_type:
            self.args['signal_type'] = self.signal_type
        else:
            # No explicit signal type: fall back to the env's default.
            self.signal_type = flag_mapper.DEFAULT_SIGNAL[self.env_id]
        policy_id = f"{self.env_id}_{self.signal_type}"
        policy_path, checkpoint_name = flag_mapper.ENV_ID_TO_POLICY[policy_id]
        policy_dir = os.path.join(self.gym_dir_path, policy_path)
        config = utility.load_config(policy_dir)
        env = config.env(render=True, **self.args)
        checkpoint = os.path.join(policy_dir, checkpoint_name)
        with tf.Session() as sess:
            agent = simple_ppo_agent.SimplePPOPolicy(
                sess,
                env,
                config.network,
                policy_layers=config.policy_layers,
                value_layers=config.value_layers,
                checkpoint=checkpoint)
            total_reward = 0
            observation = env.reset()
            while True:
                action = agent.get_action([observation])
                observation, reward, done, _ = env.step(action[0])
                time.sleep(0.002)
                total_reward += reward
                logging.info(f"Reward={total_reward}")
                if done:
                    break
"""Memory that stores episodes."""
import tensorflow as tf
class EpisodeMemory(object):
    """Fixed-capacity storage of variable-length episodes, backed by TF variables."""

    def __init__(self, template, capacity, max_length, scope):
        """
        Create a memory that stores episodes.
        Each transition tuple consists of quantities specified by the template.
        These quantities would typically be observations, actions, rewards, and
        done indicators.
        Args:
          template: List of tensors to derive shapes and dtypes of each transition.
          capacity: Number of episodes, or rows, held by the memory.
          max_length: Allocated sequence length for the episodes.
          scope: Variable scope to use for internal variables.
        """
        self._capacity = capacity
        self._max_length = max_length
        with tf.compat.v1.variable_scope(scope) as scope:
            self._scope = scope
            # Per-row current episode length (int32, shape [capacity]).
            self._length = tf.Variable(tf.zeros(capacity, tf.int32), False)
            # One [capacity, max_length, ...] buffer per transition quantity.
            self._buffers = [
                tf.Variable(tf.zeros([capacity, max_length] + elem.shape.as_list(), elem.dtype), False)
                for elem in template
            ]

    def length(self, rows=None):
        """Tensor holding the current length of episodes.
        Args:
          rows: Episodes to select length from, defaults to all.
        Returns:
          Batch tensor of sequence lengths.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        return tf.gather(self._length, rows)

    def append(self, transitions, rows=None):
        """Append a batch of transitions to rows of the memory.
        Args:
          transitions: Tuple of transition quantities with batch dimension.
          rows: Episodes to append to, defaults to all.
        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        # Assert ordering via control dependencies: row indices must be in
        # range, and the touched rows must still have room for one transition.
        assert_capacity = tf.compat.v1.assert_less(rows, self._capacity, message='capacity exceeded')
        with tf.control_dependencies([assert_capacity]):
            assert_max_length = tf.compat.v1.assert_less(tf.gather(self._length, rows),
                                                         self._max_length,
                                                         message='max length exceeded')
        append_ops = []
        with tf.control_dependencies([assert_max_length]):
            for buffer_, elements in zip(self._buffers, transitions):
                # Write each batch element at position (row, current length of row).
                timestep = tf.gather(self._length, rows)
                indices = tf.stack([rows, timestep], 1)
                append_ops.append(tf.compat.v1.scatter_nd_update(buffer_, indices, elements))
        with tf.control_dependencies(append_ops):
            # Only after all buffer writes complete, bump the touched rows'
            # lengths by one (one-hot sum counts each row once).
            episode_mask = tf.reduce_sum(tf.one_hot(rows, self._capacity, dtype=tf.int32), 0)
            return self._length.assign_add(episode_mask)

    def replace(self, episodes, length, rows=None):
        """Replace full episodes.
        Args:
          episodes: Tuple of transition quantities with batch and time dimensions.
          length: Batch of sequence lengths.
          rows: Episodes to replace, defaults to all.
        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        assert_capacity = tf.compat.v1.assert_less(rows, self._capacity, message='capacity exceeded')
        with tf.control_dependencies([assert_capacity]):
            # Unlike append(), whole episodes up to max_length are allowed.
            assert_max_length = tf.compat.v1.assert_less_equal(length,
                                                               self._max_length,
                                                               message='max length exceeded')
        replace_ops = []
        with tf.control_dependencies([assert_max_length]):
            for buffer_, elements in zip(self._buffers, episodes):
                replace_op = tf.compat.v1.scatter_update(buffer_, rows, elements)
                replace_ops.append(replace_op)
        with tf.control_dependencies(replace_ops):
            # Lengths are updated only after every buffer row was rewritten.
            return tf.compat.v1.scatter_update(self._length, rows, length)

    def data(self, rows=None):
        """Access a batch of episodes from the memory.
        Padding elements after the length of each episode are unspecified and might
        contain old data.
        Args:
          rows: Episodes to select, defaults to all.
        Returns:
          Tuple containing a tuple of transition quantities with batch and time
          dimensions, and a batch of sequence lengths.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        episode = [tf.gather(buffer_, rows) for buffer_ in self._buffers]
        length = tf.gather(self._length, rows)
        return episode, length

    def clear(self, rows=None):
        """Reset episodes in the memory.
        Internally, this only sets their lengths to zero. The memory entries will
        be overridden by future calls to append() or replace().
        Args:
          rows: Episodes to clear, defaults to all.
        Returns:
          Operation.
        """
        rows = tf.range(self._capacity) if rows is None else rows
        assert rows.shape.ndims == 1
        return tf.compat.v1.scatter_update(self._length, rows, tf.zeros_like(rows))
"""Normalize tensors based on streaming estimates of mean and variance."""
import tensorflow as tf
class StreamingNormalize(object):
    """Normalize tensors based on streaming estimates of mean and variance."""

    def __init__(self, template, center=True, scale=True, clip=10, name='normalize'):
        """Normalize tensors based on streaming estimates of mean and variance.
        Centering the value, scaling it by the standard deviation, and clipping
        outlier values are optional.
        Args:
          template: Example tensor providing shape and dtype of the value to track.
          center: Python boolean indicating whether to subtract mean from values.
          scale: Python boolean indicating whether to scale values by stddev.
          clip: If and when to clip normalized values.
          name: Parent scope of operations provided by this class.
        """
        self._center = center
        self._scale = scale
        self._clip = clip
        self._name = name
        with tf.name_scope(name):
            # Running sample count, element-wise mean, and element-wise sum of
            # squared deviations (the basis for the variance estimate).
            self._count = tf.Variable(0, False)
            self._mean = tf.Variable(tf.zeros_like(template), False)
            self._var_sum = tf.Variable(tf.zeros_like(template), False)

    def transform(self, value):
        """Normalize a single or batch tensor.
        Applies the activated transformations in the constructor using current
        estimates of mean and variance.
        Args:
          value: Batch or single value tensor.
        Returns:
          Normalized batch or single value tensor.
        """
        with tf.name_scope(self._name + '/transform'):
            no_batch_dim = value.shape.ndims == self._mean.shape.ndims
            if no_batch_dim:
                # Add a batch dimension if necessary.
                value = value[None, ...]
            if self._center:
                value -= self._mean[None, ...]
            if self._scale:
                # We cannot scale before seeing at least two samples.
                value /= tf.cond(
                    self._count > 1, lambda: self._std() + 1e-8, lambda: tf.ones_like(self._var_sum))[None]
            if self._clip:
                value = tf.clip_by_value(value, -self._clip, self._clip)
            # Remove batch dimension if necessary.
            if no_batch_dim:
                value = value[0]
            return tf.debugging.check_numerics(value, 'value')

    def update(self, value):
        """Update the mean and variance estimates.
        Incremental (Welford-style) update of the running mean and the sum of
        squared deviations from a batch of values.
        Args:
          value: Batch or single value tensor.
        Returns:
          Summary tensor.
        """
        with tf.name_scope(self._name + '/update'):
            if value.shape.ndims == self._mean.shape.ndims:
                # Add a batch dimension if necessary.
                value = value[None, ...]
            count = tf.shape(value)[0]
            with tf.control_dependencies([self._count.assign_add(count)]):
                # The control dependency forces the count increment first, so
                # `step` reads the count including this batch.
                step = tf.cast(self._count, tf.float32)
                mean_delta = tf.reduce_sum(value - self._mean[None, ...], 0)
                new_mean = self._mean + mean_delta / step
                # With a single sample seen so far, the mean is just that sample.
                new_mean = tf.cond(self._count > 1, lambda: new_mean, lambda: value[0])
                # Product of deviations from the old and new means, as in
                # Welford's online variance algorithm.
                var_delta = (value - self._mean[None, ...]) * (value - new_mean[None, ...])
                new_var_sum = self._var_sum + tf.reduce_sum(var_delta, 0)
            with tf.control_dependencies([new_mean, new_var_sum]):
                update = self._mean.assign(new_mean), self._var_sum.assign(new_var_sum)
            with tf.control_dependencies(update):
                if value.shape.ndims == 1:
                    value = tf.reduce_mean(value)
                return self._summary('value', tf.reduce_mean(value))

    def reset(self):
        """Reset the estimates of mean and variance.
        Resets the full state of this class.
        Returns:
          Operation.
        """
        with tf.name_scope(self._name + '/reset'):
            return tf.group(self._count.assign(0), self._mean.assign(tf.zeros_like(self._mean)),
                            self._var_sum.assign(tf.zeros_like(self._var_sum)))

    def summary(self):
        """Summary string of mean and standard deviation.
        Returns:
          Summary tensor.
        """
        with tf.name_scope(self._name + '/summary'):
            # Mean is defined after one sample, stddev only after two.
            mean_summary = tf.cond(self._count > 0, lambda: self._summary('mean', self._mean), str)
            std_summary = tf.cond(self._count > 1, lambda: self._summary('stddev', self._std()), str)
            return tf.compat.v1.summary.merge([mean_summary, std_summary])

    def _std(self):
        """Computes the current estimate of the standard deviation.
        Note that the standard deviation is not defined until at least two samples
        were seen.
        Returns:
          Tensor of current variance.
        """
        variance = tf.cond(self._count > 1, lambda: self._var_sum / tf.cast(
            self._count - 1, tf.float32), lambda: tf.ones_like(self._var_sum) * float('nan'))
        # The epsilon corrects for small negative variance values caused by
        # the algorithm. It was empirically chosen to work with all environments
        # tested.
        return tf.sqrt(variance + 1e-4)

    def _summary(self, name, tensor):
        """Create a scalar or histogram summary matching the rank of the tensor.
        Args:
          name: Name for the summary.
          tensor: Tensor to summarize.
        Returns:
          Summary tensor.
        """
        if tensor.shape.ndims == 0:
            return tf.compat.v1.summary.scalar(name, tensor)
        else:
            return tf.compat.v1.summary.histogram(name, tensor)
import tensorflow as tf
from . import normalize
from ..scripts import utility
class SimplePPOPolicy(object):
  """A simple PPO policy that is independent to the PPO infrastructure.

  This class restores the policy network from a tensorflow checkpoint that was
  learned from PPO training. The purpose of this class is to conveniently
  visualize a learned policy or deploy the learned policy on real robots without
  need to change the PPO evaluation infrastructure:
  https://cs.corp.google.com/piper///depot/google3/robotics/reinforcement_learning/agents/scripts/visualize.py.
  """

  def __init__(self, sess, env, network, policy_layers, value_layers, checkpoint):
    """Build the inference graph and restore weights from a checkpoint.

    Args:
      sess: TensorFlow session used both to initialize temporaries and run
        inference.
      env: Gym-style environment; only its observation/action spaces are used.
      network: The recurrent network (cell) constructor.
      policy_layers: Layer sizes for the policy network.
      value_layers: Layer sizes for the value network.
      checkpoint: Path of the checkpoint to restore.
    """
    self.env = env
    self.sess = sess
    observation_size = len(env.observation_space.low)
    action_size = len(env.action_space.low)
    self.observation_placeholder = tf.compat.v1.placeholder(tf.float32, [None, observation_size],
                                                            name="Input")
    # Streaming normalizer matching the one used during training; its
    # statistics are restored from the checkpoint below.
    self._observ_filter = normalize.StreamingNormalize(self.observation_placeholder[0],
                                                       center=True,
                                                       scale=True,
                                                       clip=5,
                                                       name="normalize_observ")
    self._restore_policy(network,
                         policy_layers=policy_layers,
                         value_layers=value_layers,
                         action_size=action_size,
                         checkpoint=checkpoint)

  def _restore_policy(self, network, policy_layers, value_layers, action_size, checkpoint):
    """Restore the PPO policy from a TensorFlow checkpoint.

    Args:
      network: The neural network definition.
      policy_layers: A tuple specify the number of layers and number of neurons
        of each layer for the policy network.
      value_layers: A tuple specify the number of layers and number of neurons
        of each layer for the value network.
      action_size: The dimension of the action space.
      checkpoint: The checkpoint path.
    """
    observ = self._observ_filter.transform(self.observation_placeholder)
    # Scope names must match the ones used at training time so the saver can
    # map checkpoint variables onto this graph.
    with tf.compat.v1.variable_scope("network/rnn"):
      self.network = network(policy_layers=policy_layers,
                             value_layers=value_layers,
                             action_size=action_size)
    # Recurrent state carried across get_action() calls; excluded from the
    # checkpoint restore (see the saver exclude pattern below).
    with tf.compat.v1.variable_scope("temporary"):
      self.last_state = tf.Variable(self.network.zero_state(1, tf.float32), False)
      self.sess.run(self.last_state.initializer)
    with tf.compat.v1.variable_scope("network"):
      (mean_action, _, _), new_state = tf.nn.dynamic_rnn(self.network,
                                                         observ[:, None],
                                                         tf.ones(1),
                                                         self.last_state,
                                                         tf.float32,
                                                         swap_memory=True)
      self.mean_action = mean_action
      self.update_state = self.last_state.assign(new_state)
    saver = utility.define_saver(exclude=(r"temporary/.*",))
    saver.restore(self.sess, checkpoint)

  def get_action(self, observation):
    """Compute the (deterministic) mean action for one observation.

    Also advances the stored recurrent state as a side effect.
    """
    normalized_observation = self._normalize_observ(observation)
    normalized_action, _ = self.sess.run(
        [self.mean_action, self.update_state],
        feed_dict={self.observation_placeholder: normalized_observation})
    action = self._denormalize_action(normalized_action)
    return action[:, 0]

  def _denormalize_action(self, action):
    """Map a [-1, 1]-normalized action back into the env's action bounds."""
    min_ = self.env.action_space.low
    max_ = self.env.action_space.high
    action = (action + 1) / 2 * (max_ - min_) + min_
    return action

  def _normalize_observ(self, observ):
    """Map a raw observation into [-1, 1] using the env's observation bounds."""
    min_ = self.env.observation_space.low
    max_ = self.env.observation_space.high
    observ = 2 * (observ - min_) / (max_ - min_) - 1
    return observ
"""Utilities for the PPO algorithm."""
import collections
import math
import re
import tensorflow as tf
from tensorflow.python.client import device_lib
def create_nested_vars(tensors):
  """Create variables matching a nested tuple of tensors.

  Args:
    tensors: Nested tuple or list of tensors.

  Returns:
    Nested tuple or list of variables with the same structure.
  """
  if not isinstance(tensors, (tuple, list)):
    return tf.Variable(tensors, False)
  # Preserve the container type (tuple vs. list) while recursing.
  container = type(tensors)
  return container(create_nested_vars(element) for element in tensors)
def reinit_nested_vars(variables, indices=None):
  """Reset all variables in a nested tuple to zeros.

  Args:
    variables: Nested tuple or list of variables.
    indices: Indices along the first dimension to reset, defaults to all.

  Returns:
    Operation.
  """
  if isinstance(variables, (tuple, list)):
    resets = [reinit_nested_vars(variable, indices) for variable in variables]
    return tf.group(*resets)
  if indices is None:
    return variables.assign(tf.zeros_like(variables))
  # Zero only the selected rows along the first (batch) dimension.
  zeros_shape = [tf.shape(indices)[0]] + variables.shape[1:].as_list()
  return tf.compat.v1.scatter_update(variables, indices, tf.zeros(zeros_shape))
def assign_nested_vars(variables, tensors):
  """Assign tensors to matching nested tuple of variables.

  Args:
    variables: Nested tuple or list of variables to update.
    tensors: Nested tuple or list of tensors to assign.

  Returns:
    Operation.
  """
  if not isinstance(variables, (tuple, list)):
    return variables.assign(tensors)
  assignments = [
      assign_nested_vars(variable, tensor)
      for variable, tensor in zip(variables, tensors)
  ]
  return tf.group(*assignments)
def discounted_return(reward, length, discount):
  """Discounted Monte-Carlo returns.

  Args:
    reward: Sequences of rewards, shaped (batch, time).
    length: Batch of sequence lengths; steps beyond them are masked to zero.
    discount: Discount factor.

  Returns:
    Sequences of discounted returns with gradients stopped.
  """
  # Mask out padding steps beyond each sequence's length.
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  # Scan right-to-left over time: agg_t = r_t + discount * agg_{t+1}.
  # The reverse/transpose pairs put the time axis first for tf.scan and then
  # restore the original (batch, time) layout.
  return_ = tf.reverse(
      tf.transpose(
          tf.scan(lambda agg, cur: cur + discount * agg,
                  tf.transpose(tf.reverse(mask * reward, [1]), [1, 0]),
                  tf.zeros_like(reward[:, -1]), 1, False), [1, 0]), [1])
  return tf.debugging.check_numerics(tf.stop_gradient(return_), 'return')
def fixed_step_return(reward, value, length, discount, window):
  """N-step discounted return.

  Sums `window` discounted reward steps and bootstraps with the discounted
  value estimate `window` steps ahead. Padding elements beyond each sequence
  length are masked to zero.

  Args:
    reward: Sequences of rewards, shaped (batch, time).
    value: Sequences of value estimates, shaped (batch, time).
    length: Batch of sequence lengths.
    discount: Discount factor.
    window: Number of reward steps before bootstrapping from the value.

  Returns:
    Sequences of n-step returns with gradients stopped.
  """
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  return_ = tf.zeros_like(reward)
  for _ in range(window):
    return_ += reward
    # Shift rewards one step left (zero-padded) and apply one more discount.
    reward = discount * tf.concat([reward[:, 1:], tf.zeros_like(reward[:, -1:])], 1)
  # Bootstrap with the value `window` steps ahead. Bug fix: the axis argument
  # `1` was previously inside the values list, so tf.concat was called without
  # its required axis and failed at graph-construction time.
  return_ += discount ** window * tf.concat(
      [value[:, window:], tf.zeros_like(value[:, -window:])], 1)
  return tf.debugging.check_numerics(tf.stop_gradient(mask * return_), 'return')
def lambda_return(reward, value, length, discount, lambda_):
  """TD-lambda returns.

  Args:
    reward: Sequences of rewards, shaped (batch, time).
    value: Sequences of value estimates, shaped (batch, time).
    length: Batch of sequence lengths; steps beyond them are masked out.
    discount: Discount factor.
    lambda_: Mixing factor between one-step TD and Monte-Carlo targets.

  Returns:
    Sequences of TD-lambda returns with gradients stopped.
  """
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  # Per-step recursion: G_t = (r_t + discount * (1 - lambda) * V_t)
  #                           + (discount * lambda) * G_{t+1}.
  # Both the additive term and the per-step discount are stacked so a single
  # backwards scan can apply them together.
  sequence = mask * reward + discount * value * (1 - lambda_)
  discount = mask * discount * lambda_
  sequence = tf.stack([sequence, discount], 2)
  return_ = tf.reverse(
      tf.transpose(
          tf.scan(lambda agg, cur: cur[0] + cur[1] * agg,
                  tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]), tf.zeros_like(value[:, -1]),
                  1, False), [1, 0]), [1])
  return tf.debugging.check_numerics(tf.stop_gradient(return_), 'return')
def lambda_advantage(reward, value, length, discount):
  """Generalized Advantage Estimation.

  Args:
    reward: Sequences of rewards, shaped (batch, time).
    value: Sequences of value estimates, shaped (batch, time).
    length: Batch of sequence lengths; steps beyond them are masked out.
    discount: Discount factor.

  Returns:
    Sequences of advantages with gradients stopped.
  """
  timestep = tf.range(reward.shape[1].value)
  mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
  # One-step TD error; the next value is zero-padded past the sequence end.
  next_value = tf.concat([value[:, 1:], tf.zeros_like(value[:, -1:])], 1)
  delta = reward + discount * next_value - value
  # Backwards scan accumulating discounted TD errors: A_t = d_t + discount * A_{t+1}.
  advantage = tf.reverse(
      tf.transpose(
          tf.scan(lambda agg, cur: cur + discount * agg,
                  tf.transpose(tf.reverse(mask * delta, [1]), [1, 0]), tf.zeros_like(delta[:, -1]),
                  1, False), [1, 0]), [1])
  return tf.debugging.check_numerics(tf.stop_gradient(advantage), 'advantage')
def diag_normal_kl(mean0, logstd0, mean1, logstd1):
  """KL divergence of two normals with diagonal covariance.

  Computes KL(N(mean0, exp(logstd0)) || N(mean1, exp(logstd1))) summed over
  the last axis.
  """
  logstd0_2, logstd1_2 = 2 * logstd0, 2 * logstd1
  # Closed-form diagonal-Gaussian KL: 0.5 * (trace + quadratic + log-det diff - dim).
  trace_term = tf.reduce_sum(tf.exp(logstd0_2 - logstd1_2), -1)
  quadratic_term = tf.reduce_sum((mean1 - mean0) ** 2 / tf.exp(logstd1_2), -1)
  dimension = mean0.shape[-1].value
  return 0.5 * (trace_term + quadratic_term + tf.reduce_sum(logstd1_2, -1) -
                tf.reduce_sum(logstd0_2, -1) - dimension)
def diag_normal_logpdf(mean, logstd, loc):
  """Log density of a normal with diagonal covariance.

  Evaluated at `loc` and summed over the last (event) axis.
  """
  normalizer = -0.5 * (math.log(2 * math.pi) + logstd)
  quadratic = -0.5 * ((loc - mean) / tf.exp(logstd)) ** 2
  return tf.reduce_sum(normalizer + quadratic, -1)
def diag_normal_entropy(mean, logstd):
  """Empirical entropy of a normal with diagonal covariance."""
  # Closed form: 0.5 * (dim * log(2*pi*e) + sum(2 * logstd)).
  dimension = mean.shape[-1].value
  constant = dimension * math.log(2 * math.pi * math.e)
  return (constant + tf.reduce_sum(2 * logstd, 1)) / 2
def available_gpus():
  """List of GPU device names detected by TensorFlow."""
  gpu_names = []
  for device in device_lib.list_local_devices():
    if device.device_type == 'GPU':
      gpu_names.append(device.name)
  return gpu_names
def gradient_summaries(grad_vars, groups=None, scope='gradients'):
  """Create histogram summaries of the gradient.

  Summaries can be grouped via regexes matching variables names.

  Args:
    grad_vars: List of (gradient, variable) tuples as returned by optimizers.
    groups: Mapping of name to regex for grouping summaries.
    scope: Name scope for this operation.

  Returns:
    Summary tensor.
  """
  groups = groups or {r'all': r'.*'}
  grouped = collections.defaultdict(list)
  for grad, var in grad_vars:
    # Variables without a gradient (not part of this loss) are skipped.
    if grad is None:
      continue
    for group_name, pattern in groups.items():
      if not re.match(pattern, var.name):
        continue
      # The substitution lets a pattern with capture groups expand to
      # per-variable summary names.
      key = re.sub(pattern, group_name, var.name)
      grouped[key].append(grad)
  for group_name in groups:
    if group_name not in grouped:
      tf.compat.v1.logging.warn("No variables matching '{}' group.".format(group_name))
  summaries = []
  for key, grads in grouped.items():
    flat = tf.concat([tf.reshape(grad, [-1]) for grad in grads], 0)
    summaries.append(tf.compat.v1.summary.histogram(scope + '/' + key, flat))
  return tf.compat.v1.summary.merge(summaries)
def variable_summaries(vars_, groups=None, scope='weights'):
  """Create histogram summaries for the provided variables.

  Summaries can be grouped via regexes matching variables names.

  Args:
    vars_: List of variables to summarize.
    groups: Mapping of name to regex for grouping summaries.
    scope: Name scope for this operation.

  Returns:
    Summary tensor.
  """
  groups = groups or {r'all': r'.*'}
  grouped = collections.defaultdict(list)
  for var in vars_:
    for group_name, pattern in groups.items():
      if not re.match(pattern, var.name):
        continue
      # The substitution lets a pattern with capture groups expand to
      # per-variable summary names.
      key = re.sub(pattern, group_name, var.name)
      grouped[key].append(var)
  for group_name in groups:
    if group_name not in grouped:
      tf.compat.v1.logging.warn("No variables matching '{}' group.".format(group_name))
  summaries = []
  for key, group_vars in grouped.items():
    flat = tf.concat([tf.reshape(var, [-1]) for var in group_vars], 0)
    summaries.append(tf.compat.v1.summary.histogram(scope + '/' + key, flat))
  return tf.compat.v1.summary.merge(summaries)
import collections
import tensorflow as tf
import tensorflow_probability as tfp
from . import memory
from . import normalize
from . import utility
# Per-step output of the policy/value network: the action distribution, its
# mean and log standard deviation, the state-value estimate, and the new
# recurrent state.
_NetworkOutput = collections.namedtuple('NetworkOutput', 'policy, mean, logstd, value, state')
class PPOAlgorithm(object):
  """A vectorized implementation of the PPO algorithm by John Schulman."""

  def __init__(self, batch_env, step, is_training, should_log, config):
    """Create an instance of the PPO algorithm.

    Args:
      batch_env: In-graph batch environment.
      step: Integer tensor holding the current training step.
      is_training: Boolean tensor for whether the algorithm should train.
      should_log: Boolean tensor for whether summaries should be returned.
      config: Object containing the agent configuration as attributes.
    """
    self._batch_env = batch_env
    self._step = step
    self._is_training = is_training
    self._should_log = should_log
    self._config = config
    # Streaming normalizers: observations are centered and scaled, rewards
    # only scaled (centering rewards would change their sign/meaning).
    self._observ_filter = normalize.StreamingNormalize(self._batch_env.observ[0],
                                                       center=True,
                                                       scale=True,
                                                       clip=5,
                                                       name='normalize_observ')
    self._reward_filter = normalize.StreamingNormalize(self._batch_env.reward[0],
                                                       center=False,
                                                       scale=True,
                                                       clip=10,
                                                       name='normalize_reward')
    # Memory stores tuple of observ, action, mean, logstd, reward.
    template = (self._batch_env.observ[0], self._batch_env.action[0], self._batch_env.action[0],
                self._batch_env.action[0], self._batch_env.reward[0])
    self._memory = memory.EpisodeMemory(template, config.update_every, config.max_length, 'memory')
    self._memory_index = tf.Variable(0, False)
    use_gpu = self._config.use_gpu and utility.available_gpus()
    with tf.device('/gpu:0' if use_gpu else '/cpu:0'):
      # Create network variables for later calls to reuse.
      self._network(tf.zeros_like(self._batch_env.observ)[:, None],
                    tf.ones(len(self._batch_env)),
                    reuse=None)
      cell = self._config.network(self._batch_env.action.shape[1].value)
      # 'ppo_temporary' holds per-episode scratch state that is excluded from
      # checkpoints elsewhere in the pipeline -- TODO confirm against the saver.
      with tf.compat.v1.variable_scope('ppo_temporary'):
        self._episodes = memory.EpisodeMemory(template, len(batch_env), config.max_length,
                                              'episodes')
        self._last_state = utility.create_nested_vars(cell.zero_state(len(batch_env), tf.float32))
        self._last_action = tf.Variable(tf.zeros_like(self._batch_env.action),
                                        False,
                                        name='last_action')
        self._last_mean = tf.Variable(tf.zeros_like(self._batch_env.action),
                                      False,
                                      name='last_mean')
        self._last_logstd = tf.Variable(tf.zeros_like(self._batch_env.action),
                                        False,
                                        name='last_logstd')
    # Adaptive KL penalty coefficient; adjusted in _adjust_penalty().
    self._penalty = tf.Variable(self._config.kl_init_penalty, False, dtype=tf.float32)
    self._policy_optimizer = self._config.policy_optimizer(self._config.policy_lr,
                                                           name='policy_optimizer')
    self._value_optimizer = self._config.value_optimizer(self._config.value_lr,
                                                         name='value_optimizer')

  def begin_episode(self, agent_indices):
    """Reset the recurrent states and stored episode.

    Args:
      agent_indices: 1D tensor of batch indices for agents starting an episode.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('begin_episode/'):
      reset_state = utility.reinit_nested_vars(self._last_state, agent_indices)
      reset_buffer = self._episodes.clear(agent_indices)
      with tf.control_dependencies([reset_state, reset_buffer]):
        return tf.constant('')

  def perform(self, observ):
    """Compute batch of actions and a summary for a batch of observation.

    Args:
      observ: Tensor of a batch of observations for all agents.

    Returns:
      Tuple of action batch tensor and summary tensor.
    """
    with tf.name_scope('perform/'):
      observ = self._observ_filter.transform(observ)
      network = self._network(observ[:, None], tf.ones(observ.shape[0]), self._last_state)
      # Sample stochastically while training; act on the mean when evaluating.
      action = tf.cond(self._is_training, network.policy.sample, lambda: network.mean)
      logprob = network.policy.log_prob(action)[:, 0]
      # pylint: disable=g-long-lambda
      # `str` as the false branch yields an empty-string summary via str().
      summary = tf.cond(
          self._should_log, lambda: tf.compat.v1.summary.merge([
              tf.compat.v1.summary.histogram('mean', network.mean[:, 0]),
              tf.compat.v1.summary.histogram('std', tf.exp(network.logstd[:, 0])),
              tf.compat.v1.summary.histogram('action', action[:, 0]),
              tf.compat.v1.summary.histogram('logprob', logprob)
          ]), str)
      # Remember current policy to append to memory in the experience callback.
      with tf.control_dependencies([
          utility.assign_nested_vars(self._last_state, network.state),
          self._last_action.assign(action[:, 0]),
          self._last_mean.assign(network.mean[:, 0]),
          self._last_logstd.assign(network.logstd[:, 0])
      ]):
        return tf.debugging.check_numerics(action[:, 0], 'action'), tf.identity(summary)

  def experience(self, observ, action, reward, unused_done, unused_nextob):
    """Process the transition tuple of the current step.

    When training, add the current transition tuple to the memory and update
    the streaming statistics for observations and rewards. A summary string is
    returned if requested at this step.

    Args:
      observ: Batch tensor of observations.
      action: Batch tensor of actions.
      reward: Batch tensor of rewards.
      unused_done: Batch tensor of done flags.
      unused_nextob: Batch tensor of successor observations.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('experience/'):
      return tf.cond(self._is_training, lambda: self._define_experience(observ, action, reward),
                     str)

  def _define_experience(self, observ, action, reward):
    """Implement the branch of experience() entered during training."""
    update_filters = tf.compat.v1.summary.merge(
        [self._observ_filter.update(observ),
         self._reward_filter.update(reward)])
    with tf.control_dependencies([update_filters]):
      if self._config.train_on_agent_action:
        # NOTE: Doesn't seem to change much.
        action = self._last_action
      batch = observ, action, self._last_mean, self._last_logstd, reward
      append = self._episodes.append(batch, tf.range(len(self._batch_env)))
    with tf.control_dependencies([append]):
      norm_observ = self._observ_filter.transform(observ)
      norm_reward = tf.reduce_mean(self._reward_filter.transform(reward))
      # pylint: disable=g-long-lambda
      summary = tf.cond(
          self._should_log, lambda: tf.compat.v1.summary.merge([
              update_filters,
              self._observ_filter.summary(),
              self._reward_filter.summary(),
              tf.compat.v1.summary.scalar('memory_size', self._memory_index),
              tf.compat.v1.summary.histogram('normalized_observ', norm_observ),
              tf.compat.v1.summary.histogram('action', self._last_action),
              tf.compat.v1.summary.scalar('normalized_reward', norm_reward)
          ]), str)
      return summary

  def end_episode(self, agent_indices):
    """Add episodes to the memory and perform update steps if memory is full.

    During training, add the collected episodes of the batch indices that
    finished their episode to the memory. If the memory is full, train on it,
    and then clear the memory. A summary string is returned if requested at
    this step.

    Args:
      agent_indices: 1D tensor of batch indices for agents starting an episode.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('end_episode/'):
      return tf.cond(self._is_training, lambda: self._define_end_episode(agent_indices), str)

  def _define_end_episode(self, agent_indices):
    """Implement the branch of end_episode() entered during training."""
    episodes, length = self._episodes.data(agent_indices)
    # Only as many episodes as still fit into the training memory are kept;
    # the rest are dropped.
    space_left = self._config.update_every - self._memory_index
    use_episodes = tf.range(tf.minimum(tf.shape(agent_indices)[0], space_left))
    episodes = [tf.gather(elem, use_episodes) for elem in episodes]
    append = self._memory.replace(episodes, tf.gather(length, use_episodes),
                                  use_episodes + self._memory_index)
    with tf.control_dependencies([append]):
      inc_index = self._memory_index.assign_add(tf.shape(use_episodes)[0])
    with tf.control_dependencies([inc_index]):
      memory_full = self._memory_index >= self._config.update_every
      return tf.cond(memory_full, self._training, str)

  def _training(self):
    """Perform multiple training iterations of both policy and value baseline.

    Training on the episodes collected in the memory. Reset the memory
    afterwards. Always returns a summary string.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('training'):
      assert_full = tf.compat.v1.assert_equal(self._memory_index, self._config.update_every)
      with tf.control_dependencies([assert_full]):
        data = self._memory.data()
      (observ, action, old_mean, old_logstd, reward), length = data
      with tf.control_dependencies([tf.compat.v1.assert_greater(length, 0)]):
        length = tf.identity(length)
      observ = self._observ_filter.transform(observ)
      reward = self._reward_filter.transform(reward)
      # Control dependencies chain the phases: policy update, value update,
      # penalty adjustment, then memory reset.
      policy_summary = self._update_policy(observ, action, old_mean, old_logstd, reward, length)
      with tf.control_dependencies([policy_summary]):
        value_summary = self._update_value(observ, reward, length)
      with tf.control_dependencies([value_summary]):
        penalty_summary = self._adjust_penalty(observ, old_mean, old_logstd, length)
      with tf.control_dependencies([penalty_summary]):
        clear_memory = tf.group(self._memory.clear(), self._memory_index.assign(0))
      with tf.control_dependencies([clear_memory]):
        weight_summary = utility.variable_summaries(tf.compat.v1.trainable_variables(),
                                                    self._config.weight_summaries)
        return tf.compat.v1.summary.merge([policy_summary, value_summary, penalty_summary, weight_summary])

  def _update_value(self, observ, reward, length):
    """Perform multiple update steps of the value baseline.

    We need to decide for the summary of one iteration, and thus choose the one
    after half of the iterations.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('update_value'):
      # tf.scan runs the update step `update_epochs_value` times sequentially.
      loss, summary = tf.scan(lambda _1, _2: self._update_value_step(observ, reward, length),
                              tf.range(self._config.update_epochs_value), [0., ''],
                              parallel_iterations=1)
      print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'value loss: ')
      with tf.control_dependencies([loss, print_loss]):
        return summary[self._config.update_epochs_value // 2]

  def _update_value_step(self, observ, reward, length):
    """Compute the current value loss and perform a gradient update step.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    loss, summary = self._value_loss(observ, reward, length)
    gradients, variables = (zip(*self._value_optimizer.compute_gradients(loss)))
    optimize = self._value_optimizer.apply_gradients(zip(gradients, variables))
    summary = tf.compat.v1.summary.merge([
        summary,
        tf.compat.v1.summary.scalar('gradient_norm', tf.linalg.global_norm(gradients)),
        utility.gradient_summaries(zip(gradients, variables), dict(value=r'.*'))
    ])
    with tf.control_dependencies([optimize]):
      return [tf.identity(loss), tf.identity(summary)]

  def _value_loss(self, observ, reward, length):
    """Compute the loss function for the value baseline.

    The value loss is the difference between empirical and approximated returns
    over the collected episodes. Returns the loss tensor and a summary string.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('value_loss'):
      value = self._network(observ, length).value
      return_ = utility.discounted_return(reward, length, self._config.discount)
      advantage = return_ - value
      value_loss = 0.5 * self._mask(advantage ** 2, length)
      summary = tf.compat.v1.summary.merge([
          tf.compat.v1.summary.histogram('value_loss', value_loss),
          tf.compat.v1.summary.scalar('avg_value_loss', tf.reduce_mean(value_loss))
      ])
      value_loss = tf.reduce_mean(value_loss)
      return tf.debugging.check_numerics(value_loss, 'value_loss'), summary

  def _update_policy(self, observ, action, old_mean, old_logstd, reward, length):
    """Perform multiple update steps of the policy.

    The advantage is computed once at the beginning and shared across
    iterations. We need to decide for the summary of one iteration, and thus
    choose the one after half of the iterations.

    Args:
      observ: Sequences of observations.
      action: Sequences of actions.
      old_mean: Sequences of action means of the behavioral policy.
      old_logstd: Sequences of action log stddevs of the behavioral policy.
      reward: Sequences of rewards.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('update_policy'):
      return_ = utility.discounted_return(reward, length, self._config.discount)
      value = self._network(observ, length).value
      if self._config.gae_lambda:
        advantage = utility.lambda_return(reward, value, length, self._config.discount,
                                          self._config.gae_lambda)
      else:
        advantage = return_ - value
      # Normalize advantages to zero mean and unit variance across the batch.
      mean, variance = tf.nn.moments(advantage, axes=[0, 1], keep_dims=True)
      advantage = (advantage - mean) / (tf.sqrt(variance) + 1e-8)
      advantage = tf.Print(
          advantage, [tf.reduce_mean(return_), tf.reduce_mean(value)], 'return and value: ')
      advantage = tf.Print(advantage, [tf.reduce_mean(advantage)], 'normalized advantage: ')
      # pylint: disable=g-long-lambda
      loss, summary = tf.scan(lambda _1, _2: self._update_policy_step(
          observ, action, old_mean, old_logstd, advantage, length),
                              tf.range(self._config.update_epochs_policy), [0., ''],
                              parallel_iterations=1)
      print_loss = tf.Print(0, [tf.reduce_mean(loss)], 'policy loss: ')
      with tf.control_dependencies([loss, print_loss]):
        return summary[self._config.update_epochs_policy // 2]

  def _update_policy_step(self, observ, action, old_mean, old_logstd, advantage, length):
    """Compute the current policy loss and perform a gradient update step.

    Args:
      observ: Sequences of observations.
      action: Sequences of actions.
      old_mean: Sequences of action means of the behavioral policy.
      old_logstd: Sequences of action log stddevs of the behavioral policy.
      advantage: Sequences of advantages.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    network = self._network(observ, length)
    loss, summary = self._policy_loss(network.mean, network.logstd, old_mean, old_logstd, action,
                                      advantage, length)
    gradients, variables = (zip(*self._policy_optimizer.compute_gradients(loss)))
    optimize = self._policy_optimizer.apply_gradients(zip(gradients, variables))
    summary = tf.compat.v1.summary.merge([
        summary,
        tf.compat.v1.summary.scalar('gradient_norm', tf.linalg.global_norm(gradients)),
        utility.gradient_summaries(zip(gradients, variables), dict(policy=r'.*'))
    ])
    with tf.control_dependencies([optimize]):
      return [tf.identity(loss), tf.identity(summary)]

  def _policy_loss(self, mean, logstd, old_mean, old_logstd, action, advantage, length):
    """Compute the policy loss composed of multiple components.

    1. The policy gradient loss is importance sampled from the data-collecting
       policy at the beginning of training.
    2. The second term is a KL penalty between the policy at the beginning of
       training and the current policy.
    3. Additionally, if this KL already changed more than twice the target
       amount, we activate a strong penalty discouraging further divergence.

    Args:
      mean: Sequences of action means of the current policy.
      logstd: Sequences of action log stddevs of the current policy.
      old_mean: Sequences of action means of the behavioral policy.
      old_logstd: Sequences of action log stddevs of the behavioral policy.
      action: Sequences of actions.
      advantage: Sequences of advantages.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    with tf.name_scope('policy_loss'):
      entropy = utility.diag_normal_entropy(mean, logstd)
      kl = tf.reduce_mean(
          self._mask(utility.diag_normal_kl(old_mean, old_logstd, mean, logstd), length), 1)
      # Importance-sampling ratio of the current vs. behavioral policy.
      policy_gradient = tf.exp(
          utility.diag_normal_logpdf(mean, logstd, action) -
          utility.diag_normal_logpdf(old_mean, old_logstd, action))
      surrogate_loss = -tf.reduce_mean(
          self._mask(policy_gradient * tf.stop_gradient(advantage), length), 1)
      kl_penalty = self._penalty * kl
      cutoff_threshold = self._config.kl_target * self._config.kl_cutoff_factor
      cutoff_count = tf.reduce_sum(tf.cast(kl > cutoff_threshold, tf.int32))
      # `int` as the false branch yields 0 via int(); the cond only logs.
      with tf.control_dependencies(
          [tf.cond(cutoff_count > 0, lambda: tf.Print(0, [cutoff_count], 'kl cutoff! '), int)]):
        kl_cutoff = (self._config.kl_cutoff_coef * tf.cast(kl > cutoff_threshold, tf.float32) *
                     (kl - cutoff_threshold) ** 2)
      policy_loss = surrogate_loss + kl_penalty + kl_cutoff
      summary = tf.compat.v1.summary.merge([
          tf.compat.v1.summary.histogram('entropy', entropy),
          tf.compat.v1.summary.histogram('kl', kl),
          tf.compat.v1.summary.histogram('surrogate_loss', surrogate_loss),
          tf.compat.v1.summary.histogram('kl_penalty', kl_penalty),
          tf.compat.v1.summary.histogram('kl_cutoff', kl_cutoff),
          tf.compat.v1.summary.histogram('kl_penalty_combined', kl_penalty + kl_cutoff),
          tf.compat.v1.summary.histogram('policy_loss', policy_loss),
          tf.compat.v1.summary.scalar('avg_surr_loss', tf.reduce_mean(surrogate_loss)),
          tf.compat.v1.summary.scalar('avg_kl_penalty', tf.reduce_mean(kl_penalty)),
          tf.compat.v1.summary.scalar('avg_policy_loss', tf.reduce_mean(policy_loss))
      ])
      policy_loss = tf.reduce_mean(policy_loss, 0)
      return tf.debugging.check_numerics(policy_loss, 'policy_loss'), summary

  def _adjust_penalty(self, observ, old_mean, old_logstd, length):
    """Adjust the KL policy between the behavioral and current policy.

    Compute how much the policy actually changed during the multiple
    update steps. Adjust the penalty strength for the next training phase if we
    overshot or undershot the target divergence too much.

    Args:
      observ: Sequences of observations.
      old_mean: Sequences of action means of the behavioral policy.
      old_logstd: Sequences of action log stddevs of the behavioral policy.
      length: Batch of sequence lengths.

    Returns:
      Summary tensor.
    """
    with tf.name_scope('adjust_penalty'):
      network = self._network(observ, length)
      assert_change = tf.compat.v1.assert_equal(tf.reduce_all(tf.equal(network.mean, old_mean)),
                                                False,
                                                message='policy should change')
      print_penalty = tf.Print(0, [self._penalty], 'current penalty: ')
      with tf.control_dependencies([assert_change, print_penalty]):
        kl_change = tf.reduce_mean(
            self._mask(utility.diag_normal_kl(old_mean, old_logstd, network.mean, network.logstd),
                       length))
        kl_change = tf.Print(kl_change, [kl_change], 'kl change: ')
        # Multiplicative adaptation: grow the penalty if the KL overshot the
        # target by >30%, shrink it if it undershot by >30%.
        maybe_increase = tf.cond(
            kl_change > 1.3 * self._config.kl_target,
            # pylint: disable=g-long-lambda
            lambda: tf.Print(self._penalty.assign(self._penalty * 1.5), [0], 'increase penalty '),
            float)
        maybe_decrease = tf.cond(
            kl_change < 0.7 * self._config.kl_target,
            # pylint: disable=g-long-lambda
            lambda: tf.Print(self._penalty.assign(self._penalty / 1.5), [0], 'decrease penalty '),
            float)
      with tf.control_dependencies([maybe_increase, maybe_decrease]):
        return tf.compat.v1.summary.merge([
            tf.compat.v1.summary.scalar('kl_change', kl_change),
            tf.compat.v1.summary.scalar('penalty', self._penalty)
        ])

  def _mask(self, tensor, length):
    """Set padding elements of a batch of sequences to zero.

    Useful to then safely sum along the time dimension.

    Args:
      tensor: Tensor of sequences.
      length: Batch of sequence lengths.

    Returns:
      Masked sequences.
    """
    with tf.name_scope('mask'):
      range_ = tf.range(tensor.shape[1].value)
      mask = tf.cast(range_[None, :] < length[:, None], tf.float32)
      masked = tensor * mask
      return tf.debugging.check_numerics(masked, 'masked')

  def _network(self, observ, length=None, state=None, reuse=True):
    """Compute the network output for a batched sequence of observations.

    Optionally, the initial state can be specified. The weights should be
    reused for all calls, except for the first one. Output is a named tuple
    containing the policy as a TensorFlow distribution, the policy mean and log
    standard deviation, the approximated state value, and the new recurrent
    state.

    Args:
      observ: Sequences of observations.
      length: Batch of sequence lengths.
      state: Batch of initial recurrent states.
      reuse: Python boolean whether to reuse previous variables.

    Returns:
      NetworkOutput tuple.
    """
    with tf.compat.v1.variable_scope('network', reuse=reuse):
      observ = tf.convert_to_tensor(observ)
      use_gpu = self._config.use_gpu and utility.available_gpus()
      with tf.device('/gpu:0' if use_gpu else '/cpu:0'):
        observ = tf.debugging.check_numerics(observ, 'observ')
        cell = self._config.network(self._batch_env.action.shape[1].value)
        (mean, logstd, value), state = tf.nn.dynamic_rnn(cell,
                                                         observ,
                                                         length,
                                                         state,
                                                         tf.float32,
                                                         swap_memory=True)
      mean = tf.debugging.check_numerics(mean, 'mean')
      logstd = tf.debugging.check_numerics(logstd, 'logstd')
      value = tf.debugging.check_numerics(value, 'value')
      policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd))
      return _NetworkOutput(policy, mean, logstd, value, state)
"""Networks for the PPO algorithm defined as recurrent cells."""
import tensorflow as tf
# Variance-scaling initializer with a small factor for the action-mean output
# weights, so initial mean actions stay close to zero.
_MEAN_WEIGHTS_INITIALIZER = tf.contrib.layers.variance_scaling_initializer(factor=0.1)
# Initializer for the learned log standard deviation: values around -1 with
# near-zero noise (stddev 1e-10).
_LOGSTD_INITIALIZER = tf.random_normal_initializer(-1, 1e-10)
class LinearGaussianPolicy(tf.contrib.rnn.RNNCell):
  """Independent linear policy network with a tanh and a feed forward value network.

  The policy head outputs the mean action from a single fully connected layer
  with a tanh; the log standard deviation is learned as an independent
  parameter vector. Note the `policy_layers` argument is stored for interface
  compatibility with the other policies but is not used by the linear head.
  """

  def __init__(self,
               policy_layers,
               value_layers,
               action_size,
               mean_weights_initializer=_MEAN_WEIGHTS_INITIALIZER,
               logstd_initializer=_LOGSTD_INITIALIZER):
    self._policy_layers = policy_layers
    self._value_layers = value_layers
    self._action_size = action_size
    self._mean_weights_initializer = mean_weights_initializer
    self._logstd_initializer = logstd_initializer

  @property
  def state_size(self):
    # The cell is stateless; a dummy size satisfies the RNNCell interface.
    unused_state_size = 1
    return unused_state_size

  @property
  def output_size(self):
    # (action mean, action logstd, scalar value).
    return self._action_size, self._action_size, tf.TensorShape([])

  def __call__(self, observation, state):
    # Consistency fix: use tf.compat.v1 scoping like ForwardGaussianPolicy and
    # the rest of this codebase (tf.variable_scope/tf.get_variable are the
    # TF1-only spellings of the same functions).
    with tf.compat.v1.variable_scope('policy'):
      x = tf.contrib.layers.flatten(observation)
      mean = tf.contrib.layers.fully_connected(x,
                                               self._action_size,
                                               tf.tanh,
                                               weights_initializer=self._mean_weights_initializer)
      # Single learned logstd vector, tiled across the batch.
      logstd = tf.compat.v1.get_variable('logstd', mean.shape[1:], tf.float32,
                                         self._logstd_initializer)
      logstd = tf.tile(logstd[None, ...], [tf.shape(mean)[0]] + [1] * logstd.shape.ndims)
    with tf.compat.v1.variable_scope('value'):
      x = tf.contrib.layers.flatten(observation)
      for size in self._value_layers:
        x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
      value = tf.contrib.layers.fully_connected(x, 1, None)[:, 0]
    return (mean, logstd, value), state
class ForwardGaussianPolicy(tf.contrib.rnn.RNNCell):
    """Independent feed forward networks for policy and value.

    The policy network outputs the mean action; the log standard deviation is
    learned as an independent parameter vector.
    """

    def __init__(self,
                 policy_layers,
                 value_layers,
                 action_size,
                 mean_weights_initializer=_MEAN_WEIGHTS_INITIALIZER,
                 logstd_initializer=_LOGSTD_INITIALIZER):
        self._policy_layers = policy_layers
        self._value_layers = value_layers
        self._action_size = action_size
        self._mean_weights_initializer = mean_weights_initializer
        self._logstd_initializer = logstd_initializer

    @property
    def state_size(self):
        # Stateless cell; a dummy size of one satisfies the RNNCell interface.
        return 1

    @property
    def output_size(self):
        return self._action_size, self._action_size, tf.TensorShape([])

    def __call__(self, observation, state):
        with tf.compat.v1.variable_scope('policy'):
            hidden = tf.contrib.layers.flatten(observation)
            for layer_size in self._policy_layers:
                hidden = tf.contrib.layers.fully_connected(hidden, layer_size, tf.nn.relu)
            mean = tf.contrib.layers.fully_connected(
                hidden,
                self._action_size,
                tf.tanh,
                weights_initializer=self._mean_weights_initializer)
            logstd = tf.compat.v1.get_variable('logstd', mean.shape[1:], tf.float32,
                                               self._logstd_initializer)
            # Tile the shared logstd vector so every batch entry gets a copy.
            logstd = tf.tile(logstd[None, ...],
                             [tf.shape(mean)[0]] + [1] * logstd.shape.ndims)
        with tf.compat.v1.variable_scope('value'):
            hidden = tf.contrib.layers.flatten(observation)
            for layer_size in self._value_layers:
                hidden = tf.contrib.layers.fully_connected(hidden, layer_size, tf.nn.relu)
            value = tf.contrib.layers.fully_connected(hidden, 1, None)[:, 0]
        return (mean, logstd, value), state
class RecurrentGaussianPolicy(tf.contrib.rnn.RNNCell):
    """Independent recurrent policy and feed forward value networks.

    The policy network outputs the mean action and the log standard deviation
    is learned as independent parameter vector. The last policy layer is
    recurrent and uses a GRU cell.
    """

    def __init__(self,
                 policy_layers,
                 value_layers,
                 action_size,
                 mean_weights_initializer=_MEAN_WEIGHTS_INITIALIZER,
                 logstd_initializer=_LOGSTD_INITIALIZER):
        self._policy_layers = policy_layers
        self._value_layers = value_layers
        self._action_size = action_size
        self._mean_weights_initializer = mean_weights_initializer
        self._logstd_initializer = logstd_initializer
        # Fixed-size GRU used as the final (recurrent) policy layer.
        self._cell = tf.contrib.rnn.GRUBlockCell(100)

    @property
    def state_size(self):
        return self._cell.state_size

    @property
    def output_size(self):
        return self._action_size, self._action_size, tf.TensorShape([])

    def __call__(self, observation, state):
        # Use tf.compat.v1 variable APIs consistently with the sibling classes.
        with tf.compat.v1.variable_scope('policy'):
            x = tf.contrib.layers.flatten(observation)
            # All but the last configured layer are plain feed forward layers.
            for size in self._policy_layers[:-1]:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            x, state = self._cell(x, state)
            mean = tf.contrib.layers.fully_connected(
                x,
                self._action_size,
                tf.tanh,
                weights_initializer=self._mean_weights_initializer)
            logstd = tf.compat.v1.get_variable('logstd', mean.shape[1:], tf.float32,
                                               self._logstd_initializer)
            logstd = tf.tile(logstd[None, ...],
                             [tf.shape(mean)[0]] + [1] * logstd.shape.ndims)
        with tf.compat.v1.variable_scope('value'):
            x = tf.contrib.layers.flatten(observation)
            for size in self._value_layers:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            value = tf.contrib.layers.fully_connected(x, 1, None)[:, 0]
        return (mean, logstd, value), state
"""Example configurations using the PPO algorithm."""
from ..ppo.algorithm import PPOAlgorithm
from ..scripts import networks
def default():
    """Default configuration for PPO.

    Returns:
      Dict of hyperparameter names to values; task configs override entries.
    """
    return dict(
        # General
        algorithm=PPOAlgorithm,
        num_agents=25,
        eval_episodes=25,
        use_gpu=False,
        # Network
        network=networks.ForwardGaussianPolicy,
        weight_summaries=dict(all=r'.*', policy=r'.*/policy/.*', value=r'.*/value/.*'),
        policy_layers=(200, 100),
        value_layers=(200, 100),
        init_mean_factor=0.05,
        init_logstd=-1,
        # Optimization
        update_every=25,
        policy_optimizer='AdamOptimizer',
        value_optimizer='AdamOptimizer',
        update_epochs_policy=50,
        update_epochs_value=50,
        policy_lr=1e-4,
        value_lr=3e-4,
        # Losses
        discount=0.985,
        kl_target=1e-2,
        kl_cutoff_factor=2,
        kl_cutoff_coef=1000,
        kl_init_penalty=1,
    )
def gallop_ik():
    """Configuration for Rex gallop task based on inverse kinematics controller."""
    # Build on the defaults explicitly; the original `locals().update(...)`
    # relied on CPython frame-dict behavior and breaks under PEP 667 (Py 3.13+).
    config = default()
    # Environment
    config.update(env='RexGalloping-v0', max_length=2000, steps=1e6)  # 1M steps
    return config
def gallop_ol():
    """Configuration for Rex gallop task based on open loop controller."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexGalloping-v0', max_length=2000, steps=2e6)  # 2M steps
    return config
def walk_ol():
    """Configuration for Rex walk task based on open loop controller."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexWalk-v0', max_length=2000, steps=2e6)  # 2M steps
    return config
def walk_ik():
    """Configuration for Rex walk task based on inverse kinematics controller."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexWalk-v0', max_length=2000, steps=1e6)  # 1M steps
    return config
def turn_ol():
    """Configuration for Rex turn task (open loop controller)."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexTurn-v0', max_length=1000, steps=1e6)  # 1M steps
    return config
def turn_ik():
    """Configuration for Rex turn task (inverse kinematics controller)."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexTurn-v0', max_length=1000, steps=1e6)  # 1M steps
    return config
def standup_ol():
    """Configuration for Rex stand up task."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexStandup-v0', max_length=500, steps=1e6)  # 1M steps
    return config
def go():
    """Configuration for Rex go-to task."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexGo-v0', max_length=1000, steps=5e6)  # 5M steps
    return config
def poses_ik():
    """Configuration for Rex reach-a-pose task."""
    # Explicit dict update instead of the fragile `locals().update(...)` hack
    # (broken by PEP 667 locals() semantics in Python 3.13+).
    config = default()
    # Environment
    config.update(env='RexPoses-v0', max_length=1000, steps=1e6)  # 1M steps
    return config
"""Utilities for using reinforcement learning algorithms."""
import logging
import os
import re
import warnings
import ruamel.yaml as yaml
import tensorflow as tf
from rex_gym.agents.tools import wrappers
from rex_gym.agents.tools.attr_dict import AttrDict
from rex_gym.agents.tools.batch_env import BatchEnv
from rex_gym.agents.tools.count_weights import count_weights
from rex_gym.agents.tools.in_graph_batch_env import InGraphBatchEnv
from rex_gym.agents.tools.simulate import simulate
warnings.simplefilter('ignore', yaml.error.UnsafeLoaderWarning)
warnings.simplefilter('ignore', yaml.error.ReusedAnchorWarning)
def define_simulation_graph(batch_env, algo_cls, config):
    """Define the algorithm and environment interaction.

    Args:
      batch_env: In-graph environments object.
      algo_cls: Constructor of a batch algorithm.
      config: Configuration object for the algorithm.

    Returns:
      Object providing graph elements via attributes.
    """
    # NOTE: local variable names are part of the public result — they become
    # the attribute names of the returned AttrDict(locals()).
    step = tf.Variable(0, False, dtype=tf.int32, name='global_step')
    is_training = tf.compat.v1.placeholder(tf.bool, name='is_training')
    should_log = tf.compat.v1.placeholder(tf.bool, name='should_log')
    do_report = tf.compat.v1.placeholder(tf.bool, name='do_report')
    force_reset = tf.compat.v1.placeholder(tf.bool, name='force_reset')
    algo = algo_cls(batch_env, step, is_training, should_log, config)
    done, score, summary = simulate(batch_env, algo, should_log, force_reset)
    message = 'Graph contains {} trainable variables.'
    tf.compat.v1.logging.info(message.format(count_weights()))
    return AttrDict(locals())
def define_batch_env(constructor, num_agents, env_processes):
    """Create environments and apply all desired wrappers.

    Args:
      constructor: Constructor of an OpenAI gym environment.
      num_agents: Number of environments to combine in the batch.
      env_processes: Whether to step environments in external processes.

    Returns:
      In-graph environments object.
    """
    with tf.compat.v1.variable_scope('environments'):
        if env_processes:
            envs = [wrappers.ExternalProcess(constructor) for _ in range(num_agents)]
        else:
            envs = [constructor() for _ in range(num_agents)]
        # External processes are stepped asynchronously; in-process envs block.
        return InGraphBatchEnv(BatchEnv(envs, blocking=not env_processes))
def define_saver(exclude=None):
    """Create a saver for the variables we want to checkpoint.

    Args:
      exclude: List of regexes to match variable names to exclude.

    Returns:
      Saver object.
    """
    patterns = [re.compile(regex) for regex in (exclude or [])]
    keep = [
        variable for variable in tf.compat.v1.global_variables()
        if not any(pattern.match(variable.name) for pattern in patterns)
    ]
    return tf.compat.v1.train.Saver(keep, keep_checkpoint_every_n_hours=5)
def define_network(constructor, config, action_size):
    """Constructor for the recurrent cell for the algorithm.

    Args:
      constructor: Callable returning the network as RNNCell.
      config: Object providing configurations via attributes.
      action_size: Integer indicating the amount of action dimensions.

    Returns:
      Created recurrent cell object.
    """
    mean_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=config.init_mean_factor)
    logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10)
    return constructor(
        config.policy_layers,
        config.value_layers,
        action_size,
        mean_weights_initializer=mean_initializer,
        logstd_initializer=logstd_initializer)
def initialize_variables(sess, saver, logdir, checkpoint=None, resume=None):
    """Initialize or restore variables from a checkpoint if available.

    Args:
      sess: Session to initialize variables in.
      saver: Saver to restore variables.
      logdir: Directory to search for checkpoints.
      checkpoint: Specify what checkpoint name to use; defaults to most recent.
      resume: Whether to expect recovering a checkpoint or starting a new run.

    Raises:
      ValueError: If resume expected but no log directory specified.
      RuntimeError: If no resume expected but a checkpoint was found.
    """
    sess.run(tf.group(tf.compat.v1.local_variables_initializer(),
                      tf.compat.v1.global_variables_initializer()))
    if resume and not (logdir or checkpoint):
        raise ValueError('Need to specify logdir to resume a checkpoint.')
    if not logdir:
        return
    state = tf.train.get_checkpoint_state(logdir)
    if checkpoint:
        # An explicit checkpoint name is resolved relative to the log directory.
        checkpoint = os.path.join(logdir, checkpoint)
    if not checkpoint and state and state.model_checkpoint_path:
        checkpoint = state.model_checkpoint_path
    if checkpoint and resume is False:
        raise RuntimeError('Found unexpected checkpoint when starting a new run.')
    if checkpoint:
        saver.restore(sess, checkpoint)
def save_config(config, logdir=None):
    """Save a new configuration by name.

    If a logging directory is specified, it will be created and the
    configuration will be stored there. Otherwise, a log message will be
    printed.

    Args:
      config: Configuration object.
      logdir: Location for writing summaries and checkpoints if specified.

    Returns:
      Configuration object.
    """
    if logdir:
        with config.unlocked:
            config.logdir = logdir
        message = 'Start a new run and write summaries and checkpoints to {}.'
        tf.compat.v1.logging.info(message.format(config.logdir))
        tf.io.gfile.makedirs(config.logdir)
        config_path = os.path.join(config.logdir, 'config.yaml')
        with tf.io.gfile.GFile(config_path, 'w') as file_:
            yaml.dump(config, file_, default_flow_style=False)
    else:
        message = ('Start a new run without storing summaries and checkpoints since no '
                   'logging directory was specified.')
        # Fixed: the original called `tf.logging.info`, which was removed in
        # TF 2; use the compat alias like the rest of this module.
        tf.compat.v1.logging.info(message)
    return config
def load_config(logdir):
    """Load a configuration from the log directory.

    Args:
      logdir: The logging directory containing the configuration file.

    Raises:
      IOError: The logging directory does not contain a configuration file.

    Returns:
      Configuration object.
    """
    config_path = logdir and os.path.join(logdir, 'config.yaml')
    if not config_path or not tf.io.gfile.exists(config_path):
        raise IOError('Cannot resume an existing run since the logging directory '
                      'does not contain a configuration file.')
    with tf.io.gfile.GFile(config_path, 'r') as file_:
        # The unsafe-loader warning is suppressed at module import time; the
        # config stores Python objects and needs the default loader.
        config = yaml.load(file_)
    message = 'Resume run and write summaries and checkpoints to {}.'
    tf.compat.v1.logging.info(message.format(config.logdir))
    return config
def set_up_logging():
    """Configure the TensorFlow logger."""
    # Fixed: stripped dataset-extraction residue that was fused onto the last
    # line of this function.
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
    # Stop TF log records from propagating to the root logger (avoids
    # duplicated console output).
    logging.getLogger('tensorflow').propagate = False
"""Batch of environments inside the TensorFlow graph."""
import gym
import tensorflow as tf
class InGraphBatchEnv(object):
    """Batch of environments inside the TensorFlow graph.

    The batch of environments will be stepped and reset inside of the graph
    using a tf.numpy_function(). The current batch of observations, actions,
    rewards, and done flags are held in according variables.
    """

    def __init__(self, batch_env):
        """Batch of environments inside the TensorFlow graph.

        Args:
          batch_env: Batch environment.
        """
        self._batch_env = batch_env
        observ_shape = self._parse_shape(self._batch_env.observation_space)
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        action_shape = self._parse_shape(self._batch_env.action_space)
        action_dtype = self._parse_dtype(self._batch_env.action_space)
        # Non-trainable variables cache the most recent transition so that
        # other graph ops can read it without stepping the environments again.
        with tf.compat.v1.variable_scope('env_temporary'):
            self._observ = tf.Variable(
                tf.zeros((len(self._batch_env),) + observ_shape, observ_dtype),
                name='observ',
                trainable=False)
            self._action = tf.Variable(
                tf.zeros((len(self._batch_env),) + action_shape, action_dtype),
                name='action',
                trainable=False)
            self._reward = tf.Variable(
                tf.zeros((len(self._batch_env),), tf.float32),
                name='reward',
                trainable=False)
            # Episodes start as done so the first reset covers every index.
            self._done = tf.Variable(
                tf.cast(tf.ones((len(self._batch_env),)), tf.bool),
                name='done',
                trainable=False)

    def __getattr__(self, name):
        """Forward unimplemented attributes to one of the original environments.

        Args:
          name: Attribute that was accessed.

        Returns:
          Value behind the attribute name in one of the original environments.
        """
        return getattr(self._batch_env, name)

    def __len__(self):
        """Number of combined environments."""
        return len(self._batch_env)

    def __getitem__(self, index):
        """Access an underlying environment by index."""
        return self._batch_env[index]

    def simulate(self, action):
        """Step the batch of environments.

        The results of the step can be accessed from the variables defined below.

        Args:
          action: Tensor holding the batch of actions to apply.

        Returns:
          Operation.
        """
        with tf.name_scope('environment/simulate'):
            if action.dtype in (tf.float16, tf.float32, tf.float64):
                action = tf.debugging.check_numerics(action, 'action')
            observ_dtype = self._parse_dtype(self._batch_env.observation_space)
            # Only observation, reward, and done are taken; info dicts cannot
            # cross the numpy_function boundary.
            observ, reward, done = tf.numpy_function(
                lambda a: self._batch_env.step(a)[:3], [action],
                [observ_dtype, tf.float32, tf.bool],
                name='step')
            observ = tf.debugging.check_numerics(observ, 'observ')
            reward = tf.debugging.check_numerics(reward, 'reward')
            return tf.group(self._observ.assign(observ), self._action.assign(action),
                            self._reward.assign(reward), self._done.assign(done))

    def reset(self, indices=None):
        """Reset the batch of environments.

        Args:
          indices: The batch indices of the environments to reset; defaults to all.

        Returns:
          Batch tensor of the new observations.
        """
        if indices is None:
            indices = tf.range(len(self._batch_env))
        observ_dtype = self._parse_dtype(self._batch_env.observation_space)
        observ = tf.numpy_function(self._batch_env.reset, [indices], observ_dtype,
                                   name='reset')
        observ = tf.debugging.check_numerics(observ, 'observ')
        reward = tf.zeros_like(indices, tf.float32)
        done = tf.zeros_like(indices, tf.bool)
        with tf.control_dependencies([
                tf.compat.v1.scatter_update(self._observ, indices, observ),
                tf.compat.v1.scatter_update(self._reward, indices, reward),
                tf.compat.v1.scatter_update(self._done, indices, done)
        ]):
            return tf.identity(observ)

    @property
    def observ(self):
        """Access the variable holding the current observation."""
        return self._observ

    @property
    def action(self):
        """Access the variable holding the last received action."""
        return self._action

    @property
    def reward(self):
        """Access the variable holding the current reward."""
        return self._reward

    @property
    def done(self):
        """Access the variable indicating whether the episode is done."""
        return self._done

    def close(self):
        """Send close messages to the external process and join them."""
        self._batch_env.close()

    def _parse_shape(self, space):
        """Get a tensor shape from a OpenAI Gym space.

        Args:
          space: Gym space.

        Returns:
          Shape tuple.
        """
        if isinstance(space, gym.spaces.Discrete):
            return ()
        if isinstance(space, gym.spaces.Box):
            return space.shape
        raise NotImplementedError()

    def _parse_dtype(self, space):
        """Get a tensor dtype from a OpenAI Gym space.

        Args:
          space: Gym space.

        Returns:
          TensorFlow data type.
        """
        if isinstance(space, gym.spaces.Discrete):
            return tf.int32
        if isinstance(space, gym.spaces.Box):
            return tf.float32
        raise NotImplementedError()
"""Combine multiple environments to step them in batch."""
import numpy as np
class BatchEnv(object):
    """Combine multiple environments to step them in batch."""

    def __init__(self, envs, blocking):
        """Combine multiple environments to step them in batch.

        To step environments in parallel, environments must support a
        `blocking=False` argument to their step and reset functions that makes
        them return callables instead to receive the result at a later time.

        Args:
          envs: List of environments.
          blocking: Step environments after another rather than in parallel.

        Raises:
          ValueError: Environments have different observation or action spaces.
        """
        self._envs = envs
        self._blocking = blocking
        observ_space = self._envs[0].observation_space
        if not all(env.observation_space == observ_space for env in self._envs):
            raise ValueError('All environments must use the same observation space.')
        action_space = self._envs[0].action_space
        if not all(env.action_space == action_space for env in self._envs):
            # Fixed copy-paste bug: this message previously said
            # 'observation space'.
            raise ValueError('All environments must use the same action space.')

    def __len__(self):
        """Number of combined environments."""
        return len(self._envs)

    def __getitem__(self, index):
        """Access an underlying environment by index."""
        return self._envs[index]

    def __getattr__(self, name):
        """Forward unimplemented attributes to one of the original environments.

        Args:
          name: Attribute that was accessed.

        Returns:
          Value behind the attribute name one of the wrapped environments.
        """
        return getattr(self._envs[0], name)

    def step(self, action):
        """Forward a batch of actions to the wrapped environments.

        Args:
          action: Batched action to apply to the environment.

        Raises:
          ValueError: Invalid actions.

        Returns:
          Batch of observations, rewards, and done flags.
        """
        actions = action
        for index, (env, action) in enumerate(zip(self._envs, actions)):
            if not env.action_space.contains(action):
                message = 'Invalid action at index {}: {}'
                raise ValueError(message.format(index, action))
        if self._blocking:
            transitions = [env.step(action) for env, action in zip(self._envs, actions)]
        else:
            # Non-blocking envs return callables; collect them all before
            # resolving so the environments run concurrently.
            transitions = [
                env.step(action, blocking=False) for env, action in zip(self._envs, actions)
            ]
            transitions = [transition() for transition in transitions]
        observs, rewards, dones, infos = zip(*transitions)
        observ = np.stack(observs)
        reward = np.stack(rewards)
        done = np.stack(dones)
        info = tuple(infos)
        return observ, reward, done, info

    def reset(self, indices=None):
        """Reset the environment and convert the resulting observation.

        Args:
          indices: The batch indices of environments to reset; defaults to all.

        Returns:
          Batch of observations.
        """
        if indices is None:
            indices = np.arange(len(self._envs))
        if self._blocking:
            observs = [self._envs[index].reset() for index in indices]
        else:
            observs = [self._envs[index].reset(blocking=False) for index in indices]
            observs = [observ() for observ in observs]
        return np.stack(observs)

    def close(self):
        """Send close messages to the external process and join them."""
        for env in self._envs:
            if hasattr(env, 'close'):
                env.close()
"""Mock environment for testing reinforcement learning code."""
import gym
import gym.spaces
import numpy as np
class MockEnvironment(object):
    """Generate random agent input and keep track of statistics."""

    def __init__(self, observ_shape, action_shape, min_duration, max_duration):
        """Generate random agent input and keep track of statistics.

        Args:
          observ_shape: Shape for the random observations.
          action_shape: Shape for the action space.
          min_duration: Minimum number of steps per episode.
          max_duration: Maximum number of steps per episode.

        Attributes:
          steps: List of actual simulated lengths for all episodes.
          durations: List of decided lengths for all episodes.
        """
        self._observ_shape = observ_shape
        self._action_shape = action_shape
        self._min_duration = min_duration
        self._max_duration = max_duration
        # Fixed seed keeps the mock deterministic across test runs.
        self._random = np.random.RandomState(0)
        self.steps = []
        self.durations = []

    @property
    def observation_space(self):
        low = np.zeros(self._observ_shape)
        high = np.ones(self._observ_shape)
        return gym.spaces.Box(low, high)

    @property
    def action_space(self):
        low = np.zeros(self._action_shape)
        high = np.ones(self._action_shape)
        return gym.spaces.Box(low, high)

    @property
    def unwrapped(self):
        return self

    def step(self, action):
        assert self.action_space.contains(action)
        assert self.steps[-1] < self.durations[-1]
        self.steps[-1] += 1
        observ = self._current_observation()
        reward = self._current_reward()
        done = self.steps[-1] >= self.durations[-1]
        info = {}
        return observ, reward, done, info

    def reset(self):
        # Pick this episode's length up front; `step` counts toward it.
        duration = self._random.randint(self._min_duration, self._max_duration + 1)
        self.steps.append(0)
        self.durations.append(duration)
        return self._current_observation()

    def _current_observation(self):
        return self._random.uniform(0, 1, self._observ_shape)

    def _current_reward(self):
        return self._random.uniform(-1, 1)
"""Execute operations in a loop and coordinate logging and checkpoints."""
import collections
import os
import tensorflow as tf
from . import streaming_mean
# Record describing one scheduled phase of the training loop (e.g. train or
# eval): its summary writer, the op to run, batch size, duration in steps, an
# extra feed dict, and the report/log/checkpoint intervals.
_Phase = collections.namedtuple(
    'Phase', 'name, writer, op, batch, steps, feed, report_every, log_every,'
    'checkpoint_every')
class Loop(object):
    """Execute operations in a loop and coordinate logging and checkpoints.

    Supports multiple phases, that define their own operations to run, and
    intervals for reporting scores, logging summaries, and storing checkpoints.
    All class state is stored in-graph to properly recover from checkpoints.
    """

    def __init__(self, logdir, step=None, log=None, report=None, reset=None):
        """Execute operations in a loop and coordinate logging and checkpoints.

        The step, log, report, and reset arguments will get created if not
        provided. Reset is used to indicate switching to a new phase, so that
        the model can start a new computation in case its computation is split
        over multiple training steps.

        Args:
          logdir: Will contain checkpoints and summaries for each phase.
          step: Variable of the global step (optional).
          log: Tensor indicating to the model to compute summary tensors.
          report: Tensor indicating to the loop to report the current mean score.
          reset: Tensor indicating to the model to start a new computation.
        """
        self._logdir = logdir
        self._step = (tf.Variable(0, False, name='global_step') if step is None else step)
        # Fixed: `tf.placeholder` was removed in TF 2; use the compat alias
        # consistently with the rest of this class.
        self._log = tf.compat.v1.placeholder(tf.bool) if log is None else log
        self._report = tf.compat.v1.placeholder(tf.bool) if report is None else report
        self._reset = tf.compat.v1.placeholder(tf.bool) if reset is None else reset
        self._phases = []

    def add_phase(self,
                  name,
                  done,
                  score,
                  summary,
                  steps,
                  report_every=None,
                  log_every=None,
                  checkpoint_every=None,
                  feed=None):
        """Add a phase to the loop protocol.

        If the model breaks long computation into multiple steps, the done
        tensor indicates whether the current score should be added to the mean
        counter. For example, in reinforcement learning we only have a valid
        score at the end of the episode.

        Score and done tensors can either be scalars or vectors, to support
        single and batched computations.

        Args:
          name: Name for the phase, used for the summary writer.
          done: Tensor indicating whether current score can be used.
          score: Tensor holding the current, possibly intermediate, score.
          summary: Tensor holding summary string to write if not an empty string.
          steps: Duration of the phase in steps.
          report_every: Yield mean score every this number of steps.
          log_every: Request summaries via `log` tensor every this number of steps.
          checkpoint_every: Write checkpoint every this number of steps.
          feed: Additional feed dictionary for the session run call.

        Raises:
          ValueError: Unknown rank for done or score tensors.
        """
        done = tf.convert_to_tensor(done, tf.bool)
        score = tf.convert_to_tensor(score, tf.float32)
        summary = tf.convert_to_tensor(summary, tf.string)
        feed = feed or {}
        if done.shape.ndims is None or score.shape.ndims is None:
            raise ValueError("Rank of 'done' and 'score' tensors must be known.")
        writer = self._logdir and tf.compat.v1.summary.FileWriter(
            os.path.join(self._logdir, name), tf.compat.v1.get_default_graph(),
            flush_secs=60)
        op = self._define_step(done, score, summary)
        # Fixed: `score.shape[0].value` only works on TF1 Dimension objects;
        # `as_list()` yields the same static size (or None) on TF1 and TF2.
        batch = 1 if score.shape.ndims == 0 else score.shape.as_list()[0]
        self._phases.append(
            _Phase(name, writer, op, batch, int(steps), feed, report_every, log_every,
                   checkpoint_every))

    def run(self, sess, saver, max_step=None):
        """Run the loop schedule for a specified number of steps.

        Call the operation of the current phase until the global step reaches
        the specified maximum step. Phases are repeated over and over in the
        order they were added.

        Args:
          sess: Session to use to run the phase operation.
          saver: Saver used for checkpointing.
          max_step: Run the operations until the step reaches this limit.

        Yields:
          Reported mean scores.
        """
        global_step = sess.run(self._step)
        steps_made = 1
        while True:
            if max_step and global_step >= max_step:
                break
            phase, epoch, steps_in = self._find_current_phase(global_step)
            phase_step = epoch * phase.steps + steps_in
            if steps_in % phase.steps < steps_made:
                message = '\n' + ('-' * 50) + '\n'
                message += 'Phase {} (phase step {}, global step {}).'
                tf.compat.v1.logging.info(message.format(phase.name, phase_step, global_step))
            # Populate book keeping tensors.
            phase.feed[self._reset] = (steps_in < steps_made)
            phase.feed[self._log] = (phase.writer and
                                     self._is_every_steps(phase_step, phase.batch,
                                                          phase.log_every))
            phase.feed[self._report] = (self._is_every_steps(phase_step, phase.batch,
                                                             phase.report_every))
            summary, mean_score, global_step, steps_made = sess.run(phase.op, phase.feed)
            if self._is_every_steps(phase_step, phase.batch, phase.checkpoint_every):
                self._store_checkpoint(sess, saver, global_step)
            if self._is_every_steps(phase_step, phase.batch, phase.report_every):
                yield mean_score
            if summary and phase.writer:
                # We want smaller phases to catch up at the beginning of each
                # epoch so that their graphs are aligned.
                longest_phase = max(phase.steps for phase in self._phases)
                summary_step = epoch * longest_phase + steps_in
                phase.writer.add_summary(summary, summary_step)

    @staticmethod
    def _is_every_steps(phase_step, batch, every):
        """Determine whether a periodic event should happen at this step.

        Args:
          phase_step: The incrementing step.
          batch: The number of steps progressed at once.
          every: The interval of the period.

        Returns:
          Boolean of whether the event should happen.
        """
        if not every:
            return False
        covered_steps = range(phase_step, phase_step + batch)
        return any((step + 1) % every == 0 for step in covered_steps)

    def _find_current_phase(self, global_step):
        """Determine the current phase based on the global step.

        This ensures continuing the correct phase after restoring checkpoints.

        Args:
          global_step: The global number of steps performed across all phases.

        Returns:
          Tuple of phase object, epoch number, and phase steps within the epoch.
        """
        epoch_size = sum(phase.steps for phase in self._phases)
        epoch = int(global_step // epoch_size)
        steps_in = global_step % epoch_size
        for phase in self._phases:
            if steps_in < phase.steps:
                return phase, epoch, steps_in
            steps_in -= phase.steps

    def _define_step(self, done, score, summary):
        """Combine operations of a phase.

        Keeps track of the mean score and when to report it.

        Args:
          done: Tensor indicating whether current score can be used.
          score: Tensor holding the current, possibly intermediate, score.
          summary: Tensor holding summary string to write if not an empty string.

        Returns:
          Tuple of summary tensor, mean score, and new global step. The mean
          score is zero for non reporting steps.
        """
        if done.shape.ndims == 0:
            done = done[None]
        if score.shape.ndims == 0:
            score = score[None]
        score_mean = streaming_mean.StreamingMean((), tf.float32)
        with tf.control_dependencies([done, score, summary]):
            done_score = tf.gather(score, tf.where(done)[:, 0])
            submit_score = tf.cond(tf.reduce_any(done),
                                   lambda: score_mean.submit(done_score), tf.no_op)
        with tf.control_dependencies([submit_score]):
            mean_score = tf.cond(self._report, score_mean.clear, float)
            steps_made = tf.shape(score)[0]
            next_step = self._step.assign_add(steps_made)
        with tf.control_dependencies([mean_score, next_step]):
            return tf.identity(summary), mean_score, next_step, steps_made

    def _store_checkpoint(self, sess, saver, global_step):
        """Store a checkpoint if a log directory was provided to the constructor.

        The directory will be created if needed.

        Args:
          sess: Session containing variables to store.
          saver: Saver used for checkpointing.
          global_step: Step number of the checkpoint name.
        """
        if not self._logdir or not saver:
            return
        tf.io.gfile.makedirs(self._logdir)
        filename = os.path.join(self._logdir, 'model.ckpt')
        saver.save(sess, filename, global_step)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.