# NOTE: dataset-extraction metadata header removed (table markup, not source code).
import torch.nn as nn
import torch.nn.functional as F
import torch
from onmt.encoders.encoder import EncoderBase
class ImageEncoder(EncoderBase):
    """A simple encoder CNN -> RNN for image src.

    A VGG-style convolutional stack reduces the image to a feature map;
    each row of that map is then run through an LSTM, with a learned
    row-position embedding prepended to the row sequence.

    Args:
        num_layers (int): number of encoder layers.
        bidirectional (bool): bidirectional encoder.
        rnn_size (int): size of hidden states of the rnn.
        dropout (float): dropout probablity.
        image_chanel_size (int): number of input image channels.
            (Name keeps the historical misspelling for backward
            compatibility with existing callers.)
    """

    def __init__(self, num_layers, bidirectional, rnn_size, dropout,
                 image_chanel_size=3):
        super(ImageEncoder, self).__init__()
        self.num_layers = num_layers
        self.num_directions = 2 if bidirectional else 1
        self.hidden_size = rnn_size

        # Convolutional stack: channels 3 -> 64 -> 128 -> 256 -> 256
        # -> 512 -> 512, all 3x3 kernels, padding 1, stride 1.
        self.layer1 = nn.Conv2d(image_chanel_size, 64, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer2 = nn.Conv2d(64, 128, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer3 = nn.Conv2d(128, 256, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer4 = nn.Conv2d(256, 256, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer5 = nn.Conv2d(256, 512, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))
        self.layer6 = nn.Conv2d(512, 512, kernel_size=(3, 3),
                                padding=(1, 1), stride=(1, 1))

        self.batch_norm1 = nn.BatchNorm2d(256)
        self.batch_norm2 = nn.BatchNorm2d(512)
        self.batch_norm3 = nn.BatchNorm2d(512)

        src_size = 512
        dropout = dropout[0] if type(dropout) is list else dropout
        # Per-direction hidden size so that the concatenated output
        # of a bidirectional LSTM matches rnn_size.
        self.rnn = nn.LSTM(src_size, int(rnn_size / self.num_directions),
                           num_layers=num_layers,
                           dropout=dropout,
                           bidirectional=bidirectional)
        # One learned embedding per image row (supports up to 1000 rows).
        self.pos_lut = nn.Embedding(1000, src_size)

    @classmethod
    def from_opt(cls, opt, embeddings=None):
        """Alternate constructor."""
        if embeddings is not None:
            raise ValueError("Cannot use embeddings with ImageEncoder.")
        # why is the model_opt.__dict__ check necessary?
        if "image_channel_size" not in opt.__dict__:
            image_channel_size = 3
        else:
            image_channel_size = opt.image_channel_size
        return cls(
            opt.enc_layers,
            opt.brnn,
            opt.enc_rnn_size,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            image_channel_size
        )

    def load_pretrained_vectors(self, opt):
        """Pass in needed options only when modify function definition."""
        pass

    def forward(self, src, lengths=None):
        """See :func:`onmt.encoders.encoder.EncoderBase.forward()`"""
        batch_size = src.size(0)
        # layer 1: center pixel values around 0 before the first conv.
        # (batch_size, 64, imgH, imgW)
        src = F.relu(self.layer1(src - 0.5), True)
        # (batch_size, 64, imgH/2, imgW/2)
        src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
        # layer 2
        # (batch_size, 128, imgH/2, imgW/2)
        src = F.relu(self.layer2(src), True)
        # (batch_size, 128, imgH/2/2, imgW/2/2)
        src = F.max_pool2d(src, kernel_size=(2, 2), stride=(2, 2))
        # layer 3 + batch norm 1
        # (batch_size, 256, imgH/2/2, imgW/2/2)
        src = F.relu(self.batch_norm1(self.layer3(src)), True)
        # layer 4
        # (batch_size, 256, imgH/2/2, imgW/2/2)
        src = F.relu(self.layer4(src), True)
        # asymmetric pooling: halve width only
        # (batch_size, 256, imgH/2/2, imgW/2/2/2)
        src = F.max_pool2d(src, kernel_size=(1, 2), stride=(1, 2))
        # layer 5 + batch norm 2
        # (batch_size, 512, imgH/2/2, imgW/2/2/2)
        src = F.relu(self.batch_norm2(self.layer5(src)), True)
        # asymmetric pooling: halve height only
        # (batch_size, 512, imgH/2/2/2, imgW/2/2/2)
        src = F.max_pool2d(src, kernel_size=(2, 1), stride=(2, 1))
        # layer 6 + batch norm 3
        # (batch_size, 512, H, W)
        src = F.relu(self.batch_norm3(self.layer6(src)), True)

        # Run the LSTM over each row of the feature map independently,
        # prepending a learned row-position embedding each time.
        all_outputs = []
        for row in range(src.size(2)):
            # (W, batch_size, 512): width becomes the time dimension.
            inp = src[:, :, row, :].transpose(0, 2) \
                                   .transpose(1, 2)
            row_vec = torch.Tensor(batch_size).type_as(inp.data) \
                                              .long().fill_(row)
            pos_emb = self.pos_lut(row_vec)
            with_pos = torch.cat(
                (pos_emb.view(1, pos_emb.size(0), pos_emb.size(1)), inp), 0)
            outputs, hidden_t = self.rnn(with_pos)
            all_outputs.append(outputs)
        out = torch.cat(all_outputs, 0)
        # NOTE(review): hidden_t is from the *last* row only — this is
        # the historical behavior and is preserved.
        return hidden_t, out, lengths

    def update_dropout(self, dropout):
        """Update the LSTM inter-layer dropout probability in-place."""
        self.rnn.dropout = dropout
import math
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from onmt.utils.rnn_factory import rnn_factory
from onmt.encoders.encoder import EncoderBase
class AudioEncoder(EncoderBase):
    """A simple encoder CNN -> RNN for audio input.

    Args:
        rnn_type (str): Type of RNN (e.g. GRU, LSTM, etc).
        enc_layers (int): Number of encoder layers.
        dec_layers (int): Number of decoder layers.
        brnn (bool): Bidirectional encoder.
        enc_rnn_size (int): Size of hidden states of the rnn.
        dec_rnn_size (int): Size of the decoder hidden states.
        enc_pooling (str): A comma separated list either of length 1
            or of length ``enc_layers`` specifying the pooling amount.
        dropout (float): dropout probablity.
        sample_rate (float): input spec
        window_size (int): input spec
    """

    def __init__(self, rnn_type, enc_layers, dec_layers, brnn,
                 enc_rnn_size, dec_rnn_size, enc_pooling, dropout,
                 sample_rate, window_size):
        super(AudioEncoder, self).__init__()
        self.enc_layers = enc_layers
        self.rnn_type = rnn_type
        self.dec_layers = dec_layers
        num_directions = 2 if brnn else 1
        self.num_directions = num_directions
        assert enc_rnn_size % num_directions == 0
        # Per-direction sizes, so bidirectional outputs concatenate
        # back to the requested sizes.
        enc_rnn_size_real = enc_rnn_size // num_directions
        assert dec_rnn_size % num_directions == 0
        self.dec_rnn_size = dec_rnn_size
        dec_rnn_size_real = dec_rnn_size // num_directions
        self.dec_rnn_size_real = dec_rnn_size_real
        # Number of FFT bins given the spectrogram parameters.
        input_size = int(math.floor((sample_rate * window_size) / 2) + 1)
        enc_pooling = enc_pooling.split(',')
        assert len(enc_pooling) == enc_layers or len(enc_pooling) == 1
        if len(enc_pooling) == 1:
            # A single value applies to every layer.
            enc_pooling = enc_pooling * enc_layers
        enc_pooling = [int(p) for p in enc_pooling]
        self.enc_pooling = enc_pooling

        if type(dropout) is not list:
            dropout = [dropout]
        if max(dropout) > 0:
            self.dropout = nn.Dropout(dropout[0])
        else:
            self.dropout = None
        # Projects encoder outputs to the decoder hidden size.
        self.W = nn.Linear(enc_rnn_size, dec_rnn_size, bias=False)

        # First RNN/pool/batchnorm stage reads the raw spectrogram...
        self.batchnorm_0 = nn.BatchNorm1d(enc_rnn_size, affine=True)
        self.rnn_0, self.no_pack_padded_seq = \
            rnn_factory(rnn_type,
                        input_size=input_size,
                        hidden_size=enc_rnn_size_real,
                        num_layers=1,
                        dropout=dropout[0],
                        bidirectional=brnn)
        self.pool_0 = nn.MaxPool1d(enc_pooling[0])
        # ...and each subsequent stage reads the previous stage's output.
        for l in range(enc_layers - 1):
            batchnorm = nn.BatchNorm1d(enc_rnn_size, affine=True)
            rnn, _ = \
                rnn_factory(rnn_type,
                            input_size=enc_rnn_size,
                            hidden_size=enc_rnn_size_real,
                            num_layers=1,
                            dropout=dropout[0],
                            bidirectional=brnn)
            setattr(self, 'rnn_%d' % (l + 1), rnn)
            setattr(self, 'pool_%d' % (l + 1),
                    nn.MaxPool1d(enc_pooling[l + 1]))
            setattr(self, 'batchnorm_%d' % (l + 1), batchnorm)

    @classmethod
    def from_opt(cls, opt, embeddings=None):
        """Alternate constructor."""
        if embeddings is not None:
            raise ValueError("Cannot use embeddings with AudioEncoder.")
        return cls(
            opt.rnn_type,
            opt.enc_layers,
            opt.dec_layers,
            opt.brnn,
            opt.enc_rnn_size,
            opt.dec_rnn_size,
            opt.audio_enc_pooling,
            opt.dropout,
            opt.sample_rate,
            opt.window_size)

    def forward(self, src, lengths=None):
        """See :func:`onmt.encoders.encoder.EncoderBase.forward()`"""
        batch_size, _, nfft, t = src.size()
        # (batch, 1, nfft, t) -> (t, batch, nfft): time-major for the RNN.
        src = src.transpose(0, 1).transpose(0, 3).contiguous() \
                 .view(t, batch_size, nfft)
        orig_lengths = lengths
        lengths = lengths.view(-1).tolist()
        for l in range(self.enc_layers):
            rnn = getattr(self, 'rnn_%d' % l)
            pool = getattr(self, 'pool_%d' % l)
            batchnorm = getattr(self, 'batchnorm_%d' % l)
            stride = self.enc_pooling[l]
            packed_emb = pack(src, lengths)
            memory_bank, tmp = rnn(packed_emb)
            memory_bank = unpack(memory_bank)[0]
            t, _, _ = memory_bank.size()
            # Pool along the time axis (MaxPool1d pools the last dim).
            memory_bank = memory_bank.transpose(0, 2)
            memory_bank = pool(memory_bank)
            # Sequence lengths shrink by the pooling stride.
            lengths = [int(math.floor((length - stride) / stride + 1))
                       for length in lengths]
            memory_bank = memory_bank.transpose(0, 2)
            src = memory_bank
            t, _, num_feat = src.size()
            # BatchNorm1d expects (N, features); flatten time x batch.
            src = batchnorm(src.contiguous().view(-1, num_feat))
            src = src.view(t, -1, num_feat)
            if self.dropout and l + 1 != self.enc_layers:
                src = self.dropout(src)

        memory_bank = memory_bank.contiguous().view(-1, memory_bank.size(2))
        memory_bank = self.W(memory_bank).view(-1, batch_size,
                                               self.dec_rnn_size)
        # The encoder hidden is (layers*directions) x batch x dim,
        # initialized to zeros for the decoder.
        state = memory_bank.new_full((self.dec_layers * self.num_directions,
                                      batch_size, self.dec_rnn_size_real), 0)
        if self.rnn_type == 'LSTM':
            encoder_final = (state, state)
        else:
            encoder_final = state
        return encoder_final, memory_bank, orig_lengths.new_tensor(lengths)

    def update_dropout(self, dropout):
        """Update dropout probabilities in-place.

        Fixes two defects of the original: it crashed when dropout was
        disabled (``self.dropout is None``), and ``range(enc_layers - 1)``
        skipped the last rnn layer (rnn_0 .. rnn_{enc_layers-1} all exist).
        """
        if self.dropout is not None:
            self.dropout.p = dropout
        for i in range(self.enc_layers):
            getattr(self, 'rnn_%d' % i).dropout = dropout
import os
import torch
from collections import deque
from onmt.utils.logging import logger
from copy import deepcopy
def build_model_saver(model_opt, opt, model, fields, optim):
    """Instantiate a :class:`ModelSaver` from the training options."""
    return ModelSaver(opt.save_model,
                      model,
                      model_opt,
                      fields,
                      optim,
                      opt.keep_checkpoint)
class ModelSaverBase(object):
    """Base class for model saving operations

    Inherited classes must implement private methods:

    * `_save`
    * `_rm_checkpoint`
    """

    def __init__(self, base_path, model, model_opt, fields, optim,
                 keep_checkpoint=-1):
        self.base_path = base_path
        self.model = model
        self.model_opt = model_opt
        self.fields = fields
        self.optim = optim
        self.last_saved_step = None
        self.keep_checkpoint = keep_checkpoint
        if keep_checkpoint > 0:
            # Bounded queue of checkpoint names; oldest is deleted on overflow.
            self.checkpoint_queue = deque([], maxlen=keep_checkpoint)

    def save(self, step, moving_average=None):
        """Main entry point for model saver

        It wraps the `_save` method with checks and apply `keep_checkpoint`
        related logic

        Args:
            step (int): current training step
            moving_average: optional list of EMA parameters; if given, the
                averaged weights are saved instead of the live ones.
        """
        # keep_checkpoint == 0 disables saving entirely; also skip
        # duplicate saves for the same step.
        if self.keep_checkpoint == 0 or step == self.last_saved_step:
            return

        if moving_average:
            # Copy the model so the EMA weights don't clobber training state.
            save_model = deepcopy(self.model)
            for avg, param in zip(moving_average, save_model.parameters()):
                param.data.copy_(avg.data)
        else:
            save_model = self.model

        chkpt, chkpt_name = self._save(step, save_model)
        self.last_saved_step = step

        if moving_average:
            del save_model

        if self.keep_checkpoint > 0:
            if len(self.checkpoint_queue) == self.checkpoint_queue.maxlen:
                todel = self.checkpoint_queue.popleft()
                self._rm_checkpoint(todel)
            self.checkpoint_queue.append(chkpt_name)

    def _save(self, step, model):
        """Save a resumable checkpoint.

        Note: the signature includes ``model`` to match the call site in
        :meth:`save` and the :class:`ModelSaver` implementation (the
        original abstract signature was missing it).

        Args:
            step (int): step number
            model: the model object to serialize

        Returns:
            (object, str):

            * checkpoint: the saved object
            * checkpoint_name: name (or path) of the saved checkpoint
        """
        raise NotImplementedError()

    def _rm_checkpoint(self, name):
        """Remove a checkpoint

        Args:
            name(str): name that indentifies the checkpoint
                (it may be a filepath)
        """
        raise NotImplementedError()
class ModelSaver(ModelSaverBase):
    """Simple model saver to filesystem"""

    def _save(self, step, model):
        """Serialize *model* and training state to ``<base_path>_step_<step>.pt``."""
        model_state_dict = model.state_dict()
        # The generator is saved under its own key, so strip it here.
        model_state_dict = {k: v for k, v in model_state_dict.items()
                            if 'generator' not in k}
        generator_state_dict = model.generator.state_dict()

        # NOTE: We need to trim the vocab to remove any unk tokens that
        # were not originally here.
        vocab = deepcopy(self.fields)
        for side in ["src", "tgt"]:
            keys_to_pop = []
            if hasattr(vocab[side], "fields"):
                unk_token = vocab[side].fields[0][1].vocab.itos[0]
                for key, value in vocab[side].fields[0][1].vocab.stoi.items():
                    # index 0 belongs to unk; drop aliases that map onto it.
                    if value == 0 and key != unk_token:
                        keys_to_pop.append(key)
                for key in keys_to_pop:
                    vocab[side].fields[0][1].vocab.stoi.pop(key, None)

        checkpoint = {
            'model': model_state_dict,
            'generator': generator_state_dict,
            'vocab': vocab,
            'opt': self.model_opt,
            'optim': self.optim.state_dict(),
        }

        logger.info("Saving checkpoint %s_step_%d.pt" % (self.base_path, step))
        checkpoint_path = '%s_step_%d.pt' % (self.base_path, step)
        torch.save(checkpoint, checkpoint_path)
        return checkpoint, checkpoint_path

    def _rm_checkpoint(self, name):
        """Delete checkpoint file *name* from disk."""
        os.remove(name)
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
def train(opt):
    """Top-level training entry point.

    Validates options, loads (or restores from checkpoint) the vocab,
    builds the training iterator, then dispatches to multi-GPU,
    single-GPU or CPU training depending on ``opt.world_size`` and
    ``opt.gpu_ranks``.
    """
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)

    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        vocab = checkpoint['vocab']
    else:
        vocab = torch.load(opt.data + '.vocab.pt')

    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab

    # Build one iterator over all training shards (multi-corpus aware).
    if len(opt.data_ids) > 1:
        train_shards = []
        for train_id in opt.data_ids:
            shard_base = "train_" + train_id
            train_shards.append(shard_base)
        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
    else:
        if opt.data_ids[0] is not None and opt.data_ids[0] != 'None':
            shard_base = "train_" + opt.data_ids[0]
        else:
            shard_base = "train"
        train_iter = build_dataset_iter(shard_base, fields, opt)

    nb_gpu = len(opt.gpu_ranks)

    if opt.world_size > 1:
        # Multi-GPU: one consumer process per GPU, fed by a single
        # batch-producer process through bounded per-GPU queues.
        queues = []
        mp = torch.multiprocessing.get_context('spawn')
        # Caps the number of in-flight batches across all queues.
        semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
        # Create a thread to listen for errors in the child processes.
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        # Train with multiprocessing.
        procs = []
        for device_id in range(nb_gpu):
            q = mp.Queue(opt.queue_size)
            queues += [q]
            procs.append(mp.Process(target=run, args=(
                opt, device_id, error_queue, q, semaphore), daemon=True))
            procs[device_id].start()
            logger.info(" Starting process pid: %d " % procs[device_id].pid)
            error_handler.add_child(procs[device_id].pid)
        producer = mp.Process(target=batch_producer,
                              args=(train_iter, queues, semaphore, opt,),
                              daemon=True)
        producer.start()
        error_handler.add_child(producer.pid)

        for p in procs:
            p.join()
        producer.terminate()

    elif nb_gpu == 1:  # case 1 GPU only
        single_main(opt, 0)
    else:  # case only CPU
        single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
    """Produce batches and hand them to GPU consumer queues round-robin.

    Runs in its own process. ``semaphore`` bounds the number of
    in-flight batches; a consumer releases it after processing a batch.
    """
    init_logger(opt.log_file)
    set_random_seed(opt.seed, False)
    # generator_to_serve = iter(generator_to_serve)

    def pred(x):
        """
        Filters batches that belong only
        to gpu_ranks of current node
        """
        for rank in opt.gpu_ranks:
            if x[0] % opt.world_size == rank:
                return True

    generator_to_serve = filter(
        pred, enumerate(generator_to_serve))

    def next_batch(device_id):
        # Block on the semaphore (back-pressure) before fetching a batch.
        new_batch = next(generator_to_serve)
        semaphore.acquire()
        return new_batch[1]

    b = next_batch(0)

    for device_id, q in cycle(enumerate(queues)):
        b.dataset = None
        # Move every tensor of the batch to the target device before
        # pickling it across to the consumer process.
        if isinstance(b.src, tuple):
            b.src = tuple([_.to(torch.device(device_id))
                           for _ in b.src])
        else:
            b.src = b.src.to(torch.device(device_id))
        b.tgt = b.tgt.to(torch.device(device_id))
        b.indices = b.indices.to(torch.device(device_id))
        b.alignment = b.alignment.to(torch.device(device_id)) \
            if hasattr(b, 'alignment') else None
        b.src_map = b.src_map.to(torch.device(device_id)) \
            if hasattr(b, 'src_map') else None
        # hack to dodge unpicklable `dict_keys`
        b.fields = list(b.fields)
        q.put(b)
        b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
    """Per-GPU worker process: init distributed backend, then train.

    Any exception (other than KeyboardInterrupt) is forwarded to the
    parent through ``error_queue`` instead of crashing silently.
    """
    try:
        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
        if gpu_rank != opt.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        single_main(opt, device_id, batch_queue, semaphore)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks on the queue waiting for a traceback.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # SIGUSR1 is used by the listener thread to interrupt the parent's
        # main thread (signal handlers only run in the main thread).
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ register a child pid to be killed on error """
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener """
        (rank, original_trace) = self.error_queue.get()
        # Re-queue so signal_handler can read the traceback again.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def _get_parser():
    """Build the command-line parser for train.py."""
    parser = ArgumentParser(description='train.py')
    for add_opts in (opts.config_opts, opts.model_opts, opts.train_opts):
        add_opts(parser)
    return parser
def main():
    """Parse command-line arguments and launch training."""
    opt = _get_parser().parse_args()
    train(opt)
# Script entry point (stray dataset-extraction metadata removed).
if __name__ == "__main__":
    main()
"""Train models."""
import os
import signal
import torch
import onmt.opts as opts
import onmt.utils.distributed
from onmt.utils.misc import set_random_seed
from onmt.utils.logging import init_logger, logger
from onmt.train_single import main as single_main
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from itertools import cycle
import torch.cuda.profiler as profiler
import pyprof2
pyprof2.init()
def train(opt):
    """Top-level training entry point (profiling build).

    Validates options, loads (or restores from checkpoint) the vocab,
    builds the training iterator, then dispatches to multi-GPU,
    single-GPU or CPU training depending on ``opt.world_size`` and
    ``opt.gpu_ranks``.
    """
    ArgumentParser.validate_train_opts(opt)
    ArgumentParser.update_model_opts(opt)
    ArgumentParser.validate_model_opts(opt)

    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        vocab = checkpoint['vocab']
    else:
        vocab = torch.load(opt.data + '.vocab.pt')

    # check for code where vocab is saved instead of fields
    # (in the future this will be done in a smarter way)
    if old_style_vocab(vocab):
        fields = load_old_vocab(
            vocab, opt.model_type, dynamic_dict=opt.copy_attn)
    else:
        fields = vocab

    # Build one iterator over all training shards (multi-corpus aware).
    if len(opt.data_ids) > 1:
        train_shards = []
        for train_id in opt.data_ids:
            shard_base = "train_" + train_id
            train_shards.append(shard_base)
        train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
    else:
        if opt.data_ids[0] is not None and opt.data_ids[0] != 'None':
            shard_base = "train_" + opt.data_ids[0]
        else:
            shard_base = "train"
        train_iter = build_dataset_iter(shard_base, fields, opt)

    nb_gpu = len(opt.gpu_ranks)

    if opt.world_size > 1:
        # Multi-GPU: one consumer process per GPU, fed by a single
        # batch-producer process through bounded per-GPU queues.
        queues = []
        mp = torch.multiprocessing.get_context('spawn')
        # Caps the number of in-flight batches across all queues.
        semaphore = mp.Semaphore(opt.world_size * opt.queue_size)
        # Create a thread to listen for errors in the child processes.
        error_queue = mp.SimpleQueue()
        error_handler = ErrorHandler(error_queue)
        # Train with multiprocessing.
        procs = []
        for device_id in range(nb_gpu):
            q = mp.Queue(opt.queue_size)
            queues += [q]
            procs.append(mp.Process(target=run, args=(
                opt, device_id, error_queue, q, semaphore), daemon=True))
            procs[device_id].start()
            logger.info(" Starting process pid: %d " % procs[device_id].pid)
            error_handler.add_child(procs[device_id].pid)
        producer = mp.Process(target=batch_producer,
                              args=(train_iter, queues, semaphore, opt,),
                              daemon=True)
        producer.start()
        error_handler.add_child(producer.pid)

        for p in procs:
            p.join()
        producer.terminate()

    elif nb_gpu == 1:  # case 1 GPU only
        single_main(opt, 0)
    else:  # case only CPU
        single_main(opt, -1)
def batch_producer(generator_to_serve, queues, semaphore, opt):
    """Produce batches and hand them to GPU consumer queues round-robin.

    Runs in its own process. ``semaphore`` bounds the number of
    in-flight batches; a consumer releases it after processing a batch.
    """
    init_logger(opt.log_file)
    set_random_seed(opt.seed, False)
    # generator_to_serve = iter(generator_to_serve)

    def pred(x):
        """
        Filters batches that belong only
        to gpu_ranks of current node
        """
        for rank in opt.gpu_ranks:
            if x[0] % opt.world_size == rank:
                return True

    generator_to_serve = filter(
        pred, enumerate(generator_to_serve))

    def next_batch(device_id):
        # Block on the semaphore (back-pressure) before fetching a batch.
        new_batch = next(generator_to_serve)
        semaphore.acquire()
        return new_batch[1]

    b = next_batch(0)

    for device_id, q in cycle(enumerate(queues)):
        b.dataset = None
        # Move every tensor of the batch to the target device before
        # pickling it across to the consumer process.
        if isinstance(b.src, tuple):
            b.src = tuple([_.to(torch.device(device_id))
                           for _ in b.src])
        else:
            b.src = b.src.to(torch.device(device_id))
        b.tgt = b.tgt.to(torch.device(device_id))
        b.indices = b.indices.to(torch.device(device_id))
        b.alignment = b.alignment.to(torch.device(device_id)) \
            if hasattr(b, 'alignment') else None
        b.src_map = b.src_map.to(torch.device(device_id)) \
            if hasattr(b, 'src_map') else None
        # hack to dodge unpicklable `dict_keys`
        b.fields = list(b.fields)
        q.put(b)
        b = next_batch(device_id)
def run(opt, device_id, error_queue, batch_queue, semaphore):
    """Per-GPU worker process: init distributed backend, then train.

    Any exception (other than KeyboardInterrupt) is forwarded to the
    parent through ``error_queue`` instead of crashing silently.
    """
    try:
        gpu_rank = onmt.utils.distributed.multi_init(opt, device_id)
        if gpu_rank != opt.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        single_main(opt, device_id, batch_queue, semaphore)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((opt.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks on the queue waiting for a traceback.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # SIGUSR1 is used by the listener thread to interrupt the parent's
        # main thread (signal handlers only run in the main thread).
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ register a child pid to be killed on error """
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener """
        (rank, original_trace) = self.error_queue.get()
        # Re-queue so signal_handler can read the traceback again.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def _get_parser():
    """Build the command-line parser for the profiling training script."""
    parser = ArgumentParser(description='train.py')
    for add_opts in (opts.config_opts, opts.model_opts, opts.train_opts):
        add_opts(parser)
    return parser
def main():
    """Parse command-line arguments and launch (profiled) training."""
    opt = _get_parser().parse_args()
    train(opt)
# Script entry point (stray dataset-extraction metadata removed).
if __name__ == "__main__":
    main()
import codecs
import glob
import gc
import torch
from collections import Counter, defaultdict
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import split_corpus
import onmt.inputters as inputters
import onmt.opts as opts
from onmt.utils.parse import ArgumentParser
from onmt.inputters.inputter import _build_fields_vocab,\
_load_vocab
from functools import partial
from multiprocessing import Pool
def check_existing_pt_files(opt, corpus_type, ids, existing_fields):
    """ Check if there are existing .pt files to avoid overwriting them """
    existing_shards = []
    for maybe_id in ids:
        shard_base = corpus_type + "_" + maybe_id if maybe_id else corpus_type
        pattern = opt.save_data + '.{}.*.pt'.format(shard_base)
        if not glob.glob(pattern):
            continue
        if opt.overwrite:
            maybe_overwrite = ("will be overwritten because "
                               "`-overwrite` option is set.")
        else:
            maybe_overwrite = ("won't be overwritten, pass the "
                               "`-overwrite` option if you want to.")
        logger.warning("Shards for corpus {} already exist, {}"
                       .format(shard_base, maybe_overwrite))
        existing_shards += [maybe_id]
    return existing_shards
def process_one_shard(corpus_params, params):
    """Build, count and save one dataset shard (worker for a process pool).

    Args:
        corpus_params: tuple of shared, shard-independent arguments
            (corpus_type, fields, readers, opt, vocab state).
        params: tuple of (shard index, (src shard, tgt shard, corpus id,
            filter predicate)).

    Returns:
        defaultdict(Counter): per-field token counts gathered from this
        shard (empty unless counting was needed for vocab building).
    """
    corpus_type, fields, src_reader, tgt_reader, opt, existing_fields,\
        src_vocab, tgt_vocab = corpus_params
    i, (src_shard, tgt_shard, maybe_id, filter_pred) = params
    # create one counter per shard
    sub_sub_counter = defaultdict(Counter)
    assert len(src_shard) == len(tgt_shard)
    logger.info("Building shard %d." % i)
    # tgt_reader may be absent (e.g. inference-style preprocessing);
    # build the dataset with src only in that case.
    dataset = inputters.Dataset(
        fields,
        readers=([src_reader, tgt_reader]
                 if tgt_reader else [src_reader]),
        data=([("src", src_shard), ("tgt", tgt_shard)]
              if tgt_reader else [("src", src_shard)]),
        dirs=([opt.src_dir, None]
              if tgt_reader else [opt.src_dir]),
        sort_key=inputters.str2sortkey[opt.data_type],
        filter_pred=filter_pred
    )
    # Token counting is only needed when building a brand-new vocab.
    if corpus_type == "train" and existing_fields is None:
        for ex in dataset.examples:
            for name, field in fields.items():
                if ((opt.data_type == "audio") and (name == "src")):
                    continue
                # A field may be a single Field or an iterable of
                # (name, Field) sub-fields; normalize to the latter.
                try:
                    f_iter = iter(field)
                except TypeError:
                    f_iter = [(name, field)]
                    all_data = [getattr(ex, name, None)]
                else:
                    all_data = getattr(ex, name)
                for (sub_n, sub_f), fd in zip(
                        f_iter, all_data):
                    # Skip counting when a pre-built vocab was supplied
                    # for this side.
                    has_vocab = (sub_n == 'src' and
                                 src_vocab is not None) or \
                                (sub_n == 'tgt' and
                                 tgt_vocab is not None)
                    if (hasattr(sub_f, 'sequential')
                            and sub_f.sequential and not has_vocab):
                        val = fd
                        sub_sub_counter[sub_n].update(val)
    if maybe_id:
        shard_base = corpus_type + "_" + maybe_id
    else:
        shard_base = corpus_type
    data_path = "{:s}.{:s}.{:d}.pt".\
        format(opt.save_data, shard_base, i)
    logger.info(" * saving %sth %s data shard to %s."
                % (i, shard_base, data_path))
    dataset.save(data_path)
    # Free the (large) examples eagerly before returning to the pool.
    del dataset.examples
    gc.collect()
    del dataset
    gc.collect()
    return sub_sub_counter
def maybe_load_vocab(corpus_type, counters, opt):
    """Load pre-existing vocabularies for the train corpus, if configured.

    ``opt.src_vocab`` may point either at a saved fields object (a
    ``vocab.pt``) or at a plain-text vocab file; the UnpicklingError
    fallback distinguishes the two.

    Returns:
        (src_vocab, tgt_vocab, existing_fields): any of which may be None.
    """
    src_vocab = None
    tgt_vocab = None
    existing_fields = None
    if corpus_type == "train":
        if opt.src_vocab != "":
            try:
                logger.info("Using existing vocabulary...")
                existing_fields = torch.load(opt.src_vocab)
            except torch.serialization.pickle.UnpicklingError:
                logger.info("Building vocab from text file...")
                src_vocab, src_vocab_size = _load_vocab(
                    opt.src_vocab, "src", counters,
                    opt.src_words_min_frequency)
        if opt.tgt_vocab != "":
            tgt_vocab, tgt_vocab_size = _load_vocab(
                opt.tgt_vocab, "tgt", counters,
                opt.tgt_words_min_frequency)
    return src_vocab, tgt_vocab, existing_fields
def build_save_dataset(corpus_type, fields, src_reader, tgt_reader, opt):
    """Shard, process (in parallel) and save one corpus split to disk.

    For the train split this also accumulates token counts across shards
    and saves the resulting vocab as ``<save_data>.vocab.pt``.
    """
    assert corpus_type in ['train', 'valid']

    if corpus_type == 'train':
        counters = defaultdict(Counter)
        srcs = opt.train_src
        tgts = opt.train_tgt
        ids = opt.train_ids
    elif corpus_type == 'valid':
        counters = None
        srcs = [opt.valid_src]
        tgts = [opt.valid_tgt]
        ids = [None]

    src_vocab, tgt_vocab, existing_fields = maybe_load_vocab(
        corpus_type, counters, opt)

    existing_shards = check_existing_pt_files(
        opt, corpus_type, ids, existing_fields)

    # every corpus has shards, no new one
    if existing_shards == ids and not opt.overwrite:
        return

    def shard_iterator(srcs, tgts, ids, existing_shards,
                       existing_fields, corpus_type, opt):
        """
        Builds a single iterator yielding every shard of every corpus.
        """
        for src, tgt, maybe_id in zip(srcs, tgts, ids):
            if maybe_id in existing_shards:
                if opt.overwrite:
                    logger.warning("Overwrite shards for corpus {}"
                                   .format(maybe_id))
                else:
                    if corpus_type == "train":
                        assert existing_fields is not None,\
                            ("A 'vocab.pt' file should be passed to "
                             "`-src_vocab` when adding a corpus to "
                             "a set of already existing shards.")
                    logger.warning("Ignore corpus {} because "
                                   "shards already exist"
                                   .format(maybe_id))
                    continue
            # Length filtering applies to training data (and optionally
            # to validation data when -filter_valid is set).
            if ((corpus_type == "train" or opt.filter_valid)
                    and tgt is not None):
                filter_pred = partial(
                    inputters.filter_example,
                    use_src_len=opt.data_type == "text",
                    max_src_len=opt.src_seq_length,
                    max_tgt_len=opt.tgt_seq_length)
            else:
                filter_pred = None
            src_shards = split_corpus(src, opt.shard_size)
            tgt_shards = split_corpus(tgt, opt.shard_size)
            for i, (ss, ts) in enumerate(zip(src_shards, tgt_shards)):
                yield (i, (ss, ts, maybe_id, filter_pred))

    shard_iter = shard_iterator(srcs, tgts, ids, existing_shards,
                                existing_fields, corpus_type, opt)

    # Process shards in a pool, merging per-shard counters as they finish.
    with Pool(opt.num_threads) as p:
        dataset_params = (corpus_type, fields, src_reader, tgt_reader,
                          opt, existing_fields, src_vocab, tgt_vocab)
        func = partial(process_one_shard, dataset_params)
        for sub_counter in p.imap(func, shard_iter):
            if sub_counter is not None:
                for key, value in sub_counter.items():
                    counters[key].update(value)

    if corpus_type == "train":
        vocab_path = opt.save_data + '.vocab.pt'
        if existing_fields is None:
            fields = _build_fields_vocab(
                fields, counters, opt.data_type,
                opt.share_vocab, opt.vocab_size_multiple,
                opt.src_vocab_size, opt.src_words_min_frequency,
                opt.tgt_vocab_size, opt.tgt_words_min_frequency)
        else:
            fields = existing_fields
        torch.save(fields, vocab_path)
def build_save_vocab(train_dataset, fields, opt):
    """Build the vocabularies over *train_dataset* and save the fields."""
    vocab_path = opt.save_data + '.vocab.pt'
    fields = inputters.build_vocab(
        train_dataset, fields, opt.data_type, opt.share_vocab,
        opt.src_vocab, opt.src_vocab_size, opt.src_words_min_frequency,
        opt.tgt_vocab, opt.tgt_vocab_size, opt.tgt_words_min_frequency,
        vocab_size_multiple=opt.vocab_size_multiple
    )
    torch.save(fields, vocab_path)
def count_features(path):
    """
    path: location of a corpus file with whitespace-delimited tokens and
    │-delimited features within the token
    returns: the number of features in the dataset
    """
    with codecs.open(path, "r", "utf-8") as f:
        first_tok = f.readline().split(None, 1)[0]
    # Each feature adds one delimiter, so counting delimiters in the
    # first token equals len(split) - 1.
    return first_tok.count(u"│")
def preprocess(opt):
    """Main preprocessing entry point.

    Counts token-level features, builds the `Fields` objects and the
    data readers, then shards/saves the training and (optionally)
    validation datasets.
    """
    ArgumentParser.validate_preprocess_args(opt)
    torch.manual_seed(opt.seed)

    init_logger(opt.log_file)

    logger.info("Extracting features...")

    src_nfeats = 0
    tgt_nfeats = 0
    for src, tgt in zip(opt.train_src, opt.train_tgt):
        # Only text sources carry token features.
        src_nfeats += count_features(src) if opt.data_type == 'text' \
            else 0
        tgt_nfeats += count_features(tgt)  # tgt always text so far
    logger.info(" * number of source features: %d." % src_nfeats)
    logger.info(" * number of target features: %d." % tgt_nfeats)

    logger.info("Building `Fields` object...")
    fields = inputters.get_fields(
        opt.data_type,
        src_nfeats,
        tgt_nfeats,
        dynamic_dict=opt.dynamic_dict,
        src_truncate=opt.src_seq_length_trunc,
        tgt_truncate=opt.tgt_seq_length_trunc)

    src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
    tgt_reader = inputters.str2reader["text"].from_opt(opt)

    logger.info("Building & saving training data...")
    build_save_dataset(
        'train', fields, src_reader, tgt_reader, opt)

    if opt.valid_src and opt.valid_tgt:
        logger.info("Building & saving validation data...")
        build_save_dataset('valid', fields, src_reader, tgt_reader, opt)
def _get_parser():
    """Build the command-line parser for preprocess.py."""
    parser = ArgumentParser(description='preprocess.py')
    for add_opts in (opts.config_opts, opts.preprocess_opts):
        add_opts(parser)
    return parser
def main():
    """Parse command-line arguments and run preprocessing."""
    opt = _get_parser().parse_args()
    preprocess(opt)
# Script entry point (stray dataset-extraction metadata removed).
if __name__ == "__main__":
    main()
from enum import Enum
from onmt.utils.logging import logger
class PatienceEnum(Enum):
    """Internal state of the early-stopping controller."""
    IMPROVING = 0
    DECREASING = 1
    STOPPED = 2
class Scorer(object):
    """Base class for validation-metric scorers used by early stopping.

    Tracks the best value seen so far for one metric; subclasses define
    how the metric is read from a Statistics object and what "better"
    means for it.
    """

    def __init__(self, best_score, name):
        # best_score: initial sentinel (+inf for losses, -inf for accuracy).
        self.best_score = best_score
        self.name = name

    def is_improving(self, stats):
        """Return True if *stats* beats the best score (subclass hook)."""
        raise NotImplementedError()

    def is_decreasing(self, stats):
        """Return True if *stats* is worse than the best score (subclass hook)."""
        raise NotImplementedError()

    def update(self, stats):
        """Record the metric value from *stats* as the new best score."""
        self.best_score = self._caller(stats)

    def __call__(self, stats, **kwargs):
        """Return the metric value extracted from *stats*."""
        return self._caller(stats)

    def _caller(self, stats):
        """Extract the metric value from *stats* (subclass hook)."""
        raise NotImplementedError()
class PPLScorer(Scorer):
    """Scores validation perplexity; lower is better."""

    def __init__(self):
        # Perplexity is minimized, so start from +inf.
        super(PPLScorer, self).__init__(float("inf"), "ppl")

    def is_improving(self, stats):
        return stats.ppl() < self.best_score

    def is_decreasing(self, stats):
        return stats.ppl() > self.best_score

    def _caller(self, stats):
        return stats.ppl()
class AccuracyScorer(Scorer):
    """Scores validation accuracy; higher is better."""

    def __init__(self):
        # Accuracy is maximized, so start from -inf.
        super(AccuracyScorer, self).__init__(float("-inf"), "acc")

    def is_improving(self, stats):
        return stats.accuracy() > self.best_score

    def is_decreasing(self, stats):
        return stats.accuracy() < self.best_score

    def _caller(self, stats):
        return stats.accuracy()
# Metrics tracked when -early_stopping_criteria is not given.
DEFAULT_SCORERS = [PPLScorer(), AccuracyScorer()]
# Maps a criterion name from the command line to its scorer class.
SCORER_BUILDER = {
    "ppl": PPLScorer,
    "accuracy": AccuracyScorer
}
def scorers_from_opts(opt):
    """Build the scorers selected by ``-early_stopping_criteria``.

    Falls back to ``DEFAULT_SCORERS`` when no criteria were given.
    Duplicate criteria are collapsed while keeping first-seen order
    (a plain ``set`` would make the scorer order vary between runs).

    Args:
        opt: parsed options with an ``early_stopping_criteria`` attribute.

    Returns:
        list of ``Scorer`` instances.

    Raises:
        AssertionError: if a criterion has no registered scorer.
    """
    if opt.early_stopping_criteria is None:
        return DEFAULT_SCORERS
    scorers = []
    # dict.fromkeys deduplicates while preserving insertion order (3.7+).
    for criterion in dict.fromkeys(opt.early_stopping_criteria):
        assert criterion in SCORER_BUILDER.keys(), \
            "Criterion {} not found".format(criterion)
        scorers.append(SCORER_BUILDER[criterion]())
    return scorers
class EarlyStopping(object):
    """Callable tracker deciding when training should stop early."""

    def __init__(self, tolerance, scorers=None):
        """
        Callable class to keep track of early stopping.

        Args:
            tolerance(int): number of validation steps without improving
            scorers(list): scorers to validate performance on dev; when
                ``None``, fresh default scorers (ppl + accuracy) are built.
                Fresh instances are created instead of reusing the shared
                module-level ``DEFAULT_SCORERS`` list so two
                ``EarlyStopping`` objects never mutate the same scorer
                state (``Scorer.update`` overwrites ``best_score``).
        """
        if scorers is None:
            scorers = [builder() for builder in SCORER_BUILDER.values()]
        self.tolerance = tolerance
        # Separate budgets for "stalled" (mixed) and "decreasing" results.
        self.stalled_tolerance = self.tolerance
        self.current_tolerance = self.tolerance
        self.early_stopping_scorers = scorers
        self.status = PatienceEnum.IMPROVING
        self.current_step_best = 0

    def __call__(self, valid_stats, step):
        """
        Update the internal state of early stopping mechanism, whether to
        continue training or stop the train procedure.

        Checks whether the scores from all pre-chosen scorers improved. If
        every metric improve, then the status is switched to improving and the
        tolerance is reset. If every metric deteriorate, then the status is
        switched to decreasing and the tolerance is also decreased; if the
        tolerance reaches 0, then the status is changed to stopped.
        Finally, if some improved and others not, then it's considered stalled;
        after tolerance number of stalled, the status is switched to stopped.

        :param valid_stats: Statistics of dev set
        :param step: current training step
        """
        if self.status == PatienceEnum.STOPPED:
            # Don't do anything
            return
        if all(scorer.is_improving(valid_stats) for scorer
               in self.early_stopping_scorers):
            self._update_increasing(valid_stats, step)
        elif all(scorer.is_decreasing(valid_stats) for scorer
                 in self.early_stopping_scorers):
            self._update_decreasing()
        else:
            self._update_stalled()

    def _update_stalled(self):
        # Mixed results: some metrics improved, others did not.
        self.stalled_tolerance -= 1
        logger.info(
            "Stalled patience: {}/{}".format(self.stalled_tolerance,
                                             self.tolerance))
        if self.stalled_tolerance == 0:
            logger.info(
                "Training finished after stalled validations. Early Stop!"
            )
            self._log_best_step()
        self._decreasing_or_stopped_status_update(self.stalled_tolerance)

    def _update_increasing(self, valid_stats, step):
        # All metrics improved: record the step and reset both budgets.
        self.current_step_best = step
        for scorer in self.early_stopping_scorers:
            logger.info(
                "Model is improving {}: {:g} --> {:g}.".format(
                    scorer.name, scorer.best_score, scorer(valid_stats))
            )
            # Update best score of each criteria
            scorer.update(valid_stats)
        # Reset tolerance
        self.current_tolerance = self.tolerance
        self.stalled_tolerance = self.tolerance
        # Update current status
        self.status = PatienceEnum.IMPROVING

    def _update_decreasing(self):
        # Decrease tolerance
        self.current_tolerance -= 1
        # Log
        logger.info(
            "Decreasing patience: {}/{}".format(self.current_tolerance,
                                                self.tolerance)
        )
        if self.current_tolerance == 0:
            logger.info("Training finished after not improving. Early Stop!")
            self._log_best_step()
        self._decreasing_or_stopped_status_update(self.current_tolerance)

    def _log_best_step(self):
        logger.info("Best model found at step {}".format(
            self.current_step_best))

    def _decreasing_or_stopped_status_update(self, tolerance):
        # Any exhausted budget flips the status to STOPPED.
        self.status = PatienceEnum.DECREASING \
            if tolerance > 0 \
            else PatienceEnum.STOPPED

    def is_improving(self):
        """True when the last validation improved every metric."""
        return self.status == PatienceEnum.IMPROVING

    def has_stopped(self):
        """True when patience is exhausted and training should halt."""
        return self.status == PatienceEnum.STOPPED
import configargparse as cfargparse
import os
import torch
import onmt.opts as opts
from onmt.utils.logging import logger
class ArgumentParser(cfargparse.ArgumentParser):
    """Project argument parser with YAML config-file support.

    Also hosts the option post-processing and validation helpers used by
    the preprocess/train/translate entry points.
    """

    def __init__(
            self,
            config_file_parser_class=cfargparse.YAMLConfigFileParser,
            formatter_class=cfargparse.ArgumentDefaultsHelpFormatter,
            **kwargs):
        super(ArgumentParser, self).__init__(
            config_file_parser_class=config_file_parser_class,
            formatter_class=formatter_class,
            **kwargs)

    @classmethod
    def defaults(cls, *args):
        """Get default arguments added to a parser by all ``*args``."""
        dummy_parser = cls()
        for callback in args:
            callback(dummy_parser)
        defaults = dummy_parser.parse_known_args([])[0]
        return defaults

    @classmethod
    def update_model_opts(cls, model_opt):
        """Propagate shorthand options to their per-component variants."""
        if model_opt.word_vec_size > 0:
            model_opt.src_word_vec_size = model_opt.word_vec_size
            model_opt.tgt_word_vec_size = model_opt.word_vec_size
        if model_opt.layers > 0:
            model_opt.enc_layers = model_opt.layers
            model_opt.dec_layers = model_opt.layers
        if model_opt.rnn_size > 0:
            model_opt.enc_rnn_size = model_opt.rnn_size
            model_opt.dec_rnn_size = model_opt.rnn_size
        model_opt.brnn = model_opt.encoder_type == "brnn"
        if model_opt.copy_attn_type is None:
            model_opt.copy_attn_type = model_opt.global_attention

    @classmethod
    def validate_model_opts(cls, model_opt):
        """Sanity-check model options; raises AssertionError on bad combos."""
        assert model_opt.model_type in ["text", "img", "audio", "vec"], \
            "Unsupported model type %s" % model_opt.model_type
        # this check is here because audio allows the encoder and decoder to
        # be different sizes, but other model types do not yet
        same_size = model_opt.enc_rnn_size == model_opt.dec_rnn_size
        assert model_opt.model_type == 'audio' or same_size, \
            "The encoder and decoder rnns must be the same size for now"
        assert model_opt.rnn_type != "SRU" or model_opt.gpu_ranks, \
            "Using SRU requires -gpu_ranks set."
        if model_opt.share_embeddings:
            if model_opt.model_type != "text":
                raise AssertionError(
                    "--share_embeddings requires --model_type text.")

    @classmethod
    def ckpt_model_opts(cls, ckpt_opt):
        """Rebuild model options from a checkpoint.

        Load default opt values, then overwrite with the opts in the
        checkpoint.  That way, if there are new options added, the
        defaults are used.
        """
        opt = cls.defaults(opts.model_opts)
        opt.__dict__.update(ckpt_opt.__dict__)
        return opt

    @classmethod
    def validate_train_opts(cls, opt):
        """Check training options, rejecting deprecated or inconsistent ones."""
        if opt.epochs:
            raise AssertionError(
                "-epochs is deprecated please use -train_steps.")
        if opt.truncated_decoder > 0 and max(opt.accum_count) > 1:
            raise AssertionError("BPTT is not compatible with -accum > 1")
        if opt.gpuid:
            raise AssertionError(
                "gpuid is deprecated see world_size and gpu_ranks")
        if torch.cuda.is_available() and not opt.gpu_ranks:
            logger.info("WARNING: You have a CUDA device, \
                        should run with -gpu_ranks")
        if opt.world_size < len(opt.gpu_ranks):
            raise AssertionError(
                "parameter counts of -gpu_ranks must be less or equal "
                "than -world_size.")
        if opt.world_size == len(opt.gpu_ranks) and \
                min(opt.gpu_ranks) > 0:
            raise AssertionError(
                "-gpu_ranks should have master(=0) rank "
                "unless -world_size is greater than len(gpu_ranks).")
        assert len(opt.data_ids) == len(opt.data_weights), \
            "Please check -data_ids and -data_weights options!"
        assert len(opt.dropout) == len(opt.dropout_steps), \
            "Number of dropout values must match accum_steps values"
        assert len(opt.attention_dropout) == len(opt.dropout_steps), \
            "Number of attention_dropout values must match accum_steps values"

    @classmethod
    def validate_translate_opts(cls, opt):
        """Beam search and random sampling are mutually exclusive."""
        if opt.beam_size != 1 and opt.random_sampling_topk != 1:
            raise ValueError('Can either do beam search OR random sampling.')

    @classmethod
    def validate_preprocess_args(cls, opt):
        """Check preprocessing options and the existence of all data files."""
        assert opt.max_shard_size == 0, \
            "-max_shard_size is deprecated. Please use \
            -shard_size (number of examples) instead."
        assert opt.shuffle == 0, \
            "-shuffle is not implemented. Please shuffle \
            your data before pre-processing."
        assert len(opt.train_src) == len(opt.train_tgt), \
            "Please provide same number of src and tgt train files!"
        assert len(opt.train_src) == len(opt.train_ids), \
            "Please provide proper -train_ids for your data!"
        for file in opt.train_src + opt.train_tgt:
            assert os.path.isfile(file), "Please check path of %s" % file
        assert not opt.valid_src or os.path.isfile(opt.valid_src), \
            "Please check path of your valid src file!"
        assert not opt.valid_tgt or os.path.isfile(opt.valid_tgt), \
            "Please check path of your valid tgt file!"
        assert not opt.src_vocab or os.path.isfile(opt.src_vocab), \
            "Please check path of your src vocab!"
        assert not opt.tgt_vocab or os.path.isfile(opt.tgt_vocab), \
            "Please check path of your tgt vocab!"
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
import operator
import functools
from copy import copy
from math import sqrt
import types
import importlib
from onmt.utils.misc import fn_args
def build_torch_optimizer(model, opt):
    """Builds the PyTorch optimizer.

    We use the default parameters for Adam that are suggested by
    the original paper https://arxiv.org/pdf/1412.6980.pdf
    These values are also used by other established implementations,
    e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
    https://keras.io/optimizers/
    Recently there are slightly different values used in the paper
    "Attention is all you need"
    https://arxiv.org/pdf/1706.03762.pdf, particularly the value beta2=0.98
    was used there however, beta2=0.999 is still arguably the more
    established value, so we use that here as well

    Args:
      model: The model to optimize.
      opt. The dictionary of options.

    Returns:
      A ``torch.optim.Optimizer`` instance.

    Raises:
      ValueError: if ``opt.optim`` names an unknown optimizer.
    """
    # Frozen parameters must not be handed to the optimizer.
    params = [p for p in model.parameters() if p.requires_grad]
    betas = [opt.adam_beta1, opt.adam_beta2]
    if opt.optim == 'sgd':
        optimizer = optim.SGD(params, lr=opt.learning_rate)
    elif opt.optim == 'adagrad':
        optimizer = optim.Adagrad(
            params,
            lr=opt.learning_rate,
            initial_accumulator_value=opt.adagrad_accumulator_init)
    elif opt.optim == 'adadelta':
        optimizer = optim.Adadelta(params, lr=opt.learning_rate)
    elif opt.optim == 'adafactor':
        optimizer = AdaFactor(
            params,
            non_constant_decay=True,
            enable_factorization=True,
            weight_decay=0)
    elif opt.optim == 'adam':
        optimizer = optim.Adam(
            params,
            lr=opt.learning_rate,
            betas=betas,
            eps=1e-9)
    elif opt.optim == 'sparseadam':
        # Embeddings get SparseAdam, everything else dense Adam.
        dense = []
        sparse = []
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue
            # TODO: Find a better way to check for sparse gradients.
            if 'embed' in name:
                sparse.append(param)
            else:
                dense.append(param)
        optimizer = MultipleOptimizer(
            [optim.Adam(
                dense,
                lr=opt.learning_rate,
                betas=betas,
                eps=1e-8),
             optim.SparseAdam(
                sparse,
                lr=opt.learning_rate,
                betas=betas,
                eps=1e-8)])
    elif opt.optim == 'fusedadam':
        # we use here a FusedAdam() copy of an old Apex repo
        optimizer = FusedAdam(
            params,
            lr=opt.learning_rate,
            betas=betas)
    else:
        raise ValueError('Invalid optimizer type: ' + opt.optim)
    if opt.model_dtype == 'fp16':
        # apex is an optional dependency; imported lazily so fp32 runs
        # do not require it.
        import apex
        if opt.optim != 'fusedadam':
            # In this case use the new AMP API from apex
            loss_scale = "dynamic" if opt.loss_scale == 0 else opt.loss_scale
            # NOTE(review): `model` is rebound to the list returned by
            # amp.initialize and then discarded (only `optimizer` leaves
            # this function) -- presumably amp patches the models in
            # place; confirm callers rely on that.
            model, optimizer = apex.amp.initialize(
                [model, model.generator],
                optimizer,
                opt_level=opt.apex_opt_level,
                loss_scale=loss_scale,
                keep_batchnorm_fp32=None)
        else:
            # In this case use the old FusedAdam with FP16_optimizer wrapper
            static_loss_scale = opt.loss_scale
            # loss_scale == 0 means "choose the scale dynamically".
            dynamic_loss_scale = opt.loss_scale == 0
            optimizer = apex.optimizers.FP16_Optimizer(
                optimizer,
                static_loss_scale=static_loss_scale,
                dynamic_loss_scale=dynamic_loss_scale)
    return optimizer
def make_learning_rate_decay_fn(opt):
    """Return the step -> LR-scale callable selected by the options.

    Returns ``None`` when no decay schedule is configured (constant
    learning rate).
    """
    method = opt.decay_method
    if method == 'noam':
        return functools.partial(
            noam_decay,
            warmup_steps=opt.warmup_steps,
            model_size=opt.rnn_size)
    if method == 'noamwd':
        return functools.partial(
            noamwd_decay,
            warmup_steps=opt.warmup_steps,
            model_size=opt.rnn_size,
            rate=opt.learning_rate_decay,
            decay_steps=opt.decay_steps,
            start_step=opt.start_decay_steps)
    if method == 'rsqrt':
        return functools.partial(
            rsqrt_decay, warmup_steps=opt.warmup_steps)
    if opt.start_decay_steps is not None:
        # No named method but a decay start: plain exponential decay.
        return functools.partial(
            exponential_decay,
            rate=opt.learning_rate_decay,
            decay_steps=opt.decay_steps,
            start_step=opt.start_decay_steps)
    return None
def noam_decay(step, warmup_steps, model_size):
    """Learning rate schedule described in
    https://arxiv.org/pdf/1706.03762.pdf.

    Grows linearly during warmup, then decays as step^-0.5; the whole
    schedule is normalized by sqrt(model_size).
    """
    scale = min(step ** (-0.5), step * warmup_steps ** (-1.5))
    return scale / sqrt(model_size)
def noamwd_decay(step, warmup_steps,
                 model_size, rate, decay_steps, start_step=0):
    """Learning rate schedule optimized for huge batches.

    Combines the noam schedule with a stepwise exponential decay of
    ``rate`` applied once every ``decay_steps`` steps after
    ``start_step``.
    """
    noam = min(step ** (-0.5), step * warmup_steps ** (-1.5)) / sqrt(model_size)
    num_decays = max(step - start_step + decay_steps, 0) // decay_steps
    return noam * rate ** num_decays
def exponential_decay(step, rate, decay_steps, start_step=0):
    """A standard exponential decay, scaling the learning rate by
    :obj:`rate` every :obj:`decay_steps` steps after ``start_step``."""
    num_decays = max(step - start_step + decay_steps, 0) // decay_steps
    return rate ** num_decays
def rsqrt_decay(step, warmup_steps):
    """Decay based on the reciprocal of the step square root,
    held constant while still inside the warmup window."""
    return max(step, warmup_steps) ** -0.5
class MultipleOptimizer(object):
    """Wraps several torch optimizers behind a single optimizer API.

    Needed e.g. to drive a dense Adam and a SparseAdam together.
    """

    def __init__(self, op):
        """``op``: list of ``torch.optim.Optimizer`` instances."""
        self.optimizers = op

    @property
    def param_groups(self):
        """Concatenated param groups of all wrapped optimizers."""
        groups = []
        for optimizer in self.optimizers:
            groups += optimizer.param_groups
        return groups

    def zero_grad(self):
        """Clear gradients on every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.zero_grad()

    def step(self):
        """Apply one update step with every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.step()

    @property
    def state(self):
        """Merged per-parameter state across all wrapped optimizers."""
        merged = {}
        for optimizer in self.optimizers:
            merged.update(optimizer.state)
        return merged

    def state_dict(self):
        """One state dict per wrapped optimizer, in order."""
        return [optimizer.state_dict() for optimizer in self.optimizers]

    def load_state_dict(self, state_dicts):
        """Restore each wrapped optimizer from its matching state dict."""
        assert len(state_dicts) == len(self.optimizers)
        for optimizer, state in zip(self.optimizers, state_dicts):
            optimizer.load_state_dict(state)
class Optimizer(object):
    """
    Controller class for optimization. Mostly a thin
    wrapper for `optim`, but also useful for implementing
    rate scheduling beyond what is currently available.
    Also implements necessary methods for training RNNs such
    as grad manipulations.
    """

    def __init__(self,
                 optimizer,
                 learning_rate,
                 learning_rate_decay_fn=None,
                 max_grad_norm=None):
        """Initializes the controller.

        Args:
            optimizer: A ``torch.optim.Optimizer`` instance.
            learning_rate: The initial learning rate.
            learning_rate_decay_fn: An optional callable taking the current
                step as argument and return a learning rate scaling factor.
            max_grad_norm: Clip gradients to this global norm.
        """
        self._optimizer = optimizer
        self._learning_rate = learning_rate
        self._learning_rate_decay_fn = learning_rate_decay_fn
        self._max_grad_norm = max_grad_norm or 0
        self._training_step = 1
        self._decay_step = 1
        # Set by ``from_opt``: None (fp32), "amp" (apex AMP) or "legacy"
        # (FP16_Optimizer wrapper around FusedAdam).
        self._fp16 = None

    @classmethod
    def from_opt(cls, model, opt, checkpoint=None):
        """Builds the optimizer from options.

        Args:
            cls: The ``Optimizer`` class to instantiate.
            model: The model to optimize.
            opt: The dict of user options.
            checkpoint: An optional checkpoint to load states from.

        Returns:
            An ``Optimizer`` instance.
        """
        optim_opt = opt
        optim_state_dict = None
        if opt.train_from and checkpoint is not None:
            optim = checkpoint['optim']
            ckpt_opt = checkpoint['opt']
            ckpt_state_dict = {}
            if isinstance(optim, Optimizer):  # Backward compatibility.
                # Old checkpoints stored the whole Optimizer object; pull
                # its counters and inner state into the new dict format.
                ckpt_state_dict['training_step'] = optim._step + 1
                ckpt_state_dict['decay_step'] = optim._step + 1
                ckpt_state_dict['optimizer'] = optim.optimizer.state_dict()
            else:
                ckpt_state_dict = optim
            if opt.reset_optim == 'none':
                # Load everything from the checkpoint.
                optim_opt = ckpt_opt
                optim_state_dict = ckpt_state_dict
            elif opt.reset_optim == 'all':
                # Build everything from scratch.
                pass
            elif opt.reset_optim == 'states':
                # Reset optimizer, keep options.
                optim_opt = ckpt_opt
                optim_state_dict = ckpt_state_dict
                del optim_state_dict['optimizer']
            elif opt.reset_optim == 'keep_states':
                # Reset options, keep optimizer.
                optim_state_dict = ckpt_state_dict
        optimizer = cls(
            build_torch_optimizer(model, optim_opt),
            optim_opt.learning_rate,
            learning_rate_decay_fn=make_learning_rate_decay_fn(optim_opt),
            max_grad_norm=optim_opt.max_grad_norm)
        if opt.model_dtype == "fp16":
            if opt.optim == "fusedadam":
                optimizer._fp16 = "legacy"
            else:
                optimizer._fp16 = "amp"
        if optim_state_dict:
            optimizer.load_state_dict(optim_state_dict)
        return optimizer

    @property
    def training_step(self):
        """The current training step."""
        return self._training_step

    def learning_rate(self):
        """Returns the current learning rate."""
        if self._learning_rate_decay_fn is None:
            return self._learning_rate
        scale = self._learning_rate_decay_fn(self._decay_step)
        return scale * self._learning_rate

    def state_dict(self):
        """Serializable state: step counters plus the inner optimizer."""
        return {
            'training_step': self._training_step,
            'decay_step': self._decay_step,
            'optimizer': self._optimizer.state_dict()
        }

    def load_state_dict(self, state_dict):
        """Restore counters and (when present) the inner optimizer state."""
        self._training_step = state_dict['training_step']
        # State can be partially restored.
        if 'decay_step' in state_dict:
            self._decay_step = state_dict['decay_step']
        if 'optimizer' in state_dict:
            self._optimizer.load_state_dict(state_dict['optimizer'])

    def zero_grad(self):
        """Zero the gradients of optimized parameters."""
        self._optimizer.zero_grad()

    def backward(self, loss):
        """Wrapper for backward pass. Some optimizer requires ownership of the
        backward pass."""
        if self._fp16 == "amp":
            import apex
            # AMP scales the loss to avoid fp16 underflow.
            with apex.amp.scale_loss(loss, self._optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self._fp16 == "legacy":
            kwargs = {}
            if "update_master_grads" in fn_args(self._optimizer.backward):
                kwargs["update_master_grads"] = True
            self._optimizer.backward(loss, **kwargs)
        else:
            loss.backward()

    def step(self):
        """Update the model parameters based on current gradients.

        Optionally, will employ gradient modification or update learning
        rate.
        """
        learning_rate = self.learning_rate()
        if self._fp16 == "legacy":
            # FP16_Optimizer keeps fp32 master weights; sync and clip them.
            if hasattr(self._optimizer, "update_master_grads"):
                self._optimizer.update_master_grads()
            if hasattr(self._optimizer, "clip_master_grads") and \
                    self._max_grad_norm > 0:
                self._optimizer.clip_master_grads(self._max_grad_norm)
        for group in self._optimizer.param_groups:
            # Push the scheduled learning rate into every param group.
            group['lr'] = learning_rate
            if self._fp16 is None and self._max_grad_norm > 0:
                clip_grad_norm_(group['params'], self._max_grad_norm)
        self._optimizer.step()
        self._decay_step += 1
        self._training_step += 1
# Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
# inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch
class AdaFactor(torch.optim.Optimizer):
    """AdaFactor optimizer (https://arxiv.org/pdf/1804.04235.pdf).

    Saves memory by factorizing the second-moment accumulator of
    matrix-shaped parameters into per-row and per-column statistics.
    """

    def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
                 eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
                 enable_factorization=True, ams_grad=True, weight_decay=0):
        # Momentum is active whenever beta1 is non-zero.
        enable_momentum = beta1 != 0
        # Time-dependent decay and AMSGrad are mutually exclusive here.
        if non_constant_decay:
            ams_grad = False
        defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
                        eps2=eps2, cliping_threshold=cliping_threshold,
                        weight_decay=weight_decay, ams_grad=ams_grad,
                        enable_factorization=enable_factorization,
                        enable_momentum=enable_momentum,
                        non_constant_decay=non_constant_decay)
        super(AdaFactor, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(AdaFactor, self).__setstate__(state)

    def _experimental_reshape(self, shape):
        # Collapse an N-D shape (N > 2) into 2-D so the factored
        # accumulators apply; also return the original shape so the
        # update can be reshaped back.
        temp_shape = shape[2:]
        if len(temp_shape) == 1:
            new_shape = (shape[0], shape[1]*shape[2])
        else:
            tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
            new_shape = (shape[0]*functools.reduce(operator.mul,
                         temp_shape[tmp_div:], 1),
                         shape[1]*functools.reduce(operator.mul,
                         temp_shape[:tmp_div], 1))
        return new_shape, copy(shape)

    def _check_shape(self, shape):
        '''
        output1 - True - algorithm for matrix, False - vector;
        output2 - need reshape
        '''
        if len(shape) > 2:
            return True, True
        elif len(shape) == 2:
            return True, False
        # NOTE(review): the branch below is unreachable -- the
        # `len(shape) == 2` case above matches first, so (1, n) and
        # (n, 1) parameters are treated as matrices. Confirm whether
        # they were meant to take the vector path instead.
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            return False, False
        else:
            return False, False

    def _rms(self, x):
        # Root-mean-square of a tensor, as a Python float.
        return sqrt(torch.mean(x.pow(2)))

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse \
                                       gradients, use SparseAdam instead')
                is_matrix, is_need_reshape = self._check_shape(grad.size())
                new_shape = p.data.size()
                if is_need_reshape and group['enable_factorization']:
                    new_shape, old_shape = \
                        self._experimental_reshape(p.data.size())
                    grad = grad.view(new_shape)
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization on first use.
                    state['step'] = 0
                    if group['enable_momentum']:
                        state['exp_avg'] = torch.zeros(new_shape,
                                                       dtype=torch.float32,
                                                       device=p.grad.device)
                    if is_matrix and group['enable_factorization']:
                        # Row (R) and column (C) second-moment factors.
                        state['exp_avg_sq_R'] = \
                            torch.zeros((1, new_shape[1]),
                                        dtype=torch.float32,
                                        device=p.grad.device)
                        state['exp_avg_sq_C'] = \
                            torch.zeros((new_shape[0], 1),
                                        dtype=torch.float32,
                                        device=p.grad.device)
                    else:
                        state['exp_avg_sq'] = torch.zeros(new_shape,
                                                          dtype=torch.float32,
                                                          device=p.grad.device)
                    if group['ams_grad']:
                        state['exp_avg_sq_hat'] = \
                            torch.zeros(new_shape, dtype=torch.float32,
                                        device=p.grad.device)
                if group['enable_momentum']:
                    exp_avg = state['exp_avg']
                if is_matrix and group['enable_factorization']:
                    exp_avg_sq_r = state['exp_avg_sq_R']
                    exp_avg_sq_c = state['exp_avg_sq_C']
                else:
                    exp_avg_sq = state['exp_avg_sq']
                if group['ams_grad']:
                    exp_avg_sq_hat = state['exp_avg_sq_hat']
                state['step'] += 1
                lr_t = group['lr']
                # Relative step size: scale lr by the parameter RMS
                # (eps2 floors it for tiny parameters).
                lr_t *= max(group['eps2'], self._rms(p.data))
                if group['enable_momentum']:
                    if group['non_constant_decay']:
                        beta1_t = group['beta1'] * \
                            (1 - group['beta1'] ** (state['step'] - 1)) \
                            / (1 - group['beta1'] ** state['step'])
                    else:
                        beta1_t = group['beta1']
                    # NOTE: legacy `add_(scalar, tensor)` call signature;
                    # deprecated in recent PyTorch releases.
                    exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
                if group['non_constant_decay']:
                    beta2_t = group['beta2'] * \
                        (1 - group['beta2'] ** (state['step'] - 1)) / \
                        (1 - group['beta2'] ** state['step'])
                else:
                    beta2_t = group['beta2']
                if is_matrix and group['enable_factorization']:
                    # Factored second moment: row/column sums of grad^2.
                    exp_avg_sq_r.mul_(beta2_t). \
                        add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
                                                    add_(group['eps1']),
                                                    dim=0, keepdim=True))
                    exp_avg_sq_c.mul_(beta2_t). \
                        add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
                                                    add_(group['eps1']),
                                                    dim=1, keepdim=True))
                    v = torch.mul(exp_avg_sq_c,
                                  exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
                else:
                    exp_avg_sq.mul_(beta2_t). \
                        addcmul_(1 - beta2_t, grad, grad). \
                        add_((1 - beta2_t)*group['eps1'])
                    v = exp_avg_sq
                g = grad
                if group['enable_momentum']:
                    # Bias-corrected first moment.
                    g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
                if group['ams_grad']:
                    torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
                    v = exp_avg_sq_hat
                    u = torch.div(g, (torch.div(v, 1 - beta2_t **
                                  state['step'])).sqrt().add_(group['eps1']))
                else:
                    u = torch.div(g, v.sqrt())
                # Clip the update by its own RMS.
                u.div_(max(1, self._rms(u) / group['cliping_threshold']))
                p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
                                     group['enable_factorization'] else u))
                if group['weight_decay'] != 0:
                    p.data.add_(-group['weight_decay'] * lr_t, p.data)
        return loss
class FusedAdam(torch.optim.Optimizer):
    """Implements Adam algorithm. Currently GPU-only.

    Requires Apex to be installed via
    ``python setup.py install --cuda_ext --cpp_ext``.
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate. (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square.
            (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability. (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False) NOT SUPPORTED in FusedAdam!
        eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
            adds eps to the bias-corrected second moment estimate before
            evaluating square root instead of adding it to the square root of
            second moment estimate as in the original paper. (default: False)

    .. _Adam: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params,
                 lr=1e-3, bias_correction=True,
                 betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False,
                 weight_decay=0., max_grad_norm=0., amsgrad=False):
        # The CUDA kernel is an optional extension; resolve it lazily so
        # importing this module does not require it.
        global fused_adam_cuda
        fused_adam_cuda = importlib.import_module("fused_adam_cuda")
        if amsgrad:
            raise RuntimeError('AMSGrad variant not supported.')
        defaults = dict(lr=lr, bias_correction=bias_correction,
                        betas=betas, eps=eps, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(FusedAdam, self).__init__(params, defaults)
        # Selects whether eps is applied inside or outside the sqrt in
        # the CUDA kernel.
        self.eps_mode = 0 if eps_inside_sqrt else 1

    def step(self, closure=None, grads=None, output_params=None,
             scale=1., grad_norms=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
            grads (list of tensors, optional): weight gradient to use for the
                optimizer update. If gradients have type torch.half, parameters
                are expected to be in type torch.float. (default: None)
            output params (list of tensors, optional): A reduced precision copy
                of the updated weights written out in addition to the regular
                updated weights. Have to be of same type as gradients.
                (default: None)
            scale (float, optional): factor to divide gradient tensor values
                by before applying to weights. (default: 1)
        """
        loss = None
        if closure is not None:
            loss = closure()
        # Normalize ``grads`` to one list of gradients per param group.
        if grads is None:
            grads_group = [None]*len(self.param_groups)
        # backward compatibility
        # assuming a list/generator of parameter means single group
        elif isinstance(grads, types.GeneratorType):
            grads_group = [grads]
        elif type(grads[0]) != list:
            grads_group = [grads]
        else:
            grads_group = grads
        # Same normalization for the optional low-precision output copies.
        if output_params is None:
            output_params_group = [None]*len(self.param_groups)
        elif isinstance(output_params, types.GeneratorType):
            output_params_group = [output_params]
        elif type(output_params[0]) != list:
            output_params_group = [output_params]
        else:
            output_params_group = output_params
        if grad_norms is None:
            grad_norms = [None]*len(self.param_groups)
        for group, grads_this_group, output_params_this_group, \
                grad_norm in zip(self.param_groups, grads_group,
                                 output_params_group, grad_norms):
            if grads_this_group is None:
                grads_this_group = [None]*len(group['params'])
            if output_params_this_group is None:
                output_params_this_group = [None]*len(group['params'])
            # compute combined scale factor for this group
            combined_scale = scale
            if group['max_grad_norm'] > 0:
                # norm is in fact norm*scale
                clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
                if clip > 1:
                    combined_scale = clip * scale
            bias_correction = 1 if group['bias_correction'] else 0
            for p, grad, output_param in zip(group['params'],
                                             grads_this_group,
                                             output_params_this_group):
                # note: p.grad should not ever be set for correct operation of
                # mixed precision optimizer that sometimes sends None gradients
                if p.grad is None and grad is None:
                    continue
                if grad is None:
                    grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('FusedAdam does not support sparse \
                                       gradients, please consider \
                                       SparseAdam instead')
                state = self.state[p]
                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # An empty tensor signals "no output copy" to the kernel.
                out_p = torch.tensor([], dtype=torch.float) if output_param \
                    is None else output_param
                fused_adam_cuda.adam(p.data,
                                     out_p,
                                     exp_avg,
                                     exp_avg_sq,
                                     grad,
                                     group['lr'],
                                     beta1,
                                     beta2,
                                     group['eps'],
                                     combined_scale,
                                     state['step'],
                                     self.eps_mode,
                                     bias_correction,
                                     group['weight_decay'])
        return loss
from __future__ import print_function
import time
from datetime import datetime
import onmt
from onmt.utils.logging import logger
def build_report_manager(opt, gpu_rank):
    """Build a ``ReportMgr``, attaching a metrics writer on the master rank.

    Only gpu_rank 0 gets a writer (TensorBoard, MLflow or wandb,
    depending on the options); every other rank reports without one.
    """
    writer = None
    if gpu_rank == 0:
        if opt.tensorboard:
            from torch.utils.tensorboard import SummaryWriter
            log_dir = opt.tensorboard_log_dir
            if not opt.train_from:
                # Fresh runs get a timestamped subdirectory.
                log_dir += datetime.now().strftime("/%b-%d_%H-%M-%S")
            writer = SummaryWriter(log_dir, comment="Unmt")
        elif opt.mlflow:
            writer = MLflowSummaryWriter()
        elif opt.wandb:
            writer = WandbSummaryWriter()
    return ReportMgr(opt.report_every, start_time=-1,
                     tensorboard_writer=writer)
class ReportMgrBase(object):
    """
    Report Manager Base class

    Inherited classes should override:
        * `_report_training`
        * `_report_step`
    """

    def __init__(self, report_every, start_time=-1.):
        """
        Args:
            report_every(int): Report status every this many sentences
            start_time(float): manually set report start time. Negative values
                means that you will need to set it later or use `start()`
        """
        self.report_every = report_every
        # Number of training reports emitted so far (used as an x-axis).
        self.progress_step = 0
        self.start_time = start_time

    def start(self):
        """Mark the current wall-clock time as the reporting start time."""
        self.start_time = time.time()

    def log(self, *args, **kwargs):
        """Forward a log message to the module logger."""
        logger.info(*args, **kwargs)

    def report_training(self, step, num_steps, learning_rate,
                        report_stats, multigpu=False):
        """
        This is the user-defined batch-level traing progress
        report function.

        Args:
            step(int): current step count.
            num_steps(int): total number of batches.
            learning_rate(float): current learning rate.
            report_stats(Statistics): old Statistics instance.
        Returns:
            report_stats(Statistics): updated Statistics instance.
        """
        if self.start_time < 0:
            raise ValueError("""ReportMgr needs to be started
                                (set 'start_time' or use 'start()'""")
        if step % self.report_every == 0:
            if multigpu:
                # Aggregate statistics across all workers before reporting.
                report_stats = \
                    onmt.utils.Statistics.all_gather_stats(report_stats)
            self._report_training(
                step, num_steps, learning_rate, report_stats)
            self.progress_step += 1
            # Reset the window: caller continues with fresh statistics.
            return onmt.utils.Statistics()
        else:
            return report_stats

    def _report_training(self, *args, **kwargs):
        """ To be overridden """
        raise NotImplementedError()

    def report_step(self, lr, step, train_stats=None, valid_stats=None):
        """
        Report stats of a step

        Args:
            train_stats(Statistics): training stats
            valid_stats(Statistics): validation stats
            lr(float): current learning rate
        """
        self._report_step(
            lr, step, train_stats=train_stats, valid_stats=valid_stats)

    def _report_step(self, *args, **kwargs):
        """ To be overridden """
        raise NotImplementedError()
class ReportMgr(ReportMgrBase):
    def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
        """
        A report manager that writes statistics on standard output as well as
        (optionally) TensorBoard

        Args:
            report_every(int): Report status every this many sentences
            tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
                The TensorBoard Summary writer to use or None
        """
        super(ReportMgr, self).__init__(report_every, start_time)
        self.tensorboard_writer = tensorboard_writer

    def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
        """Log ``stats`` to the writer when one is configured; no-op otherwise."""
        if self.tensorboard_writer is not None:
            stats.log_tensorboard(
                prefix, self.tensorboard_writer, learning_rate, step)

    def _report_training(self, step, num_steps, learning_rate,
                         report_stats):
        """
        See base class method `ReportMgrBase.report_training`.
        """
        report_stats.output(step, num_steps,
                            learning_rate, self.start_time)
        # Log the progress using the number of batches on the x-axis.
        # For wandb use step (1000, 2000, .. ) instead of progress_step
        # (1,2,3, ...) for training, as otherwise it raises an error
        # after logging the validation stats with the progress_step
        # because of the too "old" logs.
        if isinstance(self.tensorboard_writer, WandbSummaryWriter):
            self.maybe_log_tensorboard(report_stats,
                                       "progress",
                                       learning_rate,
                                       step)
        else:  # default onmt behaviour
            self.maybe_log_tensorboard(report_stats,
                                       "progress",
                                       learning_rate,
                                       self.progress_step)
        # Return a fresh Statistics object so the caller starts a new window.
        report_stats = onmt.utils.Statistics()
        return report_stats

    def _report_step(self, lr, step, train_stats=None, valid_stats=None):
        """
        See base class method `ReportMgrBase.report_step`.
        """
        if train_stats is not None:
            self.log('Train perplexity: %g' % train_stats.ppl())
            self.log('Train accuracy: %g' % train_stats.accuracy())
            self.maybe_log_tensorboard(train_stats,
                                       "train",
                                       lr,
                                       step)
        if valid_stats is not None:
            self.log('Validation perplexity: %g' % valid_stats.ppl())
            self.log('Validation accuracy: %g' % valid_stats.accuracy())
            self.maybe_log_tensorboard(valid_stats,
                                       "valid",
                                       lr,
                                       step)
class MLflowSummaryWriter(object):
    """Adapter exposing SummaryWriter's ``add_scalar`` on top of mlflow."""

    def add_scalar(self, tag, scalar_value, global_step=None):
        # Deferred import so mlflow stays an optional dependency.
        import mlflow
        # mlflow cannot display metric names that include the '/' char.
        sanitized = tag.replace('/', '_')
        mlflow.log_metric(sanitized, scalar_value, step=global_step)
class WandbSummaryWriter(object):
    """Adapter exposing SummaryWriter's ``add_scalar`` on top of wandb."""

    def add_scalar(self, tag, scalar_value, global_step=None):
        # Deferred import so wandb stays an optional dependency.
        import wandb
        # Replace '/' for naming consistency with MLflowSummaryWriter.
        sanitized = tag.replace('/', '_')
        wandb.log({sanitized: scalar_value}, step=global_step)
from __future__ import print_function
import math
import pickle
import torch.distributed
from onmt.utils.logging import logger
def is_master(opt, device_id):
    """Return True when the process owning `device_id` has global rank 0."""
    rank = opt.gpu_ranks[device_id]
    return rank == 0
def multi_init(opt, device_id):
    """Initialize the torch.distributed process group for multi-GPU runs.

    Disables logging on non-master ranks and returns this process's rank.
    """
    init_method = 'tcp://{master_ip}:{master_port}'.format(
        master_ip=opt.master_ip,
        master_port=opt.master_port)
    torch.distributed.init_process_group(
        backend=opt.gpu_backend,
        init_method=init_method,
        world_size=opt.world_size,
        rank=opt.gpu_ranks[device_id])
    gpu_rank = torch.distributed.get_rank()
    if not is_master(opt, device_id):
        # Only the master process should write to the log.
        logger.disabled = True
    return gpu_rank
def all_reduce_and_rescale_tensors(tensors, rescale_denom,
                                   buffer_size=10485760):
    """All-reduce and rescale tensors in chunks of the specified size.

    Args:
        tensors: list of Tensors to all-reduce
        rescale_denom: denominator for rescaling summed Tensors
        buffer_size: all-reduce chunk size in bytes

    Note:
        Every participating process must call this with the same tensor
        list (same order, shapes and dtypes), since chunks are reduced
        in iteration order and collectives must match across ranks.
    """
    # buffer size in bytes, determine equiv. # of elements based on data type
    buffer_t = tensors[0].new(
        math.ceil(buffer_size / tensors[0].element_size())).zero_()
    buffer = []

    def all_reduce_buffer():
        # Flush the currently buffered tensors: the closure reads the
        # *current* binding of `buffer` in the enclosing scope.
        # copy tensors into buffer_t
        offset = 0
        for t in buffer:
            numel = t.numel()
            buffer_t[offset:offset+numel].copy_(t.view(-1))
            offset += numel

        # all-reduce and rescale
        torch.distributed.all_reduce(buffer_t[:offset])
        buffer_t.div_(rescale_denom)

        # copy all-reduced buffer back into tensors
        offset = 0
        for t in buffer:
            numel = t.numel()
            t.view(-1).copy_(buffer_t[offset:offset+numel])
            offset += numel

    filled = 0
    for t in tensors:
        sz = t.numel() * t.element_size()
        if sz > buffer_size:
            # tensor is bigger than buffer, all-reduce and rescale directly
            torch.distributed.all_reduce(t)
            t.div_(rescale_denom)
        elif filled + sz > buffer_size:
            # buffer is full, all-reduce and replace buffer with grad
            all_reduce_buffer()
            buffer = [t]
            filled = sz
        else:
            # add tensor to buffer
            buffer.append(t)
            filled += sz

    # Flush any remainder.
    if len(buffer) > 0:
        all_reduce_buffer()
def all_gather_list(data, max_size=4096):
    """Gathers arbitrary picklable data from all nodes into a list.

    Args:
        data: any picklable object to share with the other ranks.
        max_size (int): per-rank communication buffer size in bytes;
            must leave room for the 2-byte length header.

    Returns:
        list: one deserialized object per rank, indexed by rank.

    Raises:
        ValueError: if the pickled payload (plus header) exceeds
            ``max_size``.
    """
    world_size = torch.distributed.get_world_size()
    # Cache the CUDA buffers across calls. NOTE: the previous code compared
    # ``max_size != buffer.size()`` — an int against ``torch.Size`` — which
    # is always True, so the buffers were re-allocated on every call.
    # ``size(0)`` returns the int element count and restores the caching.
    if not hasattr(all_gather_list, '_in_buffer') or \
            max_size != all_gather_list._in_buffer.size(0):
        all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
        all_gather_list._out_buffers = [
            torch.cuda.ByteTensor(max_size)
            for i in range(world_size)
        ]
    in_buffer = all_gather_list._in_buffer
    out_buffers = all_gather_list._out_buffers

    enc = pickle.dumps(data)
    enc_size = len(enc)
    if enc_size + 2 > max_size:
        raise ValueError(
            'encoded data exceeds max_size: {}'.format(enc_size + 2))
    assert max_size < 255*256
    # Two-byte big-endian-ish length header (base 255); valid for
    # max_size < 65k as asserted above.
    in_buffer[0] = enc_size // 255  # this encoding works for max_size < 65k
    in_buffer[1] = enc_size % 255
    in_buffer[2:enc_size+2] = torch.ByteTensor(list(enc))

    torch.distributed.all_gather(out_buffers, in_buffer.cuda())

    results = []
    for i in range(world_size):
        out_buffer = out_buffers[i]
        # Decode the length header, then unpickle the payload.
        size = (255 * out_buffer[0].item()) + out_buffer[1].item()

        bytes_list = bytes(out_buffer[2:size+2].tolist())
        result = pickle.loads(bytes_list)
        results.append(result)
    return results
from __future__ import division
import time
import math
import sys
from onmt.utils.logging import logger
class Statistics(object):
    """Accumulator for loss statistics.

    Tracks the running loss, token counts and timing, and derives:

    * accuracy
    * perplexity
    * elapsed time
    """

    def __init__(self, loss=0, n_words=0, n_correct=0):
        self.loss = loss
        self.n_words = n_words
        self.n_correct = n_correct
        self.n_src_words = 0
        self.start_time = time.time()

    @staticmethod
    def all_gather_stats(stat, max_size=4096):
        """
        Gather a single `Statistics` object across multiple processes/nodes.

        Args:
            stat(:obj:Statistics): the statistics object to gather
                across all processes/nodes
            max_size(int): max buffer size to use

        Returns:
            `Statistics`, the updated stats object
        """
        gathered = Statistics.all_gather_stats_list([stat], max_size=max_size)
        return gathered[0]

    @staticmethod
    def all_gather_stats_list(stat_list, max_size=4096):
        """
        Gather a `Statistics` list across all processes/nodes.

        Args:
            stat_list(list([`Statistics`])): list of statistics objects to
                gather across all processes/nodes
            max_size(int): max buffer size to use

        Returns:
            our_stats(list([`Statistics`])): list of updated stats
        """
        # Deferred imports: these require torch.distributed to be available.
        from torch.distributed import get_rank
        from onmt.utils.distributed import all_gather_list

        # Get a list of world_size lists with len(stat_list) Statistics objects
        all_stats = all_gather_list(stat_list, max_size=max_size)

        our_rank = get_rank()
        our_stats = all_stats[our_rank]
        # Merge every other rank's stats into ours, position by position.
        for rank, remote_stats in enumerate(all_stats):
            if rank == our_rank:
                continue
            for idx, remote_stat in enumerate(remote_stats):
                our_stats[idx].update(remote_stat, update_n_src_words=True)
        return our_stats

    def update(self, stat, update_n_src_words=False):
        """
        Accumulate another `Statistics` object into this one.

        Args:
            stat: another statistic object
            update_n_src_words(bool): whether to update (sum) `n_src_words`
                or not
        """
        self.loss += stat.loss
        self.n_words += stat.n_words
        self.n_correct += stat.n_correct

        if update_n_src_words:
            self.n_src_words += stat.n_src_words

    def accuracy(self):
        """Token-level accuracy, in percent."""
        return 100 * (self.n_correct / self.n_words)

    def xent(self):
        """Cross entropy (loss per target token)."""
        return self.loss / self.n_words

    def ppl(self):
        """Perplexity, with the exponent clamped at 100 to avoid overflow."""
        return math.exp(min(self.loss / self.n_words, 100))

    def elapsed_time(self):
        """Seconds since this accumulator was created."""
        return time.time() - self.start_time

    def output(self, step, num_steps, learning_rate, start):
        """Write out statistics to stdout.

        Args:
           step (int): current step
           num_steps (int): total steps (0 disables the "x/y" display)
           learning_rate (float): current learning rate
           start (int): start time of step.
        """
        t = self.elapsed_time()
        step_fmt = "%2d" % step
        if num_steps > 0:
            step_fmt = "%s/%5d" % (step_fmt, num_steps)
        logger.info(
            ("Step %s; acc: %6.2f; ppl: %5.2f; xent: %4.2f; " +
             "lr: %7.5f; %3.0f/%3.0f tok/s; %6.0f sec")
            % (step_fmt,
               self.accuracy(),
               self.ppl(),
               self.xent(),
               learning_rate,
               self.n_src_words / (t + 1e-5),
               self.n_words / (t + 1e-5),
               time.time() - start))
        sys.stdout.flush()

    def log_tensorboard(self, prefix, writer, learning_rate, step):
        """Send the current statistics to a TensorBoard-style writer."""
        t = self.elapsed_time()
        writer.add_scalar(prefix + "/xent", self.xent(), step)
        writer.add_scalar(prefix + "/ppl", self.ppl(), step)
        writer.add_scalar(prefix + "/accuracy", self.accuracy(), step)
        writer.add_scalar(prefix + "/tgtper", self.n_words / t, step)
        writer.add_scalar(prefix + "/lr", learning_rate, step)
import torch
import torch.nn as nn
from onmt.decoders.decoder import DecoderBase
from onmt.modules import MultiHeadedAttention, AverageAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
from onmt.utils.misc import sequence_mask
class TransformerDecoderLayer(nn.Module):
    """One Transformer decoder layer: masked self-attention, encoder-decoder
    ("context") attention, then a position-wise feed-forward block, each
    using pre-LayerNorm and a residual connection.

    Args:
        d_model (int): the dimension of keys/values/queries in
            :class:`MultiHeadedAttention`, also the input size of
            the first-layer of the :class:`PositionwiseFeedForward`.
        heads (int): the number of heads for MultiHeadedAttention.
        d_ff (int): the second-layer of the :class:`PositionwiseFeedForward`.
        dropout (float): dropout probability.
        self_attn_type (string): type of self-attention scaled-dot, average
    """

    def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
                 self_attn_type="scaled-dot", max_relative_positions=0,
                 aan_useffn=False):
        super(TransformerDecoderLayer, self).__init__()

        # NOTE(review): an unrecognized self_attn_type silently leaves
        # self.self_attn unset (AttributeError only at forward time) —
        # presumably callers always pass "scaled-dot" or "average"; confirm.
        if self_attn_type == "scaled-dot":
            self.self_attn = MultiHeadedAttention(
                heads, d_model, dropout=dropout,
                max_relative_positions=max_relative_positions)
        elif self_attn_type == "average":
            self.self_attn = AverageAttention(d_model,
                                              dropout=attention_dropout,
                                              aan_useffn=aan_useffn)

        self.context_attn = MultiHeadedAttention(
            heads, d_model, dropout=attention_dropout)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm_1 = nn.LayerNorm(d_model, eps=1e-6)
        self.layer_norm_2 = nn.LayerNorm(d_model, eps=1e-6)
        self.drop = nn.Dropout(dropout)

    def forward(self, inputs, memory_bank, src_pad_mask, tgt_pad_mask,
                layer_cache=None, step=None):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, 1, model_dim)``
            memory_bank (FloatTensor): ``(batch_size, src_len, model_dim)``
            src_pad_mask (LongTensor): ``(batch_size, 1, src_len)``
            tgt_pad_mask (LongTensor): ``(batch_size, 1, 1)``
            layer_cache (dict or None): cached keys/values for stepwise
                decoding.
            step (int or None): decoding step; ``None`` means full-sequence
                (training/scoring) mode, where a causal future mask applies.

        Returns:
            (FloatTensor, FloatTensor):

            * output ``(batch_size, 1, model_dim)``
            * attn ``(batch_size, 1, src_len)``
        """
        dec_mask = None
        if step is None:
            # Full-sequence mode: combine the target padding mask with a
            # strictly upper-triangular "future" mask so position t cannot
            # attend to positions > t. During stepwise decoding no mask is
            # needed because only already-generated states are cached.
            tgt_len = tgt_pad_mask.size(-1)
            future_mask = torch.ones(
                [tgt_len, tgt_len],
                device=tgt_pad_mask.device,
                dtype=torch.uint8)
            future_mask = future_mask.triu_(1).view(1, tgt_len, tgt_len)
            # BoolTensor was introduced in pytorch 1.2
            try:
                future_mask = future_mask.bool()
            except AttributeError:
                pass
            dec_mask = torch.gt(tgt_pad_mask + future_mask, 0)

        # Pre-norm self-attention with residual connection.
        input_norm = self.layer_norm_1(inputs)

        if isinstance(self.self_attn, MultiHeadedAttention):
            query, attn = self.self_attn(input_norm, input_norm, input_norm,
                                         mask=dec_mask,
                                         layer_cache=layer_cache,
                                         attn_type="self")
        elif isinstance(self.self_attn, AverageAttention):
            query, attn = self.self_attn(input_norm, mask=dec_mask,
                                         layer_cache=layer_cache, step=step)

        query = self.drop(query) + inputs

        # Pre-norm encoder-decoder attention; its weights are the attention
        # returned to the caller.
        query_norm = self.layer_norm_2(query)
        mid, attn = self.context_attn(memory_bank, memory_bank, query_norm,
                                      mask=src_pad_mask,
                                      layer_cache=layer_cache,
                                      attn_type="context")
        output = self.feed_forward(self.drop(mid) + query)

        return output, attn

    def update_dropout(self, dropout, attention_dropout):
        """Propagate new dropout rates into every sub-module."""
        self.self_attn.update_dropout(attention_dropout)
        self.context_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.drop.p = dropout
class TransformerDecoder(DecoderBase):
    """The Transformer decoder from "Attention is All You Need".
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`

    .. mermaid::

       graph BT
          A[input]
          B[multi-head self-attn]
          BB[multi-head src-attn]
          C[feed forward]
          O[output]
          A --> B
          B --> BB
          BB --> C
          C --> O

    Args:
        num_layers (int): number of encoder layers.
        d_model (int): size of the model
        heads (int): number of heads
        d_ff (int): size of the inner FF layer
        copy_attn (bool): if using a separate copy attention
        self_attn_type (str): type of self-attention scaled-dot, average
        dropout (float): dropout parameters
        attention_dropout (float): dropout applied to attention weights
        embeddings (onmt.modules.Embeddings):
            embeddings to use, should have positional encodings
        max_relative_positions (int): relative-position window for
            self-attention (0 disables it)
        aan_useffn (bool): use the FFN inside AverageAttention layers
    """

    def __init__(self, num_layers, d_model, heads, d_ff,
                 copy_attn, self_attn_type, dropout, attention_dropout,
                 embeddings, max_relative_positions, aan_useffn):
        super(TransformerDecoder, self).__init__()

        self.embeddings = embeddings

        # Decoder State
        self.state = {}

        self.transformer_layers = nn.ModuleList(
            [TransformerDecoderLayer(d_model, heads, d_ff, dropout,
             attention_dropout, self_attn_type=self_attn_type,
             max_relative_positions=max_relative_positions,
             aan_useffn=aan_useffn)
             for i in range(num_layers)])

        # previously, there was a GlobalAttention module here for copy
        # attention. But it was never actually used -- the "copy" attention
        # just reuses the context attention.
        self._copy = copy_attn
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.heads,
            opt.transformer_ff,
            opt.copy_attn,
            opt.self_attn_type,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            # BUG FIX: the scalar fallback previously read ``opt.dropout``,
            # silently ignoring a non-list ``opt.attention_dropout``.
            opt.attention_dropout[0] if type(opt.attention_dropout)
            is list else opt.attention_dropout,
            embeddings,
            opt.max_relative_positions,
            opt.aan_useffn)

    def init_state(self, src, memory_bank, enc_hidden):
        """Initialize decoder state."""
        self.state["src"] = src
        self.state["cache"] = None

    def map_state(self, fn):
        """Apply ``fn`` (e.g. beam tiling/reordering) to all state tensors."""
        def _recursive_map(struct, batch_dim=0):
            for k, v in struct.items():
                if v is not None:
                    if isinstance(v, dict):
                        _recursive_map(v)
                    else:
                        struct[k] = fn(v, batch_dim)

        self.state["src"] = fn(self.state["src"], 1)
        if self.state["cache"] is not None:
            _recursive_map(self.state["cache"])

    def detach_state(self):
        self.state["src"] = self.state["src"].detach()

    def forward(self, tgt, memory_bank, step=None, **kwargs):
        """Decode, possibly stepwise.

        ``step=None`` decodes the whole target sequence at once (training);
        an integer ``step`` decodes one position using the per-layer cache.
        """
        if step == 0:
            self._init_cache(memory_bank)

        tgt_words = tgt[:, :, 0].transpose(0, 1)

        emb = self.embeddings(tgt, step=step)
        assert emb.dim() == 3  # len x batch x embedding_dim

        output = emb.transpose(0, 1).contiguous()
        src_memory_bank = memory_bank.transpose(0, 1).contiguous()

        pad_idx = self.embeddings.word_padding_idx
        src_lens = kwargs["memory_lengths"]
        src_max_len = self.state["src"].shape[0]
        src_pad_mask = ~sequence_mask(src_lens, src_max_len).unsqueeze(1)
        tgt_pad_mask = tgt_words.data.eq(pad_idx).unsqueeze(1)  # [B, 1, T_tgt]

        for i, layer in enumerate(self.transformer_layers):
            layer_cache = self.state["cache"]["layer_{}".format(i)] \
                if step is not None else None
            output, attn = layer(
                output,
                src_memory_bank,
                src_pad_mask,
                tgt_pad_mask,
                layer_cache=layer_cache,
                step=step)

        output = self.layer_norm(output)
        dec_outs = output.transpose(0, 1).contiguous()
        # `attn` is the context attention of the last layer.
        attn = attn.transpose(0, 1).contiguous()

        attns = {"std": attn}
        if self._copy:
            attns["copy"] = attn

        # TODO change the way attns is returned dict => list or tuple (onnx)
        return dec_outs, attns

    def _init_cache(self, memory_bank):
        """Create the per-layer key/value cache used for stepwise decoding."""
        self.state["cache"] = {}
        batch_size = memory_bank.size(1)
        depth = memory_bank.size(-1)

        for i, layer in enumerate(self.transformer_layers):
            layer_cache = {"memory_keys": None, "memory_values": None}
            if isinstance(layer.self_attn, AverageAttention):
                layer_cache["prev_g"] = torch.zeros((batch_size, 1, depth),
                                                    device=memory_bank.device)
            else:
                layer_cache["self_keys"] = None
                layer_cache["self_values"] = None
            self.state["cache"]["layer_{}".format(i)] = layer_cache

    def update_dropout(self, dropout, attention_dropout):
        self.embeddings.update_dropout(dropout)
        for layer in self.transformer_layers:
            layer.update_dropout(dropout, attention_dropout)
import torch
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.decoders.decoder import DecoderBase
from onmt.models import NMTModel
import onmt.model_builder
class EnsembleDecoderOutput(object):
    """Wrapper around the final decoder hidden states of several models."""

    def __init__(self, model_dec_outs):
        self.model_dec_outs = tuple(model_dec_outs)

    def squeeze(self, dim=None):
        """Delegate squeeze to each wrapped output, so callers such as
        :func:`onmt.translate.translator.Translator.translate_batch()`
        need no changes."""
        squeezed = [out.squeeze(dim) for out in self.model_dec_outs]
        return EnsembleDecoderOutput(squeezed)

    def __getitem__(self, index):
        return self.model_dec_outs[index]
class EnsembleEncoder(EncoderBase):
    """Dummy Encoder that delegates to individual real Encoders."""

    def __init__(self, model_encoders):
        super(EnsembleEncoder, self).__init__()
        self.model_encoders = nn.ModuleList(model_encoders)

    def forward(self, src, lengths=None):
        """Run every sub-encoder; hidden states and memory banks come back
        as per-model tuples, lengths are shared."""
        outputs = [encoder(src, lengths) for encoder in self.model_encoders]
        enc_hidden, memory_bank, _ = zip(*outputs)
        return enc_hidden, memory_bank, lengths
class EnsembleDecoder(DecoderBase):
    """Dummy Decoder that delegates to individual real Decoders."""

    def __init__(self, model_decoders):
        model_decoders = nn.ModuleList(model_decoders)
        attentional = any(dec.attentional for dec in model_decoders)
        super(EnsembleDecoder, self).__init__(attentional)
        self.model_decoders = model_decoders

    def forward(self, tgt, memory_bank, memory_lengths=None, step=None):
        """See :func:`onmt.decoders.decoder.DecoderBase.forward()`.

        Note: ``memory_lengths`` is a single tensor shared between all
        models. This assumption will not hold if Translator is modified to
        calculate memory_lengths as something other than the length of the
        input.
        """
        outputs = [
            decoder(tgt, memory_bank[i],
                    memory_lengths=memory_lengths, step=step)
            for i, decoder in enumerate(self.model_decoders)]
        dec_outs, attns = zip(*outputs)
        return EnsembleDecoderOutput(dec_outs), self.combine_attns(attns)

    def combine_attns(self, attns):
        """Average each attention tensor across models, skipping Nones."""
        averaged = {}
        for key in attns[0].keys():
            stacked = torch.stack(
                [attn[key] for attn in attns if attn[key] is not None])
            averaged[key] = stacked.mean(0)
        return averaged

    def init_state(self, src, memory_bank, enc_hidden):
        """See :obj:`RNNDecoderBase.init_state()`"""
        for i, decoder in enumerate(self.model_decoders):
            decoder.init_state(src, memory_bank[i], enc_hidden[i])

    def map_state(self, fn):
        for decoder in self.model_decoders:
            decoder.map_state(fn)
class EnsembleGenerator(nn.Module):
    """
    Dummy Generator that delegates to individual real Generators,
    and then averages the resulting target distributions.
    """

    def __init__(self, model_generators, raw_probs=False):
        super(EnsembleGenerator, self).__init__()
        self.model_generators = nn.ModuleList(model_generators)
        self._raw_probs = raw_probs

    def forward(self, hidden, attn=None, src_map=None):
        """
        Compute a distribution over the target dictionary
        by averaging distributions from models in the ensemble.
        All models in the ensemble must share a target vocabulary.
        """
        per_model = []
        for h, generator in zip(hidden, self.model_generators):
            if attn is None:
                per_model.append(generator(h))
            else:
                per_model.append(generator(h, attn, src_map))
        distributions = torch.stack(per_model)

        if self._raw_probs:
            # Average in probability space, then map back to log space.
            return torch.log(torch.exp(distributions).mean(0))
        return distributions.mean(0)
class EnsembleModel(NMTModel):
    """Dummy NMTModel wrapping individual real NMTModels."""

    def __init__(self, models, raw_probs=False):
        # Build ensemble wrappers around each sub-model's components.
        encoder = EnsembleEncoder(model.encoder for model in models)
        decoder = EnsembleDecoder(model.decoder for model in models)
        super(EnsembleModel, self).__init__(encoder, decoder)
        generators = [model.generator for model in models]
        self.generator = EnsembleGenerator(generators, raw_probs)
        self.models = nn.ModuleList(models)
def load_test_model(opt):
    """Read in multiple models for ensemble.

    Every checkpoint listed in ``opt.models`` must have been built from the
    same preprocessed vocabulary; the first checkpoint's fields and options
    are reused for the whole ensemble.
    """
    def _sub_fields(name, field):
        # A field may itself be an iterable of (sub_name, sub_field) pairs;
        # otherwise treat it as a single field keyed by its own name.
        try:
            return list(iter(field))
        except TypeError:
            return [(name, field)]

    shared_fields = None
    shared_model_opt = None
    models = []
    for model_path in opt.models:
        fields, model, model_opt = \
            onmt.model_builder.load_test_model(opt, model_path=model_path)
        if shared_fields is None:
            shared_fields = fields
        else:
            # Verify every vocab matches the first checkpoint's vocab.
            for key, field in fields.items():
                for sn, sf in _sub_fields(key, field):
                    if sf is None or 'vocab' not in sf.__dict__:
                        continue
                    sh_f_dict = dict(_sub_fields(key, shared_fields[key]))
                    assert sf.vocab.stoi == sh_f_dict[sn].vocab.stoi, \
                        "Ensemble models must use the same " \
                        "preprocessed data"
        models.append(model)
        if shared_model_opt is None:
            shared_model_opt = model_opt
    ensemble_model = EnsembleModel(models, opt.avg_raw_probs)
    return shared_fields, ensemble_model, shared_model_opt
import torch
import torch.nn as nn
from onmt.modules import ConvMultiStepAttention, GlobalAttention
from onmt.utils.cnn_factory import shape_transform, GatedConv
from onmt.decoders.decoder import DecoderBase
SCALE_WEIGHT = 0.5 ** 0.5
class CNNDecoder(DecoderBase):
    """Decoder based on "Convolutional Sequence to Sequence Learning"
    :cite:`DBLP:journals/corr/GehringAGYD17`.

    Consists of residual convolutional layers, with ConvMultiStepAttention.
    """

    def __init__(self, num_layers, hidden_size, attn_type,
                 copy_attn, cnn_kernel_width, dropout, embeddings,
                 copy_attn_type):
        super(CNNDecoder, self).__init__()

        self.cnn_kernel_width = cnn_kernel_width
        self.embeddings = embeddings

        # Decoder State
        self.state = {}

        emb_dim = self.embeddings.embedding_size
        self.linear = nn.Linear(emb_dim, hidden_size)
        self.conv_layers = nn.ModuleList(
            [GatedConv(hidden_size, cnn_kernel_width, dropout, True)
             for _ in range(num_layers)]
        )
        self.attn_layers = nn.ModuleList(
            [ConvMultiStepAttention(hidden_size) for _ in range(num_layers)]
        )

        # CNNDecoder has its own attention mechanism.
        # Set up a separate copy attention layer if needed.
        assert not copy_attn, "Copy mechanism not yet tested in conv2conv"
        if copy_attn:
            self.copy_attn = GlobalAttention(
                hidden_size, attn_type=copy_attn_type)
        else:
            self.copy_attn = None

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.global_attention,
            opt.copy_attn,
            opt.cnn_kernel_width,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            embeddings,
            opt.copy_attn_type)

    def init_state(self, _, memory_bank, enc_hidden):
        """Init decoder state."""
        self.state["src"] = (memory_bank + enc_hidden) * SCALE_WEIGHT
        self.state["previous_input"] = None

    def map_state(self, fn):
        self.state["src"] = fn(self.state["src"], 1)
        if self.state["previous_input"] is not None:
            self.state["previous_input"] = fn(self.state["previous_input"], 1)

    def detach_state(self):
        self.state["previous_input"] = self.state["previous_input"].detach()

    def forward(self, tgt, memory_bank, step=None, **kwargs):
        """See :obj:`onmt.modules.RNNDecoderBase.forward()`"""
        # Prepend previously decoded targets so the convolutions see the
        # whole history, then slice the new positions back out at the end.
        if self.state["previous_input"] is not None:
            tgt = torch.cat([self.state["previous_input"], tgt], 0)

        attns = {"std": []}
        if self.copy_attn is not None:
            attns["copy"] = []

        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim

        tgt_emb = emb.transpose(0, 1).contiguous()
        # The output of CNNEncoder.
        enc_out_t = memory_bank.transpose(0, 1).contiguous()
        # The combination of output of CNNEncoder and source embeddings.
        enc_out_c = self.state["src"].transpose(0, 1).contiguous()

        # Project embeddings to the hidden size, then reshape for conv2d.
        flat_emb = tgt_emb.contiguous().view(
            tgt_emb.size(0) * tgt_emb.size(1), -1)
        x = self.linear(flat_emb).view(
            tgt_emb.size(0), tgt_emb.size(1), -1)
        x = shape_transform(x)

        # Left-pad so every convolution is causal (sees only the past).
        pad = torch.zeros(x.size(0), x.size(1), self.cnn_kernel_width - 1, 1)
        pad = pad.type_as(x)
        base_target_emb = x

        for conv, attention in zip(self.conv_layers, self.attn_layers):
            new_target_input = torch.cat([pad, x], 2)
            out = conv(new_target_input)
            c, attn = attention(base_target_emb, out,
                                enc_out_t, enc_out_c)
            x = (x + (c + out) * SCALE_WEIGHT) * SCALE_WEIGHT
        output = x.squeeze(3).transpose(1, 2)

        # Process the result and update the attentions.
        dec_outs = output.transpose(0, 1).contiguous()
        if self.state["previous_input"] is not None:
            # Keep only the newly decoded steps.
            dec_outs = dec_outs[self.state["previous_input"].size(0):]
            attn = attn[:, self.state["previous_input"].size(0):].squeeze()
            attn = torch.stack([attn])
        attns["std"] = attn
        if self.copy_attn is not None:
            attns["copy"] = attn

        # Update the state.
        self.state["previous_input"] = tgt
        # TODO change the way attns is returned dict => list or tuple (onnx)
        return dec_outs, attns

    def update_dropout(self, dropout):
        for layer in self.conv_layers:
            layer.dropout.p = dropout
import torch
import torch.nn as nn
from onmt.models.stacked_rnn import StackedLSTM, StackedGRU
from onmt.modules import context_gate_factory, GlobalAttention
from onmt.utils.rnn_factory import rnn_factory
from onmt.utils.misc import aeq
class DecoderBase(nn.Module):
    """Abstract class for decoders.

    Args:
        attentional (bool): whether the decoder returns non-empty attention.
    """

    def __init__(self, attentional=True):
        super(DecoderBase, self).__init__()
        self.attentional = attentional

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor; subclasses must override this method."""
        raise NotImplementedError
class RNNDecoderBase(DecoderBase):
    """Base recurrent attention-based decoder class.

    Specifies the interface used by different decoder types
    and required by :class:`~onmt.models.NMTModel`.

    .. mermaid::

       graph BT
          A[Input]
          subgraph RNN
             C[Pos 1]
             D[Pos 2]
             E[Pos N]
          end
          G[Decoder State]
          H[Decoder State]
          I[Outputs]
          F[memory_bank]
          A--emb-->C
          A--emb-->D
          A--emb-->E
          H-->C
          C-- attn --- F
          D-- attn --- F
          E-- attn --- F
          C-->I
          D-->I
          E-->I
          E-->G
          F---I

    Args:
        rnn_type (str):
           style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
        bidirectional_encoder (bool) : use with a bidirectional encoder
        num_layers (int) : number of stacked layers
        hidden_size (int) : hidden size of each layer
        attn_type (str) : see :class:`~onmt.modules.GlobalAttention`
        attn_func (str) : see :class:`~onmt.modules.GlobalAttention`
        coverage_attn (str): see :class:`~onmt.modules.GlobalAttention`
        context_gate (str): see :class:`~onmt.modules.ContextGate`
        copy_attn (bool): setup a separate copy attention mechanism
        dropout (float) : dropout value for :class:`torch.nn.Dropout`
        embeddings (onmt.modules.Embeddings): embedding module to use
        reuse_copy_attn (bool): reuse the attention for copying
        copy_attn_type (str): The copy attention style. See
            :class:`~onmt.modules.GlobalAttention`.
    """

    def __init__(self, rnn_type, bidirectional_encoder, num_layers,
                 hidden_size, attn_type="general", attn_func="softmax",
                 coverage_attn=False, context_gate=None,
                 copy_attn=False, dropout=0.0, embeddings=None,
                 reuse_copy_attn=False, copy_attn_type="general"):
        super(RNNDecoderBase, self).__init__(
            attentional=attn_type != "none" and attn_type is not None)

        self.bidirectional_encoder = bidirectional_encoder
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.embeddings = embeddings
        self.dropout = nn.Dropout(dropout)

        # Decoder state
        self.state = {}

        # Build the RNN. `_input_size` and `_build_rnn` are subclass hooks
        # (e.g. input-feeding decoders enlarge the RNN input size).
        self.rnn = self._build_rnn(rnn_type,
                                   input_size=self._input_size,
                                   hidden_size=hidden_size,
                                   num_layers=num_layers,
                                   dropout=dropout)

        # Set up the context gate.
        self.context_gate = None
        if context_gate is not None:
            self.context_gate = context_gate_factory(
                context_gate, self._input_size,
                hidden_size, hidden_size, hidden_size
            )

        # Set up the standard attention.
        self._coverage = coverage_attn
        if not self.attentional:
            if self._coverage:
                raise ValueError("Cannot use coverage term with no attention.")
            self.attn = None
        else:
            self.attn = GlobalAttention(
                hidden_size, coverage=coverage_attn,
                attn_type=attn_type, attn_func=attn_func
            )

        if copy_attn and not reuse_copy_attn:
            if copy_attn_type == "none" or copy_attn_type is None:
                raise ValueError(
                    "Cannot use copy_attn with copy_attn_type none")
            self.copy_attn = GlobalAttention(
                hidden_size, attn_type=copy_attn_type, attn_func=attn_func
            )
        else:
            self.copy_attn = None

        self._reuse_copy_attn = reuse_copy_attn and copy_attn
        if self._reuse_copy_attn and not self.attentional:
            raise ValueError("Cannot reuse copy attention with no attention.")

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.rnn_type,
            opt.brnn,
            opt.dec_layers,
            opt.dec_rnn_size,
            opt.global_attention,
            opt.global_attention_function,
            opt.coverage_attn,
            opt.context_gate,
            opt.copy_attn,
            opt.dropout[0] if type(opt.dropout) is list
            else opt.dropout,
            embeddings,
            opt.reuse_copy_attn,
            opt.copy_attn_type)

    def init_state(self, src, memory_bank, encoder_final):
        """Initialize decoder state with last state of the encoder."""
        def _fix_enc_hidden(hidden):
            # The encoder hidden is  (layers*directions) x batch x dim.
            # We need to convert it to layers x batch x (directions*dim).
            if self.bidirectional_encoder:
                hidden = torch.cat([hidden[0:hidden.size(0):2],
                                    hidden[1:hidden.size(0):2]], 2)
            return hidden

        if isinstance(encoder_final, tuple):  # LSTM
            self.state["hidden"] = tuple(_fix_enc_hidden(enc_hid)
                                         for enc_hid in encoder_final)
        else:  # GRU
            self.state["hidden"] = (_fix_enc_hidden(encoder_final), )

        # Init the input feed: a zero tensor shaped like one output step.
        batch_size = self.state["hidden"][0].size(1)
        h_size = (batch_size, self.hidden_size)
        self.state["input_feed"] = \
            self.state["hidden"][0].data.new(*h_size).zero_().unsqueeze(0)
        self.state["coverage"] = None

    def map_state(self, fn):
        # Apply `fn` (e.g. beam tiling/reordering) to each stateful tensor;
        # dim 1 is the batch dimension in every case here.
        self.state["hidden"] = tuple(fn(h, 1) for h in self.state["hidden"])
        self.state["input_feed"] = fn(self.state["input_feed"], 1)
        if self._coverage and self.state["coverage"] is not None:
            self.state["coverage"] = fn(self.state["coverage"], 1)

    def detach_state(self):
        # Cut backprop-through-time at a truncation boundary.
        self.state["hidden"] = tuple(h.detach() for h in self.state["hidden"])
        self.state["input_feed"] = self.state["input_feed"].detach()

    def forward(self, tgt, memory_bank, memory_lengths=None, step=None):
        """
        Args:
            tgt (LongTensor): sequences of padded tokens
                 ``(tgt_len, batch, nfeats)``.
            memory_bank (FloatTensor): vectors from the encoder
                 ``(src_len, batch, hidden)``.
            memory_lengths (LongTensor): the padded source lengths
                ``(batch,)``.

        Returns:
            (FloatTensor, dict[str, FloatTensor]):

            * dec_outs: output from the decoder (after attn)
              ``(tgt_len, batch, hidden)``.
            * attns: distribution over src at each tgt
              ``(tgt_len, batch, src_len)``.
        """
        dec_state, dec_outs, attns = self._run_forward_pass(
            tgt, memory_bank, memory_lengths=memory_lengths)

        # Update the state with the result.
        if not isinstance(dec_state, tuple):
            dec_state = (dec_state,)
        self.state["hidden"] = dec_state
        # The next input feed is the last decoder output of this pass.
        self.state["input_feed"] = dec_outs[-1].unsqueeze(0)
        self.state["coverage"] = None
        if "coverage" in attns:
            self.state["coverage"] = attns["coverage"][-1].unsqueeze(0)

        # Concatenates sequence of tensors along a new dimension.
        # NOTE: v0.3 to 0.4: dec_outs / attns[*] may not be list
        #   (in particular in case of SRU) it was not raising error in 0.3
        #   since stack(Variable) was allowed.
        #   In 0.4, SRU returns a tensor that shouldn't be stacke
        if type(dec_outs) == list:
            dec_outs = torch.stack(dec_outs)

            for k in attns:
                if type(attns[k]) == list:
                    attns[k] = torch.stack(attns[k])
        return dec_outs, attns

    def update_dropout(self, dropout):
        self.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
class StdRNNDecoder(RNNDecoderBase):
    """Standard fully batched RNN decoder with attention.

    Faster implementation, uses CuDNN for implementation.
    See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.

    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`

    Implemented without input_feeding and currently with no `coverage_attn`
    or `copy_attn` support.
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """Run the CuDNN-friendly forward pass (no input feeding).

        Args:
            tgt (LongTensor): input token sequence ``(len, batch, nfeats)``.
            memory_bank (FloatTensor): encoder output
                ``(src_len, batch, hidden_size)``.
            memory_lengths (LongTensor): source memory_bank lengths.

        Returns:
            (Tensor, List[FloatTensor], Dict[str, List[FloatTensor]]):
                final decoder hidden state, per-step decoder outputs, and
                per-step attention tensors by type.
        """
        assert self.copy_attn is None  # TODO, no support yet.
        assert not self._coverage  # TODO, no support yet.

        attns = {}
        emb = self.embeddings(tgt)

        # nn.GRU takes a bare hidden tensor; LSTM takes the (h, c) tuple.
        if isinstance(self.rnn, nn.GRU):
            rnn_output, dec_state = self.rnn(emb, self.state["hidden"][0])
        else:
            rnn_output, dec_state = self.rnn(emb, self.state["hidden"])

        # Sanity check: the RNN must preserve target length and batch dims.
        tgt_len, tgt_batch, _ = tgt.size()
        out_len, out_batch, _ = rnn_output.size()
        aeq(tgt_len, out_len)
        aeq(tgt_batch, out_batch)

        # Calculate the attention (or pass RNN outputs straight through).
        if not self.attentional:
            dec_outs = rnn_output
        else:
            dec_outs, p_attn = self.attn(
                rnn_output.transpose(0, 1).contiguous(),
                memory_bank.transpose(0, 1),
                memory_lengths=memory_lengths
            )
            attns["std"] = p_attn

        # Calculate the context gate.
        if self.context_gate is not None:
            dec_outs = self.context_gate(
                emb.view(-1, emb.size(2)),
                rnn_output.view(-1, rnn_output.size(2)),
                dec_outs.view(-1, dec_outs.size(2))
            )
            dec_outs = dec_outs.view(tgt_len, tgt_batch, self.hidden_size)

        dec_outs = self.dropout(dec_outs)
        return dec_state, dec_outs, attns

    def _build_rnn(self, rnn_type, **kwargs):
        rnn, _ = rnn_factory(rnn_type, **kwargs)
        return rnn

    @property
    def _input_size(self):
        """The decoder RNN consumes raw embeddings (no input feed)."""
        return self.embeddings.embedding_size
class InputFeedRNNDecoder(RNNDecoderBase):
    """Input feeding based decoder.

    See :class:`~onmt.decoders.decoder.RNNDecoderBase` for options.

    Based around the input feeding approach from
    "Effective Approaches to Attention-based Neural Machine Translation"
    :cite:`Luong2015`

    .. mermaid::

       graph BT
          A[Input n-1]
          AB[Input n]
          subgraph RNN
            E[Pos n-1]
            F[Pos n]
            E --> F
          end
          G[Encoder]
          H[memory_bank n-1]
          A --> E
          AB --> F
          E --> H
          G --> H
    """

    def _run_forward_pass(self, tgt, memory_bank, memory_lengths=None):
        """
        See StdRNNDecoder._run_forward_pass() for description
        of arguments and return values.
        """
        # Additional args check: the cached input feed (previous attentional
        # output) must match the current target batch size.
        input_feed = self.state["input_feed"].squeeze(0)
        input_feed_batch, _ = input_feed.size()
        _, tgt_batch, _ = tgt.size()
        aeq(tgt_batch, input_feed_batch)
        # END Additional args check.
        dec_outs = []
        attns = {}
        if self.attn is not None:
            attns["std"] = []
        if self.copy_attn is not None or self._reuse_copy_attn:
            attns["copy"] = []
        if self._coverage:
            attns["coverage"] = []
        emb = self.embeddings(tgt)
        assert emb.dim() == 3  # len x batch x embedding_dim
        dec_state = self.state["hidden"]
        coverage = self.state["coverage"].squeeze(0) \
            if self.state["coverage"] is not None else None
        # Input feed concatenates the previous attentional hidden state with
        # the input embedding at every time step, so decoding is sequential.
        for emb_t in emb.split(1):
            decoder_input = torch.cat([emb_t.squeeze(0), input_feed], 1)
            rnn_output, dec_state = self.rnn(decoder_input, dec_state)
            if self.attentional:
                decoder_output, p_attn = self.attn(
                    rnn_output,
                    memory_bank.transpose(0, 1),
                    memory_lengths=memory_lengths)
                attns["std"].append(p_attn)
            else:
                decoder_output = rnn_output
            if self.context_gate is not None:
                # TODO: context gate should be employed
                # instead of second RNN transform.
                decoder_output = self.context_gate(
                    decoder_input, rnn_output, decoder_output
                )
            decoder_output = self.dropout(decoder_output)
            # The dropped-out attentional output becomes the next step's feed.
            input_feed = decoder_output
            dec_outs += [decoder_output]
            # Update the coverage attention (running sum of attention).
            if self._coverage:
                coverage = p_attn if coverage is None else p_attn + coverage
                attns["coverage"] += [coverage]
            if self.copy_attn is not None:
                _, copy_attn = self.copy_attn(
                    decoder_output, memory_bank.transpose(0, 1))
                attns["copy"] += [copy_attn]
            elif self._reuse_copy_attn:
                # Copy attention simply aliases the standard attention list.
                attns["copy"] = attns["std"]
        return dec_state, dec_outs, attns

    def _build_rnn(self, rnn_type, input_size,
                   hidden_size, num_layers, dropout):
        # SRU's fused kernel cannot consume a per-step input feed.
        assert rnn_type != "SRU", "SRU doesn't support input feed! " \
            "Please set -input_feed 0!"
        stacked_cell = StackedLSTM if rnn_type == "LSTM" else StackedGRU
        return stacked_cell(num_layers, input_size, hidden_size, dropout)

    @property
    def _input_size(self):
        """Using input feed by concatenating input with attention vectors."""
        return self.embeddings.embedding_size + self.hidden_size

    def update_dropout(self, dropout):
        """Propagate a new dropout probability to the decoder, the stacked
        RNN cells and the embeddings."""
        self.dropout.p = dropout
        self.rnn.dropout.p = dropout
        self.embeddings.update_dropout(dropout)
import torch
class DecodeStrategy(object):
"""Base class for generation strategies.
Args:
pad (int): Magic integer in output vocab.
bos (int): Magic integer in output vocab.
eos (int): Magic integer in output vocab.
batch_size (int): Current batch size.
device (torch.device or str): Device for memory bank (encoder).
parallel_paths (int): Decoding strategies like beam search
use parallel paths. Each batch is repeated ``parallel_paths``
times in relevant state tensors.
min_length (int): Shortest acceptable generation, not counting
begin-of-sentence or end-of-sentence.
max_length (int): Longest acceptable sequence, not counting
begin-of-sentence (presumably there has been no EOS
yet if max_length is used as a cutoff).
block_ngram_repeat (int): Block beams where
``block_ngram_repeat``-grams repeat.
exclusion_tokens (set[int]): If a gram contains any of these
tokens, it may repeat.
return_attention (bool): Whether to work with attention too. If this
is true, it is assumed that the decoder is attentional.
Attributes:
pad (int): See above.
bos (int): See above.
eos (int): See above.
predictions (list[list[LongTensor]]): For each batch, holds a
list of beam prediction sequences.
scores (list[list[FloatTensor]]): For each batch, holds a
list of scores.
attention (list[list[FloatTensor or list[]]]): For each
batch, holds a list of attention sequence tensors
(or empty lists) having shape ``(step, inp_seq_len)`` where
``inp_seq_len`` is the length of the sample (not the max
length of all inp seqs).
alive_seq (LongTensor): Shape ``(B x parallel_paths, step)``.
This sequence grows in the ``step`` axis on each call to
:func:`advance()`.
is_finished (ByteTensor or NoneType): Shape
``(B, parallel_paths)``. Initialized to ``None``.
alive_attn (FloatTensor or NoneType): If tensor, shape is
``(step, B x parallel_paths, inp_seq_len)``, where ``inp_seq_len``
is the (max) length of the input sequence.
min_length (int): See above.
max_length (int): See above.
block_ngram_repeat (int): See above.
exclusion_tokens (set[int]): See above.
return_attention (bool): See above.
done (bool): See above.
"""
def __init__(self, pad, bos, eos, batch_size, device, parallel_paths,
min_length, block_ngram_repeat, exclusion_tokens,
return_attention, max_length):
# magic indices
self.pad = pad
self.bos = bos
self.eos = eos
# result caching
self.predictions = [[] for _ in range(batch_size)]
self.scores = [[] for _ in range(batch_size)]
self.attention = [[] for _ in range(batch_size)]
self.alive_seq = torch.full(
[batch_size * parallel_paths, 1], self.bos,
dtype=torch.long, device=device)
self.is_finished = torch.zeros(
[batch_size, parallel_paths],
dtype=torch.uint8, device=device)
self.alive_attn = None
self.min_length = min_length
self.max_length = max_length
self.block_ngram_repeat = block_ngram_repeat
self.exclusion_tokens = exclusion_tokens
self.return_attention = return_attention
self.done = False
def __len__(self):
return self.alive_seq.shape[1]
def ensure_min_length(self, log_probs):
if len(self) <= self.min_length:
log_probs[:, self.eos] = -1e20
def ensure_max_length(self):
# add one to account for BOS. Don't account for EOS because hitting
# this implies it hasn't been found.
if len(self) == self.max_length + 1:
self.is_finished.fill_(1)
def block_ngram_repeats(self, log_probs):
cur_len = len(self)
if self.block_ngram_repeat > 0 and cur_len > 1:
for path_idx in range(self.alive_seq.shape[0]):
# skip BOS
hyp = self.alive_seq[path_idx, 1:]
ngrams = set()
fail = False
gram = []
for i in range(cur_len - 1):
# Last n tokens, n = block_ngram_repeat
gram = (gram + [hyp[i].item()])[-self.block_ngram_repeat:]
# skip the blocking if any token in gram is excluded
if set(gram) & self.exclusion_tokens:
continue
if tuple(gram) in ngrams:
fail = True
ngrams.add(tuple(gram))
if fail:
log_probs[path_idx] = -10e20
def advance(self, log_probs, attn):
"""DecodeStrategy subclasses should override :func:`advance()`.
Advance is used to update ``self.alive_seq``, ``self.is_finished``,
and, when appropriate, ``self.alive_attn``.
"""
raise NotImplementedError()
def update_finished(self):
"""DecodeStrategy subclasses should override :func:`update_finished()`.
``update_finished`` is used to update ``self.predictions``,
``self.scores``, and other "output" attributes.
"""
raise NotImplementedError() | /rxn_opennmt_py-1.1.4-py3-none-any.whl/onmt/translate/decode_strategy.py | 0.903182 | 0.500549 | decode_strategy.py | pypi |
from __future__ import unicode_literals, print_function
import torch
from onmt.inputters.text_dataset import TextMultiField
class TranslationBuilder(object):
    """
    Build a word-based translation from the batch output
    of translator and the underlying dictionaries.

    Replacement based on "Addressing the Rare Word
    Problem in Neural Machine Translation" :cite:`Luong2015b`

    Args:
        data (onmt.inputters.Dataset): Data.
        fields (List[Tuple[str, torchtext.data.Field]]): data fields
        n_best (int): number of translations produced
        replace_unk (bool): replace unknown words using attention
        has_tgt (bool): will the batch have gold targets
        phrase_table (str): path of a phrase table for UNK replacement;
            "" disables the lookup.
    """

    def __init__(self, data, fields, n_best=1, replace_unk=False,
                 has_tgt=False, phrase_table=""):
        self.data = data
        self.fields = fields
        # Text sources carry per-example extended vocabs; other modalities
        # (image/audio) have no raw source tokens to recover.
        self._has_text_src = isinstance(
            dict(self.fields)["src"], TextMultiField)
        self.n_best = n_best
        self.replace_unk = replace_unk
        self.phrase_table = phrase_table
        self.has_tgt = has_tgt

    def _build_target_tokens(self, src, src_vocab, src_raw, pred, attn):
        """Map predicted ids to tokens, resolving copied and UNK words.

        Ids beyond the shared vocab index into the example's extended
        (copy) vocabulary ``src_vocab``.
        """
        tgt_field = dict(self.fields)["tgt"].base_field
        vocab = tgt_field.vocab
        tokens = []
        for tok in pred:
            if tok < len(vocab):
                tokens.append(vocab.itos[tok])
            else:
                # copy-mechanism id: offset into the source-side vocab
                tokens.append(src_vocab.itos[tok - len(vocab)])
            if tokens[-1] == tgt_field.eos_token:
                # drop EOS and stop decoding this sequence
                tokens = tokens[:-1]
                break
        if self.replace_unk and attn is not None and src is not None:
            for i in range(len(tokens)):
                if tokens[i] == tgt_field.unk_token:
                    # replace UNK with the source token that received the
                    # most attention at this step
                    _, max_index = attn[i][:len(src_raw)].max(0)
                    tokens[i] = src_raw[max_index.item()]
                    if self.phrase_table != "":
                        with open(self.phrase_table, "r") as f:
                            for line in f:
                                if line.startswith(src_raw[max_index.item()]):
                                    tokens[i] = line.split('|||')[1].strip()
        return tokens

    def from_batch(self, translation_batch):
        """Build a list of :class:`Translation` objects from one batch.

        Results are re-sorted back into the dataset's original example
        order (batches arrive sorted by length for packing).
        """
        batch = translation_batch["batch"]
        assert(len(translation_batch["gold_score"]) ==
               len(translation_batch["predictions"]))
        batch_size = batch.batch_size
        # Sort every per-example result list by the original dataset index
        # (the last element of each zipped tuple).
        preds, pred_score, attn, gold_score, indices = list(zip(
            *sorted(zip(translation_batch["predictions"],
                        translation_batch["scores"],
                        translation_batch["attention"],
                        translation_batch["gold_score"],
                        batch.indices.data),
                    key=lambda x: x[-1])))
        # Sorting: permutation that restores original order of the batch
        inds, perm = torch.sort(batch.indices)
        if self._has_text_src:
            src = batch.src[0][:, :, 0].index_select(1, perm)
        else:
            src = None
        tgt = batch.tgt[:, :, 0].index_select(1, perm) \
            if self.has_tgt else None
        translations = []
        for b in range(batch_size):
            if self._has_text_src:
                src_vocab = self.data.src_vocabs[inds[b]] \
                    if self.data.src_vocabs else None
                src_raw = self.data.examples[inds[b]].src[0]
            else:
                src_vocab = None
                src_raw = None
            pred_sents = [self._build_target_tokens(
                src[:, b] if src is not None else None,
                src_vocab, src_raw,
                preds[b][n], attn[b][n])
                for n in range(self.n_best)]
            gold_sent = None
            if tgt is not None:
                # skip BOS (tgt[0]) when rebuilding the gold sentence
                gold_sent = self._build_target_tokens(
                    src[:, b] if src is not None else None,
                    src_vocab, src_raw,
                    tgt[1:, b] if tgt is not None else None, None)
            translation = Translation(
                src[:, b] if src is not None else None,
                src_raw, pred_sents, attn[b], pred_score[b],
                gold_sent, gold_score[b]
            )
            translations.append(translation)
        return translations
class Translation(object):
    """Container for a translated sentence.

    Attributes:
        src (LongTensor): Source word IDs.
        src_raw (List[str]): Raw source words.
        pred_sents (List[List[str]]): Words from the n-best translations.
        attns (List[FloatTensor]): Attention distribution for each
            translation.
        pred_scores (List[List[float]]): Log-probs of n-best translations.
        gold_sent (List[str]): Words from gold translation.
        gold_score (List[float]): Log-prob of gold translation.
    """

    # __slots__ keeps per-instance memory low; translators build one
    # Translation per example.
    __slots__ = ["src", "src_raw", "pred_sents", "attns", "pred_scores",
                 "gold_sent", "gold_score"]

    def __init__(self, src, src_raw, pred_sents,
                 attn, pred_scores, tgt_sent, gold_score):
        # NOTE: positional order is (..., attn, pred_scores, ...) — attention
        # comes before the scores, unlike the attribute listing above.
        self.src = src
        self.src_raw = src_raw
        self.pred_sents = pred_sents
        self.attns = attn
        self.pred_scores = pred_scores
        self.gold_sent = tgt_sent
        self.gold_score = gold_score

    def log(self, sent_number):
        """
        Log translation.

        Returns a human-readable report for sentence ``sent_number``:
        the source, the best prediction and its score, the gold sentence
        (if any), and the full n-best list when there is more than one.
        """
        msg = ['\nSENT {}: {}\n'.format(sent_number, self.src_raw)]
        best_pred = self.pred_sents[0]
        best_score = self.pred_scores[0]
        pred_sent = ' '.join(best_pred)
        msg.append('PRED {}: {}\n'.format(sent_number, pred_sent))
        msg.append("PRED SCORE: {:.4f}\n".format(best_score))
        if self.gold_sent is not None:
            tgt_sent = ' '.join(self.gold_sent)
            msg.append('GOLD {}: {}\n'.format(sent_number, tgt_sent))
            msg.append(("GOLD SCORE: {:.4f}\n".format(self.gold_score)))
        if len(self.pred_sents) > 1:
            msg.append('\nBEST HYP:\n')
            for score, sent in zip(self.pred_scores, self.pred_sents):
                msg.append("[{:.4f}] {}\n".format(score, sent))
        return "".join(msg)
from __future__ import division
import torch
from onmt.translate import penalties
import warnings
class Beam(object):
    """Class for managing the internals of the beam search process.

    Takes care of beams, back pointers, and scores.

    Args:
        size (int): Number of beams to use.
        pad (int): Magic integer in output vocab.
        bos (int): Magic integer in output vocab.
        eos (int): Magic integer in output vocab.
        n_best (int): Don't stop until at least this many beams have
            reached EOS.
        cuda (bool): use gpu
        global_scorer (onmt.translate.GNMTGlobalScorer): Scorer instance.
        min_length (int): Shortest acceptable generation, not counting
            begin-of-sentence or end-of-sentence.
        stepwise_penalty (bool): Apply coverage penalty at every step.
        block_ngram_repeat (int): Block beams where
            ``block_ngram_repeat``-grams repeat.
        exclusion_tokens (set[int]): If a gram contains any of these
            token indices, it may repeat.
    """

    def __init__(self, size, pad, bos, eos,
                 n_best=1, cuda=False,
                 global_scorer=None,
                 min_length=0,
                 stepwise_penalty=False,
                 block_ngram_repeat=0,
                 exclusion_tokens=frozenset()):
        # NOTE: default changed from the mutable ``set()`` to ``frozenset()``
        # (immutable, safe as a default argument; all uses are read-only).
        self.size = size
        self.tt = torch.cuda if cuda else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        self.all_scores = []
        # The backpointers at each time-step.
        self.prev_ks = []
        # The outputs at each time-step; slot 0 of step 0 holds BOS, the
        # rest are padding.
        self.next_ys = [self.tt.LongTensor(size)
                        .fill_(pad)]
        self.next_ys[0][0] = bos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eos_top = False
        # The attentions (matrix) for each time.
        self.attn = []
        # Time and k pair for finished.
        self.finished = []
        self.n_best = n_best
        # Information for global scoring.
        self.global_scorer = global_scorer
        self.global_state = {}
        # Minimum prediction length
        self.min_length = min_length
        # Apply Penalty at every step
        self.stepwise_penalty = stepwise_penalty
        self.block_ngram_repeat = block_ngram_repeat
        self.exclusion_tokens = exclusion_tokens

    @property
    def current_predictions(self):
        """Tokens chosen at the latest timestep, one per beam."""
        return self.next_ys[-1]

    @property
    def current_origin(self):
        """Get the backpointers for the current timestep."""
        return self.prev_ks[-1]

    def advance(self, word_probs, attn_out):
        """
        Given prob over words for every last beam `wordLk` and attention
        `attn_out`: Compute and update the beam search.

        Args:
            word_probs (FloatTensor): probs of advancing from the last step
                ``(K, words)``
            attn_out (FloatTensor): attention at the last step

        Returns:
            bool: True if beam search is complete.
        """
        num_words = word_probs.size(1)
        if self.stepwise_penalty:
            self.global_scorer.update_score(self, attn_out)
        # force the output to be longer than self.min_length
        cur_len = len(self.next_ys)
        if cur_len <= self.min_length:
            # assumes there are len(word_probs) predictions OTHER
            # than EOS that are greater than -1e20
            for k in range(len(word_probs)):
                word_probs[k][self._eos] = -1e20
        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_scores = word_probs + self.scores.unsqueeze(1)
            # Don't let EOS have children.
            for i in range(self.next_ys[-1].size(0)):
                if self.next_ys[-1][i] == self._eos:
                    beam_scores[i] = -1e20
            # Block ngram repeats
            if self.block_ngram_repeat > 0:
                le = len(self.next_ys)
                for j in range(self.next_ys[-1].size(0)):
                    hyp, _ = self.get_hyp(le - 1, j)
                    ngrams = set()
                    fail = False
                    gram = []
                    for i in range(le - 1):
                        # Last n tokens, n = block_ngram_repeat
                        gram = (gram +
                                [hyp[i].item()])[-self.block_ngram_repeat:]
                        # Skip the blocking if it is in the exclusion list
                        if set(gram) & self.exclusion_tokens:
                            continue
                        if tuple(gram) in ngrams:
                            fail = True
                        ngrams.add(tuple(gram))
                    if fail:
                        beam_scores[j] = -10e20
        else:
            # First step: all beams are identical, so only expand beam 0.
            beam_scores = word_probs[0]
        flat_beam_scores = beam_scores.view(-1)
        best_scores, best_scores_id = flat_beam_scores.topk(self.size, 0,
                                                            True, True)
        self.all_scores.append(self.scores)
        self.scores = best_scores
        # best_scores_id is flattened beam x word array, so calculate which
        # word and beam each score came from.
        # BUGFIX: use explicit floor division. The previous
        # ``best_scores_id / num_words`` is true division on modern PyTorch
        # and yields a float tensor, which breaks ``index_select`` below and
        # corrupts the backpointers.
        prev_k = torch.div(best_scores_id, num_words, rounding_mode='floor')
        self.prev_ks.append(prev_k)
        self.next_ys.append((best_scores_id - prev_k * num_words))
        self.attn.append(attn_out.index_select(0, prev_k))
        self.global_scorer.update_global_state(self)
        for i in range(self.next_ys[-1].size(0)):
            if self.next_ys[-1][i] == self._eos:
                global_scores = self.global_scorer.score(self, self.scores)
                s = global_scores[i]
                self.finished.append((s, len(self.next_ys) - 1, i))
        # End condition is when top-of-beam is EOS and no global score.
        if self.next_ys[-1][0] == self._eos:
            self.all_scores.append(self.scores)
            self.eos_top = True

    @property
    def done(self):
        return self.eos_top and len(self.finished) >= self.n_best

    def sort_finished(self, minimum=None):
        """Return finished hypotheses sorted by score (best first)."""
        if minimum is not None:
            i = 0
            # Add from beam until we have minimum outputs.
            while len(self.finished) < minimum:
                global_scores = self.global_scorer.score(self, self.scores)
                s = global_scores[i]
                self.finished.append((s, len(self.next_ys) - 1, i))
                i += 1
        self.finished.sort(key=lambda a: -a[0])
        scores = [sc for sc, _, _ in self.finished]
        ks = [(t, k) for _, t, k in self.finished]
        return scores, ks

    def get_hyp(self, timestep, k):
        """Walk back to construct the full hypothesis."""
        hyp, attn = [], []
        for j in range(len(self.prev_ks[:timestep]) - 1, -1, -1):
            hyp.append(self.next_ys[j + 1][k])
            attn.append(self.attn[j][k])
            k = self.prev_ks[j][k]
        return hyp[::-1], torch.stack(attn[::-1])
class GNMTGlobalScorer(object):
    """NMT re-ranking.

    Args:
        alpha (float): Length parameter.
        beta (float): Coverage parameter.
        length_penalty (str): Length penalty strategy.
        coverage_penalty (str): Coverage penalty strategy.

    Attributes:
        alpha (float): See above.
        beta (float): See above.
        length_penalty (callable): See :class:`penalties.PenaltyBuilder`.
        coverage_penalty (callable): See :class:`penalties.PenaltyBuilder`.
        has_cov_pen (bool): See :class:`penalties.PenaltyBuilder`.
        has_len_pen (bool): See :class:`penalties.PenaltyBuilder`.
    """

    @classmethod
    def from_opt(cls, opt):
        """Alternate constructor from a parsed options namespace."""
        return cls(
            opt.alpha,
            opt.beta,
            opt.length_penalty,
            opt.coverage_penalty)

    def __init__(self, alpha, beta, length_penalty, coverage_penalty):
        self._validate(alpha, beta, length_penalty, coverage_penalty)
        self.alpha = alpha
        self.beta = beta
        penalty_builder = penalties.PenaltyBuilder(coverage_penalty,
                                                   length_penalty)
        self.has_cov_pen = penalty_builder.has_cov_pen
        # Term will be subtracted from probability
        self.cov_penalty = penalty_builder.coverage_penalty
        self.has_len_pen = penalty_builder.has_len_pen
        # Probability will be divided by this
        self.length_penalty = penalty_builder.length_penalty

    @classmethod
    def _validate(cls, alpha, beta, length_penalty, coverage_penalty):
        """Warn about alpha/beta values that make a penalty a no-op."""
        # these warnings indicate that either the alpha/beta
        # forces a penalty to be a no-op, or a penalty is a no-op but
        # the alpha/beta would suggest otherwise.
        if length_penalty is None or length_penalty == "none":
            if alpha != 0:
                warnings.warn("Non-default `alpha` with no length penalty. "
                              "`alpha` has no effect.")
        else:
            # using some length penalty
            if length_penalty == "wu" and alpha == 0.:
                warnings.warn("Using length penalty Wu with alpha==0 "
                              "is equivalent to using length penalty none.")
        if coverage_penalty is None or coverage_penalty == "none":
            if beta != 0:
                warnings.warn("Non-default `beta` with no coverage penalty. "
                              "`beta` has no effect.")
        else:
            # using some coverage penalty
            if beta == 0.:
                warnings.warn("Non-default coverage penalty with beta==0 "
                              "is equivalent to using coverage penalty none.")

    def score(self, beam, logprobs):
        """Rescore a prediction based on penalty functions."""
        len_pen = self.length_penalty(len(beam.next_ys), self.alpha)
        normalized_probs = logprobs / len_pen
        if not beam.stepwise_penalty:
            penalty = self.cov_penalty(beam.global_state["coverage"],
                                       self.beta)
            normalized_probs -= penalty
        return normalized_probs

    def update_score(self, beam, attn):
        """Update scores of a Beam that is not finished."""
        if "prev_penalty" in beam.global_state.keys():
            beam.scores.add_(beam.global_state["prev_penalty"])
            penalty = self.cov_penalty(beam.global_state["coverage"] + attn,
                                       self.beta)
            beam.scores.sub_(penalty)

    def update_global_state(self, beam):
        """Keeps the coverage vector as sum of attentions."""
        if len(beam.prev_ks) == 1:
            beam.global_state["prev_penalty"] = beam.scores.clone().fill_(0.0)
            beam.global_state["coverage"] = beam.attn[-1]
            self.cov_total = beam.attn[-1].sum(1)
        else:
            self.cov_total += torch.min(beam.attn[-1],
                                        beam.global_state['coverage']).sum(1)
            # re-order coverage along the surviving backpointers, then add
            # the newest attention step
            beam.global_state["coverage"] = beam.global_state["coverage"] \
                .index_select(0, beam.prev_ks[-1]).add(beam.attn[-1])
            prev_penalty = self.cov_penalty(beam.global_state["coverage"],
                                            self.beta)
            beam.global_state["prev_penalty"] = prev_penalty
from __future__ import division
import torch
class PenaltyBuilder(object):
    """Returns the Length and Coverage Penalty function for Beam Search.

    Args:
        length_pen (str): option name of length pen
        cov_pen (str): option name of cov pen

    Attributes:
        has_cov_pen (bool): Whether coverage penalty is None (applying it
            is a no-op). Note that the converse isn't true. Setting beta
            to 0 should force coverage length to be a no-op.
        has_len_pen (bool): Whether length penalty is None (applying it
            is a no-op). Note that the converse isn't true. Setting alpha
            to 1 should force length penalty to be a no-op.
        coverage_penalty (callable[[FloatTensor, float], FloatTensor]):
            Calculates the coverage penalty.
        length_penalty (callable[[int, float], float]): Calculates
            the length penalty.
    """

    def __init__(self, cov_pen, length_pen):
        self.has_cov_pen = not self._pen_is_none(cov_pen)
        self.coverage_penalty = self._coverage_penalty(cov_pen)
        self.has_len_pen = not self._pen_is_none(length_pen)
        self.length_penalty = self._length_penalty(length_pen)

    @staticmethod
    def _pen_is_none(pen):
        """True when the option denotes "no penalty"."""
        return pen == "none" or pen is None

    def _coverage_penalty(self, cov_pen):
        """Resolve the coverage-penalty option name to a bound method."""
        if cov_pen == "wu":
            return self.coverage_wu
        elif cov_pen == "summary":
            return self.coverage_summary
        elif self._pen_is_none(cov_pen):
            return self.coverage_none
        else:
            raise NotImplementedError("No '{:s}' coverage penalty.".format(
                cov_pen))

    def _length_penalty(self, length_pen):
        """Resolve the length-penalty option name to a bound method."""
        if length_pen == "wu":
            return self.length_wu
        elif length_pen == "avg":
            return self.length_average
        elif self._pen_is_none(length_pen):
            return self.length_none
        else:
            raise NotImplementedError("No '{:s}' length penalty.".format(
                length_pen))

    # Below are all the different penalty terms implemented so far.
    # Subtract coverage penalty from topk log probs.
    # Divide topk log probs by length penalty.

    def coverage_wu(self, cov, beta=0.):
        """GNMT coverage re-ranking score.

        See "Google's Neural Machine Translation System" :cite:`wu2016google`.
        ``cov`` is expected to be sized ``(*, seq_len)``, where ``*`` is
        probably ``batch_size x beam_size`` but could be several
        dimensions like ``(batch_size, beam_size)``. If ``cov`` is attention,
        then the ``seq_len`` axis probably sums to (almost) 1.
        """
        penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(-1)
        return beta * penalty

    def coverage_summary(self, cov, beta=0.):
        """Our summary penalty."""
        penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(-1)
        penalty -= cov.size(-1)
        return beta * penalty

    def coverage_none(self, cov, beta=0.):
        """Returns zero as penalty"""
        none = torch.zeros((1,), device=cov.device,
                           dtype=torch.float)
        if cov.dim() == 3:
            none = none.unsqueeze(0)
        return none

    def length_wu(self, cur_len, alpha=0.):
        """GNMT length re-ranking score.

        See "Google's Neural Machine Translation System" :cite:`wu2016google`.
        """
        return ((5 + cur_len) / 6.0) ** alpha

    def length_average(self, cur_len, alpha=0.):
        """Returns the current sequence length."""
        return cur_len

    def length_none(self, cur_len, alpha=0.):
        """Returns unmodified scores."""
        return 1.0
import torch
from onmt.translate.decode_strategy import DecodeStrategy
def sample_with_temperature(logits, sampling_temp, keep_topk):
    """Select next tokens randomly from the top k possible next tokens.

    Samples from a categorical distribution over the ``keep_topk`` words
    using the category probabilities ``logits / sampling_temp``.

    Args:
        logits (FloatTensor): Shaped ``(batch_size, vocab_size)``.
            These can be logits (``(-inf, inf)``) or log-probs
            (``(-inf, 0]``).
        sampling_temp (float): Used to scale down logits. The higher the
            value, the more likely it is that a non-max word will be
            sampled.
        keep_topk (int): This many words could potentially be chosen. The
            other logits are set to have probability 0.

    Returns:
        (LongTensor, FloatTensor):

        * topk_ids: Shaped ``(batch_size, 1)``. Sampled word indices in
          the output vocab.
        * topk_scores: Shaped ``(batch_size, 1)``. Essentially
          ``(logits / sampling_temp)[topk_ids]``.
    """
    # Greedy shortcut: temp == 0.0 would divide by zero, and keep_topk == 1
    # makes sampling equivalent to argmax anyway.
    if sampling_temp == 0.0 or keep_topk == 1:
        topk_scores, topk_ids = logits.topk(1, dim=-1)
        if sampling_temp > 0:
            topk_scores /= sampling_temp
        return topk_ids, topk_scores

    scaled = logits / sampling_temp
    if keep_topk > 0:
        top_values, _ = torch.topk(scaled, keep_topk, dim=1)
        # Threshold = k-th best logit, broadcast across the vocab axis.
        kth_best = top_values[:, -1].view([-1, 1])
        kth_best = kth_best.repeat([1, scaled.shape[1]]).float()
        # Push everything below the top-k down to (near) zero probability.
        scaled = scaled.masked_fill(scaled < kth_best, -10000)

    dist = torch.distributions.Multinomial(logits=scaled, total_count=1)
    topk_ids = dist.sample().argmax(dim=1, keepdim=True)
    topk_scores = scaled.gather(dim=1, index=topk_ids)
    return topk_ids, topk_scores
class RandomSampling(DecodeStrategy):
    """Select next tokens randomly from the top k possible next tokens.

    The ``scores`` attribute's lists are the score, after applying
    temperature, of the final prediction (either EOS or the final token in
    the event that ``max_length`` is reached).

    Args:
        pad (int): See base.
        bos (int): See base.
        eos (int): See base.
        batch_size (int): See base.
        device (torch.device or str): See base ``device``.
        min_length (int): See base.
        block_ngram_repeat (int): See base.
        exclusion_tokens (set[int]): See base.
        return_attention (bool): See base.
        max_length (int): See base.
        sampling_temp (float): See
            :func:`~onmt.translate.random_sampling.sample_with_temperature()`.
        keep_topk (int): See
            :func:`~onmt.translate.random_sampling.sample_with_temperature()`.
        memory_length (LongTensor): Lengths of encodings. Used for
            masking attention.
    """

    def __init__(self, pad, bos, eos, batch_size, device,
                 min_length, block_ngram_repeat, exclusion_tokens,
                 return_attention, max_length, sampling_temp, keep_topk,
                 memory_length):
        # parallel_paths is fixed to 1: random sampling keeps a single
        # hypothesis per batch item.
        super(RandomSampling, self).__init__(
            pad, bos, eos, batch_size, device, 1,
            min_length, block_ngram_repeat, exclusion_tokens,
            return_attention, max_length)
        self.sampling_temp = sampling_temp
        self.keep_topk = keep_topk
        self.topk_scores = None
        self.memory_length = memory_length
        self.batch_size = batch_size
        # Positions of still-alive rows within the decoder state tensors.
        self.select_indices = torch.arange(self.batch_size,
                                           dtype=torch.long, device=device)
        # Maps alive rows back to their original batch indices.
        self.original_batch_idx = torch.arange(self.batch_size,
                                               dtype=torch.long, device=device)

    def advance(self, log_probs, attn):
        """Select next tokens randomly from the top k possible next tokens.

        Args:
            log_probs (FloatTensor): Shaped ``(batch_size, vocab_size)``.
                These can be logits (``(-inf, inf)``) or log-probs
                (``(-inf, 0]``). (The distribution actually uses the
                log-probabilities ``logits - logits.logsumexp(-1)``,
                which equals the logits if they are log-probabilities summing
                to 1.)
            attn (FloatTensor): Shaped ``(1, B, inp_seq_len)``.
        """
        self.ensure_min_length(log_probs)
        self.block_ngram_repeats(log_probs)
        topk_ids, self.topk_scores = sample_with_temperature(
            log_probs, self.sampling_temp, self.keep_topk)
        # A path is finished as soon as it samples EOS.
        self.is_finished = topk_ids.eq(self.eos)
        self.alive_seq = torch.cat([self.alive_seq, topk_ids], -1)
        if self.return_attention:
            if self.alive_attn is None:
                self.alive_attn = attn
            else:
                self.alive_attn = torch.cat([self.alive_attn, attn], 0)
        self.ensure_max_length()

    def update_finished(self):
        """Finalize scores and predictions."""
        # shape: (sum(~ self.is_finished), 1)
        finished_batches = self.is_finished.view(-1).nonzero()
        for b in finished_batches.view(-1):
            b_orig = self.original_batch_idx[b]
            self.scores[b_orig].append(self.topk_scores[b, 0])
            # drop the leading BOS token from the stored prediction
            self.predictions[b_orig].append(self.alive_seq[b, 1:])
            self.attention[b_orig].append(
                self.alive_attn[:, b, :self.memory_length[b]]
                if self.alive_attn is not None else [])
        self.done = self.is_finished.all()
        if self.done:
            return
        # Compact state tensors down to the still-alive rows.
        is_alive = ~self.is_finished.view(-1)
        self.alive_seq = self.alive_seq[is_alive]
        if self.alive_attn is not None:
            self.alive_attn = self.alive_attn[:, is_alive]
        self.select_indices = is_alive.nonzero().view(-1)
        self.original_batch_idx = self.original_batch_idx[is_alive]
from dataclasses import dataclass, field
from enum import Enum, auto
from pathlib import Path
from typing import Any, List, Optional
from hydra.core.config_store import ConfigStore
from omegaconf import MISSING, SI, OmegaConf
from rxn.reaction_preprocessing.utils import (
RandomType,
ReactionSection,
standardization_files_directory,
)
# Resolver so configs can interpolate "${stem:<path>}" to a file's basename
# without its extension.
OmegaConf.register_new_resolver("stem", lambda p: Path(p).stem)
# Annotation files bundled with the package (resolved from the installed
# standardization-files directory).
DEFAULT_ANNOTATION_FILES = [
    str(standardization_files_directory() / "pistachio-210428.json"),
    str(standardization_files_directory() / "catalyst-annotation-210428.json"),
    str(standardization_files_directory() / "catalyst-annotation-210826.json"),
]
@dataclass
class DataConfig:
    """Configuration of data sources and intermediate storage.

    Fields:
        path: Absolute path to input data file.
        name: Name of the input data file (without extension).
        proc_dir: Directory for storing intermediate and final output files.
    """

    # Required; must be supplied by the user's config.
    path: str = MISSING
    # Derived by the "stem" resolver registered above: basename of
    # ``data.path`` without its extension.
    name: str = SI("${stem:${data.path}}")
    # Required; must be supplied by the user's config.
    proc_dir: str = MISSING
class FragmentBond(Enum):
    """Token used to denote a fragment bond within a reaction SMILES."""

    DOT = "."
    TILDE = "~"
class InitialDataFormat(Enum):
    """Supported formats for the raw input data file."""

    TXT = auto()
    CSV = auto()
    TSV = auto()
class Step(Enum):
    """Data-transformation steps that may appear in a processing sequence."""

    IMPORT = auto()
    STANDARDIZE = auto()
    PREPROCESS = auto()
    AUGMENT = auto()
    SPLIT = auto()
    TOKENIZE = auto()
@dataclass
class CommonConfig:
    """Configuration used by multiple steps.

    Fields:
        sequence: Ordered sequence of data transformation steps to perform.
        fragment_bond: Token used to denote a fragment bond in the SMILES of the reactions to process.
        reaction_column_name: Name of the reaction column for the data file.
        keep_intermediate_columns: Whether the columns generated during preprocessing should be kept.
    """

    # Default pipeline; note that Step.AUGMENT exists but is not part of
    # the default sequence.
    sequence: List[Step] = field(
        default_factory=lambda: [
            Step.IMPORT,
            Step.STANDARDIZE,
            Step.PREPROCESS,
            Step.SPLIT,
            Step.TOKENIZE,
        ]
    )
    fragment_bond: FragmentBond = FragmentBond.DOT
    reaction_column_name: str = "rxn"
    keep_intermediate_columns: bool = False
@dataclass
class RxnImportConfig:
    """Configuration for the initial import of the reaction data.
    Fields:
        input_file: the input file path (.txt, .csv).
        output_csv: the output file path.
        data_format: whether the input file is in TXT, CSV, or TSV format.
        input_csv_column_name: name of the column containing the reactions if the input
            is in CSV format. The value is ignored if the input is not in CSV format.
        reaction_column_name: how to name the column with the reaction SMILES in
            the output CSV.
        fragment_bond: token denoting a fragment bond in the reaction SMILES (after import).
        remove_atom_mapping: whether to remove the atom mapping from the input reaction SMILES.
        column_for_light: name of the column containing boolean values that indicate
            whether the reaction happens under light. If specified, the light token will
            be added to the precursors of the corresponding reactions.
        column_for_heat: name of the column containing boolean values that indicate
            whether the reaction happens under heat. If specified, the heat token will
            be added to the precursors of the corresponding reactions.
        keep_intermediate_columns: Whether the columns generated during preprocessing should be kept.
        keep_original_rxn_column: if ``keep_intermediate_columns`` is False, determines whether
            the original column with the raw reaction SMILES is to be kept or not.
    """
    input_file: str = SI("${data.path}")
    output_csv: str = SI("${data.proc_dir}/${data.name}.imported.csv")
    data_format: InitialDataFormat = InitialDataFormat.CSV
    input_csv_column_name: str = SI("${common.reaction_column_name}")
    reaction_column_name: str = SI("${common.reaction_column_name}")
    fragment_bond: FragmentBond = SI("${common.fragment_bond}")
    remove_atom_mapping: bool = True
    column_for_light: Optional[str] = None
    column_for_heat: Optional[str] = None
    keep_intermediate_columns: bool = SI("${common.keep_intermediate_columns}")
    keep_original_rxn_column: bool = False
@dataclass
class StandardizeConfig:
    """Configuration for the standardization transformation step.
    Fields:
        input_file_path: The input CSV (one SMILES per line).
        output_file_path: The output file path containing the result after standardization.
        annotation_file_paths: The files to load the annotated molecules from.
        discard_unannotated_metals: whether reactions containing unannotated
            molecules with transition metals must be rejected.
        fragment_bond: Token used to denote a fragment bond in the reaction SMILES.
        reaction_column_name: Name of the reaction column for the data file.
        remove_stereo_if_not_defined_in_precursors: Remove chiral centers from product.
        keep_intermediate_columns: Whether the columns generated during preprocessing should be kept.
    """
    # By default, the output of the import step.
    input_file_path: str = SI("${rxn_import.output_csv}")
    # Empty by default; DEFAULT_ANNOTATION_FILES (above) can be referenced explicitly.
    annotation_file_paths: List[str] = field(default_factory=lambda: [])
    discard_unannotated_metals: bool = False
    output_file_path: str = SI("${data.proc_dir}/${data.name}.standardized.csv")
    fragment_bond: FragmentBond = SI("${common.fragment_bond}")
    reaction_column_name: str = SI("${common.reaction_column_name}")
    remove_stereo_if_not_defined_in_precursors: bool = False
    keep_intermediate_columns: bool = SI("${common.keep_intermediate_columns}")
@dataclass
class PreprocessConfig:
    """Configuration for the preprocess transformation step.
    Fields:
        input_file_path: The input CSV file path (by default, the output of the
            standardization step).
        output_file_path: The output file path containing the result after preprocessing.
        min_reactants: The minimum number of reactants.
        max_reactants: The maximum number of reactants.
        max_reactants_tokens: The maximum number of reactants tokens.
        min_agents: The minimum number of agents.
        max_agents: The maximum number of agents.
        max_agents_tokens: The maximum number of agents tokens.
        min_products: The minimum number of products.
        max_products: The maximum number of products.
        max_products_tokens: The maximum number of products tokens.
        max_absolute_formal_charge: The maximum absolute formal charge.
        fragment_bond: Token used to denote a fragment bond in the reaction SMILES.
        reaction_column_name: Name of the reaction column for the data file.
        keep_intermediate_columns: Whether the columns generated during preprocessing should be kept.
    """
    input_file_path: str = SI("${standardize.output_file_path}")
    output_file_path: str = SI("${data.proc_dir}/${data.name}.processed.csv")
    min_reactants: int = 2
    max_reactants: int = 10
    max_reactants_tokens: int = 300
    min_agents: int = 0
    max_agents: int = 0
    max_agents_tokens: int = 0
    min_products: int = 1
    max_products: int = 1
    max_products_tokens: int = 200
    max_absolute_formal_charge: int = 2
    fragment_bond: FragmentBond = SI("${common.fragment_bond}")
    reaction_column_name: str = SI("${common.reaction_column_name}")
    keep_intermediate_columns: bool = SI("${common.keep_intermediate_columns}")
@dataclass
class AugmentConfig:
    """Configuration for the augmentation transformation step.
    Fields:
        input_file_path: The input file path (one SMILES per line).
        output_file_path: The output file path.
        tokenize: if tokenization is to be performed
        random_type: The randomization type to be applied
        permutations: number of random permutations for input SMILES
        reaction_column_name: Name of the reaction column for the data file.
        rxn_section_to_augment: The section of the rxn SMILES to augment.
            "precursors" for augmenting only the precursors
            "products" for augmenting only the products
        fragment_bond: Token used to denote a fragment bond in the reaction SMILES.
        keep_intermediate_columns: Whether the columns generated during preprocessing should be kept.
    """
    input_file_path: str = SI("${preprocess.output_file_path}")
    output_file_path: str = SI("${data.proc_dir}/${data.name}.augmented.csv")
    tokenize: bool = True
    random_type: RandomType = RandomType.unrestricted
    permutations: int = 1
    reaction_column_name: str = SI("${common.reaction_column_name}")
    rxn_section_to_augment: ReactionSection = ReactionSection.precursors
    fragment_bond: FragmentBond = SI("${common.fragment_bond}")
    keep_intermediate_columns: bool = SI("${common.keep_intermediate_columns}")
@dataclass
class SplitConfig:
    """Configuration for the split transformation step.
    Fields:
        input_file_path: The input file path.
        output_directory: The directory containing the files after splitting.
        split_ratio: The split ratio between training, and test and validation sets.
        reaction_column_name: Name of the reaction column for the data file.
        index_column: The name of the column used to generate the hash which ensures
            stable splitting. "products" and "precursors" are also allowed even if
            they do not exist as columns.
        hash_seed: Seed for the hashing function used for splitting.
        shuffle_seed: Seed for shuffling the train split.
    """
    input_file_path: str = SI("${preprocess.output_file_path}")
    output_directory: str = SI("${data.proc_dir}")
    split_ratio: float = 0.05
    reaction_column_name: str = SI("${common.reaction_column_name}")
    index_column: str = "products"
    hash_seed: int = 42
    shuffle_seed: int = 42
@dataclass
class InputOutputTuple:
    """Pair of input and output file paths."""
    inp: str = MISSING
    out: str = MISSING
@dataclass
class InputOutputTupleWithColumnName:
    """Pair of input and output file paths, plus the reaction column to read."""
    inp: str = MISSING
    out: str = MISSING
    reaction_column_name: str = SI("${tokenize.reaction_column_name}")
@dataclass
class TokenizeConfig:
    """Configuration for the tokenization transformation step.
    Fields:
        input_output_pairs: Paths to the input and output files.
        reaction_column_name: Name of the reaction column for the data file.
    """
    # By default, tokenize the train/validation/test CSVs produced by the split step.
    input_output_pairs: List[InputOutputTupleWithColumnName] = field(
        default_factory=lambda: [
            InputOutputTupleWithColumnName(
                SI("${data.proc_dir}/${data.name}.processed.train.csv"),
                SI("${data.proc_dir}/${data.name}.processed.train"),
            ),
            InputOutputTupleWithColumnName(
                SI("${data.proc_dir}/${data.name}.processed.validation.csv"),
                SI("${data.proc_dir}/${data.name}.processed.validation"),
            ),
            InputOutputTupleWithColumnName(
                SI("${data.proc_dir}/${data.name}.processed.test.csv"),
                SI("${data.proc_dir}/${data.name}.processed.test"),
            ),
        ]
    )
    reaction_column_name: str = SI("${common.reaction_column_name}")
@dataclass
class Config:
    """Top-level configuration aggregating the configs of all pipeline steps."""
    data: DataConfig = field(default_factory=DataConfig)
    common: CommonConfig = field(default_factory=CommonConfig)
    rxn_import: RxnImportConfig = field(default_factory=RxnImportConfig)
    standardize: StandardizeConfig = field(default_factory=StandardizeConfig)
    preprocess: PreprocessConfig = field(default_factory=PreprocessConfig)
    augment: AugmentConfig = field(default_factory=AugmentConfig)
    split: SplitConfig = field(default_factory=SplitConfig)
    tokenize: TokenizeConfig = field(default_factory=TokenizeConfig)
    @classmethod
    def from_generic_config(cls, config: Any) -> "Config":
        """Build a typed Config instance from a generic (omegaconf/hydra) config.

        Merging onto the structured schema validates the fields and converts
        plain strings to the corresponding Enum values where applicable.
        """
        cfg_dict = OmegaConf.merge(OmegaConf.structured(Config), config)
        cfg = OmegaConf.to_object(cfg_dict)
        return cfg  # type: ignore
# Register the schemas with Hydra's ConfigStore so that they can be selected
# and overridden from the command line or from YAML config files.
cs = ConfigStore.instance()
cs.store(group="data", name="base_data", node=DataConfig)
cs.store(group="common", name="base_common", node=CommonConfig)
cs.store(group="rxn_import", name="base_rxn_import", node=RxnImportConfig)
cs.store(group="standardize", name="base_standardize", node=StandardizeConfig)
cs.store(group="preprocess", name="base_preprocess", node=PreprocessConfig)
cs.store(group="augment", name="base_augment", node=AugmentConfig)
cs.store(group="tokenize", name="base_tokenize", node=TokenizeConfig)
cs.store(group="split", name="base_split", node=SplitConfig)
cs.store(name="base_config", node=Config)
import logging
from pathlib import Path
import hydra
from omegaconf import OmegaConf
from rxn.reaction_preprocessing import __version__
from rxn.reaction_preprocessing.augmenter import augment
from rxn.reaction_preprocessing.config import Config, Step
from rxn.reaction_preprocessing.importer import rxn_import
from rxn.reaction_preprocessing.preprocessor import preprocess
from rxn.reaction_preprocessing.smiles_tokenizer import tokenize
from rxn.reaction_preprocessing.stable_data_splitter import split
from rxn.reaction_preprocessing.standardizer import standardize
from rxn.reaction_preprocessing.utils import (
add_custom_logger_to_file,
overwrite_logging_format,
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def preprocess_data(cfg: Config) -> None:
    """Preprocess data to generate a dataset for training transformer models."""
    # Enforce config schema. Will also convert strings to Enums when necessary.
    cfg = Config.from_generic_config(cfg)
    logger.info(
        f"Preprocessing reaction data with rxn-reaction-preprocessing, "
        f"version {__version__}."
    )
    logger.info(
        "Running with the following configuration:\n"
        f"{OmegaConf.to_yaml(cfg, resolve=True)}"
    )
    # Create the output directory (may exist already if this function was
    # called from the main script).
    processing_dir = Path(cfg.data.proc_dir)
    processing_dir.mkdir(parents=True, exist_ok=True)
    # Save the config next to the generated files, for reproducibility.
    with open(processing_dir / "preprocessing_config.yml", "wt") as f:
        f.write(OmegaConf.to_yaml(cfg, resolve=True))
    # Map each possible step to the callable executing it with its own config.
    # Step is a closed enum, so every value in cfg.common.sequence has an entry.
    step_runners = {
        Step.IMPORT: lambda: rxn_import(cfg.rxn_import),
        Step.STANDARDIZE: lambda: standardize(cfg.standardize),
        Step.PREPROCESS: lambda: preprocess(cfg.preprocess),
        Step.AUGMENT: lambda: augment(cfg.augment),
        Step.SPLIT: lambda: split(cfg.split),
        Step.TOKENIZE: lambda: tokenize(cfg.tokenize),
    }
    for step in cfg.common.sequence:
        logger.info(f"Running step: {step.name}")
        step_runners[step]()
@hydra.main(config_name="base_config", config_path=None)
def data_pipeline(cfg: Config) -> None:
    """Hydra entry point: set up logging, then run the preprocessing pipeline."""
    # Enforce the schema early so downstream code sees typed values and Enums.
    cfg = Config.from_generic_config(cfg)
    # All intermediate files and the log file end up in the processing directory.
    output_dir = Path(cfg.data.proc_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
    add_custom_logger_to_file(output_dir / "log.txt")
    overwrite_logging_format()
    preprocess_data(cfg)
# Script entry point; command-line arguments are handled by the hydra decorator.
if __name__ == "__main__":
    data_pipeline()
from typing import List, Optional, Tuple
from rxn.chemutils.conversion import canonicalize_smiles
from rxn.chemutils.exceptions import InvalidSmiles
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.reaction_preprocessing.annotations.missing_annotation_detector import (
MissingAnnotationDetector,
)
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
MoleculeAnnotation,
)
from rxn.reaction_preprocessing.annotations.molecule_replacer import MoleculeReplacer
from rxn.reaction_preprocessing.annotations.rejected_molecules_filter import (
RejectedMoleculesFilter,
)
from rxn.reaction_preprocessing.cleaner import remove_isotope_information
class MoleculeStandardizationError(ValueError):
    """Base class for standardization exceptions.

    Raised (via its subclasses below) by MoleculeStandardizer.standardize().
    """
class RejectedMolecule(MoleculeStandardizationError):
    """Exception raised when standardizing a molecule annotated as "Rejected"."""

    def __init__(self, smiles: str):
        """
        Args:
            smiles: rejected SMILES string.
        """
        super().__init__(f'Cannot standardize: rejected molecule "{smiles}"')
        # Keep the offending SMILES accessible to callers, for consistency
        # with the sibling MissingAnnotation exception.
        self.smiles = smiles
class MissingAnnotation(MoleculeStandardizationError):
    """Exception raised when standardizing a molecule that should be annotated."""
    def __init__(self, smiles: str):
        """
        Args:
            smiles: SMILES string of the molecule missing an annotation.
        """
        super().__init__(f'Cannot standardize: molecule "{smiles}" must be annotated.')
        self.smiles = smiles
class MoleculeStandardizer:
    """
    Class to standardize standalone molecules (reactions are standardized with
    the Standardizer class).
    Note that the standardization of one molecule may lead to a combination
    of molecules, hence the functions return lists of strings.
    """
    def __init__(
        self,
        annotations: Optional[List[MoleculeAnnotation]] = None,
        discard_missing_annotations: bool = False,
        canonicalize: bool = True,
    ):
        """
        Args:
            annotations: A list of MoleculeAnnotation objects used to perform
                the substitutions/rejections. Defaults to an empty list.
            discard_missing_annotations: whether reactions containing unannotated
                molecules that should be annotated must be rejected.
            canonicalize: whether to canonicalize the compounds.
        """
        if annotations is None:
            annotations = []
        # NOTE(review): the constructor parameter has a more generic name;
        # internally the flag keeps its historical "unannotated metals" name.
        self.discard_unannotated_metals = discard_missing_annotations
        self.canonicalize = canonicalize
        # Filter rejecting molecules annotated as "Rejected".
        self.rejection_filter = RejectedMoleculesFilter.from_molecule_annotations(
            annotations
        )
        # Detector for molecules that require an annotation but have none.
        self.missing_annotation_detector = (
            MissingAnnotationDetector.from_molecule_annotations(annotations)
        )
        # Replacer applying the substitutions given in the annotations.
        self.molecule_replacer = MoleculeReplacer.from_molecule_annotations(annotations)
    def __call__(self, smiles: str) -> List[str]:
        """See doc for standardize()."""
        return self.standardize(smiles)
    def standardize(self, smiles: str) -> List[str]:
        """
        Standardize a molecule.
        The returned value is a list, because in some cases standardization
        returns two independent molecules.
        Args:
            smiles: SMILES string to standardize. Use dots for fragment bonds!
        Raises:
            SanitizationError or one of its subclasses: error in sanitization.
            InvalidSmiles: Invalid SMILES.
            ValueError: "~" being used for fragment bonds.
        Returns:
            List of standardized SMILES strings.
        """
        if "~" in smiles:
            raise ValueError(f'MoleculeStandardizer must be used without "~": {smiles}')
        # Discard isotope information
        smiles = remove_isotope_information(smiles)
        # Check validity of SMILES (may raise InvalidSmiles), and
        # overwrite if canonicalization required
        canonical_smiles = canonicalize_smiles(smiles)
        if self.canonicalize:
            smiles = canonical_smiles
        # Check for rejected molecules
        if not self.rejection_filter.is_valid_molecule_smiles(smiles):
            raise RejectedMolecule(smiles)
        # Check for non-annotated molecules
        if self.discard_unannotated_metals:
            if self.missing_annotation_detector.molecule_needs_annotation(smiles):
                raise MissingAnnotation(smiles)
        # Replace annotated molecules
        return self.molecule_replacer.replace_molecule_smiles(smiles)
    def standardize_in_equation(self, reaction: ReactionEquation) -> ReactionEquation:
        """
        Do the molecule-wise standardization for a reaction equation.
        Relies on standardize_in_equation_with_errors(), for modularity purposes.
        Will propagate the exceptions raised in that function.
        """
        # Ignoring the lists of SMILES returned in the tuple (which, by construction,
        # will always be empty: if not, an exception will have been raised earlier).
        reaction, *_ = self.standardize_in_equation_with_errors(
            reaction, propagate_exceptions=True
        )
        return reaction
    def standardize_in_equation_with_errors(
        self, reaction: ReactionEquation, propagate_exceptions: bool = False
    ) -> Tuple[ReactionEquation, List[str], List[str], List[str]]:
        """
        Do the molecule-wise standardization for a reaction equation, and get the reasons for
        potential failures.
        This function was originally implemented in Standardizer, and then moved here for more
        modularity.
        Args:
            reaction: reaction to standardize.
            propagate_exceptions: if True, will stop execution and raise directly
                instead of collecting the SMILES leading to the failure. Not ideal,
                but probably the only way (?) to not have duplicated code in the
                function standardize_in_equation().
        Returns:
            Tuple:
                - the standardized reaction equation (or an empty one if there was a failure).
                - list of invalid SMILES in the reaction.
                - list of rejected SMILES in the reaction.
                - list of missing annotations in the reaction.
        """
        missing_annotations = []
        invalid_smiles = []
        rejected_smiles = []
        # Iterate over the reactants, agents, products and update the
        # standardized reaction at the same time
        standardized_reaction = ReactionEquation([], [], [])
        for original_role_group, new_role_group in zip(reaction, standardized_reaction):
            for smiles in original_role_group:
                try:
                    # standardize() may map one molecule to several; extend in place.
                    new_role_group.extend(self.standardize(smiles))
                except InvalidSmiles:
                    if propagate_exceptions:
                        raise
                    invalid_smiles.append(smiles)
                except RejectedMolecule:
                    if propagate_exceptions:
                        raise
                    rejected_smiles.append(smiles)
                except MissingAnnotation:
                    if propagate_exceptions:
                        raise
                    missing_annotations.append(smiles)
        # If there was any error: replace by empty reaction equation (">>")
        if invalid_smiles or rejected_smiles or missing_annotations:
            standardized_reaction = ReactionEquation([], [], [])
        return (
            standardized_reaction,
            invalid_smiles,
            rejected_smiles,
            missing_annotations,
        )
""" A utility class to apply standardization to the data """
from pathlib import Path
from typing import List, Optional
import pandas as pd
from rdkit import RDLogger
from rxn.chemutils.miscellaneous import remove_chiral_centers
from rxn.chemutils.reaction_smiles import parse_any_reaction_smiles
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
MoleculeAnnotation,
load_annotations_multiple,
)
from rxn.reaction_preprocessing.config import StandardizeConfig
from rxn.reaction_preprocessing.molecule_standardizer import MoleculeStandardizer
RDLogger.DisableLog("rdApp.*")
class Standardizer:
    """Applies molecule-wise standardization to the reaction SMILES of a DataFrame."""
    def __init__(
        self,
        df: pd.DataFrame,
        annotations: List[MoleculeAnnotation],
        discard_unannotated_metals: bool,
        reaction_column_name: str,
        fragment_bond: Optional[str] = None,
        remove_stereo_if_not_defined_in_precursors: bool = False,
    ):
        """Creates a new instance of the Standardizer class.
        Args:
            df: A pandas DataFrame containing the reaction SMILES.
            annotations: A list of MoleculeAnnotation objects used to perform the substitutions/rejections
            discard_unannotated_metals: whether reactions containing unannotated
                molecules with transition metals must be rejected.
            reaction_column_name: The name of the DataFrame column containing the reaction SMILES.
            fragment_bond: the fragment bond used in the dataframe.
            remove_stereo_if_not_defined_in_precursors: Remove chiral centers from products.
        """
        self.df = df
        self.molecule_standardizer = MoleculeStandardizer(
            annotations=annotations,
            discard_missing_annotations=discard_unannotated_metals,
            canonicalize=True,
        )
        self.fragment_bond = fragment_bond
        self.remove_stereo_if_not_defined_in_precursors = (
            remove_stereo_if_not_defined_in_precursors
        )
        # Column names: the reaction column plus the derived columns that
        # standardize() will add to the DataFrame.
        self.rxn_column = reaction_column_name
        self.rxn_before_std_column = f"{self.rxn_column}_before_std"
        self.invalid_smiles_column = f"{self.rxn_column}_invalid_smiles"
        self.rejected_smiles_column = f"{self.rxn_column}_rejected_smiles"
        self.missing_annotations_column = f"{self.rxn_column}_missing_annotations"
    def __remove_stereo_if_not_defined_in_precursors(self, rxn_smiles: str) -> str:
        """
        Remove stereocenters from products if not explainable by precursors.
        """
        if not self.remove_stereo_if_not_defined_in_precursors:
            return rxn_smiles
        reactants, reagents, products = rxn_smiles.split(">")
        if "@" in products and not ("@" in reactants or "@" in reagents):
            # Strip the chiral centers from the whole reaction SMILES.
            rxn_smiles = remove_chiral_centers(rxn_smiles)
        return rxn_smiles
    def standardize(self, canonicalize: bool = True) -> "Standardizer":
        """
        Standardizes the entries of self.df[self.rxn_column].

        Args:
            canonicalize: whether to canonicalize the compounds as well.

        Returns:
            This instance (fluent style), with the standardized reaction and
            error columns added to self.df.
        """
        self.molecule_standardizer.canonicalize = canonicalize
        # Make a copy of the non-standardized reaction SMILES. Achieved by
        # renaming to enable the "join" operation below without conflict.
        self.df.rename(
            columns={self.rxn_column: self.rxn_before_std_column}, inplace=True
        )
        new_columns: pd.DataFrame = self.df.apply(self.process_row, axis=1)
        new_columns.columns = [
            self.rxn_column,
            self.invalid_smiles_column,
            self.rejected_smiles_column,
            self.missing_annotations_column,
        ]
        # Merge the new columns
        self.df = self.df.join(new_columns)
        return self
    def process_row(self, x: pd.Series) -> pd.Series:
        """
        Function applied to every row of the dataframe to get the new columns.
        Returns:
            Pandas Series with 1) the standardized reaction SMILES, 2) the list
            of invalid molecules, 3) the list of rejected molecules (from
            the annotations), 4) the list of missing annotations.
        """
        # Get RXN SMILES from the column
        rxn_smiles = x[self.rxn_before_std_column]
        # Remove stereo information from products, if needed
        rxn_smiles = self.__remove_stereo_if_not_defined_in_precursors(rxn_smiles)
        # Read the reaction SMILES while allowing for different formats (with
        # fragment bond, extended reaction SMILES, etc.).
        reaction_equation = parse_any_reaction_smiles(rxn_smiles)
        (
            standardized_reaction,
            invalid_smiles,
            rejected_smiles,
            missing_annotations,
        ) = self.molecule_standardizer.standardize_in_equation_with_errors(
            reaction_equation, propagate_exceptions=False
        )
        standardized_smiles = standardized_reaction.to_string(self.fragment_bond)
        return pd.Series(
            [standardized_smiles, invalid_smiles, rejected_smiles, missing_annotations]
        )
    @staticmethod
    def read_csv(
        filepath: str,
        annotations: List[MoleculeAnnotation],
        discard_unannotated_metals: bool,
        reaction_column_name: str,
        fragment_bond: Optional[str] = None,
        remove_stereo_if_not_defined_in_precursors: bool = False,
    ) -> "Standardizer":
        """
        A helper function to read a list or csv of VALID reactions (in the sense of RDKIT).
        Args:
            filepath (str): The path to the text file containing the reactions.
            annotations: A list of MoleculeAnnotation objects used to perform the substitutions/rejections
            discard_unannotated_metals: whether reactions containing unannotated
                molecules with transition metals must be rejected.
            reaction_column_name: The name of the reaction column (or the name that will be given to the reaction
                column if the input file has no headers)
            fragment_bond: the fragment bond used.
            remove_stereo_if_not_defined_in_precursors: Remove chiral centers from products.
        Returns:
            : A new standardizer instance.
        """
        df = pd.read_csv(filepath, lineterminator="\n")
        # A single, possibly unnamed column is treated as the reaction column.
        if len(df.columns) == 1:
            df.rename(columns={df.columns[0]: reaction_column_name}, inplace=True)
        return Standardizer(
            df,
            annotations=annotations,
            discard_unannotated_metals=discard_unannotated_metals,
            reaction_column_name=reaction_column_name,
            fragment_bond=fragment_bond,
            remove_stereo_if_not_defined_in_precursors=remove_stereo_if_not_defined_in_precursors,
        )
def standardize(cfg: StandardizeConfig) -> None:
    """Run the standardization step described by ``cfg`` and write the result to disk."""
    if not Path(cfg.input_file_path).exists():
        raise ValueError(
            f"Input file for standardization does not exist: {cfg.input_file_path}"
        )
    # Load the annotated molecules used for substitutions / rejections.
    annotations = load_annotations_multiple(cfg.annotation_file_paths)
    # Set up the standardizer on the input CSV.
    standardizer = Standardizer.read_csv(
        cfg.input_file_path,
        annotations,
        discard_unannotated_metals=cfg.discard_unannotated_metals,
        reaction_column_name=cfg.reaction_column_name,
        fragment_bond=cfg.fragment_bond.value,
        remove_stereo_if_not_defined_in_precursors=cfg.remove_stereo_if_not_defined_in_precursors,
    )
    # Remember the original columns so that intermediate ones can be dropped.
    original_columns = list(standardizer.df.columns)
    standardizer.standardize(canonicalize=True)
    if not cfg.keep_intermediate_columns:
        standardizer.df = standardizer.df[original_columns]
    # Export the standardized samples.
    standardizer.df.to_csv(Path(cfg.output_file_path), index=False)
import copy
from enum import Enum
from typing import Iterable, List, TypeVar, Union
from rxn.chemutils.reaction_equation import ReactionEquation
# Atom-like tokens used to mark reaction conditions in reaction SMILES
# (added to the precursors; see the functions below).
LIGHT_TOKEN = "[Lv]"
HEAT_TOKEN = "[Ts]"
class _SpecialToken(Enum):
    """
    Enum class for special reaction SMILES tokens.
    Useful to avoid dealing with the token strings where not actually necessary.
    """
    LIGHT = LIGHT_TOKEN  # reaction carried out under light
    HEAT = HEAT_TOKEN  # reaction carried out under heat
# Constrained TypeVar preserving the input kind: functions below return a
# ReactionEquation when given one, and a list of SMILES when given a list.
ReactionOrList = TypeVar("ReactionOrList", ReactionEquation, List[str])
# Read-only variant for functions that only inspect the input.
ReactionOrIterable = Union[ReactionEquation, Iterable[str]]
def _add_special_tokens_to_list(
    smiles_list: List[str], tokens: Iterable[_SpecialToken], in_place: bool
) -> List[str]:
    """Append the string value of each required token to a list of SMILES."""
    # Work either on the given list itself or on a copy of it.
    target = smiles_list if in_place else copy.deepcopy(smiles_list)
    target.extend(tok.value for tok in tokens)
    return target
def _add_special_tokens(
    reaction_or_list: ReactionOrList, tokens: Iterable[_SpecialToken], in_place: bool
) -> ReactionOrList:
    """Add the required tokens to the reactants of a reaction or list of SMILES."""
    if not isinstance(reaction_or_list, ReactionEquation):
        # Plain list of SMILES: delegate directly.
        return _add_special_tokens_to_list(reaction_or_list, tokens, in_place=in_place)
    # ReactionEquation: operate on a copy unless the caller asked for in-place,
    # then extend its reactants in place.
    equation = reaction_or_list if in_place else copy.deepcopy(reaction_or_list)
    _add_special_tokens_to_list(equation.reactants, tokens, in_place=True)
    return equation
def add_light_token(
    reaction_or_list: ReactionOrList, in_place: bool = False
) -> ReactionOrList:
    """Add the light token to the precursors of a reaction or list of SMILES."""
    tokens = [_SpecialToken.LIGHT]
    return _add_special_tokens(reaction_or_list, tokens, in_place=in_place)
def add_heat_token(
    reaction_or_list: ReactionOrList, in_place: bool = False
) -> ReactionOrList:
    """Add the heat token to the precursors of a reaction or list of SMILES."""
    tokens = [_SpecialToken.HEAT]
    return _add_special_tokens(reaction_or_list, tokens, in_place=in_place)
def _contains_token(
    reaction_or_iterable: ReactionOrIterable, token: _SpecialToken
) -> bool:
    """Whether a reaction (or set of SMILES strings) contains the specified token."""
    if isinstance(reaction_or_iterable, ReactionEquation):
        candidates: Iterable[str] = reaction_or_iterable.iter_all_smiles()
    else:
        candidates = reaction_or_iterable
    wanted = token.value
    return any(smiles == wanted for smiles in candidates)
def contains_light_token(reaction_or_iterable: ReactionOrIterable) -> bool:
    """Whether a reaction (or set of SMILES strings) contains the light token."""
    return _contains_token(reaction_or_iterable, token=_SpecialToken.LIGHT)
def contains_heat_token(reaction_or_iterable: ReactionOrIterable) -> bool:
    """Whether a reaction (or set of SMILES strings) contains the heat token."""
    return _contains_token(reaction_or_iterable, token=_SpecialToken.HEAT)
def _strip_special_tokens_from_list(
smiles_list: List[str], token_strings: Iterable[str], in_place: bool
) -> List[str]:
"""Strip the specified tokens from a list of SMILES strings."""
if not in_place:
smiles_list = copy.deepcopy(smiles_list)
for token in token_strings:
try:
smiles_list.remove(token)
except ValueError:
# NB: remove() raises ValueError if the value is not in the list
pass
return smiles_list
def _strip_special_tokens(
    reaction_or_list: ReactionOrList, tokens: Iterable[_SpecialToken], in_place: bool
) -> ReactionOrList:
    """Strip the specified tokens from a reaction or list of SMILES strings."""
    wanted = [token.value for token in tokens]
    if not isinstance(reaction_or_list, ReactionEquation):
        # Plain list of SMILES: delegate directly.
        return _strip_special_tokens_from_list(
            reaction_or_list, wanted, in_place=in_place
        )
    # ReactionEquation: operate on a copy unless the caller asked for in-place,
    # then strip every role group (reactants, agents, products) in place.
    equation = reaction_or_list if in_place else copy.deepcopy(reaction_or_list)
    for role_group in equation:
        _strip_special_tokens_from_list(role_group, wanted, in_place=True)
    return equation
def strip_all_special_tokens(
    reaction_or_list: ReactionOrList, in_place: bool = False
) -> ReactionOrList:
    """Strip all the special tokens from a reaction or list of SMILES strings."""
    # Iterating the enum class yields every defined token.
    all_tokens = list(_SpecialToken)
    return _strip_special_tokens(reaction_or_list, all_tokens, in_place=in_place)
def strip_heat_token(
    reaction_or_list: ReactionOrList, in_place: bool = False
) -> ReactionOrList:
    """Strip the heat token from a reaction or list of SMILES strings."""
    tokens = [_SpecialToken.HEAT]
    return _strip_special_tokens(reaction_or_list, tokens, in_place=in_place)
def strip_light_token(
    reaction_or_list: ReactionOrList, in_place: bool = False
) -> ReactionOrList:
    """Strip the light token from a reaction or list of SMILES strings."""
    tokens = [_SpecialToken.LIGHT]
    return _strip_special_tokens(reaction_or_list, tokens, in_place=in_place)
""" A class encapsulating filtering functionality for chemical reactions """
import itertools
from functools import partial
from typing import Callable, Generator, Iterable, List, Tuple, Union
from rxn.chemutils.exceptions import InvalidSmiles
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.chemutils.tokenization import to_tokens
from .utils import MolEquation, get_atoms_for_mols, get_formal_charge_for_mols
# Noble-gas symbols used as placeholders for polymer head/tail atoms.
_POLYMER_HEAD_AND_TAIL_PLACEHOLDER_ATOMS = {"Kr", "Rn", "Xe"}
# The placeholder atoms plus hydrogen; presumably the atom types tolerated in
# products without a counterpart in the reactants (usage not visible here).
_ATOM_TYPES_ALLOWED_IN_PRODUCT = _POLYMER_HEAD_AND_TAIL_PLACEHOLDER_ATOMS | {"H"}
# Signatures of the two kinds of filter checks: one operating on the
# SMILES-level ReactionEquation, one on the mol-level MolEquation.
SmilesBasedCheck = Callable[[ReactionEquation], bool]
MolBasedCheck = Callable[[MolEquation], bool]
class ReactionFilterError(ValueError):
    """Exception raised when calling validate() on reactions not passing one
    or several filters."""

    def __init__(self, reaction: ReactionEquation, reasons: Iterable[str]):
        # Keep the reaction and the failure reasons accessible to callers.
        self.reaction = reaction
        self.reasons = list(reasons)
        joined_reasons = "; ".join(self.reasons)
        super().__init__(
            f'Reaction "{self.reaction.to_string("~")}" did not pass the '
            f'filters: {joined_reasons}'
        )
class MixedReactionFilter:
    """Validity filter for chemical reactions, combining several criteria.

    Two families of checks are registered at construction time: SMILES-based
    checks (component counts, token counts) and RDKit-Mol-based checks
    (single-atom products, formal charges, atom types). A reaction is valid
    only if it passes every check; each check returns True when it FAILS.
    """

    def __init__(
        self,
        max_reactants: int = 10,
        max_agents: int = 0,
        max_products: int = 1,
        min_reactants: int = 2,
        min_agents: int = 0,
        min_products: int = 1,
        max_reactants_tokens: int = 300,
        max_agents_tokens: int = 0,
        max_products_tokens: int = 200,
        max_absolute_formal_charge: int = 2,
    ):
        """Creates a new instance of the type MixedReactionFilter.

        Args:
            max_reactants: The maximum number of reactant molecules.
            max_agents: The maximum number of agent molecules.
            max_products: The maximum number of product molecules.
            min_reactants: The minimum number of reactant molecules.
            min_agents: The minimum number of agent molecules.
            min_products: The minimum number of product molecules.
            max_reactants_tokens: The maximum number of precursor tokens.
            max_agents_tokens: The maximum number of agent tokens.
            max_products_tokens: The maximum number of product tokens.
            max_absolute_formal_charge: The maximum formal charge (for
                reactants, agents, or products).
        """
        self.max_reactants = max_reactants
        self.max_agents = max_agents
        self.max_products = max_products
        self.min_reactants = min_reactants
        self.min_agents = min_agents
        self.min_products = min_products
        self.max_reactants_tokens = max_reactants_tokens
        self.max_agents_tokens = max_agents_tokens
        self.max_products_tokens = max_products_tokens
        self.max_absolute_formal_charge = max_absolute_formal_charge
        # Checks that need only the SMILES strings (no RDKit parsing),
        # paired with the reason string reported on failure.
        self.smiles_based_checks: List[Tuple[SmilesBasedCheck, str]] = [
            (self.max_reactants_exceeded, "max_reactants_exceeded"),
            (self.max_agents_exceeded, "max_agents_exceeded"),
            (self.max_products_exceeded, "max_products_exceeded"),
            (self.min_reactants_subceeded, "min_reactants_subceeded"),
            (self.min_agents_subceeded, "min_agents_subceeded"),
            (self.min_products_subceeded, "min_products_subceeded"),
            (self.products_subset_of_reactants, "products_subset_of_reactants"),
            (self.max_reactant_tokens_exceeded, "max_reactant_tokens_exceeded"),
            (self.max_agent_tokens_exceeded, "max_agent_tokens_exceeded"),
            (self.max_product_tokens_exceeded, "max_product_tokens_exceeded"),
        ]
        # Checks that need RDKit Mol objects (parsing may fail on invalid SMILES).
        self.mol_based_checks: List[Tuple[MolBasedCheck, str]] = [
            (self.products_single_atoms, "products_single_atoms"),
            (self.formal_charge_exceeded, "formal_charge_exceeded"),
            (self.invalid_atom_type, "invalid_atom_type"),
            (self.different_atom_types, "different_atom_types"),
        ]

    def validate(self, reaction: ReactionEquation) -> None:
        """
        Make sure that the given reaction is valid; if not, an exception will
        be raised.

        Raises:
            ReactionFilterError: if the reaction does not pass the filters.

        Args:
            reaction: reaction to validate.
        """
        valid, reasons = self.validate_reasons(reaction)
        if not valid:
            raise ReactionFilterError(reaction, reasons)

    def is_valid(self, reaction: ReactionEquation) -> bool:
        """
        Whether a reaction is valid based on the rules set on the instance of this
        MixedReactionFilter class.

        Args:
            reaction: The reaction to validate.

        Returns:
            bool: Whether or not the reaction is valid according to the rules
                set on the instance of this MixedReactionFilter class.
        """

        def callbacks() -> Generator[Callable[[], bool], None, None]:
            """Generator function for providing the checks to make as
            callable objects.

            Formulating it as a generator makes it efficient; for instance,
            the mol_equation object will not be generated if any of the
            SMILES-based checks fails.
            """
            for smiles_based_fn, _ in self.smiles_based_checks:
                yield partial(smiles_based_fn, reaction)
            try:
                mol_equation = MolEquation.from_reaction_equation(reaction)
            except InvalidSmiles:
                # If there is an invalid SMILES, we yield a final callback that
                # will then return `True` (meaning: invalid)
                yield lambda: True
                return
            for mol_based_fn, _ in self.mol_based_checks:
                yield partial(mol_based_fn, mol_equation)

        # Each check returns True on failure, hence the negation.
        return not any(callback() for callback in callbacks())

    def validate_reasons(self, reaction: ReactionEquation) -> Tuple[bool, List[str]]:
        """Validate a reaction and collect the names of all failed checks.

        Unlike is_valid(), this always evaluates every check (no early exit).

        Args:
            reaction: The reaction to validate.

        Returns:
            Tuple of: whether the reaction is valid, and the list of failed
            check names (empty when valid).
        """
        reasons = []
        for smiles_based_fn, error_message in self.smiles_based_checks:
            if smiles_based_fn(reaction):
                reasons.append(error_message)
        try:
            mol_equation = MolEquation.from_reaction_equation(reaction)
        except InvalidSmiles:
            # Mol-based checks cannot run at all on an unparseable SMILES.
            reasons.append("rdkit_molfromsmiles_failed")
        else:
            for mol_based_fn, error_message in self.mol_based_checks:
                if mol_based_fn(mol_equation):
                    reasons.append(error_message)
        valid = len(reasons) == 0
        return valid, reasons

    def max_reactants_exceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of reactants exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of reactants exceeds the maximum.
        """
        return len(reaction.reactants) > self.max_reactants

    def max_agents_exceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of agents exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of agents exceeds the maximum.
        """
        return len(reaction.agents) > self.max_agents

    def max_products_exceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of products exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of products exceeds the maximum.
        """
        return len(reaction.products) > self.max_products

    def min_reactants_subceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of reactants is below the minimum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of reactants is below the minimum.
        """
        return len(reaction.reactants) < self.min_reactants

    def min_agents_subceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of agents is below the minimum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of agents is below the minimum.
        """
        return len(reaction.agents) < self.min_agents

    def min_products_subceeded(self, reaction: ReactionEquation) -> bool:
        """Checks whether the number of products is below the minimum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of products is below the minimum.
        """
        return len(reaction.products) < self.min_products

    def products_subset_of_reactants(self, reaction: ReactionEquation) -> bool:
        """Checks whether the set of products is a subset of the set of reactants.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the set of products is a subset of the set of reactants.
        """
        products = set(reaction.products)
        reactants = set(reaction.reactants)
        # The empty set is a subset of everything; an empty product list
        # must not count as a failure here.
        return len(products) > 0 and products.issubset(reactants)

    def products_single_atoms(
        self, reaction: Union[MolEquation, ReactionEquation]
    ) -> bool:
        """Checks whether the products solely consist of single atoms.

        Args:
            reaction: The reaction to test (converted to a MolEquation if needed).

        Returns:
            bool: Whether the products solely consist of single atoms.
        """
        if isinstance(reaction, ReactionEquation):
            reaction = MolEquation.from_reaction_equation(reaction)
        return len(reaction.products) > 0 and all(
            [product.GetNumAtoms() == 1 for product in reaction.products]
        )

    def max_reactant_tokens_exceeded(self, reaction: ReactionEquation) -> bool:
        """Check whether the number of reactant tokens exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of reactant tokens exceeds the maximum.
        """
        return self._group_tokens_exceeded(
            reaction.reactants, self.max_reactants_tokens
        )

    def max_agent_tokens_exceeded(self, reaction: ReactionEquation) -> bool:
        """Check whether the number of agent tokens exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of agent tokens exceeds the maximum.
        """
        return self._group_tokens_exceeded(reaction.agents, self.max_agents_tokens)

    def max_product_tokens_exceeded(self, reaction: ReactionEquation) -> bool:
        """Check whether the number of product tokens exceeds the maximum.

        Args:
            reaction: The reaction to test.

        Returns:
            bool: Whether the number of product tokens exceeds the maximum.
        """
        return self._group_tokens_exceeded(reaction.products, self.max_products_tokens)

    def _group_tokens_exceeded(self, smiles_list: List[str], threshold: int) -> bool:
        """Check whether the number of SMILES tokens in a group exceeds a threshold.

        Args:
            smiles_list: SMILES strings of the molecules in the group.
            threshold: maximum allowed number of tokens.

        Returns:
            bool: Whether the number of tokens exceeds the threshold.
        """
        smiles = ".".join(
            smiles_list
        )  # NB: we use '.' here, but '~' would be the same.
        return len(to_tokens(smiles)) > threshold

    def formal_charge_exceeded(
        self, reaction: Union[MolEquation, ReactionEquation]
    ) -> bool:
        """Check whether the absolute formal charge of the reactants, agents,
        or products exceeds a maximum.

        Args:
            reaction: The reaction to test (converted to a MolEquation if needed).

        Returns:
            bool: Whether the absolute formal charge of the reactants, agents,
                or products exceeds a maximum.
        """
        if isinstance(reaction, ReactionEquation):
            reaction = MolEquation.from_reaction_equation(reaction)
        return (
            abs(get_formal_charge_for_mols(reaction.reactants))
            > self.max_absolute_formal_charge
            or abs(get_formal_charge_for_mols(reaction.agents))
            > self.max_absolute_formal_charge
            or abs(get_formal_charge_for_mols(reaction.products))
            > self.max_absolute_formal_charge
        )

    def invalid_atom_type(self, reaction: Union[MolEquation, ReactionEquation]) -> bool:
        """
        Check whether the reaction contains atoms with invalid atom types such as the asterisk "*".

        Args:
            reaction: The reaction to test (converted to a MolEquation if needed).

        Returns:
            bool: Whether the reaction contains invalid atom types.
        """
        if isinstance(reaction, ReactionEquation):
            reaction = MolEquation.from_reaction_equation(reaction)
        # So far, the only invalid atom type is "*"; this function can be
        # reformulated to account for additional ones if some appear later on.
        mols = itertools.chain(reaction.reactants, reaction.agents, reaction.products)
        return "*" in get_atoms_for_mols(mols)

    def different_atom_types(
        self, reaction: Union[MolEquation, ReactionEquation]
    ) -> bool:
        """Check whether the products contain atom types not found in the agents or reactants.

        It handles the presence of placeholders for polymer head and tail representations.

        Args:
            reaction: The reaction to test (converted to a MolEquation if needed).

        Returns:
            bool: Whether the products contain atom types not found in the agents or reactants.
        """
        if isinstance(reaction, ReactionEquation):
            reaction = MolEquation.from_reaction_equation(reaction)
        products_atoms = get_atoms_for_mols(reaction.products)
        # ignore H atom (because usually implicit) and atoms used in polymer representations
        products_atoms -= _ATOM_TYPES_ALLOWED_IN_PRODUCT
        agents_atoms = get_atoms_for_mols(reaction.agents)
        reactants_atoms = get_atoms_for_mols(reaction.reactants)
        return len(products_atoms - (reactants_atoms | agents_atoms)) != 0
import logging
import random
from enum import Enum, auto
from pathlib import Path
from typing import Iterable, List, Set
import attr
import numpy
from rdkit.Chem import GetFormalCharge, Mol
from rxn.chemutils.conversion import smiles_to_mol
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.utilities.files import PathLike
from rxn.utilities.logging import LoggingFormat
class DataSplit(Enum):
    """Identifiers for the usual dataset splits."""

    TRAIN = auto()
    VALIDATION = auto()
    TEST = auto()
class RandomType(Enum):
    """Kinds of SMILES randomization (exact semantics defined where the
    enum is consumed — presumably by an augmentation step; verify there)."""

    molecules = auto()
    unrestricted = auto()
    restricted = auto()
    rotated = auto()
class ReactionSection(Enum):
    """Side of a reaction: precursors (left-hand side) or products (right-hand side)."""

    precursors = auto()
    products = auto()
def root_directory() -> Path:
    """
    Returns the path to the root directory of the repository
    (two levels above this file, resolved to an absolute path).
    """
    repository_root = Path(__file__).parent.parent
    return repository_root.resolve()
def data_directory() -> Path:
    """
    Returns the path to the "data" directory next to this file.
    """
    this_directory = Path(__file__).parent
    return this_directory.resolve() / "data"
def standardization_files_directory() -> Path:
    """
    Returns the path to the "standardization-files" directory inside the
    package's data directory.
    """
    return data_directory() / "standardization-files"
def reset_random_seed() -> None:
    """Seed both the stdlib and the NumPy random generators with a fixed
    value (42), so that subsequent draws are reproducible."""
    seed_value = 42
    random.seed(seed_value)
    numpy.random.seed(seed_value)
@attr.s(auto_attribs=True)
class MolEquation:
    """
    Same as a ReactionEquation, except that RDKit Mol objects are stored
    instead of the SMILES.
    """

    reactants: List[Mol]
    agents: List[Mol]
    products: List[Mol]

    @classmethod
    def from_reaction_equation(cls, reaction: ReactionEquation) -> "MolEquation":
        """Build a MolEquation by parsing every SMILES of a ReactionEquation.

        NOTE(review): smiles_to_mol presumably raises InvalidSmiles on
        unparseable input (callers in this package catch it) — confirm.
        """
        return cls(
            reactants=[smiles_to_mol(s) for s in reaction.reactants],
            agents=[smiles_to_mol(s) for s in reaction.agents],
            products=[smiles_to_mol(s) for s in reaction.products],
        )
def get_formal_charge_for_mols(mols: Iterable[Mol]) -> int:
    """Get the total formal charge for a group of RDKit Mols."""
    total_charge = 0
    for mol in mols:
        total_charge += GetFormalCharge(mol)
    return total_charge
def get_atoms_for_mols(mols: Iterable[Mol]) -> Set[str]:
    """Get the set of atom symbols occurring in a group of RDKit Mols."""
    symbols: Set[str] = set()
    for mol in mols:
        symbols.update(atom.GetSymbol() for atom in mol.GetAtoms())
    return symbols
def add_custom_logger_to_file(log_file: PathLike) -> None:
    """
    Set up logging to a file.

    This is a bit more complex than usual because hydra sets up the logger
    automatically, and it is not possible to disable it.

    Args:
        log_file: file where to save the logs (opened in "w" mode, i.e.
            overwritten on each run).
    """
    file_handler = logging.FileHandler(log_file, mode="w")
    file_handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(file_handler)
def overwrite_logging_format() -> None:
    """
    Reset the log format of every root-logger handler to our default,
    instead of using the hydra default.
    """
    for handler in logging.getLogger().handlers:
        handler.setFormatter(logging.Formatter(LoggingFormat.BASIC.value))
import csv
from typing import Generator
import click
from rxn.reaction_preprocessing.annotations.annotation_info import AnnotationInfo
from rxn.reaction_preprocessing.annotations.missing_annotation_detector import (
MissingAnnotationDetector,
)
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
load_annotations_multiple,
)
from rxn.reaction_preprocessing.config import DEFAULT_ANNOTATION_FILES
def iterate_rxn_smiles(csv_file: str, column_name: str) -> Generator[str, None, None]:
    """Yield the values of one column of a CSV file (e.g. the reaction SMILES).

    Args:
        csv_file: path to the CSV file.
        column_name: name of the column to extract.

    Raises:
        RuntimeError: if the requested column is not present in the header.
    """
    with open(csv_file) as fp:
        reader = csv.reader(fp)
        header_row = next(reader)
        try:
            column_index = header_row.index(column_name)
        except ValueError as error:
            raise RuntimeError(f'No "{column_name}" column in {csv_file}') from error
        for record in reader:
            yield record[column_index]
@click.command()
@click.option("--csv_file", required=True)
@click.option(
    "--column_name", required=True, help="Column containing the reaction SMILES"
)
def main(csv_file: str, column_name: str) -> None:
    """Check for missing annotations: what is already annotated (accepted /
    rejected), what still needs to be annotated."""
    iterator = iterate_rxn_smiles(csv_file, column_name)
    # Empty annotation set: collect *all* molecules that would need an
    # annotation, then compare against the default annotation files below.
    missing_annotation_detector = MissingAnnotationDetector(set())
    molecules_requiring_annotation = list(
        missing_annotation_detector.missing_in_reaction_smiles(
            iterator, fragment_bond="~"
        )
    )
    annotations = load_annotations_multiple(DEFAULT_ANNOTATION_FILES)
    annotation_info = AnnotationInfo(annotations)
    # Partition the molecules by annotation status.
    not_annotated = [
        m for m in molecules_requiring_annotation if not annotation_info.is_annotated(m)
    ]
    annotated = [
        m for m in molecules_requiring_annotation if annotation_info.is_annotated(m)
    ]
    accepted = [m for m in annotated if annotation_info.is_accepted(m)]
    rejected = [m for m in annotated if annotation_info.is_rejected(m)]
    to_print = [
        ("requiring annotation", molecules_requiring_annotation),
        ("not annotated", not_annotated),
        ("annotated", annotated),
        ("accepted", accepted),
        ("rejected", rejected),
    ]
    # Print summary: total count and number of unique molecules per category.
    for label, smiles_list in to_print:
        print(label, len(smiles_list), len(set(smiles_list)))
    # Print details: the unique molecules of each category, with '.' instead
    # of '~' as fragment bond for readability.
    for label, smiles_list in to_print:
        print()
        print(label)
        print("=" * len(label))
        for smiles in sorted(set(smiles_list)):
            print(smiles.replace("~", "."))


if __name__ == "__main__":
    main()
from typing import Optional
import click
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
load_annotations_multiple,
)
from rxn.reaction_preprocessing.config import DEFAULT_ANNOTATION_FILES
from rxn.reaction_preprocessing.standardizer import Standardizer
@click.command()
@click.option("--csv_file", required=True)
@click.option(
    "--output_csv",
    help="(optional) Where to save the CSV augmented with column for missing annotations.",
)
@click.option(
    "--column_name", required=True, help="Column containing the reaction SMILES"
)
@click.option("--fragment_bond", default="~")
def main(
    csv_file: str, output_csv: Optional[str], column_name: str, fragment_bond: str
) -> None:
    """Find the missing annotation in a set of reactions.

    The missing annotations will be printed to standard output, and optionally
    a CSV will be created with the missing annotations in a new column.
    """
    # NB: in the future, the script may be updated to allow to change the annotation files.
    annotations = load_annotations_multiple(DEFAULT_ANNOTATION_FILES)
    # To find the missing annotations, we mis-use the standardizer, which anyway
    # looks for the missing annotations if `discard_unannotated_metals` is True.
    standardizer = Standardizer.read_csv(
        csv_file,
        annotations=annotations,
        discard_unannotated_metals=True,
        reaction_column_name=column_name,
        fragment_bond=fragment_bond,
        remove_stereo_if_not_defined_in_precursors=False,
    )
    standardizer.standardize()
    # Save csv if required
    if output_csv is not None:
        standardizer.df.to_csv(output_csv, index=False)
    # Deduplicate the per-row lists of missing annotations into a single set.
    missing_annotations = set()
    for missing_annotation_list in standardizer.df[
        standardizer.missing_annotations_column
    ]:
        for missing_annotation in missing_annotation_list:
            missing_annotations.add(missing_annotation)
    for missing_annotation in sorted(missing_annotations):
        print(missing_annotation)


if __name__ == "__main__":
    main()
from typing import Iterable, List, Mapping, Optional, Union
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
AnnotationDecision,
MoleculeAnnotation,
)
class MoleculeReplacer:
    """
    Replace SMILES strings by their improved alternatives (for instance
    coming from the catalyst annotations).
    """

    def __init__(self, replacements: Mapping[str, Union[str, List[str]]]):
        """
        Args:
            replacements: mapping between SMILES strings to replace, and what
                to replace them with (given as a string or list of strings).
                Use dots for fragment bonds!
        """
        # Normalize: every mapped value becomes a list of SMILES strings.
        self.replacements = {}
        for original, replacement in replacements.items():
            if isinstance(replacement, str):
                self.replacements[original] = [replacement]
            else:
                self.replacements[original] = replacement

    def replace_molecule_smiles(self, smiles: str) -> List[str]:
        """
        Do the molecule replacements in a molecule SMILES.

        Returns:
            List of replaced molecules (will have length of one in most
            cases); a one-element list with the unchanged SMILES when no
            replacement is registered.
        """
        return self.replacements.get(smiles, [smiles])

    def replace_in_reaction_smiles(
        self, smiles: str, fragment_bond: Optional[str] = None
    ) -> str:
        """
        Do the molecule replacements in a reaction SMILES.

        Args:
            smiles: reaction SMILES.
            fragment_bond: fragment bond used in the reaction SMILES.
        """
        equation = ReactionEquation.from_string(smiles, fragment_bond)
        replaced = self.replace_in_reaction_equation(equation)
        return replaced.to_string(fragment_bond)

    def replace_in_reaction_equation(
        self, reaction_equation: ReactionEquation
    ) -> ReactionEquation:
        """
        Do the molecule replacements in a ReactionEquation instance.
        """
        return ReactionEquation(
            *(self._replace_in_molecule_list(group) for group in reaction_equation)
        )

    def _replace_in_molecule_list(self, molecules: List[str]) -> List[str]:
        """
        Do the replacements in a list of SMILES, potentially leading to a larger list.
        """
        replaced: List[str] = []
        for molecule in molecules:
            replaced.extend(self.replace_molecule_smiles(molecule))
        return replaced

    @classmethod
    def from_molecule_annotations(
        cls, molecule_annotations: Iterable[MoleculeAnnotation]
    ) -> "MoleculeReplacer":
        """Instantiate from a list of molecule annotations.

        Only accepted annotations that provide an updated SMILES produce a
        replacement rule.
        """
        replacements = {
            annotation.original_without_fragment_bond: annotation.updated_without_fragment_bond
            for annotation in molecule_annotations
            if annotation.decision == AnnotationDecision.ACCEPT
            and annotation.updated_smiles is not None
        }
        return cls(replacements)
import json
from enum import auto
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Union
import attr
from rxn.chemutils.multicomponent_smiles import multicomponent_smiles_to_list
from rxn.utilities.types import RxnEnum
class AnnotationDecision(RxnEnum):
    """Outcome of a molecule annotation: keep ("accept") or discard ("reject")."""

    ACCEPT = auto()
    REJECT = auto()
# NB: init=False because the custom __init__ below converts the "decision"
# string into an enum before delegating to the attrs-generated __attrs_init__.
@attr.s(auto_attribs=True, init=False)
class MoleculeAnnotation:
    """
    Specifies a molecule annotation, i.e. a SMILES string that may have an
    updated SMILES, whether to keep it or not, etc.
    """

    original_smiles: str
    updated_smiles: Optional[str]
    decision: AnnotationDecision
    categories: List[str]
    extra_info: Dict[str, Any]

    def __init__(
        self,
        original_smiles: str,
        updated_smiles: Optional[str],
        decision: str,
        categories: List[str],
        **extra_info: Any
    ):
        """
        Args:
            original_smiles: original SMILES that is potentially present in a
                data set. Fragment bonds are indicated by a tilde '~'.
            updated_smiles: if specified, SMILES with which to replace
                original_smiles. Also uses '~' for fragment bonds, and dots
                '.' may be used to separate compounds from the solvent in which
                they are solved.
            decision: "accept" or "reject".
            categories: categories to which the annotation belongs to.
            **extra_info: additional information not covered by the other
                variables.
        """
        decision_enum = AnnotationDecision.from_string(decision)
        self.__attrs_init__(
            original_smiles=original_smiles,
            updated_smiles=updated_smiles,
            decision=decision_enum,
            categories=categories,
            extra_info=extra_info,
        )

    @property
    def original_without_fragment_bond(self) -> str:
        """Get the original SMILES with dots instead of tildes to delimit fragments."""
        return self.original_smiles.replace("~", ".")

    @property
    def updated_without_fragment_bond(self) -> List[str]:
        """
        Get the updated SMILES with dots instead of tildes to delimit fragments.

        Since dots may be used to delimit solvents from compounds, a list must be returned.

        Raises:
            RuntimeError: if this annotation has no updated SMILES.
        """
        if self.updated_smiles is None:
            raise RuntimeError("No updated SMILES!")
        return multicomponent_smiles_to_list(self.updated_smiles, "~")
def load_annotations(json_file: Union[Path, str]) -> List[MoleculeAnnotation]:
    """
    Load the molecule annotations from a JSON file.

    Args:
        json_file: path to the JSON file containing the annotations.

    Returns:
        List of annotations.
    """
    with open(json_file, "rt") as fp:
        raw_entries = json.load(fp)
    return [MoleculeAnnotation(**entry) for entry in raw_entries]
def load_annotations_multiple(
    json_files: Iterable[Union[Path, str]]
) -> List[MoleculeAnnotation]:
    """
    Load the molecule annotations from multiple JSON files.

    Args:
        json_files: paths to the JSON files containing the annotations.

    Returns:
        List of annotations, concatenated in file order.
    """
    return [
        annotation
        for json_file in json_files
        for annotation in load_annotations(json_file)
    ]
from typing import Callable, Generator, Iterable, Optional, Set, Union
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.reaction_preprocessing.annotations.annotation_criterion import (
AnnotationCriterion,
)
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
MoleculeAnnotation,
)
class MissingAnnotationDetector:
    """
    Find reactions with molecules that should be annotated, taking into account
    a set of already-annotated molecules.
    """

    def __init__(
        self,
        annotated_molecules: Set[str],
        requires_annotation_fn: Optional[Callable[[str], bool]] = None,
    ):
        """
        Args:
            annotated_molecules: set of already-annotated molecules.
            requires_annotation_fn: function with which to decide whether a molecule
                needs an annotation. Defaults to AnnotationCriterion().
        """
        self.annotated_molecules = annotated_molecules
        if requires_annotation_fn is None:
            requires_annotation_fn = AnnotationCriterion()
        self.requires_annotation_fn = requires_annotation_fn

    def molecule_needs_annotation(self, smiles: str) -> bool:
        """
        Whether a molecule needs annotation.

        Checks the overlap between the elements in the molecule and the extended
        transition metals, and then looks in the annotated molecules if necessary.
        """
        if not self.requires_annotation_fn(smiles):
            return False
        else:
            # Would need an annotation in principle; only report it if it
            # has not been annotated already.
            return smiles not in self.annotated_molecules

    def missing_in_reaction_equation(
        self, reaction_equation: ReactionEquation
    ) -> Generator[str, None, None]:
        """In a reaction equation, find the molecules requiring annotation."""
        for smiles in reaction_equation.iter_all_smiles():
            if self.molecule_needs_annotation(smiles):
                yield smiles

    def missing_in_reaction_equations(
        self, reaction_equations: Iterable[ReactionEquation]
    ) -> Generator[str, None, None]:
        """In multiple reaction equations, find the molecules requiring annotation."""
        for reaction_equation in reaction_equations:
            yield from self.missing_in_reaction_equation(reaction_equation)

    def missing_in_reaction_smiles(
        self,
        reaction_smiles: Union[Iterable[str], str],
        fragment_bond: Optional[str] = None,
    ) -> Generator[str, None, None]:
        """
        In one or multiple reaction SMILES, find the molecules requiring annotation.

        Args:
            reaction_smiles: One reaction SMILES (str), or multiple reaction SMILES.
            fragment_bond: fragment bond used in the reaction SMILES.
        """
        # Normalize a single reaction SMILES into a one-element list.
        if isinstance(reaction_smiles, str):
            reaction_smiles = [reaction_smiles]
        reaction_equations = (
            ReactionEquation.from_string(reaction_smile, fragment_bond)
            for reaction_smile in reaction_smiles
        )
        return self.missing_in_reaction_equations(reaction_equations)

    @classmethod
    def from_molecule_annotations(
        cls,
        molecule_annotations: Iterable[MoleculeAnnotation],
        requires_annotation_fn: Optional[Callable[[str], bool]] = None,
    ) -> "MissingAnnotationDetector":
        """
        Create a MissingAnnotationDetector instance from existing molecule annotations.

        Args:
            molecule_annotations: existing molecule annotations.
            requires_annotation_fn: function with which to decide whether a molecule
                needs an annotation. Defaults to AnnotationCriterion().
        """
        original_smiles = {
            annotation.original_without_fragment_bond
            for annotation in molecule_annotations
        }
        # Also consider the updated SMILES, but only if they consist in exactly one molecule.
        updated_smiles = {
            annotation.updated_without_fragment_bond[0]
            for annotation in molecule_annotations
            if annotation.updated_smiles is not None
            and len(annotation.updated_without_fragment_bond) == 1
        }
        return cls(
            annotated_molecules=original_smiles | updated_smiles,
            requires_annotation_fn=requires_annotation_fn,
        )
from typing import Iterable, Optional
from rxn.chemutils.reaction_equation import ReactionEquation
from rxn.reaction_preprocessing.annotations.molecule_annotation import (
AnnotationDecision,
MoleculeAnnotation,
)
class RejectedMoleculesFilter:
    """
    Filter out rejected molecules (potentially based on annotations).
    """

    def __init__(self, rejected_molecules: Iterable[str]):
        """
        Args:
            rejected_molecules: Molecules to reject. Use dots for fragment bonds!
        """
        self.rejected_molecules = set(rejected_molecules)

    def is_valid_molecule_smiles(self, smiles: str) -> bool:
        """
        Whether a molecule SMILES is considered to be valid (i.e. not rejected).

        Args:
            smiles: molecule SMILES. Fragment bonds must be given with a dot!
        """
        if smiles in self.rejected_molecules:
            return False
        return True

    def is_valid_reaction_smiles(
        self, smiles: str, fragment_bond: Optional[str] = None
    ) -> bool:
        """
        Whether a reaction SMILES is considered to be valid.

        Args:
            smiles: reaction SMILES.
            fragment_bond: fragment bond used in the reaction SMILES.
        """
        equation = ReactionEquation.from_string(smiles, fragment_bond)
        return self.is_valid_reaction_equation(equation)

    def is_valid_reaction_equation(self, reaction_equation: ReactionEquation) -> bool:
        """
        Whether a reaction equation is considered to be valid, i.e. whether
        none of its molecules is rejected.

        Args:
            reaction_equation: reaction equation instance.
        """
        for molecule_smiles in reaction_equation.iter_all_smiles():
            if not self.is_valid_molecule_smiles(molecule_smiles):
                return False
        return True

    @classmethod
    def from_molecule_annotations(
        cls, molecule_annotations: Iterable[MoleculeAnnotation]
    ) -> "RejectedMoleculesFilter":
        """
        Instantiate from existing molecule annotations.

        Args:
            molecule_annotations: existing molecule annotations.
        """
        rejected = [
            annotation.original_without_fragment_bond
            for annotation in molecule_annotations
            if annotation.decision is AnnotationDecision.REJECT
        ]
        return cls(rejected)
import itertools
from typing import Generator, Iterable, Optional, Set
from rdkit.Chem import GetPeriodicTable
from rxn.chemutils.miscellaneous import atom_type_counter
class AnnotationCriterion:
    """
    Determine what molecules need an annotation, independently of the molecules
    that have been annotated so far.

    The decision is based on whether the molecules contain (extended)
    transition metals or not.
    """

    def __init__(
        self,
        additional_elements_to_consider: Optional[Iterable[str]] = None,
        elements_not_to_consider: Optional[Iterable[str]] = None,
    ):
        """
        Args:
            additional_elements_to_consider: elements for which to require an
                annotation, in addition to the extended transition metals.
            elements_not_to_consider: elements for which not to require an
                annotation, even if they are extended transition metals.
        """
        elements = set(AnnotationCriterion.extended_transition_metals())
        if additional_elements_to_consider is not None:
            elements |= set(additional_elements_to_consider)
        if elements_not_to_consider is not None:
            elements -= set(elements_not_to_consider)
        self.elements_requiring_annotation = elements

    def __call__(self, smiles: str) -> bool:
        """
        Make the object callable, falling back to "requires_annotation".

        Args:
            smiles: molecule SMILES. Use dots for fragment bonds!
        """
        return self.requires_annotation(smiles)

    def requires_annotation(self, smiles: str) -> bool:
        """
        Whether a given SMILES string requires an annotation.

        Args:
            smiles: molecule SMILES. Use dots for fragment bonds!
        """
        overlap = (
            AnnotationCriterion.elements_in_smiles(smiles)
            & self.elements_requiring_annotation
        )
        return len(overlap) > 0

    @staticmethod
    def elements_in_smiles(smiles: str) -> Set[str]:
        """Set of element symbols occurring in the given SMILES."""
        return set(atom_type_counter(smiles))

    @staticmethod
    def extended_transition_metals() -> Generator[str, None, None]:
        """
        Atomic symbols for the extended transition metals.
        """
        periodic_table = GetPeriodicTable()
        return (
            periodic_table.GetElementSymbol(atomic_number)
            for atomic_number in AnnotationCriterion.extended_transition_metal_numbers()
        )

    @staticmethod
    def extended_transition_metal_numbers() -> Generator[int, None, None]:
        """
        Atomic numbers for the extended transition metals.
        """
        # Al
        yield 13
        # first-row transition metals + Ga
        yield from range(21, 32)
        # second-row transition metals + In
        yield from range(39, 50)
        # lanthanides + third-row transition metals + Tl + Pb + Bi + Po
        yield from range(57, 85)
        # actinides + fourth-row transition metals
        yield from range(89, 113)
import itertools
from typing import (
Any,
Callable,
Generator,
Iterable,
Iterator,
List,
Optional,
Sequence,
Set,
Tuple,
TypeVar,
)
# Generic type variables shared by the helpers of this module
# (T: element type, V: key type used for deduplication).
T = TypeVar("T")
V = TypeVar("V")
def all_identical(sequence: Sequence[Any]) -> bool:
    """Return True if every element of ``sequence`` equals the first one.

    An empty sequence is considered identical (returns True).
    """
    if not sequence:
        return True
    first = sequence[0]
    return all(element == first for element in sequence)
def remove_duplicates(
    seq: Iterable[T], key: Optional[Callable[[T], V]] = None
) -> List[T]:
    """Remove duplicates and preserve order.

    Args:
        seq: sequence to remove duplicates from.
        key: what to base duplicates on, must be hashable.
            Defaults to the elements of seq.

    Returns:
        a list without duplicates.
    """
    if key is None:
        def key(x: T) -> V:
            return x  # type: ignore
    seen = set()
    unique = []
    for item in seq:
        marker = key(item)
        if marker not in seen:
            seen.add(marker)
            unique.append(item)
    return unique
def iterate_unique_values(
    seq: Iterable[T], key: Optional[Callable[[T], V]] = None
) -> Iterator[T]:
    """Remove duplicates and preserve order.

    ``remove_duplicates`` is identical except that it returns a list.

    Args:
        seq: sequence to remove duplicates from.
        key: what to base duplicates on, must be hashable.
            Defaults to the elements of seq.

    Yields:
        the original values after removal of the duplicates
    """
    if key is None:
        def key(x: T) -> V:
            return x  # type: ignore
    observed = set()
    for value in seq:
        fingerprint = key(value)
        if fingerprint in observed:
            continue
        observed.add(fingerprint)
        yield value
def pairwise(s: List[T]) -> Iterator[Tuple[T, T]]:
    """
    Iterate over neighboring pairs of a list.

    s -> (s0,s1), (s1,s2), (s2, s3), ...
    """
    left, right = itertools.tee(s)
    # Advance the second iterator by one element (no-op for empty input).
    next(right, None)
    return zip(left, right)
# Make it possible to test whether a value was provided at all (See Python Cookbook 7.5).
# The sentinel is always compared by identity ("is"); the T annotation only
# exists to satisfy type checkers for default-argument usage below.
_no_value: T = object()  # type: ignore
def chunker(
    iterable: Iterable[T],
    chunk_size: int,
    fill_value: T = _no_value,
) -> Generator[List[T], None, None]:
    """
    Iterate through an iterable in chunks of given size.

    Adapted from the "grouper" recipe in the itertools documentation:
    https://docs.python.org/3/library/itertools.html#itertools-recipes

    Args:
        iterable: some iterable to create chunks from.
        chunk_size: size of the chunks.
        fill_value: value to fill in if the last chunk is too small. If nothing
            is specified, the last chunk may be smaller.

    Returns:
        Iterator over lists representing the chunks.
    """
    # We always pad internally with the module sentinel (never with the
    # user-provided fill value), so that the padding can be stripped
    # unambiguously afterwards.
    iterators = [iter(iterable)] * chunk_size
    for padded_chunk in itertools.zip_longest(*iterators, fillvalue=_no_value):
        chunk = [element for element in padded_chunk if element is not _no_value]
        missing = chunk_size - len(chunk)
        # Only the last chunk can be short; pad it if the user asked for it.
        if missing and fill_value is not _no_value:
            chunk.extend([fill_value] * missing)
        yield chunk
import logging
from enum import Enum
from typing import Iterable, Union
from .files import PathLike
# Module-level logger; the NullHandler avoids "No handlers could be found"
# warnings for library users who do not configure logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class LoggingFormat(Enum):
    """
    Common logging formats used in the RXN universe.
    """

    # Timestamp, level and message only.
    BASIC = "[%(asctime)s %(levelname)s] %(message)s"
    # Additionally includes the source location (file, function, line number).
    DETAILED = (
        "%(asctime)s %(levelname)-7s [%(filename)s:%(funcName)s:%(lineno)d] %(message)s"
    )
def setup_console_logger(
    level: Union[int, str] = "INFO",
    format: Union[LoggingFormat, str] = LoggingFormat.BASIC,
) -> None:
    """
    Set up a logger writing to the console (i.e., to stderr).

    Args:
        level: log level, either as a string ("INFO") or integer (logging.INFO).
        format: log format, as a LoggingFormat value, or a string directly.
    """
    console_handler = logging.StreamHandler()
    _setup_logger_from_handlers(handlers=[console_handler], level=level, format=format)
def setup_file_logger(
    filename: PathLike,
    level: Union[int, str] = "INFO",
    format: Union[LoggingFormat, str] = LoggingFormat.BASIC,
) -> None:
    """
    Set up a logger writing to the given file.

    Overwrites the default file mode 'a' with 'w' (i.e., overwrites the file).

    Args:
        filename: file to write the log to (truncated if it already exists).
        level: log level, either as a string ("INFO") or integer (logging.INFO).
        format: log format, as a LoggingFormat value, or a string directly.
    """
    _setup_logger_from_handlers(
        handlers=[logging.FileHandler(filename, mode="w")], level=level, format=format
    )
def setup_console_and_file_logger(
    filename: PathLike,
    level: Union[int, str] = "INFO",
    format: Union[LoggingFormat, str] = LoggingFormat.BASIC,
) -> None:
    """
    Set up a logger writing to both the terminal and the given file.

    Overwrites the default file mode 'a' with 'w' (i.e., overwrites the file).

    Args:
        filename: file to write the log to (truncated if it already exists).
        level: log level, either as a string ("INFO") or integer (logging.INFO).
        format: log format, as a LoggingFormat value, or a string directly.
    """
    _setup_logger_from_handlers(
        handlers=[logging.FileHandler(filename, mode="w"), logging.StreamHandler()],
        level=level,
        format=format,
    )
def _setup_logger_from_handlers(
    handlers: Iterable[logging.Handler],
    level: Union[int, str],
    format: Union[LoggingFormat, str],
) -> None:
    """
    Helper function to avoid duplication in the other setup functions.

    Args:
        handlers: log handlers.
        level: log level, either as a string ("INFO") or integer (logging.INFO).
        format: log format, as a LoggingFormat value, or a string directly.
    """
    format_string = format.value if isinstance(format, LoggingFormat) else format
    logging.basicConfig(format=format_string, level=level, handlers=handlers)
def log_debug(message: str) -> None:
    """Log ``message`` with DEBUG severity on this module's logger.

    Mainly useful for testing logging capabilities from another Python package.
    """
    logger.log(logging.DEBUG, message)
def log_info(message: str) -> None:
    """Log ``message`` with INFO severity on this module's logger.

    Mainly useful for testing logging capabilities from another Python package.
    """
    logger.log(logging.INFO, message)
def log_warning(message: str) -> None:
    """Log ``message`` with WARNING severity on this module's logger.

    Mainly useful for testing logging capabilities from another Python package.
    """
    logger.log(logging.WARNING, message)
def log_error(message: str) -> None:
    """Log ``message`` with ERROR severity on this module's logger.

    Mainly useful for testing logging capabilities from another Python package.
    """
    logger.log(logging.ERROR, message)
import re
# All the characters commonly used as a "dash" in text; useful for
# normalization before further processing.
dash_characters = [
    "-",  # hyphen-minus
    "–",  # en dash
    "—",  # em dash
    "−",  # minus sign
    # Fix: the original entry was an empty string — the invisible soft-hyphen
    # character was presumably lost in an encoding step. An empty string is
    # wrong here (it would "match" at every position).
    "\u00ad",  # soft hyphen
]
def remove_prefix(text: str, prefix: str, raise_if_missing: bool = False) -> str:
    """Removes a prefix from a string, if present at its beginning.

    Args:
        text: string potentially containing a prefix.
        prefix: string to remove at the beginning of text.
        raise_if_missing: whether to raise a ValueError if the prefix is not found.

    Raises:
        ValueError: if the prefix is not found and raise_if_missing is True.
    """
    if not text.startswith(prefix):
        if raise_if_missing:
            raise ValueError(f'Prefix "{prefix}" not found in "{text}".')
        return text
    return text[len(prefix) :]
def remove_postfix(text: str, postfix: str, raise_if_missing: bool = False) -> str:
    """Removes a postfix from a string, if present at its end.

    Args:
        text: string potentially containing a postfix.
        postfix: string to remove at the end of text.
        raise_if_missing: whether to raise a ValueError if the postfix is not found.

    Raises:
        ValueError: if the postfix is not found and raise_if_missing is True.
    """
    if text.endswith(postfix):
        # Slice with an explicit end index: "text[: -len(postfix)]" is wrong
        # for an empty postfix, because "text[:-0]" is the empty string.
        return text[: len(text) - len(postfix)]
    if raise_if_missing:
        raise ValueError(f'Postfix "{postfix}" not found in "{text}".')
    return text
def escape_latex(text: str) -> str:
    r"""
    Escape special LaTeX characters in a string.

    Adapted from https://stackoverflow.com/a/25875504.

    Example: will convert "30%" to "30\%".

    Args:
        text: string to escape.

    Returns:
        The message escaped to appear correctly in LaTeX.
    """
    replacements = {
        "&": r"\&",
        "%": r"\%",
        "$": r"\$",
        "#": r"\#",
        "_": r"\_",
        "{": r"\{",
        "}": r"\}",
        "~": r"\textasciitilde{}",
        "^": r"\^{}",
        "\\": r"\textbackslash{}",
        "<": r"\textless{}",
        ">": r"\textgreater{}",
    }
    # Longer keys first, so that (hypothetical) multi-character sequences
    # would take precedence over their single-character prefixes.
    ordered_keys = sorted(replacements, key=len, reverse=True)
    pattern = re.compile("|".join(re.escape(key) for key in ordered_keys))
    return pattern.sub(lambda match: replacements[match.group()], text)
import errno
import os
import random
import shutil
import sys
import tempfile
from contextlib import ExitStack, contextmanager
from pathlib import Path
from typing import Iterable, Iterator, List, Tuple, Union
from typing_extensions import TypeAlias
from .basic import temporary_random_seed
from .containers import all_identical
PathLike: TypeAlias = Union[str, os.PathLike]
def load_list_from_file(filename: PathLike) -> List[str]:
    """Read a text file and return its lines (without newline characters) as a list."""
    return list(iterate_lines_from_file(filename))
def iterate_lines_from_file(filename: PathLike) -> Iterator[str]:
    """Lazily yield the lines of a text file, trailing newlines stripped."""
    with open(filename, "rt") as handle:
        yield from (raw_line.rstrip("\r\n") for raw_line in handle)
def dump_list_to_file(values: Iterable[str], filename: PathLike) -> None:
    """Write an iterable of strings to a file, one value per line.

    Args:
        values: values to write to the file.
        filename: file to write to. Will be overwritten if it exists already.
    """
    with open(filename, "wt") as handle:
        handle.writelines(f"{value}\n" for value in values)
def append_to_file(values: Iterable[str], filename: PathLike) -> None:
    """Append an iterable of strings to a file, one value per line.

    Args:
        values: values to append to the file.
        filename: file to append to.
    """
    with open(filename, "at") as handle:
        handle.writelines(f"{value}\n" for value in values)
def count_lines(filename: PathLike) -> int:
    """Count the number of lines in a text file.

    The file is opened in a context manager so that the handle is closed
    deterministically; the original implementation leaked the handle until
    garbage collection.
    """
    with open(filename, "rt") as handle:
        return sum(1 for _ in handle)
def iterate_tuples_from_files(
    filenames: List[PathLike],
) -> Iterator[Tuple[str, ...]]:
    """
    Read from several files at once, and put the values from the same lines numbers
    into tuples.

    Note: this is a generator function, so the length check below only runs
    when iteration actually starts, not when the function is called.

    Args:
        filenames: files to read.

    Raises:
        ValueError: if the files do not all have the same number of lines.

    Returns:
        iterator over the generated tuples.
    """
    # Make sure the files have the same lengths. This is not the optimal solution
    # and in principle, one could detect unequal lengths when reading the files.
    # However, an easy solution is available only from Python 3.10:
    # https://stackoverflow.com/q/32954486
    if not all_identical([count_lines(file) for file in filenames]):
        raise ValueError("Not all the files have identical lengths")
    # Opening several files at once;
    # See https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
    with ExitStack() as stack:
        files = [stack.enter_context(open(fname, "rt")) for fname in filenames]
        # One newline-stripping iterator per file; zip pairs them line by line.
        iterators = [(line.rstrip("\r\n") for line in f) for f in files]
        yield from zip(*iterators)
def dump_tuples_to_files(
    values: Iterable[Tuple[str, ...]], filenames: List[PathLike]
) -> None:
    """Write tuples to multiple files (1st tuple value ends up in 1st file, etc.).

    Args:
        values: tuples to write to files.
        filenames: files to create.

    Raises:
        ValueError: if a tuple's size does not match the number of files.
    """
    # Opening several files at once;
    # See https://docs.python.org/3/library/contextlib.html#contextlib.ExitStack
    with ExitStack() as stack:
        handles = [stack.enter_context(open(name, "wt")) for name in filenames]
        expected_size = len(handles)
        for value_tuple in values:
            if len(value_tuple) != expected_size:
                raise ValueError(
                    f"Tuple {value_tuple} has incorrect size (expected: {expected_size})."
                )
            for value, handle in zip(value_tuple, handles):
                handle.write(f"{value}\n")
def stable_shuffle(
    input_file: PathLike, output_file: PathLike, seed: int, is_csv: bool = False
) -> None:
    """
    Shuffle a file in a deterministic order (the same seed always reorders
    files of the same number of lines identically).

    Useful, as an example, to shuffle a source and target files identically.

    Note: the full file content is loaded into memory before shuffling.

    Args:
        input_file: file to shuffle.
        output_file: where to save the shuffled file.
        seed: seed for the random number generator (global random state is
            restored when done).
        is_csv: if True, the first line will not be shuffled.
    """
    # Note we use the context manager to avoid side effects of setting the seed.
    with temporary_random_seed(seed):
        line_iterator = iterate_lines_from_file(input_file)
        # Get the header, if it's a CSV. We store it as a list, which will have 0 or 1 element.
        header = []
        if is_csv:
            header = [next(line_iterator)]
        # Get actual content and shuffle it
        lines = list(line_iterator)
        random.shuffle(lines)
        # Write header (if there is no header, it will empty the file)
        dump_list_to_file(header, output_file)
        # Write the shuffled lines
        append_to_file(lines, output_file)
@contextmanager
def named_temporary_path(delete: bool = True) -> Iterator[Path]:
    """
    Provide a path for a temporary file or directory, without creating it
    (can be especially useful in tests).

    Similar to tempfile.NamedTemporaryFile when the file is not meant to be
    opened immediately: the caller only needs a writable / readable path that
    is optionally cleaned up when the context exits. This also sidesteps a
    NamedTemporaryFile limitation on Windows
    (https://stackoverflow.com/q/23212435); the approach is inspired by
    https://stackoverflow.com/a/58955530.

    Args:
        delete: whether to delete the file when exiting the context

    Examples:
        >>> with named_temporary_path() as temporary_path:
        ...     # do something on the temporary path.
        ...     # The file or directory at that path will be deleted at the
        ...     # end of the context, except if delete=False.
    """
    temporary_directory = Path(tempfile.gettempdir())
    # A random hex name makes collisions with existing entries very unlikely.
    candidate = temporary_directory / os.urandom(24).hex()
    try:
        yield candidate
    finally:
        if delete and candidate.exists():
            if candidate.is_file():
                candidate.unlink()
            else:
                shutil.rmtree(candidate)
@contextmanager
def named_temporary_directory(delete: bool = True) -> Iterator[Path]:
    """
    Get the path for a temporary directory and create it.

    Relies on ``named_temporary_path`` to provide a context manager that will
    automatically delete the directory when leaving the context.

    Args:
        delete: whether to delete the directory when exiting the context

    Examples:
        >>> with named_temporary_directory() as temporary_directory:
        ...     # do something with the temporary directory.
        ...     # The directory will be deleted at the
        ...     # end of the context, except if delete=False.
    """
    with named_temporary_path(delete=delete) as path:
        path.mkdir()
        yield path
def is_pathname_valid(pathname: PathLike) -> bool:
    """
    `True` if the passed pathname is a valid pathname for the current OS;
    `False` otherwise.

    Copied from https://stackoverflow.com/a/34102855. More details there.
    """
    pathname = str(pathname)
    try:
        if not isinstance(pathname, str) or not pathname:
            return False
        # Strip a Windows drive specifier ("C:") before validating components.
        _, pathname = os.path.splitdrive(pathname)
        root_dirname = (
            os.environ.get("HOMEDRIVE", "C:")
            if sys.platform == "win32"
            else os.path.sep
        )
        assert os.path.isdir(root_dirname)
        root_dirname = root_dirname.rstrip(os.path.sep) + os.path.sep
        # Validate each path component individually by stat-ing it against
        # the filesystem root; only *name validity* errors make us return False.
        for pathname_part in pathname.split(os.path.sep):
            try:
                os.lstat(root_dirname + pathname_part)
            except OSError as exc:
                if hasattr(exc, "winerror"):
                    # 123 is Windows' ERROR_INVALID_NAME.
                    error_invalid_name = 123
                    if exc.winerror == error_invalid_name:
                        return False
                elif exc.errno in {errno.ENAMETOOLONG, errno.ERANGE}:
                    return False
    # NOTE(review): presumably raised by os.lstat for non-string-safe input
    # (e.g. embedded NUL bytes on some platforms) — see the linked answer.
    except TypeError:
        return False
    else:
        return True
def is_path_creatable(pathname: PathLike) -> bool:
    """
    `True` if the current user has sufficient permissions to create the passed
    pathname; `False` otherwise.

    Only the writability of the parent directory is checked (the current
    working directory for bare filenames).

    Copied from https://stackoverflow.com/a/34102855. More details there.
    """
    pathname = str(pathname)
    dirname = os.path.dirname(pathname) or os.getcwd()
    return os.access(dirname, os.W_OK)
def is_path_exists_or_creatable(pathname: PathLike) -> bool:
    """
    `True` if the passed pathname is a valid pathname for the current OS _and_
    either currently exists or is hypothetically creatable; `False` otherwise.

    This function is guaranteed to _never_ raise exceptions.

    Copied from https://stackoverflow.com/a/34102855. More details there.
    """
    pathname = str(pathname)
    try:
        return is_pathname_valid(pathname) and (
            os.path.exists(pathname) or is_path_creatable(pathname)
        )
    # Swallow filesystem errors so the "never raises" guarantee holds.
    except OSError:
        return False
def paths_are_identical(*paths: PathLike) -> bool:
    """Whether paths, possibly given in a mix of absolute and relative formats,
    point to the same file."""
    canonical = set()
    for path in paths:
        canonical.add(os.path.realpath(path))
    return len(canonical) == 1
def raise_if_paths_are_identical(*paths: PathLike) -> None:
    """
    Raise an exception if input and output paths point to the same file.

    Raises:
        ValueError: if at least two of the given paths are identical.
    """
    if not paths_are_identical(*paths):
        return
    paths_str = ", ".join(f'"{p}"' for p in paths)
    raise ValueError(f"The paths {paths_str} must be different.")
def ensure_directory_exists_and_is_empty(directory: Path) -> None:
    """Create a directory if it does not exist already, and raise if not empty.

    Raises:
        RuntimeError: if the directory contains any entry.
    """
    directory.mkdir(parents=True, exist_ok=True)
    if any(directory.iterdir()):
        raise RuntimeError(f'The directory "{directory}" is required to be empty.')
from __future__ import annotations
import csv
from typing import Iterator, List, TextIO, Type, TypeVar
_CsvIteratorT = TypeVar("_CsvIteratorT", bound="CsvIterator")
class CsvIterator:
    """Iterate through a CSV file with convenient access to the column names.

    The opening/closing of the underlying file is deliberately left to the
    caller: this avoids any bookkeeping about which files are open and when
    they must be closed.

    Examples:
        >>> with open("some_file.csv", "rt") as f:
        ...     csv_iterator = CsvIterator.from_stream(f)
        ...     area_index = csv_iterator.column_index("area")
        ...     price_index = csv_iterator.column_index("price")
        ...     for row in csv_iterator.rows:
        ...         price = row[price_index]
        ...         area = row[area_index]
    """

    def __init__(self, columns: List[str], rows: Iterator[List[str]]):
        # Header names, in order of appearance.
        self.columns = columns
        # Lazily-consumed iterator over the data rows (header excluded).
        self.rows = rows

    def column_index(self, column_name: str) -> int:
        """
        Get the index corresponding to the given column.

        Args:
            column_name: column to look up.

        Raises:
            ValueError: if the column does not exist.

        Returns:
            the index for the given column.
        """
        if column_name not in self.columns:
            raise ValueError(f'Column "{column_name}" not found in {self.columns}.')
        return self.columns.index(column_name)

    @classmethod
    def from_stream(
        cls: Type[_CsvIteratorT], stream: TextIO, delimiter: str = ","
    ) -> _CsvIteratorT:
        """Instantiate from a stream or file object.

        Args:
            stream: stream or file object to instantiate from.
            delimiter: CSV delimiter.
        """
        parsed = csv.reader(stream, delimiter=delimiter)
        # The first row is the header; the rest stays lazy.
        column_names = next(parsed)
        return cls(columns=column_names, rows=parsed)

    def to_stream(
        self, file: TextIO, delimiter: str = ",", line_terminator: str = "\n"
    ) -> None:
        """Write the header and all the remaining rows to the given stream."""
        writer = csv.writer(file, delimiter=delimiter, lineterminator=line_terminator)
        writer.writerow(self.columns)
        for row in self.rows:
            writer.writerow(row)
from inspect import Signature, signature
from typing import Any, Callable, List, Tuple, Type, Union
from attr import define
from tqdm import tqdm
from typing_extensions import TypeAlias
from ..files import PathLike, count_lines
from . import CsvIterator
# Transformation function as actually used under the hood: maps the values of
# the input columns to the values of the output columns, for one row.
_TransformationFunction: TypeAlias = Callable[[List[str]], List[str]]
class StreamingCsvEditor:
    """
    Edit the content of a CSV with a specified transformation, line-by-line.

    This class avoids loading the whole file into memory as would be done
    with a pandas DataFrame.
    """

    def __init__(
        self,
        columns_in: List[str],
        columns_out: List[str],
        transformation: Callable[..., Any],
        line_terminator: str = "\n",
    ):
        """
        Args:
            columns_in: names for the columns acting as input for the transformation.
            columns_out: names for the columns where to write the result of the
                transformation.
            transformation: function to call on the values from the input columns,
                with the results being written to the output columns.
                The function should be annotated, and the following are admissible:
                - For the parameters:
                  - one or several strings
                  - a list of strings (with one or more elements)
                  - a tuple of strings (with one or more elements)
                - For the return type:
                  - one string
                  - a list of strings (with one or more elements)
                  - a tuple of strings (with one or more elements)
            line_terminator: line terminator to use for writing the CSV.
        """
        # Normalize the user callback once, up front, to a
        # List[str] -> List[str] function (may raise on bad annotations).
        self.transformation = _CsvTransformation(
            columns_in=columns_in,
            columns_out=columns_out,
            fn=_callback_handler(transformation),
        )
        self.line_terminator = line_terminator

    def process(self, csv_iterator: CsvIterator) -> CsvIterator:
        """
        Process and edit a CSV file.

        Args:
            csv_iterator: Input CSV iterator.

        Returns:
            an edited instance of a CsvIterator (rows are produced lazily).
        """
        helper = _Helper(csv_iterator.columns, transformation=self.transformation)
        # The generator expression keeps the processing streaming: rows are
        # transformed only when the returned iterator is consumed.
        return CsvIterator(
            columns=helper.output_columns,
            rows=(helper.process_line(row) for row in csv_iterator.rows),
        )

    def process_paths(
        self, path_in: PathLike, path_out: PathLike, verbose: bool = False
    ) -> None:
        """
        Process and edit a CSV file.

        Args:
            path_in: path to the existing CSV.
            path_out: path to the edited CSV (to be saved).
            verbose: whether to write the progress with tqdm.
        """
        with open(path_in, "rt") as f_in, open(path_out, "wt") as f_out:
            input_iterator = CsvIterator.from_stream(f_in)
            if verbose:
                # Count the rows first so tqdm can show a percentage.
                row_count = count_lines(path_in)
                input_iterator = CsvIterator(
                    input_iterator.columns,
                    rows=(row for row in tqdm(input_iterator.rows, total=row_count)),
                )
            output_iterator = self.process(input_iterator)
            output_iterator.to_stream(f_out, line_terminator=self.line_terminator)
@define
class _CsvTransformation:
    """Helper class containing the details of a transformation for one CSV file."""

    # Names of the columns read as input for the transformation.
    columns_in: List[str]
    # Names of the columns the results are written to.
    columns_out: List[str]
    # Normalized callback: List[str] -> List[str].
    fn: _TransformationFunction
class _Helper:
    """Helper class that does the actual row-by-row processing."""

    def __init__(
        self,
        input_columns: List[str],
        transformation: _CsvTransformation,
    ):
        self.fn = transformation.fn
        # Positions, in the incoming rows, of the transformation inputs.
        self.indices_in = self._determine_column_indices(
            input_columns, transformation.columns_in
        )
        # Output columns that do not exist yet are appended at the end.
        added_columns = [
            name for name in transformation.columns_out if name not in input_columns
        ]
        self.n_new_columns = len(added_columns)
        self.output_columns = input_columns + added_columns
        # Positions, in the outgoing rows, of the transformation outputs.
        self.indices_out = self._determine_column_indices(
            self.output_columns, transformation.columns_out
        )

    def _determine_column_indices(
        self, all_columns: List[str], target_columns: List[str]
    ) -> List[int]:
        positions: List[int] = []
        for column_name in target_columns:
            if column_name not in all_columns:
                raise RuntimeError(f'"{column_name}" not found in {all_columns}.')
            positions.append(all_columns.index(column_name))
        return positions

    def process_line(self, row: List[str]) -> List[str]:
        """Process one line from the CSV.

        Args:
            row: content of one CSV line (mutated in place).

        Returns:
            Content of the line after applying the function
        """
        inputs = [row[index] for index in self.indices_in]
        outputs = self.fn(inputs)
        # Make room for the newly added columns before writing the results.
        row += ["" for _ in range(self.n_new_columns)]
        for index, value in zip(self.indices_out, outputs):
            row[index] = value
        return row
def _parameter_is_tuple(parameter_type: Type[Any]) -> bool:
    """Heuristic (on the string representation) for tuple-like annotations."""
    type_string = str(parameter_type)
    return "Tuple" in type_string or "tuple" in type_string
def _parameter_is_list(parameter_type: Type[Any]) -> bool:
    """Heuristic (on the string representation) for list-like annotations."""
    type_string = str(parameter_type)
    return "List" in type_string or "list" in type_string
def _parameter_is_list_or_tuple(parameter_type: Type[Any]) -> bool:
    """Whether the annotation looks like a list or a tuple (string heuristic)."""
    return _parameter_is_list(parameter_type) or _parameter_is_tuple(parameter_type)
def _postprocessing_fn(fn: Callable[..., Any]) -> Callable[..., List[str]]:
    """From the user-given function, wrap it so that the result is converted
    to a list of strings.

    The adapter is chosen from the *return annotation* of ``fn``.

    Raises:
        ValueError: if ``fn`` has no return annotation, or an unsupported one.
    """
    sig = signature(fn)
    return_type = sig.return_annotation
    if return_type is Signature.empty:
        raise ValueError(
            "Make sure that the function you provided has a return annotation."
        )
    adapter: Callable[..., List[str]]
    # A single string becomes a one-element list.
    if return_type is str:

        def adapter(x: str) -> List[str]:
            return [x]

        return adapter
    # Lists and tuples are converted to lists as-is.
    if _parameter_is_list_or_tuple(return_type):

        def adapter(x: Union[List[str], Tuple[str]]) -> List[str]:
            return list(x)

        return adapter
    raise ValueError(f"Unsupported return type: {return_type}")
def _preprocessing_fn(fn: Callable[..., Any]) -> Callable[[List[str]], Any]:
    """From the user-given function, wrap it so that it can ingest a list of strings.

    The adapter is chosen from the *parameter annotations* of ``fn``.

    Raises:
        ValueError: if ``fn`` is not fully annotated, or the parameter types
            are not supported.
    """
    sig = signature(fn)
    parameter_types = [p.annotation for p in sig.parameters.values()]
    if any(p is Signature.empty for p in parameter_types):
        raise ValueError(
            "Make sure that the function you provided is fully type-annotated."
        )
    # Necessary for the below
    adapter: Callable[[List[str]], Any]
    # Case 1: one or several str parameters -> unpack the list.
    parameters_are_strs = all(p is str for p in parameter_types)
    if parameters_are_strs:

        def adapter(inputs: List[str]) -> Any:
            return fn(*inputs)

        return adapter
    # Case 2: a single list parameter -> pass the list directly.
    parameters_is_list = len(parameter_types) == 1 and _parameter_is_list(
        parameter_types[0]
    )
    if parameters_is_list:

        def adapter(inputs: List[str]) -> Any:
            return fn(inputs)

        return adapter
    # Case 3: a single tuple parameter -> convert the list to a tuple.
    parameters_is_tuple = len(parameter_types) == 1 and _parameter_is_tuple(
        parameter_types[0]
    )
    if parameters_is_tuple:

        def adapter(inputs: List[str]) -> Any:
            return fn(tuple(inputs))

        return adapter
    raise ValueError(
        f"Cannot process parameter types of function with signature {sig}."
    )
def _callback_handler(fn: Callable[..., Any]) -> _TransformationFunction:
    """From the user-provided callback, convert it to a function converting
    a list of strings to a list of strings.

    Raises:
        ValueError: if ``fn`` is not fully type-annotated.
    """
    sig = signature(fn)
    parameter_types = [p.annotation for p in sig.parameters.values()]
    if any(p is Signature.empty for p in parameter_types):
        raise ValueError(
            "Make sure that the function you provided is fully type-annotated."
        )
    # NOTE(review): the annotation check above is repeated inside
    # _preprocessing_fn — presumably kept here so the error surfaces early.
    postprocessing_fn = _postprocessing_fn(fn)
    preprocessing_fn = _preprocessing_fn(fn)

    def new_fn(inputs: List[str]) -> List[str]:
        return postprocessing_fn(preprocessing_fn(inputs))

    return new_fn
import os
from functools import lru_cache
from typing import Any, Dict, Optional
import pymongo
from pydantic import Extra
try:
# pydantic >= 2, requires the pydantic_settings package
from pydantic_settings import BaseSettings # type: ignore[import,unused-ignore]
except ImportError:
# pydantic < 2
from pydantic import BaseSettings # type: ignore[no-redef,unused-ignore]
class PyMongoSettings(BaseSettings):  # type: ignore[misc,unused-ignore]
    """Settings for connecting to a MongoDB via pymongo."""

    # Full MongoDB connection string; overridable via the RXN_MONGO_URI env var.
    mongo_uri: Optional[str] = None
    # Optional path to a CA certificate for TLS connections.
    tls_ca_certificate_path: Optional[str] = None

    class Config:
        env_prefix = "RXN_"  # prefix for env vars to override defaults
        extra = Extra.ignore

    @staticmethod
    def instantiate_client(
        mongo_uri: str,
        tls_ca_certificate_path: Optional[str] = None,
        tz_aware: bool = False,
    ) -> pymongo.MongoClient:  # type: ignore
        # MongoClient's generic typing is incompatible with older versions
        """Instantiate a Mongo client using the provided SSL settings.

        All other options except the tlsCAFile (and tz_aware) are expected
        to be passed via the mongo_uri. For example for insecure access
        something like the following could be added to the url:
        ssl=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true
        Different mongodb server versions might behave differently!

        Args:
            mongo_uri: connection string for Mongo.
            tls_ca_certificate_path: optional path to an SSL CA certificate.
            tz_aware: flag indicating whether datetime objects returned are
                timezone aware.

        Returns:
            a client for MongoDB.
        """
        options: Dict[str, Any] = {}
        # Only pass the CA file if the path is set AND actually exists.
        if tls_ca_certificate_path and os.path.exists(tls_ca_certificate_path):
            options["tlsCAFile"] = tls_ca_certificate_path
        return pymongo.MongoClient(mongo_uri, tz_aware=tz_aware, **options)

    def get_client(self, tz_aware: bool = False) -> pymongo.MongoClient:  # type: ignore
        # MongoClient's generic typing is incompatible with older versions
        """Instantiate a Mongo client using the settings of this instance.

        All other options except the tlsCAFile (and tz_aware) are expected
        to be passed via the mongo_uri. For example for insecure access
        something like the following could be added to the url:
        ssl=true&tlsAllowInvalidCertificates=true&tlsAllowInvalidHostnames=true
        Different mongodb server versions might behave differently!

        Args:
            tz_aware: flag indicating whether datetime objects returned are timezone aware.

        Raises:
            ValueError: if no mongo_uri is configured.

        Returns:
            a client for MongoDB.
        """
        if self.mongo_uri is None:
            raise ValueError(
                "mongo_uri is not set, define it via RXN_MONGO_URI environment variable!"
            )
        return self.instantiate_client(
            self.mongo_uri, self.tls_ca_certificate_path, tz_aware=tz_aware
        )
@lru_cache()
def get_pymongo_settings() -> PyMongoSettings:
    """Return a process-wide cached PyMongoSettings instance.

    The environment is only read the first time this is called.
    """
    return PyMongoSettings()
import logging
import os
import sys
import click
import click_log
import colorama
from rxncon.input.excel_book.excel_book import ExcelBook
from rxncon.visualization.regulatory_graph import SpeciesReactionGraph
from rxncon.visualization.graphML import XGMML
from rxncon.visualization.graphML import map_layout2xgmml
logger = logging.getLogger(__name__)

# Initialize colorama so that ANSI color codes also work on Windows consoles.
colorama.init()
def _file_path_existence(file_path):
    """
    Checking if the file path already exists.

    Note:
        It is supposed to be possible to overwrite existing files.

    Args:
        file_path: File path.

    Returns:
        None

    Raises:
        FileExistsError: If file exists.
        NotADirectoryError: If directory does not exists.
    """
    directory, basename = os.path.split(file_path)
    if directory:
        if not os.path.exists(directory):
            raise NotADirectoryError("Path {0} does not exists.".format(directory))
        if os.path.isfile(file_path):
            raise FileExistsError("{0} exists! remove file and run again".format(file_path))
    elif os.path.isfile(basename):
        raise FileExistsError("{0} exists! remove file and run again".format(file_path))
def write_xgmml(excel_filename: str, output=None, layout_template_file=None):
    """
    creating the xgmml file from an excel input and writing it into a new file.

    Args:
        excel_filename: Name of the excel input file.
        output: Name of the new output.
        layout_template_file: Name of the layout template file.

    Returns:
        None
    """
    if not output:
        output = os.path.splitext(os.path.basename(excel_filename))[0]
    base_path = os.path.dirname(excel_filename)
    suffix = '_srg'
    # Normalize the output name so it always ends in "_srg.xgmml".
    if not output.endswith('.xgmml'):
        output = '{0}{1}.xgmml'.format(output, suffix)
    else:
        base_name = output.split('.xgmml')[0]
        output = "{0}{1}.{2}".format(base_name, suffix, 'xgmml')
    graph_filename = os.path.join(base_path, '{0}'.format(output))
    # NOTE(review): looks like leftover debug output — confirm before removing.
    print('graph_filename: ', graph_filename)
    _file_path_existence(graph_filename)
    print('Reading in Excel file [{}] ...'.format(excel_filename))
    excel_book = ExcelBook(excel_filename)
    rxncon_system = excel_book.rxncon_system
    print('Constructed rxncon system: [{} reactions], [{} contingencies]'
          .format(len(rxncon_system.reactions), len(rxncon_system.contingencies)))
    print('Generating reaction species graph output...')
    reg_system = SpeciesReactionGraph(rxncon_system)
    graph = reg_system.to_graph()
    if layout_template_file:
        # Transfer node positions from an existing xgmml file onto the new graph.
        print('Writing layout information from [{0}] to graph file [{1}] ...'.format(layout_template_file,
                                                                                     graph_filename))
        gml_system = XGMML(graph, "{}".format(output))
        graph = map_layout2xgmml(gml_system.to_string(), layout_template_file)
        print('Writing reaction species graph file [{}] ...'.format(graph_filename))
        with open(graph_filename, "w") as graph_handle:
            graph_handle.write(graph)
    else:
        print('Writing reaction species graph file [{}] ...'.format(graph_filename))
        gml_system = XGMML(graph, "{}".format(output))
        gml_system.to_file(graph_filename)
# Command-line entry point. Note: adding a docstring to this function would
# change the CLI, because click uses it as the --help text.
@click.command()
@click.option('--output', default=None,
              help='Base name for output files. Default: \'fn\' for input file \'fn.xls\'')
@click.option('--layout', default=None, nargs=1, type=click.Path(exists=True),
              help='xgmml file containing layout information, which should be transferred to the new file.')
@click.argument('excel_file')
@click_log.simple_verbosity_option(default='WARNING')
@click_log.init()
def run(output, excel_file, layout):
    write_xgmml(excel_file, output, layout)
def setup_logging_colors():
    """Monkey-patch ``click_log.ColorFormatter`` for colored, prefixed output.

    Installs a per-level color map and replaces the formatter's ``format``
    method so plain records render as ``level: logger_name message`` with
    level-specific colors.  Records carrying exception info are left to the
    stock ``logging.Formatter`` so tracebacks stay intact.
    """
    click_log.ColorFormatter.colors = {
        'error': dict(fg='red'),
        'exception': dict(fg='red'),
        'critical': dict(fg='red'),
        'debug': dict(fg='yellow'),
        'warning': dict(fg='yellow'),
        'info': dict(fg='yellow')
    }

    def format(self, record):
        # Only decorate plain records; exc_info records fall through to the
        # default Formatter call below so tracebacks are rendered normally.
        if not record.exc_info:
            level = record.levelname.lower()
            if level in self.colors:
                padding_size = 7  # Assume just INFO / DEBUG entries.
                prefix = click.style('{}: '.format(level).ljust(padding_size),
                                     **self.colors[level])
                prefix += click.style('{} '.format(record.name), fg='blue')
                msg = record.msg
                # Normalise the message to str before prefixing each line.
                if isinstance(msg, bytes):
                    msg = msg.decode(sys.getfilesystemencoding(),
                                     'replace')
                elif not isinstance(msg, str):
                    msg = str(msg)
                # Prefix every line so multi-line messages stay aligned.
                record.msg = '\n'.join(prefix + x for x in msg.splitlines())
        return logging.Formatter.format(self, record)
    # Install the replacement on the class (affects all formatter instances).
    click_log.ColorFormatter.format = format
# Script entry point: install colored logging, then dispatch the click command.
if __name__ == '__main__':
    try:
        setup_logging_colors()
        run()
    except Exception as e:
        # Broad catch so CLI users see a short hint instead of a traceback;
        # the printed message points at '-v DEBUG' for details.
print('ERROR: {}\n{}\nPlease re-run this command with the \'-v DEBUG\' option.'.format(type(e), e)) | /rxncon-2.0b14.tar.gz/rxncon-2.0b14/rxncon2srgraph.py | 0.477798 | 0.155431 | rxncon2srgraph.py | pypi |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the boolean_model app.

    Creates ``Bool_from_rxnconsys``: one row per Boolean-model export job,
    holding project metadata, the rxncon export options selected by the user,
    and the paths of the generated model/symbol/init files.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Bool_from_rxnconsys',
            fields=[
                # Project bookkeeping.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project_name', models.CharField(max_length=120)),
                ('slug', models.SlugField(blank=True)),
                ('comment', models.TextField(blank=True, null=True)),
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                # rxncon Boolean-model export options.
                ('smoothing', models.CharField(choices=[('no_smoothing', 'no_smoothing'), ('smooth_production_sources', 'smooth_production_sources')], default='smooth_production_sources', help_text='Smoothing strategy.', max_length=25)),
                ('knockout', models.CharField(choices=[('no_knockout', 'no_knockout'), ('knockout_neutral_states', 'knockout_neutral_states'), ('knockout_all_states', 'knockout_all_states')], default='no_knockout', help_text='Generate knockouts.', max_length=23)),
                ('overexpr', models.CharField(choices=[('no_overexpression', 'no_overexpression'), ('overexpress_neutral_states', 'overexpress_neutral_states'), ('overexpress_all_states', 'overexpress_all_states')], default='no_overexpression', help_text='Generate overexpressions.', max_length=26)),
                ('k_plus', models.CharField(choices=[('strict', 'strict'), ('ignore', 'ignore')], default='ignore', help_text='Strategy for handling k+ contingencies.', max_length=6)),
                ('k_minus', models.CharField(choices=[('strict', 'strict'), ('ignore', 'ignore')], default='ignore', help_text='Strategy for handling k- contingencies.', max_length=6)),
                # Generated output files.
                ('model_path', models.FileField(null=True, upload_to='')),
                ('symbol_path', models.FileField(null=True, upload_to='')),
                ('init_path', models.FileField(null=True, upload_to='')),
            ],
            options={
                'ordering': ['-updated', '-timestamp'],
            },
        ),
] | /rxncon_workbench-0.99.1.tar.gz/rxncon_workbench-0.99.1/src/boolean_model/migrations/0001_initial.py | 0.645679 | 0.174164 | 0001_initial.py | pypi |
from rxndb_utils.db_base import DBBase
class TableMol(DBBase):
    """Accessor for the ``base.mol`` table (molecule registry).

    Each row holds a molecule id (``mid``), its SMILES and InChI strings and
    an optional MOL block.

    SECURITY NOTE(review): every query below interpolates values into SQL
    with f-strings.  A SMILES/InChI containing a single quote breaks the
    statement and this is an injection vector; switch to bind parameters
    once ``DBBase.exec``/``select`` support them.
    """

    def __init__(self, host: str, port: int, database: str, user: str, password: str,
                 table_name: str = 'base.mol'):
        super().__init__(host, port, database, user, password)
        self._cols = ['mid', 'smiles', 'inchi', 'mol_block']
        self._table_name = table_name

    def set_mol_value(self, column: str, value: str, mid: int, commit: bool = True):
        """Update a single column of the row identified by ``mid``."""
        sql = f"update {self._table_name} set {column}='{value}' where mid={mid}"
        self.exec(sql, commit=commit)

    def get_mol_max_mid(self):
        """Return the first row of ``select max(mid)`` (see also get_max_mid)."""
        sql = f"select max(mid) from {self._table_name}"
        return self.select(sql)[0]

    def get_mol_with_smiles(self, smiles: str):
        """Return the first row matching ``smiles`` exactly, or None."""
        mols = list(self.get_data_iter(self._table_name, self._cols, f"WHERE smiles='{smiles}'"))
        return mols[0] if mols else None

    def get_all_mols(self):
        """Yield every row of the molecule table."""
        yield from self.get_data_iter(self._table_name, self._cols, None)

    def get_max_mid(self):
        """Return the largest mid in the table (None for an empty table)."""
        sql = f"select MAX(mid) from {self._table_name}"
        self._cur.execute(sql)
        return self._cur.fetchone()[0]

    def add_mol(self, smiles, inchi, mid: int = None, commit: bool = True):
        """Insert a molecule; when ``mid`` is None allocate ``max(mid) + 1``."""
        if mid is None:
            max_mid = self.get_max_mid()
            # BUG FIX: MAX(mid) is NULL on an empty table; start ids at 1.
            mid = (max_mid or 0) + 1
        sql = f"insert into {self._table_name} (mid, smiles, inchi) values ({mid}, '{smiles}', '{inchi}')"
        self.exec(sql, commit)

    def add_or_query_mol_with_smiles(self, smiles: str, inchi: str):
        """Return the mid for ``smiles``, inserting the molecule first if absent."""
        mol = self.get_mol_with_smiles(smiles)
        if mol is None:
            self.add_mol(smiles, inchi, mid=None, commit=True)
            mol = self.get_mol_with_smiles(smiles)
        return mol['mid']

    def get_mol_block_by_smiles(self, smiles: str):
        """Return the stored MOL block for ``smiles``, or None if unknown."""
        mol = self.get_mol_with_smiles(smiles)
        return None if mol is None else mol['mol_block']

    def commit(self):
        # BUG FIX: this method previously called self.commit(), i.e. itself,
        # which recurses forever.  Delegate to the base class instead.
        # TODO(review): confirm DBBase exposes commit(); otherwise commit
        # via the underlying connection object directly.
        super().commit()
# Ad-hoc smoke test.
# SECURITY NOTE(review): database host and credentials are hard-coded below
# and committed to the repository; move them to environment variables or a
# config file and rotate the exposed password.
if __name__ == "__main__":
    smi = "CC(NC(=O)NC1CCN(C1)C1CCCC1)c1cn(nc1C)C(C)(C)C"
    # dbb = DBBase("114.214.205.122", 1684, "rxndb", "postgres", "65zm]+7[d1Kb")
    tm = TableMol("114.214.205.122", 1684, "rxndb", "postgres", "65zm]+7[d1Kb")
    print(tm.get_mol_block_by_smiles('CCc1nnc(CNC(=O)NC2CCN(C)CC2)s1'))
    # print(tm.get_mol_max_mid())
# print(tm.get_mol_with_smiles(smi)) | /rxndb_utils-0.0.7.tar.gz/rxndb_utils-0.0.7/rxndb_utils/table_mol.py | 0.408631 | 0.175326 | table_mol.py | pypi |
from rdkit.Chem import AllChem
from rxndb_utils.db_base import DBBase
from rxndb_utils.table_mol import TableMol
from rxndb_utils.sql_utils import SQLUtils
class TableRxn(DBBase):
    """Accessor for the ``base.rxn`` table (reaction registry).

    Each row stores a reaction: participant molecule ids (reactants,
    products, catalysts, solvents) as integer arrays, a canonical
    ``rxn_code`` built from the sorted reactant/product mids, and the
    reaction SMILES.

    SECURITY NOTE(review): SQL is assembled with f-strings throughout;
    values containing quotes break statements (injection risk).  Use bind
    parameters once DBBase supports them.
    """

    def __init__(self, host: str, port: int, database: str, user: str, password: str,
                 table_name: str = 'base.rxn'):
        super().__init__(host, port, database, user, password)
        self._cols = ['rid', 'rxn_code', 'reactants_ids', 'products_ids', 'catalysts_ids', 'solvents_ids',
                      'rxn_smiles', 'product_yield', 'time_year']
        self._table_name = table_name

    def get_rxn_with_rxn_code(self, rxn_code: str):
        """Return the first reaction with the given canonical code, or None."""
        rxns = list(self.get_data_iter(self._table_name, self._cols, f"WHERE rxn_code='{rxn_code}'"))
        return rxns[0] if rxns else None

    def get_rxns_with_p_mid(self, p_mid: int):
        """Return every reaction whose product array contains molecule ``p_mid``.

        BUG FIX: the previous condition ``{p_mid} IN products_ids`` is not
        valid SQL for an array column; PostgreSQL array membership is
        expressed with ``= ANY(...)``.
        """
        return list(self.get_data_iter(self._table_name, self._cols,
                                       f"WHERE {p_mid} = ANY(products_ids)"))

    def get_rxn_with_rid(self, rid: int):
        """Return the reaction row with primary key ``rid``, or None."""
        rxns = list(self.get_data_iter(self._table_name, self._cols, f"WHERE rid={rid}"))
        return rxns[0] if rxns else None

    def get_all_rxn(self):
        """Yield every reaction row, ordered by rid."""
        yield from self.get_data_iter(self._table_name, self._cols, "ORDER by rid")

    def add_rxn(self, rid, rxn_code, reactants_ids, products_ids, catalysts_ids, solvents_ids, rxn_smiles):
        """Insert a reaction row from pre-computed ids and canonical code."""
        rs_sql = SQLUtils.num_array_to_sql(reactants_ids)
        ps_sql = SQLUtils.num_array_to_sql(products_ids)
        cats_sql = SQLUtils.num_array_to_sql(catalysts_ids)
        sols_sql = SQLUtils.num_array_to_sql(solvents_ids)
        sql = f"insert into {self._table_name} " \
              f"(rid, rxn_code, reactants_ids, products_ids, catalysts_ids, solvents_ids, rxn_smiles) values " \
              f"({rid}, '{rxn_code}', '{rs_sql}', '{ps_sql}', '{cats_sql}', '{sols_sql}', '{rxn_smiles}')"
        self.exec(sql, commit=True)

    def add_rxn_by_rdrxn(self, rid: int, rdrxn: AllChem.ChemicalReaction, table_mol: TableMol,
                         catalysts_smis: [str], solvents_smis: [str], check_dup: bool):
        """Insert a reaction built from an RDKit reaction object.

        Reactant/product molecules are registered in ``table_mol`` on the
        fly.  With ``check_dup`` True duplicates are detected by canonical
        ``rxn_code``, otherwise by ``rid``.

        :return: 1 if a row was inserted, 0 if it already existed
        """
        rxn_code, rs_mids, ps_mids = self._rxn_to_code(rdrxn, table_mol)
        existing = (self.get_rxn_with_rxn_code(rxn_code) if check_dup
                    else self.get_rxn_with_rid(rid))
        if existing is not None:
            return 0
        rxn_smiles = AllChem.ReactionToSmiles(rdrxn)
        cats_mids = self._get_mids_by_smis(catalysts_smis, table_mol)
        sols_mids = self._get_mids_by_smis(solvents_smis, table_mol)
        # Reuse add_rxn instead of duplicating the insert SQL here.
        self.add_rxn(rid, rxn_code, rs_mids, ps_mids, cats_mids, sols_mids, rxn_smiles)
        return 1

    # region ===== rxn utils =====
    @classmethod
    def _get_mids_by_smis(cls, smis: [str], table_mol: TableMol) -> [int]:
        """Map SMILES strings to mids, registering unknown molecules.

        SMILES that RDKit cannot parse are skipped silently.
        """
        mids = []
        for smi in smis:
            mol = AllChem.MolFromSmiles(smi)
            if mol is None:
                continue  # unparsable SMILES -- skip
            inchi = AllChem.MolToInchi(mol)
            mids.append(table_mol.add_or_query_mol_with_smiles(smi, inchi))
        return mids

    @classmethod
    def _rxn_to_code(cls, rdrxn: AllChem.ChemicalReaction, table_mol: TableMol) -> tuple:
        """Return ``(canonical rxn_code, reactant mids, product mids)``.

        BUG FIX: the return annotation previously claimed ``str`` although a
        3-tuple is returned.
        """
        r_mids = [cls._register_mol(m, table_mol) for m in rdrxn.GetReactants()]
        p_mids = [cls._register_mol(m, table_mol) for m in rdrxn.GetProducts()]
        rs_code = '.'.join(str(mid) for mid in sorted(r_mids))
        ps_code = '.'.join(str(mid) for mid in sorted(p_mids))
        return f"{rs_code}>>{ps_code}", r_mids, p_mids

    @staticmethod
    def _register_mol(mol, table_mol: TableMol) -> int:
        """Register one RDKit molecule in ``table_mol`` and return its mid."""
        smiles = AllChem.MolToSmiles(mol)
        inchi = AllChem.MolToInchi(mol)
        return table_mol.add_or_query_mol_with_smiles(smiles, inchi)
    # endregion
if __name__ == "__main__":
pass | /rxndb_utils-0.0.7.tar.gz/rxndb_utils-0.0.7/rxndb_utils/table_rxn.py | 0.611266 | 0.242688 | table_rxn.py | pypi |
# Reaction Optimisation Goal Iteration (ROGI) Agent
The folder contains the source, resource, and Docker setup files for the Reaction Optimisation Goal Iteration (ROGI) Agent, following the suggestions of the template provided at `TheWorldAvatar/JPS_BASE_LIB/python_derivation_agent/README.md`.
## Purpose
The Reaction Optimisation Goal Iteration (ROGI) Agent is designed to perform iterations of reaction experiment as part of goal-driven reaction optimisation exercise. It operates based on concepts defined in [`OntoGoal`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/JPS_Ontology/ontology/ontogoal) and orchestrates [`DoE Agent`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/DoEAgent), [`VapourtecSchedule Agent`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/VapourtecScheduleAgent), [`Vapourtec Agent`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/VapourtecAgent), [`HPLC Agent`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/HPLCAgent), and [`HPLCPostPro Agent`](https://github.com/cambridge-cares/TheWorldAvatar/tree/main/Agents/HPLCPostProAgent) to complete one iteration.
## Building the Docker image
Requirements:
* Example of configurations for the agent are provided in `TheWorldAvatar/Agents/RxnOptGoalIterAgent/agent.goal.iter.env.example` file. The knowledge graph endpoints used by this agent are specified using `SPARQL_QUERY_ENDPOINT` and `SPARQL_UPDATE_ENDPOINT` for triple store, and `FILESERVER_URL` for the file server. The credentials for knowledge graph endpoints, i.e. triple store and file server, should be provided in the same file using `KG_USERNAME`, `KG_PASSWORD`, `FILE_SERVER_USERNAME`, `FILE_SERVER_PASSWORD`. To avoid commit these information to git at deployment, developer may make a copy of this example file as `agent.goal.iter.env`. As `*.env` entry already exist in `.gitignore`, this new created file will be omitted. Any credentials encoded are safe. The OntoAgent:Service IRI of the agent is specified using `ONTOAGENT_SERVICE_IRI`. The periodically time interval to monitor asynchronous derivation is specified by `DERIVATION_PERIODIC_TIMESCALE`. One may also provide `DERIVATION_INSTANCE_BASE_URL` to be used by DerivationClient when creating derivations related instances. `ONTOAGENT_OPERATION_HTTP_URL` can be used to specify the URL of the agent that listens the request for updating synchronous derivations, however, given the nature of the post processing Agent, this is NOT RECOMMENDED. Developers needs to ensure that this file is correctly updated before building the Docker Image.
Once the requirements have been addressed, the image can be built via Docker Compose, one example of which is:
`(Linux)`
```sh
cd /your_absolute_path_to/TheWorldAvatar/Agents/RxnOptGoalIterAgent/
docker-compose -f "docker-compose.test.yml" up -d --build
```
Or, simply right click `docker-compose.test.yml` file and select `Compose Up` option in Visual Studio Code.
## Environment setup
For development and testing reasons, follow below instructions to get started.
### Virtual environment setup
It is highly recommended to install `pyderivationagent` packages in a [virtual environment (python>=3.8)](https://docs.python.org/3/tutorial/venv.html). The following steps can be taken to build a virtual environment:
`(Windows)`
```cmd
$ python -m venv <venv_name>
$ <venv_name>\Scripts\activate.bat
(<venv_name>) $
```
`(Linux)`
```sh
$ python3 -m venv <venv_name>
$ source <venv_name>/bin/activate
(<venv_name>) $
```
The above commands will create and activate the virtual environment `<venv_name>` in the current directory.
### Package installation
The following command can be used to install all required packages.
`(Linux)`
```bash
(<venv_name>) $ python -m pip install --upgrade pip
# Install all required packages, incl. pyderivationagent, pytest etc.
# NOTE instead of the loosely constrained versions defined in the setup.py
# here packages in the requirements.txt with the pinned version are installed
# This ensures the user getting a tested version of the agent
# However, one can skip this line and execute the next command directly
# where pip will pick up the versions automatically
(<venv_name>) $ python -m pip install -r requirements.txt
# Install the agent package itself for development purpose
# If the previous command executed, pip will skip all the "install_requires"
# as "Requirement already satisfied"
(<venv_name>) $ python -m pip install -e .[dev]
```
As `pyderivationagent` library relies on the `py4jps` package, Java 11 is required. For Windows, it is recommended to obtain OpenJDK 11 from [here](https://developers.redhat.com/products/openjdk/download) and follow the [instructions](https://access.redhat.com/documentation/en-us/openjdk/11/html-single/installing_and_using_openjdk_11_for_windows/index). For linux environment, one can install via:
`(Linux)`
```sh
$ sudo apt update
$ sudo apt install openjdk-11-jdk-headless
```
## How to use it
The ROGI Agent is intended to use the `asynchronous mode` of the Derivation Framework to detect changes in instantiated OntoGoal properties (e.g. `goal`, `best result`, and `restriction`) and automatically request the design/scheduling/execution/analysis of reaction experiment instances in the KG. As the agent adopts `pyderivationagent`, it also serves HTTP requests to handle synchronous derivations. However, invoking such HTTP requests oneself is (strongly) discouraged.
After successful agent start-up, an instructional page shall become available at the root (i.e. `/`) of the port specified in the docker compose file. The exact address depends on where the agent container is deployed (i.e. localhost, remote VM, ...), but takes a form like `http://165.232.172.16:7060/`.
### Asynchronous derivation operation
Once the Agent is deployed, it periodically (defined by `DERIVATION_PERIODIC_TIMESCALE`) checks the derivation that `isDerivedUsing` itself (parameter `ONTOAGENT_SERVICE_IRI`) and acts based on the status associated with that derivation. Over time, you may see more information about a reaction experiment is added to the knowledge graph gradually.
### Prior derivation markup
For the Agent to detect outdated information, a proper mark up of the relevant derivation inputs (i.e. *pure* inputs) is required. (Please note, that another pre-requisite for detecting derivation inputs is the registration of the agent in the KG, i.e. `REGISTER_AGENT=true` in the env file.) The following methods from the `pyderivationagent` package shall be used to mark up derivation inputs within the KG (for illustration purposes only):
```bash
# Retrieve derivation client from derivation agent
deriv_client = agent.derivation_client
# Alternatively create new derivation client using:
# deriv_client = pyderivationagent.PyDerivationClient(...)
# Using pyderivationagent>=1.3.0, the timestamp for pure inputs will be added automatically when marking up the derivations
# Hence, no need to add them separately (just for reference here)
#deriv_client.addTimeInstance(inputIRI)
# Update time stamp to all pure input instances (i.e. inputIRIs)
deriv_client.updateTimestamp(inputIRI)
# Create (flat!) list of all pure inputs (i.e. inputIRIs)
deriv_inputs = [goal_set_IRI, lab_IRI, chem_rxn_IRI, rxn_exp_IRI1, rxn_exp_IRI2, ...]
# Create derivation markup in KG
deriv_iri = deriv_client.createAsyncDerivationForNewInfo(agent.agentIRI, deriv_inputs)
```
## Agent integration test
As this derivation agent modifies the knowledge graph automatically, it is recommended to run integration tests before deploying it for production. A few integration tests are provided in the `tests` repository.
The dockerised integration test can be invoked via below commands:
`(Linux)`
```sh
cd /your_absolute_path_to/TheWorldAvatar/Agents/RxnOptGoalIterAgent
pytest -s --docker-compose=./docker-compose.test.yml --reruns 5 --reruns-delay 5
```
If you would like to contribute to new features for the ROGI Agent, you may use the same integration test to make sure the new features added do NOT break the original function.
## Upload docker image to GitHub
Developers who add new features to the `RxnOptGoalIter Agent` handle the distribution of the docker image on GitHub. If you want to add new features that suit your project and release the docker image independently, i.e. become a developer/maintainer, please contact the repository's administrator to indicate your interest.
The release procedure is currently semi-automated and requires a few items:
- Your GitHub account and password ([personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token))
- The version number x.x.x for the release
- Clone of `TheWorldAvatar` repository on your local machine
- Docker-desktop is installed and running on your local machine
### Stable version release
The release process can be started by using the commands below. (REMEMBER TO CHANGE THE CORRECT VALUES FOR `<absolute_path_to>` IN THE COMMANDS BELOW!) **NOTE: the release process is only tested in WSL2 environment.**
`(Linux)`
```sh
$ cd /<absolute_path_to>/TheWorldAvatar/Agents/RxnOptGoalIterAgent
$ ./upload_docker_image_to_github.sh -v x.x.x
```
Please follow the instructions presented in the console once the process has begun. If everything goes well, the change performed automatically during the release process should be committed, i.e., the updated image tag in `Agents/RxnOptGoalIterAgent/docker-compose.github.yml`
```
image: ghcr.io/cambridge-cares/rxn_opt_goal_iter_agent:x.x.x
```
**NOTE: the visibility of the uploaded docker image is set to private by default; the developer who uploaded the image needs to change the package visibility to public manually after the upload.**
### Snapshot version release
If you would like to release the package as a SNAPSHOT version, the commands below can be used instead:
`(Linux)`
```sh
$ cd /<absolute_path_to>/TheWorldAvatar/Agents/RxnOptGoalIterAgent
$ ./upload_docker_image_to_github.sh -v x.x.x-SNAPSHOT
```
Please follow the instructions presented in the console once the process has begun. If everything goes well, commit the change in version number following the same procedure as in the stable version release.
# Authors #
Jiaru Bai (jb2197@cam.ac.uk)
| /rxnoptgoaliteragent-0.0.2.tar.gz/rxnoptgoaliteragent-0.0.2/README.md | 0.487063 | 0.862641 | README.md | pypi |
from difflib import SequenceMatcher
from typing import Union
from warnings import warn

from rxnpy import Quantity, Unit
from rxnpy.chemical.ingredient import Ingredient
from rxnpy.reaction.tools.ingredients_calc import QuantityCalculator, RelativeCalculator
class IngredientsError(Exception):
    """Raised for invalid operations on an :class:`Ingredients` collection."""
    pass
class Ingredients:
    """An ordered collection of :class:`Ingredient` objects for a reaction.

    On every mutation (add/remove/scale) the absolute quantities
    (mass/volume/mole) are derived where possible and the relative
    quantities (equivalence, molarity, mass fraction) are recalculated.

    Public operations: ``add``, ``remove``, ``scale``, ``scale_one``.
    """
    _error = IngredientsError
    _label_length = 12  # column width used when rendering the table view

    def __init__(self, *args):
        """
        :param args: optional single iterable of argument tuples; each tuple
            is forwarded to :meth:`add` (single-key dicts inside a tuple are
            treated as keyword arguments).
        """
        # BUG FIX: this attribute was previously initialised as `_ingrs`
        # while every other method reads/writes `_ingr`.
        self._ingr = []
        # BUG FIX: the calculators were used throughout but never created.
        self._q_calc = QuantityCalculator()
        self._r_calc = RelativeCalculator()
        self._init_with_args(*args)

    def __repr__(self):
        return self._table()

    def __str__(self):
        return self._table()

    def __call__(self):
        return self._ingr

    def __len__(self):
        return len(self._ingr)

    def __getitem__(self, item):
        index = self._get_mat_index(item)
        return self._ingr[index]

    def _init_with_args(self, *args):
        """Forward each argument tuple to :meth:`add`, skipping bad entries."""
        if not args:
            return
        for arg in args[0]:
            # Split positional arguments from single-key keyword dicts.
            # BUG FIX: the original popped items from `arg` while iterating
            # it with enumerate, which skips elements.
            positional = []
            kwarg = {}
            for a in arg:
                if isinstance(a, dict) and len(a) == 1:
                    kwarg.update(a)
                else:
                    positional.append(a)
            try:
                self.add(*positional, **kwarg)
            except TypeError as e:
                warn(f"Material skipped: '{arg}'. {e}")

    def add(self, ingr: Ingredient, equivalence: list = None):
        """
        Adds ``ingr`` to the collection and performs calculations for the
        other quantities.

        :param ingr: ingredient to add
        :param equivalence: ``[equivalence, material]`` where ``material``
            identifies the ingredient the equivalence is relative to
        """
        if equivalence is not None:
            index = self._get_mat_index(equivalence[1])
            ingr["mole"] = self._ingr[index]["mole"] * equivalence[0]
        # BUG FIX: the original referenced an undefined name `mat` here.
        self._ingr.append(self._q_calc.calc_mass_volume_mole(ingr))
        self._ingr = self._r_calc.calc_equiv_molarity_mass_fraction(self._ingr)

    def remove(self, mat: Union[int, str]):
        """
        Removes a material from the collection.

        :param mat: index or (approximate) name of the material to remove
        """
        if isinstance(mat, int):
            # BUG FIX: the original accepted mat == len(self._ingr), which
            # raises IndexError on delete.
            if not -len(self._ingr) <= mat < len(self._ingr):
                raise self._error(f"'{mat}' invalid input.")
            index = mat
        elif isinstance(mat, str):
            index = self._get_mat_index(mat)
        else:
            raise self._error(f"'{mat}' invalid input.")
        del self._ingr[index]
        self._ingr = self._r_calc.calc_equiv_molarity_mass_fraction(self._ingr)

    def scale(self, factor: Union[int, float]):
        """
        Scales all ingredients' mass, volume, moles by a factor.

        Relative quantities are unchanged by a uniform scaling, so no
        recalculation is performed here.

        :param factor: scale factor
        """
        for i, ingr in enumerate(self._ingr):
            self._ingr[i] = self._q_calc.scale(ingr, factor)

    def scale_one(self, mat: Union[int, str], factor: Union[int, float]):
        """
        Scales one ingredient's mass, volume, moles by a factor.

        :param mat: index or name of the material to be scaled
        :param factor: scale factor
        """
        index = self._get_mat_index(mat)
        self._ingr[index] = self._q_calc.scale(self._ingr[index], factor)
        self._ingr = self._r_calc.calc_equiv_molarity_mass_fraction(self._ingr)

    def _get_mat_index(self, target: Union[str, int]):
        """Resolve ``target`` (index or approximate name) to a list index."""
        if isinstance(target, int):
            # BUG FIX: original used `<=` (off-by-one) and fell through to
            # fuzzy name matching with an int target on out-of-range input.
            if -len(self._ingr) <= target < len(self._ingr):
                return target
            raise self._error(f"'{target}' invalid input.")
        possible_names = [i["name"] for i in self._ingr]
        if not possible_names:
            raise self._error(f"No match to '{target}' found in ingredients.")
        # Fuzzy-match the requested name against the known ingredient names.
        scores = {v: SequenceMatcher(None, target, v).ratio() * 100 for v in possible_names}
        best_match = max(scores, key=scores.get)
        best_score = scores[best_match]
        if best_score < 75:
            mes = f"No match to '{target}' found in ingredients. Try a different writing of the chemical name."
            raise self._error(mes)
        return possible_names.index(best_match)

    def _table(self) -> str:
        """Render the collection as a fixed-width text table."""
        if len(self._ingr) == 0:
            return "No ingredients."

        headers, units = self._table_headers()
        headers.insert(0, "#")
        units.insert(0, "")
        row_format = "{:<3}" + ("{:<" + str(self._label_length) + "}") * (len(headers) - 1)
        text_out = row_format.format(*[self._length_limit_header(h) for h in headers])
        text_out += "\n" + row_format.format(*[f"({u})" if u != "" else "" for u in units])
        text_out += "\n" + "-" * self._label_length * len(headers)

        for i, ingr in enumerate(self._ingr):
            entries = []
            for k, u in zip(headers, units):
                if k in ingr.keys():
                    v = ingr[k]
                    if u != "":
                        # Convert to the display unit and show the magnitude.
                        value = self.sig_figs(v.to(u).m)
                    elif isinstance(v, Quantity):
                        value = self.sig_figs(v.to_base_units().m)
                    else:
                        value = str(v)
                    entries.append(self._length_limit_header(value))
                elif k == "#":
                    entries.append(f"{i}")
                else:
                    entries.append("---")
            text_out += "\n" + row_format.format(*entries)
        text_out += "\n" * 2
        return text_out

    def _table_headers(self):
        """Order the table columns: name first, then absolute quantities,
        then relative quantities, then everything else.

        NOTE(review): relies on ``keys_qty``/``keys_rel``/``keys_calc``
        mappings on the quantity calculator -- confirm the installed
        ``QuantityCalculator`` provides these attributes.
        """
        headers = []
        back_headers = []
        units = []
        back_units = []
        divide = 0
        divide2 = 0
        for ingr in self._ingr:
            for k in ingr.keys():
                if k not in headers + back_headers:
                    if k == "name":
                        headers.insert(0, k)
                        units.insert(0, "")
                        divide += 1
                    elif k in self._q_calc.keys_qty.keys():
                        headers.insert(divide, k)
                        units.insert(divide, self._q_calc.keys_qty[k]["unit"])
                        divide += 1
                    elif k in self._q_calc.keys_rel.keys():
                        headers.insert(divide + divide2, k)
                        units.insert(divide + divide2, self._q_calc.keys_rel[k]["unit"])
                        divide2 += 1
                    elif k in self._q_calc.keys_calc.keys():
                        headers.insert(divide + divide2 + 1, k)
                        units.insert(divide + divide2 + 1, self._q_calc.keys_calc[k]["unit"])
                    else:
                        back_headers.append(k)
                        back_units.append("")
        return headers + back_headers, units + back_units

    def _length_limit_header(self, entry) -> str:
        """Truncate ``entry`` so it fits in a table column."""
        length_limit = self._label_length
        if len(entry) > length_limit:
            return entry[0:length_limit - 2]
        return entry

    @staticmethod
    def sig_figs(number: float, significant_figures: int = 3) -> str:
        """
        Given a number return a string rounded to the desired significant
        digits; falls back to ``str(number)`` for non-numeric input.

        :param number:
        :param significant_figures:
        :return:
        """
        try:
            return '{:g}'.format(float('{:.{p}g}'.format(number, p=significant_figures)))
        except (ValueError, TypeError):
            return str(number)
# NOTE(review): this demo is broken as written -- `IngredientCalculator` is
# not defined anywhere in this module (the class above is named
# `Ingredients`), `Unit` is used without being imported, and
# `Ingredients.add` expects an `Ingredient`, not a dict (note also
# "molar_conc" vs the "molar_concentration" attribute used elsewhere).
# Left unchanged pending a decision on the intended API.
if __name__ == "__main__":
    calc = IngredientCalculator()
    calc.add({
        "name": "secbuLi",
        "volume": 0.0172 * Unit("ml"),
        "molar_mass": 64.06 * Unit("g/mol"),
        "density": 0.768 * Unit("g/ml"),
        "molar_conc": 1.3 * Unit("M")
    })
    calc.add({
        "name": "styrene",
        "mass": 0.455 * Unit("g"),
        "molar_mass": 104.15 * Unit("g/mol"),
        "density": 0.909 * Unit("g/ml")
    })
    calc.add({
        "name": "toluene",
        "volume": 10 * Unit("ml"),
        "molar_mass": 92.141 * Unit("g/mol"),
        "density": 0.87 * Unit("g/ml")
    })
    calc.add({
        "name": "THF",
        "mole": 45.545 * Unit("mmol"),
        "molar_mass": 72.107 * Unit("g/mol"),
        "density": .8876 * Unit("g/ml"),
    })
    print(calc)
    calc.scale(2)
    print(calc)
    calc.remove("toluene")
    print(calc)
    calc.scale_one("styrene", 0.5)
print(calc) | /reaction/ingredients.py | 0.843605 | 0.315051 | ingredients.py | pypi |
from typing import Union
from warnings import warn
from src.rxnpy import Quantity, Unit
from src.rxnpy.chemical.ingredient import Ingredient, IngrRole
# Fallback, water-like density (1 g/ml) used when a material lacks an explicit
# density during molarity / mass-fraction calculations.
DEFAULT_DENSITY = 1 * Unit("g/ml")
class IngrCalculatorError(Exception):
    """Raised when ingredient quantity calculations fail or disagree."""
    pass
class QuantityCalculator:
    """Derives an ingredient's absolute quantities (mass, volume, mole) from
    whatever combination of quantities and material constants (density,
    molar mass, molar concentration) is available, cross-checking any values
    the user supplied directly."""
    _error = IngrCalculatorError

    def calc_mass_volume_mole(self, mat: Ingredient) -> Ingredient:
        """Fill in the missing mass/volume/mole of ``mat`` in place.

        Returns ``mat``.  BUG FIX: all non-pressure paths previously fell
        off the end and implicitly returned None, breaking callers that use
        the return value.
        """
        if mat.pressure is not None:
            return mat  # gas amounts are specified by pressure; nothing to derive
        if mat.mass is not None:
            if mat.density is not None:
                volume = self._mass_to_volume(mat.mass, mat.density)
                self._approx_same(mat, volume, "volume")
                mat.volume = volume
            if mat.molar_mass is not None:
                mole = self._mass_to_mole(mat.mass, mat.molar_mass)
                self._approx_same(mat, mole, "mole")
                mat.mole = mole
        elif mat.mole is not None:
            if mat.molar_concentration is not None:
                volume = self._mole_to_volume(mat.mole, mat.molar_concentration)
                self._approx_same(mat, volume, "volume")
                mat.volume = volume
            if mat.molar_mass is not None:
                mass = self._mole_to_mass(mat.mole, mat.molar_mass)
                self._approx_same(mat, mass, "mass")
                mat.mass = mass
                if mat.density is not None:
                    volume = self._mass_to_volume(mat.mass, mat.density)
                    self._approx_same(mat, volume, "volume")
                    mat.volume = volume
        elif mat.volume is not None:
            if mat.molar_concentration is not None:
                mole = self._vol_to_mole_conc(mat.volume, mat.molar_concentration)
                self._approx_same(mat, mole, "mole")
                mat.mole = mole
            elif mat.density is not None:
                mass = self._vol_to_mass(mat.volume, mat.density)
                self._approx_same(mat, mass, "mass")
                mat.mass = mass
                if mat.molar_mass is not None:
                    mole = self._mass_to_mole(mat.mass, mat.molar_mass)
                    self._approx_same(mat, mole, "mole")
                    mat.mole = mole
        return mat

    @staticmethod
    def scale(mat: Ingredient, factor: Union[int, float]) -> Ingredient:
        """Scale pressure/mass/mole/volume of ``mat`` in place by ``factor``.

        Returns ``mat``.  BUG FIX: previously returned None although callers
        assign the result back into their ingredient list.
        """
        if mat.pressure is not None:
            mat.pressure = mat.pressure * factor
        for k in ("mass", "mole", "volume"):
            if getattr(mat, k) is not None:
                setattr(mat, k, getattr(mat, k) * factor)
        return mat

    def _approx_same(self, mat: Ingredient, qty2: Quantity, label: str, error: float = 0.01):
        """Raise unless a calculated quantity agrees with a user-supplied one.

        ``qty1`` (read from ``mat``) is the user-provided value and ``qty2``
        the freshly calculated one; a relative deviation above ``error`` is
        treated as inconsistent input.
        """
        if (qty1 := getattr(mat, label)) is None:
            return
        if abs((qty1 - qty2) / qty1) < error:
            return
        # BUG FIX: the original message swapped the calculated and
        # user-provided values.
        mes = f"Calculated {label} does not match provided for '{mat.name}'. Calculated: {qty2}," \
              f" Given by user: {qty1}."
        raise self._error(mes)

    @staticmethod
    def _vol_to_mass(volume: Quantity, density: Quantity) -> Quantity:
        """ Returns mass"""
        return volume * density

    @staticmethod
    def _mass_to_volume(mass: Quantity, density: Quantity) -> Quantity:
        """ Returns volume"""
        return mass / density

    @staticmethod
    def _mass_to_mole(mass: Quantity, molar_mass: Quantity) -> Quantity:
        """ Returns mole"""
        return mass / molar_mass

    @staticmethod
    def _mole_to_mass(mole: Quantity, molar_mass: Quantity) -> Quantity:
        """ Returns mass"""
        return mole * molar_mass

    @staticmethod
    def _vol_to_mole_conc(volume: Quantity, molarity: Quantity) -> Quantity:
        """ Returns moles"""
        return volume * molarity

    @staticmethod
    def _mole_to_volume(mole: Quantity, molarity: Quantity) -> Quantity:
        """ Returns volume"""
        return mole / molarity

    @staticmethod
    def _mole_to_equiv(mole: Quantity, base: Quantity) -> Quantity:
        """ Returns equivalence"""
        return mole / base
class RelativeCalculator:
    """Computes quantities of ingredients relative to each other:
    molar equivalence, molarity and mass fraction."""
    _error = IngrCalculatorError

    def calc_equiv_molarity_mass_fraction(self, mats: list, defaults=True) -> list:
        """Calculate equivalence, molarity and mass fraction where possible.

        Returns the (mutated) ``mats`` list.
        """
        mats = self._calc_equivalence(mats)
        mats = self._calc_molarity(mats, defaults)
        mats = self._calc_mass_fraction(mats, defaults)
        return mats

    def _calc_equivalence(self, mats: list) -> list:
        """Set molar equivalence (relative to a base material) on every
        material that has moles.

        BUG FIX: now returns ``mats`` (previously returned None, which the
        caller then propagated) and skips materials without moles instead of
        dividing None by the base.
        """
        if not self._check_calc_possible(mats, "mole"):
            return mats
        base = self._get_mole_basis(mats)
        for mat in mats:
            if mat.mole is not None:
                setattr(mat, "equivalence", mat.mole / base)
        return mats

    @staticmethod
    def _check_calc_possible(mats: list, key: str, required_num: int = 2) -> bool:
        """Need at least 2 materials with ``key`` to calculate relative
        quantities; gases (given by pressure) reduce the requirement."""
        gas_mat = len([True for mat in mats if mat.pressure is not None])
        adjusted_num = max([2, required_num - gas_mat])
        return len([True for mat in mats if getattr(mat, key) is not None]) >= adjusted_num

    @staticmethod
    def _get_mole_basis(mats: list) -> Quantity:
        """
        Returns the moles of the base material.

        Selection by keyword ordering, or the first material with moles when
        no keywords are present.
        NOTE(review): assumes IngrRole keywords are orderable with ``<`` --
        confirm the enum defines ordering.
        """
        base = None
        base_keyword = None
        for mat in mats:
            if mat.mole is not None:
                if base is None:
                    base = mat.mole
                if mat.keyword is not None:
                    if base_keyword is None or mat.keyword < base_keyword:
                        base_keyword = mat.keyword
                        base = mat.mole
        return base

    def _calc_molarity(self, mats: list, default: bool) -> list:
        """Set molarity (mole / total solution volume) on each contributing
        material.

        BUG FIX: now always returns ``mats`` (previously returned None on
        the main path) and skips materials without moles.
        """
        if not self._check_calc_possible(mats, "volume", len(mats)) and not default:
            return mats
        total_volume, contributing = self._get_total_volume(mats, default)
        if not contributing:
            return mats
        for mat in contributing:
            if mat.mole is not None:
                setattr(mat, "molarity", mat.mole / total_volume)
        return mats

    @staticmethod
    def _get_total_volume(mats: list, default: bool):
        """Return ``(total volume, contributing materials)``.

        Workup/quench ingredients are excluded.  With ``default`` True, a
        material lacking a volume falls back to ``mass / DEFAULT_DENSITY``.
        """
        total_volume = 0
        contributing = []
        for mat in mats:
            if mat.keyword in [IngrRole.WORKUP, IngrRole.QUENCH]:
                continue
            if mat.volume is not None:
                total_volume += mat.volume
                contributing.append(mat)
            elif default and mat.mass is not None:
                total_volume += mat.mass / DEFAULT_DENSITY
                contributing.append(mat)
                warn(f"Default density used for '{mat.name}' when calculating molarity.")
            else:
                # BUG FIX: previously appended mat.name (a str) into the
                # materials list, corrupting downstream attribute access.
                warn(f"'{mat.name}' not included in molarity calculation.")
        return total_volume, contributing

    def _calc_mass_fraction(self, mats: list, default: bool) -> list:
        """Set mass fraction (mass / total mass) on each contributing
        material.

        BUG FIX: previously stored the result under the attribute name
        "molarity" (copy-paste error) and returned None on the main path.
        TODO(review): confirm Ingredient exposes a ``mass_fraction``
        attribute.
        """
        if not self._check_calc_possible(mats, "mass", len(mats)) and not default:
            return mats
        total_mass, contributing = self._get_total_mass(mats, default)
        if not contributing:
            return mats
        for mat in contributing:
            if mat.mass is not None:
                setattr(mat, "mass_fraction", mat.mass / total_mass)
        return mats

    @staticmethod
    def _get_total_mass(mats: list, default: bool):
        """Return ``(total mass, contributing materials)``.

        Workup/quench ingredients are excluded.  With ``default`` True, a
        material lacking a mass falls back to ``volume * DEFAULT_DENSITY``.
        """
        total_mass = 0
        contributing = []
        for mat in mats:
            if mat.keyword in [IngrRole.WORKUP, IngrRole.QUENCH]:
                continue
            if mat.mass is not None:
                total_mass += mat.mass
                contributing.append(mat)
            elif default and mat.volume is not None:
                total_mass += mat.volume * DEFAULT_DENSITY
                contributing.append(mat)
                warn(f"Default density used for '{mat.name}' when calculating mass fraction.")
            else:
                # BUG FIX: previously appended mat.name (a str) into the
                # materials list, corrupting downstream attribute access.
                warn(f"'{mat.name}' not included in mass fraction calculation.")
        return total_mass, contributing
import boto3
import argparse
import time
# Name of the environment/config key under which a hosted-zone id may be supplied.
SLA_ZONE_ID = "SLA_ZONE_ID"
# Fallback Route 53 hosted-zone id used when no target zone is given.
PERSONAL_ONE = "Z1XY6GIMZ8S0NH"
# TODO: make the -d flag actually work from the CLI; clean up and add get_obj, etc.
class DNSEntry:
    """
    Wraps AWS's Route 53 DNS service to either insert a record into, or pull
    records from, a given hosted zone.
    """

    # Default hosted zone used when no explicit zone id is supplied.
    sla_zone_id = PERSONAL_ONE

    def __init__(self, name_entry=None, value_entry=None, target_domain_id=None):
        """
        :param name_entry: subdomain (name column) for later set_record calls
        :param value_entry: value column for later set_record calls
        :param target_domain_id: hosted-zone id; defaults to ``sla_zone_id``
        """
        self.subdomain = name_entry  # type: str
        self.value_entry = value_entry  # type: str
        self.target_zone_id = self.sla_zone_id
        if target_domain_id is not None:
            self.target_zone_id = target_domain_id  # type: str
        self.client = boto3.client("route53")
        # Resolve the zone's root domain name once (e.g. "example.com.").
        self.root_domain_name = self.client.get_hosted_zone(
            Id=self.target_zone_id)["HostedZone"]["Name"]

    def driver(self):
        """Return the instantiated Route 53 client."""
        return self.client

    def verify(self, entry_name):
        """
        Check whether an entry was successfully added.

        :return: a (type, name, value) tuple for the first matching record,
            or None when no record matches.
        """
        for entry in self.pull_records():
            if entry_name in entry["Name"]:
                ret = entry["Type"], entry["Name"], entry["ResourceRecords"][0]["Value"]
                print(ret)
                return ret

    def pull_records(self):
        """Return the list of resource record sets for the target zone."""
        return self.client.list_resource_record_sets(
            HostedZoneId=self.target_zone_id)["ResourceRecordSets"]

    def set_record(self, record_type="A", name=None, value=None, ttl=60):
        """
        Upsert a record into the AWS Route 53 zone. Be very, very careful!

        :param record_type: A, CNAME, TXT, NS, MX
        :param name: name column, i.e. the subdomain (defaults to ``self.subdomain``)
        :param value: value column, i.e. IP address, txt entry, etc.
        :param ttl: record time-to-live in seconds
        :return: the raw change_resource_record_sets response
        """
        if name is None:
            name = self.subdomain
        if value is None:
            value = self.value_entry
        print("Inserting: {} {} {} [domain: {}]".format(record_type, name, value, self.root_domain_name))
        # Five-second visible countdown so the operator can Ctrl-C out.
        for i in range(0, 5):
            print("{}\r".format(5 - i), end="", flush=True)
            time.sleep(1)
        response = self.client.change_resource_record_sets(
            HostedZoneId=self.target_zone_id,
            ChangeBatch={
                "Changes": [
                    {
                        "Action": "UPSERT",
                        "ResourceRecordSet": {
                            "Name": name + "." + self.root_domain_name,
                            "Type": record_type,
                            "TTL": ttl,
                            "ResourceRecords": [{"Value": value}]
                        }
                    }]
            }
        )
        if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
            print("-OK- : [{}] {}.{}".format(record_type, name, self.root_domain_name))
        else:
            # BUG FIX: HTTPStatusCode is an int; the old str + int
            # concatenation raised TypeError on the failure path.
            print("FAIL -- {}".format(response["ResponseMetadata"]["HTTPStatusCode"]))
        return response

    def records(self):
        """Return a list of (type, name, value) tuples, one per record."""
        rec = self.pull_records()
        return [self.verify(record["Name"]) for record in rec]
class R53Args(DNSEntry):
    """CLI front-end: parses zonefile-like arguments and upserts the record."""

    def __init__(self):
        """
        version 2 - removed the pull function. Accepts arguments like one
        would find in a zonefile.
        """
        self.parser = argparse.ArgumentParser()
        self.cliargs = self._do_parse_args()
        # An empty/absent --domain falls back to the default hosted zone.
        super().__init__(self.cliargs.name_column,
                         self.cliargs.value_column,
                         self.cliargs.domain or None)
        self.set_record(self.cliargs.record_type)

    def _do_parse_args(self):
        """
        Read the command line, e.g.: ./route53.py A new_deployment 10.9.5.180
        which upserts: A record, name new_deployment.sla-ptt.com,
        value 10.9.5.180.
        """
        for arg_name, help_text in (
            ("record_type", "Type of record to insert."),
            ("name_column", "Name column of the DNS entry."),
            ("value_column", "Value column of the DNS entry."),
        ):
            self.parser.add_argument(arg_name, help=help_text)
        self.parser.add_argument(
            "-d", "--domain",
            help="Root domain - if other than sla-ptt.com"
        )
        return self.parser.parse_args()
if __name__ == "__main__":
    # Constructing R53Args parses argv and immediately performs the upsert.
    R53Args()
import rx
import rxsci_river as rsr
from river import base
def prequential(model, pretrain_size=200):
    """Prequential predict/train evaluation of a model

    Each source item is first used for inference and then for training
    (test-then-train), except during the initial pre-training phase where
    items are only learned.

    Source:
        An observable emitting tuples of (x, y) items.

    Args:
        model: A river model object
        pretrain_size: [Optional] number of initial items used to train the
            model before doing predictions.

    Returns:
        An Observable emitting prediction items for each input item. The
        firsts pretrain_size items do not emit predictions.
    """
    learn_dict = False
    if isinstance(model, base.Classifier):
        predict = model.predict_one
    elif isinstance(model, base.AnomalyDetector):
        predict = model.score_one
        learn_dict = True  # anomaly detectors take a feature dict
    else:
        raise NotImplementedError("prequential not implemented for model {}, contributions are welcome!".format(type(model)))

    def _learn_one(i):
        # Classifiers learn (x, y); anomaly detectors learn a feature dict.
        if learn_dict is False:
            model.learn_one(i.data, i.label)
        else:
            model.learn_one({'x': i.data})

    def _predict(i):
        if learn_dict is False:
            return predict(i.data)
        return predict({'x': i.data})

    def _prequential(source):
        def on_subscribe(observer, scheduler):
            pretrain = pretrain_size

            def on_next(i):
                nonlocal pretrain
                if pretrain > 0:
                    # Pre-training phase: learn only, emit nothing.
                    _learn_one(i)
                    pretrain -= 1
                else:
                    # Test-then-train: predict before learning the item.
                    # (renamed from "predict" to avoid shadowing the bound
                    # prediction function in the enclosing scope)
                    prediction = _predict(i)
                    _learn_one(i)
                    observer.on_next(rsr.Prediction(utterance=i, prediction=prediction))

            return source.subscribe(
                on_next=on_next,
                on_error=observer.on_error,
                on_completed=observer.on_completed,
                scheduler=scheduler,
            )
        return rx.create(on_subscribe)
    return _prequential
import logging
from datetime import datetime, date, time
from typing import Union, Optional
import backoff
import requests
from deprecated import deprecated
from ryanair.types import Flight, Trip
# Module-level logger with its own console handler and timestamped format.
logger = logging.getLogger("ryanair")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s:%(message)s', datefmt="%Y-%m-%d %I:%M:%S")
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
class RyanairException(Exception):
    """Base class for errors raised by the Ryanair API client."""

    def __init__(self, message):
        # Prefix every message so the source of the error is obvious.
        super().__init__("Ryanair API: {}".format(message))
class AvailabilityException(RyanairException):
    """Raised when the availability API refuses to return a result."""

    def __init__(self):
        message = "Availability API declined to provide a result"
        super().__init__(message)
# noinspection PyBroadException
class Ryanair:
    """Client for Ryanair's unofficial fare-finder and availability APIs."""

    BASE_SERVICES_API_URL = "https://services-api.ryanair.com/farfnd/v4/"
    BASE_AVAILABILITY_API_URL = "https://www.ryanair.com/api/booking/v4/"
    BASE_SITE_FOR_SESSION_URL = "https://www.ryanair.com/ie/en"

    def __init__(self, currency: Optional[str] = None):
        """
        :param currency: preferred currency code; honoured by the fare-finder
            endpoints only (the availability API ignores it).
        """
        self.currency = currency
        self._num_queries = 0
        self.session = requests.Session()
        self._update_session_cookie()

    def get_cheapest_flights(self, airport: str, date_from: Union[datetime, date, str],
                             date_to: Union[datetime, date, str], destination_country: Optional[str] = None,
                             custom_params: Optional[dict] = None,
                             departure_time_from: Union[str, time] = "00:00",
                             departure_time_to: Union[str, time] = "23:59",
                             max_price: Optional[int] = None,
                             destination_airport: Optional[str] = None
                             ):
        """
        Return the cheapest one-way fares departing ``airport`` within the
        given date/time window.  Returns [] on any API/parsing failure.
        """
        query_url = ''.join((Ryanair.BASE_SERVICES_API_URL,
                             "oneWayFares"))

        params = {
            "departureAirportIataCode": airport,
            "outboundDepartureDateFrom": self._format_date_for_api(date_from),
            "outboundDepartureDateTo": self._format_date_for_api(date_to),
            "outboundDepartureTimeFrom": self._format_time_for_api(departure_time_from),
            "outboundDepartureTimeTo": self._format_time_for_api(departure_time_to)
        }
        if self.currency:
            params['currency'] = self.currency
        if destination_country:
            params['arrivalCountryCode'] = destination_country
        if max_price:
            params['priceValueTo'] = max_price
        if destination_airport:
            params['arrivalAirportIataCode'] = destination_airport
        if custom_params:
            params.update(custom_params)

        try:
            response = self._retryable_query(query_url, params)["fares"]
        except Exception:
            logger.exception(f"Failed to parse response when querying {query_url}")
            return []

        if response:
            return [self._parse_cheapest_flight(flight['outbound']) for flight in response]
        return []

    def get_cheapest_return_flights(self, source_airport: str,
                                    date_from: Union[datetime, date, str],
                                    date_to: Union[datetime, date, str],
                                    return_date_from: Union[datetime, date, str],
                                    return_date_to: Union[datetime, date, str],
                                    destination_country: Optional[str] = None,
                                    custom_params: Optional[dict] = None,
                                    outbound_departure_time_from: Union[str, time] = "00:00",
                                    outbound_departure_time_to: Union[str, time] = "23:59",
                                    inbound_departure_time_from: Union[str, time] = "00:00",
                                    inbound_departure_time_to: Union[str, time] = "23:59",
                                    max_price: Optional[int] = None,
                                    destination_airport: Optional[str] = None
                                    ):
        """
        Return the cheapest round trips from ``source_airport`` within the
        given outbound/inbound windows.  Returns [] on any failure.
        """
        query_url = ''.join((Ryanair.BASE_SERVICES_API_URL,
                             "roundTripFares"))

        params = {
            "departureAirportIataCode": source_airport,
            "outboundDepartureDateFrom": self._format_date_for_api(date_from),
            "outboundDepartureDateTo": self._format_date_for_api(date_to),
            "inboundDepartureDateFrom": self._format_date_for_api(return_date_from),
            "inboundDepartureDateTo": self._format_date_for_api(return_date_to),
            "outboundDepartureTimeFrom": self._format_time_for_api(outbound_departure_time_from),
            "outboundDepartureTimeTo": self._format_time_for_api(outbound_departure_time_to),
            "inboundDepartureTimeFrom": self._format_time_for_api(inbound_departure_time_from),
            "inboundDepartureTimeTo": self._format_time_for_api(inbound_departure_time_to)
        }
        if self.currency:
            params['currency'] = self.currency
        if destination_country:
            params['arrivalCountryCode'] = destination_country
        if max_price:
            params['priceValueTo'] = max_price
        if destination_airport:
            params['arrivalAirportIataCode'] = destination_airport
        if custom_params:
            params.update(custom_params)

        try:
            response = self._retryable_query(query_url, params)["fares"]
        except Exception:
            # (dropped the unused "as e" binding)
            logger.exception(f"Failed to parse response when querying {query_url}")
            return []

        if response:
            return [self._parse_cheapest_return_flights_as_trip(trip["outbound"], trip["inbound"])
                    for trip in response]
        return []

    def get_all_flights(self, origin_airport: str, date_out: Union[datetime, date, str], destination: str,
                        locale: str = "en-ie", origin_is_mac: bool = False, destination_is_mac: bool = False,
                        custom_params: Optional[dict] = None):
        """
        Return every flight between two airports on a given date, using the
        availability API.  Returns [] on failure or when no flights exist.
        """
        query_url = ''.join((Ryanair.BASE_AVAILABILITY_API_URL, f"{locale}/availability"))

        params = {
            # Assume single adult ticket only
            "ADT": 1,
            "TEEN": 0,
            "CHD": 0,
            "INF": 0,
            "DateOut": self._format_date_for_api(date_out),
            "DateIn": "",
            "Origin": origin_airport,
            "Destination": destination,
            "OriginIsMac": origin_is_mac,
            "DestinationIsMac": destination_is_mac,
            "IncludeConnectingFlights": False,
            "ToUs": "AGREED",
            # Presently unused, but these and others can be set by custom_params
            # "Disc": 0,
            # "promoCode": "",
            # "FlexDaysBeforeOut": 2,
            # "FlexDaysOut": 2,
            # "FlexDaysBeforeIn": 2,
            # "FlexDaysIn": 2,
            # "RoundTrip": false,
        }
        if custom_params:
            params.update(custom_params)

        try:
            # Try once to get a new session cookie, just in case the old one
            # has expired. If that fails too, the exception propagates to the
            # handler below.
            response = self._retryable_query(query_url, params)
            if self.check_if_availability_response_is_declined(response):
                logger.warning("Availability API declined to respond, attempting again with a new session cookie")
                self._update_session_cookie()
                response = self._retryable_query(query_url, params)
                if self.check_if_availability_response_is_declined(response):
                    raise AvailabilityException

            currency = response["currency"]
            trip = response["trips"][0]
            flights = trip['dates'][0]['flights']
            if flights:
                if self.currency and self.currency != currency:
                    logger.warning(f"Configured to fetch fares in {self.currency} but availability API doesn't support"
                                   f" specifying the currency, so it responded with fares in {currency}")
                return [self._parse_all_flights_availability_result_as_flight(flight,
                                                                              trip['originName'],
                                                                              trip['destinationName'],
                                                                              currency)
                        for flight in flights]
            # BUG FIX: previously fell through and returned None when the
            # response contained no flights; callers expect a list.
            return []
        except Exception:
            # A single handler suffices: RyanairException subclasses
            # Exception and the two original handlers were identical.
            logger.exception(f"Failed to parse response when querying {query_url} with parameters {params}")
            return []

    @staticmethod
    def check_if_availability_response_is_declined(response: dict) -> bool:
        """True when the availability API explicitly declined to answer."""
        return 'message' in response and response['message'] == 'Availability declined'

    @staticmethod
    def _on_query_error(e):
        # backoff giveup hook; "e" is the backoff details payload.
        logger.exception(f"Gave up retrying query, last exception was {e}")

    @backoff.on_exception(backoff.expo, Exception, max_tries=5, logger=logger, on_giveup=_on_query_error,
                          raise_on_giveup=False)
    def _retryable_query(self, url, params):
        """GET ``url`` with exponential-backoff retries; returns parsed JSON."""
        self._num_queries += 1
        return self.session.get(url, params=params).json()

    def _update_session_cookie(self):
        # Visit main website to get session cookies
        self.session.get(Ryanair.BASE_SITE_FOR_SESSION_URL)

    def _parse_cheapest_flight(self, flight):
        """Convert a fare-finder "outbound"/"inbound" dict into a Flight."""
        currency = flight['price']['currencyCode']
        if self.currency and self.currency != currency:
            logger.warning(f"Requested cheapest flights in {self.currency} but API responded with fares in {currency}")
        return Flight(
            origin=flight['departureAirport']['iataCode'],
            originFull=', '.join((flight['departureAirport']['name'], flight['departureAirport']['countryName'])),
            destination=flight['arrivalAirport']['iataCode'],
            destinationFull=', '.join((flight['arrivalAirport']['name'], flight['arrivalAirport']['countryName'])),
            departureTime=datetime.fromisoformat(flight['departureDate']),
            flightNumber=f"{flight['flightNumber'][:2]} {flight['flightNumber'][2:]}",
            price=flight['price']['value'],
            currency=currency
        )

    def _parse_cheapest_return_flights_as_trip(self, outbound, inbound):
        """Combine an outbound and an inbound fare into a Trip."""
        outbound = self._parse_cheapest_flight(outbound)
        inbound = self._parse_cheapest_flight(inbound)
        return Trip(
            outbound=outbound,
            inbound=inbound,
            totalPrice=inbound.price + outbound.price
        )

    @staticmethod
    def _parse_all_flights_availability_result_as_flight(response, origin_full, destination_full, currency):
        """Convert an availability-API flight dict into a Flight.

        Sold-out flights (faresLeft == 0) get an infinite price.
        """
        return Flight(departureTime=datetime.fromisoformat(response['time'][0]),
                      flightNumber=response['flightNumber'],
                      price=response['regularFare']['fares'][0]['amount'] if response['faresLeft'] != 0 else float(
                          'inf'),
                      currency=currency,
                      origin=response['segments'][0]['origin'],
                      originFull=origin_full,
                      destination=response['segments'][0]['destination'],
                      destinationFull=destination_full
                      )

    @staticmethod
    def _format_date_for_api(d: Union[datetime, date, str]):
        """Normalize a date input to an ISO date string.

        NOTE(review): silently returns None for unsupported types — confirm
        whether raising would be preferable for callers.
        """
        if isinstance(d, str):
            return d

        if isinstance(d, datetime):
            return d.date().isoformat()

        if isinstance(d, date):
            return d.isoformat()

    @staticmethod
    def _format_time_for_api(t: Union[time, str]):
        """Normalize a time input to an "HH:MM" string (None if unsupported)."""
        if isinstance(t, str):
            return t

        if isinstance(t, time):
            return t.strftime("%H:%M")

    @property
    def num_queries(self):
        """Total number of HTTP queries issued by this client instance."""
        return self._num_queries

    @deprecated(version="2.0.0", reason="deprecated in favour of get_cheapest_flights", action="once")
    def get_flights(self, airport, date_from, date_to, destination_country=None):
        return self.get_cheapest_flights(airport, date_from, date_to, destination_country)

    @deprecated(version="2.0.0", reason="deprecated in favour of get_cheapest_return_flights", action="once")
    def get_return_flights(self, source_airport, date_from, date_to,
                           return_date_from, return_date_to,
                           destination_country=None):
        return self.get_cheapest_return_flights(source_airport, date_from, date_to,
                                                return_date_from, return_date_to, destination_country)
class Token:
    """A filler piece: a ``y`` x ``x`` grid of '.' and '*' characters."""

    def __init__(self, y=None, x=None):
        self.y = y          # number of rows
        self.x = x          # number of columns
        self.shape = []     # list of row strings

    def read_token(self):
        """Read the piece header and shape rows from stdin."""
        self.y, self.x = map(int, input()[:-1].split(' ')[1:])
        self.shape = []
        for _ in range(self.y):
            self.shape.append(input())

    def get_topleft_edge(self):
        """Yield (row, col) of every '*' cell, scanning from the top-left."""
        for i in range(self.y):
            for l in range(self.x):
                if self.shape[i][l] == '*':
                    yield i, l
        # BUG FIX: removed the dead "return None, None" — in a generator a
        # return value is swallowed by StopIteration and never seen by callers.

    def get_bottomright_edge(self):
        """Yield (row, col) of every '*' cell, scanning from the bottom-right."""
        for i in range(self.y)[::-1]:
            for l in range(self.x)[::-1]:
                if self.shape[i][l] == '*':
                    yield i, l
class Board:
    """The filler playfield: a ``y`` x ``x`` grid of cell characters."""

    def __init__(self, y=None, x=None):
        self.y = y
        self.x = x
        self.board = []

    def read_board(self):
        """Read the board header, the column ruler, and every row from stdin."""
        self.y, self.x = map(int, input()[:-1].split(' ')[1:])
        _ = input()  # discard the column-number ruler line
        self.board = [input().split(' ')[1] for _ in range(self.y)]
class Player():
    """Chooses and prints a placement for the current token each turn."""

    def __init__(self, p, board, token):
        self.p = p  # "p1" or "p2"
        # Our cell characters: 'o'/'O' for p1, 'x'/'X' for p2.
        self.char = 'o' if self.p == "p1" else 'x'
        self.enemy_char = 'x' if self.char == 'o' else 'o'
        self.board = board
        self.token = token

    def check_overlap(self, x, y):
        """
        Return 0 when placing the token at board offset (x, y) covers exactly
        one of our own cells and no enemy cells; 1 otherwise (invalid move).
        """
        token = self.token
        overlap_counter = 0
        for token_y in range(token.y):
            for token_x in range(token.x):
                # NOTE(review): the enemy check ignores token.shape, so any
                # enemy cell anywhere under the token's bounding box rejects
                # the move, even under '.' cells — confirm this is intended.
                if self.board.board[y + token_y][x + token_x] in (self.enemy_char, self.enemy_char.upper()):
                    return 1
                if token.shape[token_y][token_x] == '*' and \
                   self.board.board[y + token_y][x + token_x] in (self.char, self.char.upper()):
                    overlap_counter += 1
        if overlap_counter != 1:
            return 1
        return 0

    def check_overflow(self, x, y):
        """
        Return 1 when the token placed at (x, y) would extend past the board's
        right or bottom edge, else 0.

        NOTE(review): negative x/y are not rejected here — presumably caught
        by check_overlap via indexing; verify.
        """
        token = self.token
        board = self.board
        if ((x + token.x) > board.x) or ((y + token.y) > board.y):
            return 1
        return 0

    def put_token(self, token_y, token_x):
        """
        Try to anchor the token cell (token_y, token_x) on each of our own
        board cells; print "y x" and return True on the first legal fit.
        """
        board = self.board
        for board_y in range(board.y):
            for board_x in range(board.x):
                if board.board[board_y][board_x] in (self.char, self.char.upper()):
                    # Offset of the token's top-left corner on the board.
                    x = board_x - token_x
                    y = board_y - token_y
                    if self.check_overflow(x, y) == 0 and \
                       self.check_overlap(x, y) == 0:
                        print(f"{y} {x}")
                        return True
        return False

    def put_random(self):
        """Try every '*' cell as the anchor; print "0 0" (forfeit) if none fit."""
        for token_y, token_x in self.token.get_topleft_edge():
            if self.put_token(token_y, token_x): return True
        print("0 0")
        return False
def main():
    """Filler game loop: read state from stdin, answer one move per round."""
    # First line of the protocol announces which player we are.
    _, _, p, _, _ = input().split(' ')
    p = Player(p, Board(), Token())
    # Loop forever: the game engine ends the process by closing stdin.
    while True:
        p.board.read_board()
        p.token.read_token()
        p.put_random()
if __name__ == "__main__":
    # Run the interactive game loop when executed as a script.
    main()
class Token:
    """A filler piece: a ``y`` x ``x`` grid of '.' and '*' characters."""

    def __init__(self, y=None, x=None):
        self.y = y          # number of rows
        self.x = x          # number of columns
        self.shape = []     # list of row strings

    def read_token(self):
        """Read the piece header and shape rows from stdin."""
        self.y, self.x = map(int, input()[:-1].split(' ')[1:])
        self.shape = []
        for _ in range(self.y):
            self.shape.append(input())

    def get_topleft_edge(self):
        """Yield (row, col) of every '*' cell, scanning from the top-left."""
        for i in range(self.y):
            for l in range(self.x):
                if self.shape[i][l] == '*':
                    yield i, l
        # BUG FIX: removed the dead "return None, None" — in a generator a
        # return value is swallowed by StopIteration and never seen by callers.

    def get_bottomright_edge(self):
        """Yield (row, col) of every '*' cell, scanning from the bottom-right."""
        for i in range(self.y)[::-1]:
            for l in range(self.x)[::-1]:
                if self.shape[i][l] == '*':
                    yield i, l
class Board:
    """The filler playfield: a ``y`` x ``x`` grid of cell characters."""

    def __init__(self, y=None, x=None):
        self.y = y
        self.x = x
        self.board = []

    def read_board(self):
        """Read the board header, the column ruler, and every row from stdin."""
        self.y, self.x = map(int, input()[:-1].split(' ')[1:])
        _ = input()  # discard the column-number ruler line
        self.board = [input().split(' ')[1] for _ in range(self.y)]
class Player():
    """Chooses and prints a placement for the current token each turn."""

    def __init__(self, p, board, token):
        self.p = p  # "p1" or "p2"
        # Our cell characters: 'o'/'O' for p1, 'x'/'X' for p2.
        self.char = 'o' if self.p == "p1" else 'x'
        self.enemy_char = 'x' if self.char == 'o' else 'o'
        self.board = board
        self.token = token

    def check_overlap(self, x, y):
        """
        Return 0 when placing the token at board offset (x, y) covers exactly
        one of our own cells and no enemy cells; 1 otherwise (invalid move).
        """
        token = self.token
        overlap_counter = 0
        for token_y in range(token.y):
            for token_x in range(token.x):
                # NOTE(review): the enemy check ignores token.shape, so any
                # enemy cell anywhere under the token's bounding box rejects
                # the move, even under '.' cells — confirm this is intended.
                if self.board.board[y + token_y][x + token_x] in (self.enemy_char, self.enemy_char.upper()):
                    return 1
                if token.shape[token_y][token_x] == '*' and \
                   self.board.board[y + token_y][x + token_x] in (self.char, self.char.upper()):
                    overlap_counter += 1
        if overlap_counter != 1:
            return 1
        return 0

    def check_overflow(self, x, y):
        """
        Return 1 when the token placed at (x, y) would extend past the board's
        right or bottom edge, else 0.

        NOTE(review): negative x/y are not rejected here — presumably caught
        by check_overlap via indexing; verify.
        """
        token = self.token
        board = self.board
        if ((x + token.x) > board.x) or ((y + token.y) > board.y):
            return 1
        return 0

    def put_token(self, token_y, token_x):
        """
        Try to anchor the token cell (token_y, token_x) on each of our own
        board cells; print "y x" and return True on the first legal fit.
        """
        board = self.board
        for board_y in range(board.y):
            for board_x in range(board.x):
                if board.board[board_y][board_x] in (self.char, self.char.upper()):
                    # Offset of the token's top-left corner on the board.
                    x = board_x - token_x
                    y = board_y - token_y
                    if self.check_overflow(x, y) == 0 and \
                       self.check_overlap(x, y) == 0:
                        print(f"{y} {x}")
                        return True
        return False

    def put_random(self):
        """Try every '*' cell as the anchor; print "0 0" (forfeit) if none fit."""
        for token_y, token_x in self.token.get_topleft_edge():
            if self.put_token(token_y, token_x): return True
        print("0 0")
        return False
def main():
    """Filler game loop: read state from stdin, answer one move per round."""
    # First line of the protocol announces which player we are.
    _, _, p, _, _ = input().split(' ')
    p = Player(p, Board(), Token())
    # Loop forever: the game engine ends the process by closing stdin.
    while True:
        p.board.read_board()
        p.token.read_token()
        p.put_random()
if __name__ == "__main__":
    # Run the interactive game loop when executed as a script.
    main()
from google.protobuf import empty_pb2
class CreateModelMixin:
    """Provides a ``Create()`` handler that persists a new model instance."""

    def Create(self, request, context):
        """
        Create a model instance.

        ``request`` should be a proto message of
        ``serializer.Meta.proto_class``; on success the saved object is
        returned as a message of the same type.
        """
        new_serializer = self.get_serializer(message=request)
        new_serializer.is_valid(raise_exception=True)
        self.perform_create(new_serializer)
        return new_serializer.message

    def perform_create(self, serializer):
        """Persistence hook for the validated serializer; override to customize."""
        serializer.save()
class ListModelMixin:
    """Provides a server-streaming ``List()`` handler."""

    def List(self, request, context):
        """
        Stream the filtered queryset as a sequence of
        ``serializer.Meta.proto_class`` messages.

        .. note::
            This is a server streaming RPC.
        """
        queryset = self.filter_queryset(self.get_queryset())
        serializer = self.get_serializer(queryset, many=True)
        yield from serializer.message
class RetrieveModelMixin:
    """Provides a ``Retrieve()`` handler that fetches a single instance."""

    def Retrieve(self, request, context):
        """
        Look up one object (identified by ``lookup_request_field`` on the
        request) and return it as a ``serializer.Meta.proto_class`` message.
        """
        return self.get_serializer(self.get_object()).message
class UpdateModelMixin:
    """Provides an ``Update()`` handler for full updates."""

    def Update(self, request, context):
        """
        Apply ``request`` (a ``serializer.Meta.proto_class`` message) to the
        looked-up instance and return the updated object as a message of the
        same type.
        """
        obj = self.get_object()
        serializer = self.get_serializer(obj, message=request)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)

        cache = getattr(obj, '_prefetched_objects_cache', None)
        if cache:
            # 'prefetch_related' results are stale after the save; drop them
            # so subsequent reads are fresh.
            obj._prefetched_objects_cache = {}

        return serializer.message

    def perform_update(self, serializer):
        """Persistence hook for the validated serializer; override to customize."""
        serializer.save()
class PartialUpdateModelMixin:
    """Provides a ``PartialUpdate()`` handler for partial updates."""

    def PartialUpdate(self, request, context):
        """
        Partially update the looked-up instance.

        The request must include a field corresponding to
        ``lookup_request_field`` plus the fields to change; the updated
        object is returned as a ``serializer.Meta.proto_class`` message.
        """
        obj = self.get_object()
        serializer = self.get_serializer(obj, message=request, partial=True)
        serializer.is_valid(raise_exception=True)
        self.perform_partial_update(serializer)

        cache = getattr(obj, '_prefetched_objects_cache', None)
        if cache:
            # 'prefetch_related' results are stale after the save; drop them
            # so subsequent reads are fresh.
            obj._prefetched_objects_cache = {}

        return serializer.message

    def perform_partial_update(self, serializer):
        """Persistence hook for the validated serializer; override to customize."""
        serializer.save()
class DestroyModelMixin:
    """Provides a ``Destroy()`` handler that deletes an instance."""

    def Destroy(self, request, context):
        """
        Delete the object identified by ``lookup_request_field`` on the
        request and return a ``google.protobuf.empty_pb2.Empty`` message.
        """
        obj = self.get_object()
        self.perform_destroy(obj)
        return empty_pb2.Empty()

    def perform_destroy(self, instance):
        """Deletion hook; override to customize."""
        instance.delete()
from django.db.models.query import QuerySet
from django.shortcuts import get_object_or_404
from django.core.exceptions import ValidationError
from django.http import Http404
import grpc
from ryca_django_grpc.utils import model_meta
from ryca_django_grpc import mixins, services
class GenericService(services.Service):
    """
    Base class for all other generic services.
    """
    # Either set this attribute or override ``get_queryset()``.
    queryset = None
    # Either set this attribute or override ``get_serializer_class()``.
    serializer_class = None
    # Set these if you want to use object lookups other than the primary key.
    lookup_field = None
    lookup_request_field = None

    def get_queryset(self):
        """
        Get the list of items for this service.
        This must be an iterable, and may be a queryset.
        Defaults to using ``self.queryset``.

        If you are overriding a handler method, it is important that you call
        ``get_queryset()`` instead of accessing the ``queryset`` attribute as
        ``queryset`` will get evaluated only once.

        Override this to provide dynamic behavior, for example::

            def get_queryset(self):
                if self.action == 'ListSpecialUser':
                    return SpecialUser.objects.all()
                return super().get_queryset()
        """
        assert self.queryset is not None, (
            "'%s' should either include a ``queryset`` attribute, "
            "or override the ``get_queryset()`` method."
            % self.__class__.__name__
        )
        queryset = self.queryset
        if isinstance(queryset, QuerySet):
            # Ensure queryset is re-evaluated on each request.
            queryset = queryset.all()
        return queryset

    def get_serializer_class(self):
        """
        Return the class to use for the serializer. Defaults to using
        `self.serializer_class`.
        """
        assert self.serializer_class is not None, (
            "'%s' should either include a `serializer_class` attribute, "
            "or override the `get_serializer_class()` method."
            % self.__class__.__name__
        )
        return self.serializer_class

    def get_object(self):
        """
        Returns an object instance that should be used for detail services.
        Defaults to using the lookup_field parameter to filter the base
        queryset.  Aborts the RPC with NOT_FOUND when no object matches.
        """
        queryset = self.filter_queryset(self.get_queryset())
        # Fall back to the model's primary-key field when no lookup is set.
        lookup_field = (
            self.lookup_field
            or model_meta.get_model_pk(queryset.model).name
        )
        lookup_request_field = self.lookup_request_field or lookup_field
        assert hasattr(self.request, lookup_request_field), (
            'Expected service %s to be called with request that has a field '
            'named "%s". Fix your request protocol definition, or set the '
            '`.lookup_field` attribute on the service correctly.' %
            (self.__class__.__name__, lookup_request_field)
        )
        lookup_value = getattr(self.request, lookup_request_field)
        filter_kwargs = {lookup_field: lookup_value}
        try:
            return get_object_or_404(queryset, **filter_kwargs)
        except (TypeError, ValueError, ValidationError, Http404):
            self.context.abort(grpc.StatusCode.NOT_FOUND, (
                '%s: %s not found!' %
                (queryset.model.__name__, lookup_value)
            ))

    def get_serializer(self, *args, **kwargs):
        """
        Return the serializer instance that should be used for validating and
        deserializing input, and for serializing output.
        """
        serializer_class = self.get_serializer_class()
        kwargs.setdefault('context', self.get_serializer_context())
        return serializer_class(*args, **kwargs)

    def get_serializer_context(self):
        """
        Extra context provided to the serializer class. Defaults to including
        ``grpc_request``, ``grpc_context``, and ``service`` keys.
        """
        # BUG FIX: removed a leftover debug print('>>>...') statement.
        return {
            'grpc_request': self.request,
            'grpc_context': self.context,
            'service': self,
        }

    def filter_queryset(self, queryset):
        """Given a queryset, filter it, returning a new queryset."""
        return queryset
class CreateService(mixins.CreateModelMixin,
                    GenericService):
    """
    Concrete service for creating a model instance; provides a ``Create()``
    handler via CreateModelMixin.
    """
    pass
class ListService(mixins.ListModelMixin,
                  GenericService):
    """
    Concrete service for listing a queryset; provides a server-streaming
    ``List()`` handler via ListModelMixin.
    """
    pass
class RetrieveService(mixins.RetrieveModelMixin,
                      GenericService):
    """
    Concrete service for retrieving a model instance; provides a
    ``Retrieve()`` handler via RetrieveModelMixin.
    """
    pass
class DestroyService(mixins.DestroyModelMixin,
                     GenericService):
    """
    Concrete service for deleting a model instance; provides a ``Destroy()``
    handler via DestroyModelMixin.
    """
    pass
class UpdateService(mixins.UpdateModelMixin,
                    GenericService):
    """
    Concrete service for updating a model instance; provides an ``Update()``
    handler via UpdateModelMixin.
    """
    pass
class ReadOnlyModelService(mixins.RetrieveModelMixin,
                           mixins.ListModelMixin,
                           GenericService):
    """
    Concrete read-only service providing default ``List()`` and
    ``Retrieve()`` handlers.
    """
    pass
class ModelService(mixins.CreateModelMixin,
                   mixins.RetrieveModelMixin,
                   mixins.UpdateModelMixin,
                   mixins.DestroyModelMixin,
                   mixins.ListModelMixin,
                   GenericService):
    """
    Concrete full-CRUD service providing default ``Create()``,
    ``Retrieve()``, ``Update()``, ``Destroy()`` and ``List()`` handlers.
    """
    pass
import base64
import hashlib
import random
import string
from time import sleep
import requests
# Base URL of the Return YouTube Dislike public API.
API_URL = "https://returnyoutubedislikeapi.com"
# Identify this client to the API, as requested by its usage policy.
HEADERS = {"User-Agent": "https://github.com/bbilly1/ryd-client v0.0.6"}
class Login:
    """Handle user registration against the ryd API."""

    def __init__(self, user_id=False):
        self.user_id = user_id

    def generate_user_id(self):
        """Generate, remember and return a random 36-char alphanumeric user id."""
        alphabet = string.ascii_letters + string.digits
        rng = random.SystemRandom()  # OS entropy, suitable for identifiers
        # join() replaces the old quadratic string-concatenation loop.
        new_user_id = ''.join(rng.choice(alphabet) for _ in range(36))
        self.user_id = new_user_id
        return new_user_id

    def get_puzzle(self):
        """Request a registration puzzle for this (possibly newly created) id."""
        user_id = self.user_id or self.generate_user_id()
        url = f"{API_URL}/puzzle/registration?userId={user_id}"
        puzzle = requests.get(url, headers=HEADERS).json()
        puzzle["user_id"] = user_id
        return puzzle

    def post_puzzle(self, solution):
        """Post the solved puzzle to confirm registration; True on success."""
        url = f"{API_URL}/puzzle/registration?userId={self.user_id}"
        response = requests.post(url, headers=HEADERS, json=solution)
        if response.ok:
            print(f"successfully registered with user id {self.user_id}")
            return response.text == "true"
        return False
class Puzzle:
    """Solve a ryd proof-of-work registration puzzle."""

    def __init__(self, puzzle):
        self.puzzle = puzzle

    @staticmethod
    def count_leading_zeros(to_check):
        """Return the number of leading zero bits in a byte sequence."""
        zeros = 0
        for byte in to_check:
            if byte:
                # Position of the first set bit within this byte.
                zeros += f"{byte:08b}".index("1")
                break
            zeros += 8
        return zeros

    def solve(self):
        """
        Brute-force a 4-byte counter whose SHA-512 hash (counter + first 16
        challenge bytes) has at least ``difficulty`` leading zero bits.
        Returns {"solution": <base64 counter>} or False when none is found.
        """
        challenge = list(base64.b64decode(self.puzzle["challenge"]))
        difficulty = self.puzzle["difficulty"]
        max_count = 2 ** difficulty * 5
        # Only the first 16 challenge bytes participate in the hash.
        tail = bytes(challenge[i] for i in range(16))
        for counter in range(max_count):
            candidate = counter.to_bytes(4, byteorder="little") + tail
            digest = hashlib.sha512(candidate).digest()
            if self.count_leading_zeros(list(digest)) >= difficulty:
                return {"solution": base64.b64encode(candidate[0:4]).decode()}
        return False
class VotePost:
    """Cast one vote or a list of votes on behalf of a registered user."""

    def __init__(self, votes, user_id):
        # votes: a single (youtube_id, vote) tuple, or a list of such tuples.
        self.votes = votes
        self.user_id = user_id

    def process(self):
        """Validate and post ``self.votes``; returns one message or a list."""
        if isinstance(self.votes, list):
            return self._process_list()
        if isinstance(self.votes, tuple):
            youtube_id, vote = self.votes
            validated = self.validate_vote(vote)
            return self.post((youtube_id, validated))
        raise ValueError

    def _process_list(self):
        """Validate every vote up front, then post them one by one."""
        validated = [(i[0], self.validate_vote(i[1])) for i in self.votes]
        return [self.post(vote) for vote in validated]

    def post(self, vote):
        """Post a single validated vote; returns a status message dict."""
        puzzle = self._initial_vote(vote)
        solution = Puzzle(puzzle).solve()
        response = self._confirm_vote(solution, vote[0])
        if not response:
            print(f"failed to cast vote for: {self.user_id}, {vote}")
            raise ValueError
        message = {
            "id": vote[0],
            "status": response,
            "vote": vote[1],
        }
        return message

    @staticmethod
    def validate_vote(vote):
        """Normalize a vote ('like'/'dislike'/'neutral' or -1/0/1) to an int.

        Raises KeyError for an unknown string, ValueError for any other
        invalid value.
        """
        vote_map = {
            "like": 1,
            "dislike": -1,
            "neutral": 0,
        }
        if isinstance(vote, str):
            try:
                return vote_map[vote]
            except KeyError:
                print(f"invalid vote: {vote}")
                raise
        if isinstance(vote, int) and vote in vote_map.values():
            return vote
        # Anything else (wrong type or out-of-range int) is rejected.
        # (removed the unreachable "return False" after the raise)
        raise ValueError(f"invalid vote cast: {vote}")

    def _initial_vote(self, vote):
        """Send the initial vote and receive the proof-of-work puzzle."""
        data = {
            "userId": self.user_id,
            "videoId": vote[0],
            "value": vote[1],
        }
        url = f"{API_URL}/interact/vote"
        response = requests.post(url, headers=HEADERS, json=data)
        if not response.ok:
            print("failed")
            raise ValueError
        return response.json()

    def _confirm_vote(self, solution, video_id):
        """Send the solved puzzle to confirm the vote; True on success."""
        data = {
            "userId": self.user_id,
            "videoId": video_id,
            "solution": solution["solution"],
        }
        url = f"{API_URL}/interact/confirmVote"
        response = requests.post(url, headers=HEADERS, json=data)
        if response.ok:
            return response.text == "true"
        return False
class VoteGet:
    """Fetch vote stats for a single video id or a list of them."""

    def __init__(self, youtube_ids):
        self.youtube_ids = youtube_ids

    def process(self):
        """Dispatch on input type: one dict for a string, a list for a list."""
        if isinstance(self.youtube_ids, str):
            return self._get_vote(self.youtube_ids)
        if isinstance(self.youtube_ids, list):
            return self._process_list()
        raise ValueError

    def _process_list(self):
        """Fetch votes for every id in the list."""
        return [self._get_vote(video_id) for video_id in self.youtube_ids]

    @staticmethod
    def _get_vote(youtube_id):
        """Fetch votes for a single video, retrying once on network errors."""
        url = f"{API_URL}/votes?videoId={youtube_id}"
        votes = None
        try:
            votes = requests.get(url, headers=HEADERS, timeout=3)
        except requests.exceptions.RequestException:
            # One retry after a short back-off, with a longer timeout.
            sleep(5)
            votes = requests.get(url, headers=HEADERS, timeout=5)
        if votes is None:
            raise ConnectionError("failed to connect to API")
        if not votes.ok:
            print(f"{youtube_id}: RYD returns error code {votes.status_code}.")
            return {
                "id": youtube_id,
                "status": votes.status_code,
            }
        parsed = votes.json()
        parsed["status"] = votes.status_code
        del parsed["dateCreated"]
        return parsed
def generate_user_id():
    """short hand to generate user id"""
    return Login().generate_user_id()


def register(user_id):
    """register your user id"""
    login_handler = Login(user_id)
    solution = Puzzle(login_handler.get_puzzle()).solve()
    if login_handler.post_puzzle(solution):
        return True
    print(f"failed to register with user id {user_id}")
    return False


def get(youtube_ids):
    """get votes from list of youtube_ids"""
    return VoteGet(youtube_ids).process()


def post(votes, user_id):
    """post votes"""
    return VotePost(votes, user_id).process()
import os
import pickle
import jsonpickle
class FileService:
    """Read and write a file using pickle serialization.

    :param filename: file name to read or write
    """

    def __init__(self, filename):
        """Create the backing pickle file with an empty list if missing."""
        self.__filename = filename
        if not os.path.exists(filename):
            self.write([])

    def read(self):
        """Return the deserialized contents of the pickle file.

        :return: list of data
        :rtype: list
        """
        # NOTE: pickle is only safe for files this process itself wrote.
        with open(self.__filename, "rb") as handle:
            return pickle.load(handle)

    def write(self, data):
        """Serialize ``data`` into the pickle file.

        :param data: data to write to the file
        :return: None
        """
        with open(self.__filename, "wb") as handle:
            pickle.dump(data, handle)
class FileJsonService:
    """Read and write a file using JSON (jsonpickle) serialization.

    :param filename: file name to read or write
    """

    def __init__(self, filename):
        """Create the backing JSON file with an empty list if missing."""
        self.__filename = filename
        if not os.path.exists(filename):
            self.write([])

    def read(self):
        """Return the decoded contents of the JSON file.

        :return: list of data
        :rtype: list
        """
        with open(self.__filename, "r") as handle:
            return jsonpickle.decode(handle.read())

    def write(self, data):
        """Encode ``data`` into the JSON file.

        :param data: data to write to the file
        :return: None
        :rtype: NoneType
        """
        with open(self.__filename, "w") as handle:
            handle.write(jsonpickle.encode(data))
class Database:
    """A simple list-backed database persisted through a file service.

    :param filename: name of the backing file
    :type filename: str
    :param mode: serialization mode, "byte" (pickle) or "json". Default "byte".
    :type mode: str, optional
    """

    def __init__(self,
                 filename,
                 mode="byte"):
        """Select the file service for ``mode`` and load existing data."""
        fileservice = {
            "byte": FileService,
            "json": FileJsonService}
        self.fileservice = fileservice[mode](filename)
        self.data = self.fileservice.read()

    def save(self):
        """Persist the current data to the database file."""
        self.fileservice.write(self.data)

    def append(self, data):
        """Append an item to the database and persist.

        :param data: item to append to the database
        """
        self.data.append(data)
        self.save()

    def pop(self, element):
        """Delete an item by index and persist.

        :param element: index of the element to delete from the database list
        """
        self.data.pop(element)
        # Bug fix: ``pop`` previously skipped ``save()``, so the deletion was
        # silently lost on reload while append/remove/clear all persisted.
        self.save()

    def remove(self, element):
        """Delete the first matching item by value and persist.

        :param element: value to remove from the database list
        """
        self.data.remove(element)
        self.save()

    def clear(self):
        """Remove all items from the database and persist."""
        self.data.clear()
        self.save()

    def __len__(self):
        """Return the number of stored items.

        :rtype: int
        """
        return len(self.data)

    def __getitem__(self, number):
        """Return the item stored at index ``number``."""
        return self.data[number]

    def __str__(self):
        """Return a printable form of the stored data.

        :rtype: str
        """
        return f"{self.data}"
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None
        Returns:
            float: mean of the data set
        """
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population
        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 when estimating from a sample.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        self.stdev = math.sqrt(sigma / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None
        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function
        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-labeled axes[0]; the PDF subplot is axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance
        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances of independent Gaussians add, so stdevs add in quadrature.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import dataclasses as dcs
import logging
from collections import abc, defaultdict
from functools import singledispatch
from typing import Any, Callable, Generator, Iterable, Mapping
from . import variation
LOGGER = logging.getLogger(__name__)
class AliasError(KeyError):
    # Raised by ``Alias.identify`` / ``AliasResolver.identify`` when no
    # known alias (or transformed variant) matches the requested value.
    ...
def _default_transforms() -> Iterable[Callable[[str], str]]:
return [
variation.upper,
variation.lower,
]
@dcs.dataclass
class Alias:
    """Simple name lookup.

    - Provides basic name lookup.
    - Support for enumerating common (or custom) variations, including
      upper and lower case or (de)essing.

    Attributes:
        identity: The "true name" of the alias.
        aliases: An iterable of names to alias.
        transforms: An iterable of functions to apply to each alias.
            Each function should take one string and return one string.
            Default: Upper and lower case of each alias.
    """

    identity: str
    aliases: Iterable[str]
    transforms: Iterable[Callable[[str], str]] = dcs.field(
        default_factory=_default_transforms
    )
    logger: logging.Logger = dcs.field(
        default=None, repr=False, hash=False, compare=False
    )
    # Maps every known name (and transformed variant) to a truthy marker (1);
    # used purely as a set for O(1) membership tests in ``identify``.
    _lookup: Mapping[str, int] = dcs.field(init=False, repr=False)
    # Counts how often each name has been queried via ``identify``.
    _attempts: Mapping[str, int] = dcs.field(
        init=False,
        repr=False,
        hash=False,
        compare=False,
    )

    def __post_init__(self):
        # allow users to explicitly provide 'None"
        self.aliases = self.aliases or []
        self.logger = self.logger or LOGGER
        # Normalize transforms (names, callables, nested iterables) to a list.
        self.transforms = resolve_variations(self.transforms)
        # support single alias
        if isinstance(self.aliases, str):
            self.aliases = [self.aliases]
        # setup alias internal data
        opts = self.names
        self._attempts = defaultdict(int, {k: 0 for k in self.names})
        # Lookup holds the raw names plus every transformed variant.
        self._lookup = {
            **{k: 1 for k in opts},
            **{func(name): 1 for name in opts for func in self.transforms},
        }

    @property
    def names(self) -> Iterable[str]:
        # Identity first, then aliases; used to (re)build the lookup.
        return [self.identity, *self.aliases]

    def add_alias(self, value: str) -> None:
        """Add given alias to lookup, including transformed names."""
        if value in self.aliases:
            self.logger.warning("existing alias: %s", value)
            return  # do not add more than once
        # NOTE(review): only the transformed variants enter ``_lookup`` here;
        # the raw ``value`` is included only if some transform maps it to
        # itself (true for upper/lower on most inputs) -- confirm intended.
        lookup = {func(value): 1 for func in self.transforms}
        self._lookup.update(lookup)
        self.aliases.append(value)

    def all_names(self) -> Iterable[str]:
        """Return all known aliases and transformations."""
        return sorted(self._lookup.keys())

    def add_transform(self, value: Callable[[str], str]) -> None:
        """Add given transform and update alias lookup."""
        # NOTE(review): the early ``return`` on a duplicate aborts processing
        # of any remaining transforms in an iterable input -- confirm intended.
        for func in resolve_variations(value):
            if func in self.transforms:
                self.logger.warning("existing transform: %s", func)
                return  # do not add more than once
            lookup = {func(k): 1 for k in self.names}
            self._lookup.update(lookup)
            self.transforms.append(func)

    def identify(self, value: str) -> str:
        """Return identity for the given alias value.

        Arguments:
            value: Alias to match.
        Returns:
            Identity for the given alias.
        Raises:
            AliasError (KeyError) if unknown alias given.
        """
        self._attempts[value] += 1  # know which aliases are used / needed
        match = self._lookup.get(value)  # faster than iterating with try/except
        if not match:
            raise AliasError(value)
        return self.identity

    def set_transforms(self, value: Iterable[Callable[[str], str]]) -> None:
        """Replace current transforms and update lookup.

        Arguments:
            value: Iterable of callables. None to clear current.
        Returns:
            None
        """
        value = resolve_variations(value)
        opts = self.names
        # Rebuild from scratch: raw names plus variants of the new transforms.
        lookup = {
            **{k: 1 for k in opts},
            **{func(name): 1 for name in opts for func in value},
        }
        self._lookup = lookup
        self.transforms = value[:]
# resolve variation
# ----------------------------------
def resolve_variations(value: Any) -> Callable[[str], str]:
    """Resolve given value into callables.

    Supported input: a variation name (see rym.alias.variation), a callable
    taking and returning a single string, an iterable of either, or None.

    Arguments:
        value: One of the supported input.
    Returns:
        An iterable of resolved variation callables.
    Raises:
        TypeError for invalid input.
    """
    return [*_resolve_variations(value)]


@singledispatch
def _resolve_variations(value: Any) -> Generator[Callable[[str], str], None, None]:
    # Fallback: ``None`` yields nothing; any other unregistered type is an error.
    if value is not None:
        raise TypeError(f"invalid variation: {value}")
    yield from ()


@_resolve_variations.register(str)
def _(value: str) -> Generator[Callable[[str], str], None, None]:
    # A string names a function defined in the ``variation`` module.
    yield getattr(variation, value)


@_resolve_variations.register(abc.Callable)
def _(value: Callable) -> Generator[Callable[[str], str], None, None]:
    yield value


@_resolve_variations.register(abc.Iterable)
def _(value: Iterable) -> Generator[Callable[[str], str], None, None]:
    # Flatten nested iterables recursively; plain strings dispatch to the
    # more specific ``str`` registration above, not here.
    for entry in value:
        yield from _resolve_variations(entry)
# __END__
import dataclasses as dcs
import itertools
import json
import logging
from collections import ChainMap, abc, defaultdict
from functools import singledispatch
from pathlib import Path
from pprint import pformat
from typing import Any, Callable, Generator, Iterable, Mapping, Optional
from ._alias import Alias, AliasError
def _load_pkg(names: Iterable[str]):
"""Safe import. Allow variable feature set based on available packages.
Arguments:
names: List of acceptable package names (with compatible interfaces).
Returns:
The loaded module or None.
"""
import importlib
for name in names:
try:
return importlib.import_module(name)
except ImportError:
continue
return None
# Optional TOML support: first importable backend wins; ``toml`` stays
# None when no backend is installed.
toml = _load_pkg(
    [
        "tomllib",  # py 3.11+
        "tomlkit",  # style-preserving
        "toml",
    ]
)
# Optional YAML support (None when PyYAML is absent).
yaml = _load_pkg(["yaml"])
LOGGER = logging.getLogger(__name__)
# Sentinel default for ``transforms`` arguments: a value no caller would
# pass, so an explicit ``None`` keeps its meaning of "disable transforms".
_DEFAULT = __file__
class CollisionError(ValueError):
    """Raise for an alias collision.

    An alias name (or one of its transformed variants) maps to more than
    one identity within the same resolver; raised by ``AliasResolver.add``
    when ``strict`` is true.
    """
@dcs.dataclass
class AliasResolver:
    """Group of aliases.

    Resolves any known alias name to its identity via a flat index over
    all member ``Alias`` instances.
    """

    aliases: Iterable[Alias]
    logger: logging.Logger = dcs.field(
        default=None, repr=False, hash=False, compare=False
    )
    # Maps every known name to the index of its owning alias in ``aliases``.
    _lookup: Mapping[str, int] = dcs.field(init=False, repr=False)
    # Counts how often each name has been queried via ``identify``.
    _attempts: Mapping[str, int] = dcs.field(
        init=False,
        repr=False,
        hash=False,
        compare=False,
    )

    def __post_init__(self):
        self.logger = self.logger or LOGGER
        # setup alias internal data
        self._build_lookup_index()

    @classmethod
    def build(
        cls,
        *args,
        strict: bool = True,
        transforms: Optional[Iterable[Callable[[str], str]]] = _DEFAULT,
        logger: logging.Logger = None,
        _resolver: Callable = None,
        **kwargs,
    ) -> "AliasResolver":
        """Build aliases to resolve.

        Arguments:
            *args: Supported formats as positional arguments
            strict: If true, will raise if collisions detected.
            transforms: Optional transforms to apply to all aliases.
                If given, will replace existing transforms on each alias.
                Use 'None' to disable all transformations
            _resolver: Inject an alias factory.
            **kwargs: Supported formats as keyword arguments
        Returns:
            An AliasResolver instance.
        See also:
            alias_factory
        """
        _resolver = _resolver or resolve_aliases
        aliases = _resolver(*args, transforms=transforms, **kwargs)
        # Start empty and route through ``add`` to get collision handling.
        instance = cls(aliases=[], logger=logger)
        instance.add(aliases, strict=strict)
        return instance

    def _build_lookup_index(self) -> None:
        """Index alias lookup."""
        # NOTE(review): on a name collision the later alias wins here (dict
        # comprehension overwrites); ``find_collisions`` reports the losers.
        self._lookup = {k: i for i, x in enumerate(self.aliases) for k in x.all_names()}
        self._attempts = defaultdict(int, {k: 0 for k in self._lookup.keys()})

    def add(
        self,
        *args,
        strict: bool = True,
        transforms: Optional[Iterable[Callable[[str], str]]] = _DEFAULT,
        _resolver: Callable = None,
        **kwargs,
    ) -> "AliasResolver":
        """Add aliases to self.

        Raises:
            CollisionError: when ``strict`` and a new alias collides with
                an existing one; otherwise collisions are only logged.
        """
        _resolver = _resolver or resolve_aliases
        aliases = _resolver(*args, transforms=transforms, **kwargs)
        collisions = self.find_collisions(self.aliases, aliases)
        if not collisions:
            ...
        elif strict:
            raise CollisionError(collisions)
        else:
            self.logger.warning("Collisions detected: %s", collisions)
        self.aliases.extend(aliases)
        self._build_lookup_index()
        return self  # support chaining

    @classmethod
    def find_collisions(
        cls,
        *aliases: Iterable[Alias],
        logger: logging.Logger = None,
    ) -> Iterable[str]:
        """Check for alias collisions.

        Returns:
            Sorted names that appear in more than one alias's lookup.
        """
        logger = logger or LOGGER
        # Flatten all inputs to Alias instances, then layer their lookups.
        lookup = ChainMap(*[x._lookup for x in resolve_aliases(aliases)])
        keys = set()
        lost = defaultdict(list)
        collisions = set()
        # Walk the child maps, accumulating seen names; any name already
        # seen in an earlier map is a collision (the later value is "lost").
        for child in lookup.maps:
            both = keys & child.keys()
            for k in both:
                lost[k].append(child[k])
            keys |= child.keys()
            collisions |= both
        logger.debug("Lost aliases due to collisions: %s", pformat(lost))
        return sorted(collisions)

    def identify(self, value: str) -> str:
        """Return identity for the given alias value.

        Arguments:
            value: Alias to match.
        Returns:
            Identity for the given alias.
        Raises:
            AliasError (KeyError) if unknown alias given.
        """
        self._attempts[value] += 1  # know which aliases are used / needed
        idx = self._lookup.get(value)  # faster than iterating with try/except
        if idx is None:
            raise AliasError(value)
        return self.aliases[idx].identity
def resolve_aliases(
    *args,
    transforms: Optional[Iterable[Callable[[str], str]]] = _DEFAULT,
    **kwargs,
) -> Iterable[Alias]:
    """Build aliases from multiple supported formats.

    Supported Formats:
        - Alias instances
        - Alias keywords
            e.g., {'identity': 'foo', 'aliases': 'bar', 'transform': 'upper'}
        - Alias mapping (does not support transform definition)
            e.g., {'foo': ['bar']}
        - None
        - Iterable of supported format
        - Encoding of supported format
            - May be string (json only)
            - May be file path (json, toml, yaml)
              NOTE: TOML requires a root object (not an array)

    Arguments:
        *args: Supported formats as positional arguments
        transforms: Optional transforms to apply to all aliases.
            Use 'None' to disable.
        **kwargs: Supported formats as keyword arguments
    Returns:
        Iterable of Alias instances.
    """
    aliases = [
        *_yield_aliases(args),
        *_yield_aliases(kwargs),
    ]
    # Only touch transforms when the caller supplied something (including
    # an explicit None, which clears them); the sentinel means "leave as-is".
    if transforms != _DEFAULT:
        for alias in aliases:
            alias.set_transforms(transforms)
    return aliases
@singledispatch
def _yield_aliases(value: Any) -> Generator[Alias, None, None]:
    # Fallback: ``None`` yields nothing; any other unregistered type is an error.
    if value is not None:
        raise TypeError(f"invalid alias: {value}")
    yield from []


@_yield_aliases.register(str)
def _(value: str) -> Generator[Alias, None, None]:
    # Strings are treated as JSON-encoded alias definitions.
    yield from _yield_aliases(json.loads(value))


@_yield_aliases.register(Alias)
def _(value: Alias) -> Generator[Alias, None, None]:
    yield value


@_yield_aliases.register(abc.Iterable)
def _(value: Iterable) -> Generator[Alias, None, None]:
    # Recurse into iterables (lists, tuples, chained args).
    for item in value:
        yield from _yield_aliases(item)


@_yield_aliases.register(abc.Mapping)
def _(value: Mapping) -> Generator[Alias, None, None]:
    # First try the mapping as Alias keyword arguments; a TypeError means it
    # is instead a plain {identity: aliases} mapping.
    # NOTE(review): a keyword-style mapping with one unexpected key also
    # raises TypeError and falls into the identity-mapping branch -- confirm.
    try:
        yield Alias(**value)
    except TypeError:
        for identity, aliases in value.items():
            if identity == "aliases":
                yield from _yield_aliases(aliases)
            else:
                yield Alias(identity, aliases)


@_yield_aliases.register(Path)
def _(value: Path) -> Generator[Alias, None, None]:
    # Pick a decoder by file suffix; toml/yaml entries are None when the
    # optional backend is not installed, which triggers the error below.
    cases = {
        ".json": json.loads,
        ".toml": getattr(toml, "loads", None),
        ".yaml": getattr(yaml, "safe_load", None),
        ".yml": getattr(yaml, "safe_load", None),
    }
    func = cases.get(value.suffix)
    if not func:
        raise ValueError(f"unavailable encoding: {value.suffix} ({value})") from None
    content = value.read_text()
    data = func(content)
    yield from _yield_aliases(data)
# __END__
import logging
from collections import abc, deque
from functools import singledispatch
from traceback import TracebackException
from typing import Any, Deque, Iterable, Mapping, Optional, Union
from ._delim import get_delimiter
LOGGER = logging.getLogger(__name__)
__DEFAULT = "any random string that is unlikely to be provided"
class InvalidKey(ValueError):
    """Raise if given an unsupported key type.

    Distinct from lookup failures: ``get`` re-raises this even when a
    ``default`` was supplied.
    """
def get(
    value: Any,
    key: Union[str, Iterable[str]],
    *,
    default: Optional[Any] = __DEFAULT,
    delim: Optional[str] = None,
) -> Any:
    """Return the value of the property found at the given key.

    Arguments:
        value: An object, iterable, or mapping
        key: A string indicating the path to the value.
            An iterable of strings may be provided; the first match wins.
        default: Value returned when the key cannot be found.
        delim: Specify the delimiter. Default is '.'.
    Returns:
        The property found.
    Raises:
        AttributeError, IndexError, or KeyError if the requested key could not be found.
        ValueError if an invalid key given.
    """
    sep = delim or get_delimiter()
    try:
        return _get(key, value, sep)
    except InvalidKey:
        # Invalid key types are always fatal, even with a default supplied.
        raise
    except (AttributeError, KeyError, IndexError, ValueError):
        if default == __DEFAULT:
            raise
        return default
@singledispatch
def _get(key: Any, value: Any, delim: str) -> Any:
    # Fallback: only str keys or iterables of str keys are supported.
    raise InvalidKey(
        f"invalid key: {key}, ({type(key)}); expected str or list of str"
    )


@_get.register(str)
def _(key: str, value: str, delim: str) -> Any:
    parts = key.split(delim)
    try:
        return _get_from(value, deque(parts))
    except (AttributeError, IndexError, KeyError) as err:
        # Rebuild the dotted path up to (and including) the failing part so
        # the re-raised error names the full prefix, not just the bare key.
        tb = TracebackException.from_exception(err)
        missing = str(err).strip("'\"")
        # NOTE(review): ``index`` matches the first occurrence of the failing
        # part; duplicated path segments may report the wrong prefix -- confirm.
        idx = parts.index(missing) + 1
        raise tb.exc_type(".".join(parts[:idx])) from err
    except ValueError as err:
        # e.g. a non-numeric index into a sequence; annotate with the full key.
        raise ValueError(f"{err} (given={key})") from err


@_get.register(abc.Iterable)
def _(key: Iterable[str], value: str, delim: str) -> Any:
    # Try each candidate key in order; the first successful lookup wins.
    for k in key:
        try:
            parts = k.split(delim)
            return _get_from(value, deque(parts))
        except (AttributeError, IndexError, KeyError, ValueError):
            continue
    raise KeyError(f"no matches: {key}")
@singledispatch
def _get_from(value: Any, parts: Deque[str]) -> Any:
    # Fallback for arbitrary objects: resolve each part as an attribute.
    if not parts:
        return value
    key = parts.popleft()
    try:
        curr = getattr(value, key)
    except AttributeError as err:
        # Re-raise with just the missing key so ``_get`` can rebuild the path.
        raise AttributeError(key) from err
    return _get_from(curr, parts)


@_get_from.register(abc.Iterable)
def _(value: Iterable, parts: Deque[str]) -> Any:
    # Sequences: parts are numeric indices.
    # NOTE(review): plain strings also dispatch here (str is Iterable), so a
    # non-numeric part against a string raises ValueError from int() -- confirm
    # that is the intended behavior.
    if not parts:
        return value
    key = int(parts.popleft())
    try:
        curr = value[key]
    except IndexError:
        raise IndexError(key) from None
    return _get_from(curr, parts)


@_get_from.register(abc.Mapping)
def _(value: Mapping, parts: Deque[str]) -> Any:
    # Mapping is more specific than Iterable, so dicts resolve by key here.
    if not parts:
        return value
    key = parts.popleft()
    return _get_from(value[key], parts)
# __END__
import logging
import re
from collections import abc
from functools import singledispatch
from re import Pattern
from typing import Any, Iterable, Tuple, Union
from .structures import TokenSpec
LOGGER = logging.getLogger(__name__)
try:
from functools import cache
except ImportError: # pragma: no cover
from functools import lru_cache
cache = lru_cache(maxsize=None)
def combine_regex(sources: Iterable[Union[str, Pattern, TokenSpec]]) -> Pattern:
    """Compile sources into single, compiled regex pattern.

    Accepted sources:
    - String
    - re.Pattern
    - rym.token.TokenSpec

    String and pattern sources are captured as 'PX' (X = input index);
    TokenSpec sources are captured under their declared 'type'. Sources
    sharing a capture name are deduplicated (first occurrence wins), with
    group order based on the pre-deduplication input order.

    This function is indirectly memoized for (name, regex) sources.

    Arguments:
        sources: Iterable with one or more regex sources.
    Returns:
        Compiled regex with named capture group for each source.
    Raises:
        TypeError for invalid source.
    """
    specs = tuple(_yield_patterns(sources, i=None))
    return combine_regex_memoized(specs)
@cache
def combine_regex_memoized(specs: Tuple[Tuple[str, str], ...]) -> Pattern:
    """Memoized version of combine_regex.

    Deduplicates specs by capture name (keeping the first occurrence in
    input order), then joins each (name, pattern) pair into one alternation
    of named groups.

    See also:
        combine_regex
    """
    first_seen = {}
    for position, (name, _) in enumerate(specs):
        first_seen.setdefault(name, position)
    deduped = (specs[position] for position in sorted(first_seen.values()))
    combined = "|".join(f"(?P<{name}>{pattern})" for name, pattern in deduped)
    return re.compile(combined)
@singledispatch
def _yield_patterns(value: Any, i: int) -> Tuple[str, str]:
    """Return (name, pattern) for every source.

    Arguments:
        value: Regex pattern source
        i: Index for naming (if needed)
    Returns:
        A tuple of the name and pattern.
    Raises:
        TypeError for unsupported sources.
    """
    raise TypeError(f"{value}; expected string, re.Pattern, or TokenSpec")


@_yield_patterns.register(str)
def _(value: str, i: int) -> Tuple[str, str]:
    # Bare strings get a positional capture name ("P0", "P1", ...).
    yield f"P{i or 0}", value


@_yield_patterns.register(Pattern)
def _(value: Pattern, i: int) -> Tuple[str, str]:
    # Pre-compiled patterns contribute their raw pattern text.
    yield f"P{i or 0}", value.pattern


@_yield_patterns.register(TokenSpec)
def _(value: TokenSpec, i: int) -> Tuple[str, str]:
    # Token specs keep their declared type as the capture name.
    yield value.type, value.pattern


@_yield_patterns.register(abc.Iterable)
def _(value: Iterable, i: int) -> Tuple[str, str]:
    # Recurse into iterables; nested levels get underscore-joined indices
    # (e.g. "P1_0") so generated capture names stay unique.
    prefix = f"{i}_" if i else ""
    for j, x in enumerate(value):
        yield from _yield_patterns(x, f"{prefix}{j}")
# __END__
import calendar
import datetime as dt
import itertools
import logging
import re
from typing import Callable, Iterable, Optional, Tuple
from rym.token.structures import TokenSpec
try:
from functools import cache
except ImportError: # pragma: no cover
from functools import lru_cache
cache = lru_cache(maxsize=None)
LOGGER = logging.getLogger(__name__)
# support
# ======================================================================
def build_subtype_assignment(
    subtypes: Iterable[Tuple[str, Tuple[str, ...]]]
) -> Callable[[str], str]:
    """Return a callable to assign subtype.

    Arguments:
        subtypes: One or more ("type", ("subtype", ...)) definitions
    Returns:
        A callable that takes a value and type string and returns the
        updated type assignment.
    """
    # Case-insensitive lookup from each subtype name to its upper-cased type.
    index = {
        str(name).lower(): type_name.upper()
        for type_name, names in subtypes
        for name in names
    }

    def assign_subtype(value: str, type_: str) -> str:
        # Fall back to the given type when the value is not a known subtype.
        return index.get(str(value).lower(), type_)

    return assign_subtype
# specs
# ======================================================================
# datetime
# ----------------------------------
_DATE = r"(?:\d\d)?\d{2}[\-/\.]\d{2}[\-/\.]\d{2}"
_TS_SEP = r"[T\s]"
_TIME = r"\d?\d:\d{2}(?::\d{2}(?:\.\d+)?)?(?:\s?[ZAPap][Mm]?)?"
_TZ = r"(?:Z|[\+\-]\d{2}:\d{2})"
_DATE_SEP = re.compile(r"[\./]")
_TIME_SEP = re.compile(r"[:\sa-z]")
def _safe_date(value: str, *args) -> dt.date:
value = _DATE_SEP.sub("-", value)
return dt.date.fromisoformat(value)
def _safe_time(value: str, *args) -> dt.time:
value = value.lower()
if "z" == value[-1]:
# Z support added in 3.11
value = value[:-1] + "+00:00"
elif "m" == value[-1]:
h, m, *_ = _TIME_SEP.split(value)
adj = 0 if "a" == value[-2] else 12
h = int(h) + adj
value = "{:0d}:{}".format(h, m)
return dt.time.fromisoformat(value)
def _safe_timestamp(value: str, *args) -> dt.datetime:
if "z" == value[-1].lower():
# Z support added in 3.11
value = value[:-1] + "+00:00"
return dt.datetime.fromisoformat(value)
@cache
def timestamp() -> TokenSpec:
    """Return a spec for ISO-8601 timestamps, e.g., 2023-10-30T10:30Z."""
    pattern = f"{_DATE}{_TS_SEP}{_TIME}(?:{_TZ})?"
    return TokenSpec("TIMESTAMP", pattern, _safe_timestamp)


@cache
def date() -> TokenSpec:
    """Return a spec for ISO-8601 dates, e.g., 2023-10-30."""
    return TokenSpec("DATE", _DATE, _safe_date)


@cache
def time() -> TokenSpec:
    """Return a spec for ISO-8601 time strings, e.g., 24:00.000Z."""
    return TokenSpec("TIME", f"{_TIME}(?:{_TZ})?", _safe_time)
@cache
def reldate() -> TokenSpec:
    """Return a spec for relative date words, e.g., 'tomorrow'."""
    words = (
        "yesterday", "today", "tomorrow", "day",
        "weekend", "weekday", "week",
        "month", "year",
        "winter", "spring", "summer", "fall",
        "Q[1-4]",
    )
    # Accept either case for the first letter only (e.g. "Today"/"today").
    names = "|".join(
        "[%s%s]%s" % (word[0].lower(), word[0].upper(), word[1:]) for word in words
    )
    return TokenSpec("RELDATE", r"(?<=\b)(?:%s)(?=\b)" % (names,))
@cache
def month() -> TokenSpec:
    """Return a spec for month names and abbreviations. Title case only."""
    # month_name/month_abbr carry an empty string at index 0, hence the slice.
    names = "|".join(
        itertools.chain.from_iterable(
            zip(calendar.month_name[1:], calendar.month_abbr[1:])
        )
    )
    pattern = r"(?<=\b)(?:%s)(?=\b)" % (names,)
    # Fixed return annotation: this returns a TokenSpec, not a str.
    return TokenSpec("MONTH", pattern)
@cache
def day() -> TokenSpec:
    """Return a spec for day names and abbreviations. Title case only."""
    # Bug fix: unlike month_name, calendar.day_name has NO empty entry at
    # index 0 (it starts with "Monday"), so the previous [1:] slice silently
    # dropped Monday/Mon from the pattern. Also fixed the return annotation.
    names = "|".join(
        itertools.chain.from_iterable(zip(calendar.day_name, calendar.day_abbr))
    )
    pattern = r"(?<=\b)(?:%s)(?=\b)" % (names,)
    return TokenSpec("DAY", pattern)
# numeric
# ----------------------------------
def _safe_float(x: str, *args) -> int:
return float(x.replace(",", ""))
def _safe_int(x: str, *args) -> int:
return int(x.replace(",", ""))
@cache
def number() -> TokenSpec:
    """Return a spec for floating point numbers.

    The pattern requires an 'e' or '.' plus trailing digits, so plain
    integers are left for the INTEGER spec.
    """
    return TokenSpec("NUMBER", r"-?\d[\d_]*[e\.]-?\d+", _safe_float)


@cache
def integer() -> TokenSpec:
    """Return a spec for integers.

    The lookbehinds/lookaheads reject digits that are part of a float,
    an exponent, or an adjacent word, and tolerate ','/'_' separators.
    """
    return TokenSpec(
        "INTEGER",
        r"(?<![\.\d\w])(?<!e-)\-?\d(?:[\,_]\d)?\d*(?!\.\d)(?![\d_e])\b",
        _safe_int,
    )
# text
# ----------------------------------
@cache
def alphanum(
    subtype: Optional[Iterable[Tuple[str, Tuple[str, ...]]]] = None
) -> TokenSpec:
    """Return a spec for alphanumeric words. Includes hyphens.

    NOTE(review): because of ``@cache``, ``subtype`` must be hashable
    (e.g., a tuple of tuples, not a list) -- confirm callers comply.
    """
    if subtype:
        subtype = build_subtype_assignment(subtype)
    return TokenSpec(
        "ALPHANUM",
        r"\b[\w\d\-]+\b",
        None,
        subtype=subtype,
    )


@cache
def newline() -> TokenSpec:
    """Return a spec for newlines (with or without a carriage return)."""
    return TokenSpec("NEWLINE", r"\r?\n", None)


@cache
def punctuation() -> TokenSpec:
    """Return a spec for punctuation, e.g., non-word, non-whitespace."""
    return TokenSpec("PUNCTUATION", r"[^\w\s]+", None)


@cache
def quote() -> TokenSpec:
    """Return a spec for returning quoted strings.

    Matches double quotes only and does not handle escaped quotes
    inside the string.
    """
    return TokenSpec("QUOTE", r"\"[^\"]*\"")


@cache
def search_term() -> TokenSpec:
    """Return a spec for search terms in the format 'key=val' or 'key:val'.

    The inner named groups (term_key / term_op / term_value) expose the
    parsed pieces of each match.
    """
    return TokenSpec(
        "TERM",
        r"(?P<term_key>[\w\-]+)(?P<term_op>[:=><]+)(?P<term_value>[\w\-\.]+\b)(?![:])",
    )


@cache
def uuid_string() -> TokenSpec:
    """Return a spec for UUID v4 (8-4-4-4-12 hex digits)."""
    return TokenSpec(
        "UUID",
        r"[\da-fA-F]{8}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{4}-[\da-fA-F]{12}",
    )


@cache
def word(subtype: Optional[Iterable[Tuple[str, Tuple[str, ...]]]] = None) -> TokenSpec:
    """Return a spec for words.

    NOTE: Matches any consecutive letters, optionally with one apostrophe
    (e.g. "don't").

    NOTE(review): as with ``alphanum``, ``@cache`` requires a hashable
    ``subtype`` argument -- confirm callers comply.
    """
    if subtype:
        subtype = build_subtype_assignment(subtype)
    return TokenSpec(
        "WORD",
        r"[A-Za-z]+(?:\'[A-Za-z]+)?",
        None,
        subtype=subtype,
    )
# __END__
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.
    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)
    def calculate_mean(self):
        """Function to calculate the mean of the data set.
        Args:
            None
        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean
    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.
        Args:
            sample (bool): whether the data represents a sample or population
        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n-1 for a sample, n for a population
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev
    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.
        Args:
            None
        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.
        Args:
            x (float): point for calculating the probability density function
        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range
        Args:
            n_spaces (int): number of data points
        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev
        min_range = min(self.data)
        max_range = max(self.data)
        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2,sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: previously re-labelled axes[0] a second time, leaving the
        # PDF subplot (axes[1]) without a y-axis label
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y
    def __add__(self, other):
        """Function to add together two Gaussian distributions
        Args:
            other (Gaussian): Gaussian instance
        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # variances (not stdevs) add for independent Gaussians
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result
    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance
        Args:
            None
        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from pathlib import Path
from invoke import task
from jinja2 import Template
@task
def lint(c):
    """Run black (formatter) and pylint over each listed package."""
    for system in ["rys"]:
        c.run(f"python3 -m black {system}")
        c.run(f"python3 -m pylint {system}")
@task
def test(c):
    """Run the unittest suite discovered under ``tests`` verbosely."""
    c.run("python3 -m unittest discover tests 'test_*' -v")
@task(name="migrate")
def migrate_requirements(c):
    """Copy requirements from the requirements.txt file to pyproject.toml.

    Requirement lines are bucketed by section: a ``# <name>`` comment line
    whose name matches a known bucket switches where following lines go.
    """
    lines = Path("requirements.txt").read_text().split("\n")
    requirements = {"rys": [], "test": [], "doc": [], "graphical": [], "dev": []}
    current = "rys"
    for line in lines:
        if line.startswith("#"):
            candidate = line[1:].lower().strip()
            if candidate in requirements.keys():
                current = candidate
            continue
        if line.strip() == "":
            continue
        # strip all internal whitespace, e.g. "pkg >= 1.0" -> "pkg>=1.0"
        requirements[current].append("".join(line.split()))
    # render the pyproject.toml template with the collected groups
    template = Template(Path("docs/templates/pyproject.toml").read_text())
    Path("pyproject.toml").write_text(template.render(requirements=requirements))
@task
def release(c, version):
    """Cut a release: bump the version, branch, tag and merge (git-flow style).

    Args:
        version: which component to bump — "major", "minor" or "patch".
    """
    if version not in ["minor", "major", "patch"]:
        print("Version can be either major, minor or patch.")
        return
    # imported here so the bump reads the current on-disk version
    from rys import __version_info__, __version__
    _major, _minor, _patch = __version_info__
    if version == "patch":
        _patch = _patch + 1
    elif version == "minor":
        _minor = _minor + 1
        _patch = 0
    elif version == "major":
        _major = _major + 1
        _minor = 0
        _patch = 0
    # create the release branch off dev and rewrite the package version
    c.run(f"git checkout -b release-{_major}.{_minor}.{_patch} dev")
    c.run(f"sed -i 's/{__version__}/{_major}.{_minor}.{_patch}/g' rys/__init__.py")
    print(f"Update the readme for version {_major}.{_minor}.{_patch}.")
    input("Press enter when ready.")
    c.run(f"git add -u")
    c.run(f'git commit -m "Update changelog version {_major}.{_minor}.{_patch}"')
    c.run(f"git push --set-upstream origin release-{_major}.{_minor}.{_patch}")
    # merge into main and tag the release
    c.run(f"git checkout main")
    c.run(f"git merge --no-ff release-{_major}.{_minor}.{_patch}")
    c.run(f'git tag -a {_major}.{_minor}.{_patch} -m "Release {_major}.{_minor}.{_patch}"')
    c.run(f"git push")
    # merge back into dev and clean up the release branch
    c.run(f"git checkout dev")
    c.run(f"git merge --no-ff release-{_major}.{_minor}.{_patch}")
    c.run(f"git push")
    c.run(f"git branch -d release-{_major}.{_minor}.{_patch}")
    c.run(f"git push origin --tags")
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional
import web3
from rysk_client.src.collateral import CollateralFactory
from rysk_client.src.constants import (ARBITRUM_GOERLI, CHAINS_TO_SUBGRAPH_URL,
USDC_MULTIPLIER, WETH_MULTIPLIER, Chain)
from rysk_client.src.crypto import EthCrypto
from rysk_client.src.operation_factory import OperationFactory
from rysk_client.src.order_side import OrderSide
from rysk_client.src.pnl_calculator import PnlCalculator, Trade
from rysk_client.src.position_side import PositionSide
from rysk_client.src.rysk_option_market import (MarketFactory, OptionChain,
RyskOptionMarket)
from rysk_client.src.subgraph import SubgraphClient
from rysk_client.src.utils import get_contract, get_logger
from rysk_client.web3_client import Web3Client, print_operate_tuple
# maximum premium slippage tolerated when pricing buys/sells (15%)
ALLOWED_SLIPPAGE = 0.15
# sentinel value returned by the registry for a series that has never been
# issued: all-zero numeric fields and null addresses
NULL_SERIES = (
    0,
    0,
    False,
    "0x0000000000000000000000000000000000000000",
    "0x0000000000000000000000000000000000000000",
    "0x0000000000000000000000000000000000000000",
)
class ApprovalException(Exception):
    """
    Exception raised when the approval fails.

    NOTE(review): not raised anywhere in this file — presumably part of the
    public API for callers; confirm before removing.
    """
def from_timestamp(date_string):
"""Parse a timestamp. 2023-05-31T08:00:00.000Z"""
return datetime.fromtimestamp(int(date_string)).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def to_human_format(row):
    """
    Format the row to align to the ccxt unified client.
    'ETH-16MAY23-1550-C'
    """
    expiry = row["expiration_datetime"]
    day = expiry.strftime("%d")
    month_code = expiry.strftime("%b").upper()
    year = str(expiry.year)[2:]
    strike_price = str(int(int(row["strike"]) / WETH_MULTIPLIER))
    flag = "P" if row["isPut"] else "C"
    return f"ETH-{day}{month_code}{year}-{strike_price}-{flag}"
# ccxt-style static metadata shared by every option market
# NOTE(review): not referenced elsewhere in this file — confirm external use.
DEFAULT_MARKET = {
    "base": "ETH",
    "baseId": "ETH",
    "contract": True,
    "contractSize": 1.0,
    "spot": False,
    "swap": False,
    "future": False,
    "type": "option",
    "linear": False,
    "inverse": True,
    "maker": 0.0003,
    "taker": 0.0003,
}
@dataclass
class RyskClient: # noqa: R0902
    """
    Client for the rysk contracts.
    """
    # caches populated lazily by fetch_markets / fetch_tickers
    _markets: List[RyskOptionMarket]
    _tickers: List[Dict[str, Any]]
    _otokens: Dict[str, Dict[str, Any]]
    web3_client: Web3Client
    def __init__(
        self,
        address: Optional[str] = None,
        private_key: Optional[str] = None,
        logger=None,
        chain: Chain = ARBITRUM_GOERLI,
        verbose: bool = True,
    ):
        """Initialise the client, its web3 connection and subgraph client.

        Args:
            address: account address used for signing and queries (optional).
            private_key: private key matching ``address`` (optional).
            logger: logger instance; a default one is created if omitted.
            chain: target chain configuration.
            verbose: when True, operate tuples are pretty-printed.
        """
        self._markets: List[RyskOptionMarket] = []
        self._tickers = []
        self._otokens = {}
        self._option_chain: OptionChain
        self._crypto = EthCrypto(address, private_key)
        self._logger = logger or get_logger()
        self.web3_client = Web3Client(
            self._logger,
            self._crypto,
            chain=chain,
            verbose=verbose,
        )
        self.subgraph_client = SubgraphClient(CHAINS_TO_SUBGRAPH_URL[chain])
        self._logger.info(
            f"Rysk client initialized and connected to the blockchain at {self.web3_client.web3.provider}"
        )
        self._verbose = verbose
        self.operation_factory = OperationFactory(chain)
        self.collateral_factory = CollateralFactory(chain)
        self.market_factory = MarketFactory(chain)
    def _sign_and_submit(self, txn, retries=3, backoff=2):
        """
        Sign and submit transaction.
        retries: number of retries
        backoff: backoff in seconds (doubled on each retry)

        Returns the transaction hash hex string; raises ValueError if the
        receipt reports failure and retries are exhausted.
        """
        try:
            # refresh the nonce on every attempt
            txn["nonce"] = self.web3_client.web3.eth.get_transaction_count(
                self._crypto.address
            )
            signed_txn = self.web3_client.web3.eth.account.sign_transaction(
                txn, private_key=self._crypto.private_key
            )
            tx_hash = self.web3_client.web3.eth.send_raw_transaction(
                signed_txn.rawTransaction
            )
            self._logger.debug(f"Transaction hash: {tx_hash.hex()}")
            # wait up to 180s for the transaction to be mined
            receipt = self.web3_client.web3.eth.wait_for_transaction_receipt(
                tx_hash, 180
            )
            # we need to check the receipt to see if the transaction was successful
            if receipt["status"] == 0:
                self._logger.error(f"Transaction failed: {receipt}")
                raise ValueError("Transaction Failed!")
            self._logger.debug(f"Transaction successful: {receipt}")
            return tx_hash.hex()
        except ValueError as error:
            # retry with exponential backoff; re-raise once exhausted
            if retries > 0:
                self._logger.warning(
                    f"Error {error} while submitting transaction. Retrying..."
                )
                time.sleep(backoff)
                return self._sign_and_submit(
                    txn, retries=retries - 1, backoff=backoff * 2
                )
            raise error
    def fetch_markets(self) -> List[Dict[str, Any]]:
        """
        Fetches the markets from the DHV Lens contracts and caches them on
        ``self._markets``; returns them as JSON dicts.
        """
        raw_data = self.web3_client.get_option_chain()
        self._option_chain = OptionChain(raw_data)
        self._markets = [
            RyskOptionMarket.from_option_drill(*data)
            for data in self._option_chain.active_markets
        ]
        return [market.to_json() for market in self._markets] # type: ignore
    def to_checksum_address(self, address):
        """Convert an address to a checksum address."""
        return self.web3_client.web3.toChecksumAddress(address)
def fetch_tickers(
self, market: Optional[str] = None, is_active: Optional[bool] = True
) -> List[Dict[str, Any]]:
"""
Fetchs the ticker from the beyond pricer smart contract.
"""
if not self._markets:
self.fetch_markets()
tradeable = filter(lambda x: x["active"] is is_active, self._markets) # type: ignore
if market:
tradeable = filter(lambda x: x["id"] == market, tradeable) # type: ignore
return [market.to_json() for market in self._markets] # type: ignore
    @property
    def otokens(self) -> Dict[str, Dict[str, Any]]:
        """
        Returns a dictionary of the otokens, keyed by ticker info id and
        built lazily on first access.
        """
        if not self._otokens:
            self.fetch_tickers()
            # NOTE(review): fetch_tickers does not populate self._tickers in
            # this file, so this mapping would be built from an empty list;
            # callers also look symbols up in it (_parse_position) — confirm
            # where _tickers is filled and what its keys should be.
            self._otokens = {ticker["info"]["id"]: ticker for ticker in self._tickers}
        return self._otokens
    def fetch_positions(self, expired=False) -> List[Dict[str, Any]]:
        """
        Fetches the positions from the subgraph.

        Args:
            expired: when True return only expired positions, otherwise only
                positions whose expiry is still in the future.

        Raises:
            ValueError: if the client was created without an account address.
        """
        if self._crypto.address is None:
            raise ValueError("No account address was provided.")
        longs = self.subgraph_client.query_longs(address=self._crypto.address)
        shorts = self.subgraph_client.query_shorts(address=self._crypto.address)
        parsed_short = [self._parse_position(pos, PositionSide.SHORT) for pos in shorts]
        parsed_longs = [self._parse_position(pos, PositionSide.LONG) for pos in longs]
        # split on expiry relative to "now"
        if expired:
            positions = filter(
                lambda x: x["datetime"] <= datetime.now(),
                parsed_longs + parsed_short,
            )
        else:
            positions = filter(
                lambda x: x["datetime"] > datetime.now(),
                parsed_longs + parsed_short,
            )
        return list(positions)
    def _parse_position(
        self, position: Dict[str, Any], side: PositionSide
    ) -> Dict[str, Any]:
        """
        Parse the position data from the subgraph into a unified format.

        Builds realised/unrealised PnL from the bought/sold transactions and,
        when the market is still active, marks the position to the current
        book price.
        """
        position["expiration_datetime"] = datetime.fromtimestamp(
            int(position["oToken"]["expiryTimestamp"])
        )
        # subgraph strikes are 1e8-scaled; rescale to the 1e18 convention
        position["strike"] = float(position["oToken"]["strikePrice"]) * 1e10
        position["isPut"] = position["oToken"]["isPut"]
        pnl_calculator = PnlCalculator()
        # buys are positive amounts/costs, sells negative
        buys = [
            Trade(
                int(order["amount"]) / WETH_MULTIPLIER,
                total_cost=int(order["premium"]) / USDC_MULTIPLIER,
            )
            for order in position["optionsBoughtTransactions"]
        ]
        sells = [
            Trade(
                -int(order["amount"]) / WETH_MULTIPLIER,
                total_cost=-int(order["premium"]) / USDC_MULTIPLIER,
            )
            for order in position["optionsSoldTransactions"]
        ]
        pnl_calculator.add_trades(buys + sells)
        symbol = to_human_format(position)
        if symbol in self.otokens:
            self._logger.debug(f"Found `{symbol}` in the otokens list.")
            # mark a long to the ask and a short to the bid
            book_side = "ask" if side == PositionSide.LONG else "bid"
            price = self.otokens[symbol][book_side]
            pnl_calculator.update_price(price)
        else:
            self._logger.debug(
                f"Could not find `{symbol}` in the otokens list. Maket is probably not active."
            )
        result = {
            "id": position["id"],
            "symbol": symbol,
            "timestamp": int(position["oToken"]["expiryTimestamp"]) * 1000,
            "datetime": datetime.fromtimestamp(
                int(position["oToken"]["expiryTimestamp"])
            ),
            "initialMarginPercentage": None,
            "realizedPnl": pnl_calculator.realised_pnl,
            "unrealizedPnl": pnl_calculator.unrealised_pnl,
            "contractSize": pnl_calculator.position_size,
            "side": side.value,
            "size": pnl_calculator.position_size,
            "info": position,
            "entryPrice": pnl_calculator.average_price,
        }
        return result
    def create_order(
        self,
        symbol: str,
        amount: float,
        side: str = "buy",
    ) -> Dict[str, Any]:
        """Create a market order, submit it and wait for the receipt.

        Args:
            symbol: option symbol, e.g. "ETH-16MAY23-1550-C".
            amount: number of contracts.
            side: "buy" or "sell".

        Raises:
            ValueError: if ``side`` is not a valid order side.
        """
        if side.upper() not in OrderSide.__members__:
            raise ValueError("Invalid order side")
        # NOTE(review): validation upper-cases but dispatch compares the raw
        # string to OrderSide.BUY.value — an upper-cased "BUY" would pass
        # validation yet fall through to the sell branch; confirm intended.
        if side == OrderSide.BUY.value:
            transaction = self.buy_option(symbol, amount)
        else:
            transaction = self.sell_option(symbol, amount)
        submitted = self._sign_and_submit(transaction)
        self.web3_client.web3.eth.wait_for_transaction_receipt(submitted)
        self._logger.info(f"Submitted transaction with hash: {submitted}")
        return {
            "id": submitted,
            "symbol": symbol,
            "datetime": datetime.now(),
        }
def get_market(self, symbol: str) -> RyskOptionMarket:
"""
Returns the market information.
"""
if not self._markets:
self.fetch_markets()
for market in self._markets:
if market.name == symbol:
return market
raise ValueError(f"Could not find market {symbol}.")
    def buy_option( # noqa: R0914
        self,
        market: str,
        amount: float,
        collateral_asset: str = "USDC",
        leverage: float = 1,
    ):
        """
        Create a buy option order.

        Builds (but does not submit) the `operate` transaction: checks and,
        if needed, creates the collateral approval, prices the option, and
        assembles the operate tuple with slippage headroom.

        Returns:
            An unsigned transaction dict for the option exchange.

        Raises:
            ValueError: unsupported collateral, or rejected contract params.
        """
        # now we can try to buy the option
        if not self._markets:
            self._logger.info("Fetching Tickers.")
            self.fetch_tickers() # type: ignore
        self._logger.info(f"Fetching acceptable premium for {market}")
        rysk_option_market = self.get_market(market)
        rysk_option_market.collateral = self.collateral_factory.from_symbol(
            collateral_asset
        )
        self._logger.info(
            f"Buying {amount} of {market} with {collateral_asset} collateral @ {leverage}x leverage."
        )
        # we first check the approval amount of the collateral asset
        if not self.collateral_factory.is_supported(collateral_asset):
            raise ValueError(
                f"Collateral asset {collateral_asset} is not supported by the protocol."
            )
        collateral_contract = get_contract(
            collateral_asset.lower(), self.web3_client.web3, self.web3_client.chain
        )
        collateral_approved = self.web3_client.is_approved(
            collateral_contract,
            self.web3_client.option_exchange.address,
            str(self._crypto.address),
            int(amount * WETH_MULTIPLIER),
        )
        self._logger.info(f"Collateral approved: {collateral_approved}.")
        if not collateral_approved:
            self._logger.info(
                f"Approving {collateral_asset} collateral for {self.web3_client.option_exchange.address}"
            )
            txn = self.web3_client.create_approval(
                collateral_contract,
                self.web3_client.option_exchange.address,
                str(self._crypto.address),
                int(amount * WETH_MULTIPLIER),
            )
            # we submit and sign the transaction
            result = self._sign_and_submit(txn)
            self._logger.info(f"Transaction successful with hash: {result}")
        series = self.market_factory.to_series(rysk_option_market)
        otoken_address = self.web3_client.get_otoken(series)
        balance = self.web3_client.get_otoken_balance(otoken_address)
        self._logger.info(f"Balance of {otoken_address}: {balance}")
        self._logger.info(
            f"Fetching market data for {market}. Otoken address: {otoken_address}"
        )
        _amount = amount * WETH_MULTIPLIER
        acceptable_premium = self.web3_client.get_options_prices( # type: ignore
            series,
            rysk_option_market.dhv,
            side=OrderSide.BUY.value,
            amount=_amount,
        ) # pylint: disable=E1120
        self._logger.info(
            f"Acceptable premium: ${acceptable_premium / USDC_MULTIPLIER:.2f}"
        )
        # we check if we need to issue the option
        issuance_required = self.is_issuance_required(otoken_address)
        # pad the premium upwards so small price moves do not revert the buy
        operate_tuple = self.operation_factory.buy(
            int(acceptable_premium * (1 + ALLOWED_SLIPPAGE)),
            owner_address=self._crypto.address, # pylint: disable=E1120
            amount=int(_amount),
            option_market=rysk_option_market,
            issuance_required=issuance_required,
        )
        if self._verbose:
            print_operate_tuple([operate_tuple])
        try:
            txn = self.web3_client.option_exchange.functions.operate(
                operate_tuple
            ).build_transaction({"from": self._crypto.address})
        except web3.exceptions.ContractCustomError as error: # pylint: disable=E1101
            self._logger.error("Transaction failed due to incorrect parameters.")
            raise ValueError(error) from error
        return txn
def is_issuance_required(self, otoken_address: str) -> bool:
"""
Returns True if an issuance is required.
"""
result = self.web3_client.get_series_info(otoken_address)
return result == NULL_SERIES
    def sell_option( # noqa
        self,
        market: str,
        amount: float,
        collateral_asset: str = "weth",
        leverage: float = 1,
    ):
        """
        Create a sell option order.

        Selects the collateral implied by the option type (USDC for puts,
        WETH for calls), sizes and approves the collateral, picks or creates
        the user's vault for the otoken, and submits the operate call.
        """
        self._logger.info(
            f"Selling {amount} of {market} with {collateral_asset} collateral @ {leverage}x leverage."
        )
        if not self._markets:
            self._logger.info("Fetching Tickers.")
            self.fetch_tickers()
        _amount = amount * WETH_MULTIPLIER
        rysk_option_market = self.get_market(market)
        if rysk_option_market.is_put:
            rysk_option_market.collateral = self.collateral_factory.USDC
            # we need to approve the collateral: strike * amount in USDC terms
            amount_to_approve = int(
                rysk_option_market.strike / WETH_MULTIPLIER * amount * USDC_MULTIPLIER
            )
            contract = self.web3_client.usdc
            collateral_asset = "usdc"
        else:
            rysk_option_market.collateral = self.collateral_factory.WETH
            amount_to_approve = int(_amount * (1 + ALLOWED_SLIPPAGE))
            contract = self.web3_client.settlement_weth
        series = self.market_factory.to_series(rysk_option_market)
        acceptable_premium = self.web3_client.get_options_prices(
            series,
            dhv_exposure=rysk_option_market.dhv,
            amount=_amount,
            side=OrderSide.SELL.value,
            collateral=collateral_asset,
        )
        self._logger.info(
            f"Acceptable premium: ${acceptable_premium / USDC_MULTIPLIER:.2f}"
        )
        user_vaults = self.web3_client.fetch_user_vaults(self._crypto.address)
        otoken_id = self.web3_client.get_otoken(series)
        self._logger.info(f"Option Otoken id is {otoken_id}")
        issue_new_vault = False
        # vault ids are 1-based; create a new vault if none holds this otoken
        if otoken_id not in set(i[1] for i in user_vaults):
            new_vault_id = len(user_vaults) + 1
            self._logger.info(
                f"Necessary to create a vault for the user. New vault id is {new_vault_id}"
            )
            vault_id = new_vault_id
            issue_new_vault = True
        else:
            # we need to use the vault
            vault_id = [f[1] for f in user_vaults].index(otoken_id) + 1
            self._logger.info(f"Using existing vault id user_vaults {vault_id}")
        # we check the approval of the amount
        self._logger.info(
            f"Checking approval of {amount_to_approve / WETH_MULTIPLIER} of {collateral_asset}"
        )
        allowance = contract.functions.allowance(
            self._crypto.address,
            self.web3_client.option_exchange.address,
        ).call()
        self._logger.info(f"Allowance is {allowance}")
        if allowance < amount_to_approve:
            self._logger.info("Need to approve more collateral.")
            approve_tx = self.web3_client.create_approval(
                contract=contract,
                spender=self.web3_client.option_exchange.address,
                owner=self._crypto.address,
                amount=amount_to_approve,
            )
            self._logger.debug(f"Approve tx is {approve_tx}")
            tx_hash = self._sign_and_submit(approve_tx)
            self._logger.info(f"Tx hash is {tx_hash}")
            # confirm the allowance incremented
            allowance = contract.functions.allowance(
                self._crypto.address,
                self.web3_client.option_exchange.address,
            ).call()
            self._logger.info(f"Allowance is {allowance}")
        otoken_address = self.web3_client.get_otoken(series)
        # accept up to a 5% worse premium than quoted when selling
        operate_tuple = self.operation_factory.sell(
            int(acceptable_premium * 0.95),
            owner_address=self._crypto.address, # pylint: disable=E1120
            exchange_address=self.web3_client.option_exchange.address,
            otoken_address=otoken_address,
            amount=int(_amount),
            vault_id=int(vault_id),
            collateral_amount=int(amount_to_approve),
            rysk_option_market=rysk_option_market,
            issue_new_vault=issue_new_vault,
        )
        return self.web3_client._operate( # pylint: disable=protected-access
            operate_tuple, self.web3_client.option_exchange
        )
    def watch_trades(self):
        """Watch trades (delegates to the web3 client's trade watcher)."""
        self._logger.info("Watching trades...")
        self.web3_client.watch_trades()
    def fetch_balances(self):
        """Fetch the account's token balances from the web3 client."""
        self._logger.info("Fetching balances...")
        return self.web3_client.get_balances()
    def settle_vault(self, vault_id):
        """Settle the given vault; signs and submits the transaction."""
        self._logger.info(f"Settling vault {vault_id}...")
        txn = self.web3_client.settle_vault(vault_id=vault_id)
        return self._sign_and_submit(txn)
    def redeem_otoken(self, otoken_id: str, amount: int):
        """Redeem ``amount`` of the given otoken; signs and submits."""
        self._logger.info(f"Redeeming otoken {otoken_id}...")
        txn = self.web3_client.redeem_otoken(otoken_id=otoken_id, amount=amount)
        return self._sign_and_submit(txn)
    def redeem_market(self, market: str):
        """Redeem the full otoken balance held for a market symbol."""
        self._logger.info(f"Redeeming market {market}...")
        rysk_option_market = RyskOptionMarket.from_str(market)
        series = self.market_factory.to_series(rysk_option_market)
        otoken_address = self.web3_client.get_otoken(series)
        # otoken balances are 1e8-scaled on-chain
        amount = self.web3_client.get_otoken_balance(otoken_address) / 10**8
        return self.redeem_otoken(otoken_address, amount)
    @property
    def active_markets(self):
        """Return the cached markets keyed by name, fetching them if needed."""
        if not self._markets:
            self.fetch_markets()
        return {market.name: market for market in self._markets}
    def close_long(self, market: str, size: float):
        """Close a long position; pass ``size=None`` to close the full balance.

        NOTE(review): when ``size`` is None the premium calculation below
        still multiplies by ``size`` and would raise a TypeError — confirm
        whether it should use ``_amount`` instead.
        """
        self._logger.info(f"Closing long {market}...")
        # as we are long, we use usdc as collateral
        collateral_asset = self.collateral_factory.USDC
        _market = self.get_market(market)
        _market.collateral = collateral_asset
        series = self.market_factory.to_series(_market)
        otoken_address = self.web3_client.get_otoken(series)
        if size is None:
            # rescale the 1e8 otoken balance to the 1e18 amount convention
            _amount = self.web3_client.get_otoken_balance(otoken_address) * 10**10
        else:
            _amount = size * WETH_MULTIPLIER
        if _market.name not in self.active_markets:
            raise ValueError(f"{market} is not an active market...")
        # accept a premium up to the slippage allowance below the ask
        acceptable_premium = int(_market.ask * (1 - ALLOWED_SLIPPAGE) * size)
        # we check the approval
        otoken_contract = self.web3_client.get_otoken_contract(otoken_address)
        # we get the balance
        balance = self.web3_client.get_otoken_balance(otoken_address)
        self._logger.info(f"Balance for {market} is {balance / 10**8}")
        if balance == 0:
            raise ValueError(f"Nothing to close for {market}...")
        if not self.web3_client.is_approved(
            otoken_contract,
            self.web3_client.option_exchange.address,
            self._crypto.address,
            int(10**8 * _amount),
        ):
            self._logger.info(f"Approving {market}...")
            txn = self.web3_client.create_approval(
                otoken_contract,
                self.web3_client.option_exchange.address,
                self._crypto.address, # type: ignore
                int(10**8 * _amount),
            )
            self._sign_and_submit(
                txn,
            )
        if _amount == 0:
            raise ValueError(f"Nothing to close for {market}...")
        txn = self.web3_client.close_long(
            acceptable_premium=acceptable_premium,
            amount=_amount,
            otoken_address=self.web3_client.web3.toChecksumAddress(otoken_address),
        )
        return self._sign_and_submit(txn)
    def close_short(self, market: str, size: float):
        """
        CLose short.

        Finds the user's vault holding the otoken, computes the collateral
        to release, and submits the close-short operate transaction.
        """
        self._logger.info(f"Closing short {market}...")
        # as we are short, we ensure we are covered
        rysk_option_market = self.active_markets[market]
        if rysk_option_market.is_put:
            rysk_option_market.collateral = self.collateral_factory.USDC
        else:
            rysk_option_market.collateral = self.collateral_factory.WETH
        series = self.market_factory.to_series(rysk_option_market)
        otoken_address = self.web3_client.get_otoken(series)
        if size is None:
            raise NotImplementedError(
                "Closing short with no size is not implemented yet"
            )
        _amount = size * WETH_MULTIPLIER
        if rysk_option_market.name not in self.active_markets:
            raise ValueError(f"{market} is not an active market...")
        # accept paying up to the slippage allowance above the bid
        acceptable_premium = int(rysk_option_market.bid * (1 + ALLOWED_SLIPPAGE) * size)
        user_vaults = self.web3_client.fetch_user_vaults(self._crypto.address)
        otoken_id = self.web3_client.get_otoken(series)
        self._logger.info(f"Option Otoken id is {otoken_id}")
        positions = [f for f in user_vaults if f[1] == otoken_id]
        if len(positions) == 0:
            raise ValueError(f"Nothing to close for {market}...")
        if len(positions) > 1:
            raise ValueError(f"Multiple positions for {market}...")
        vault_id = positions[0][0]
        if rysk_option_market.is_put:
            # here we retrieve how much collateral we get for the amount of options
            # we basically need strike * amount
            strike = rysk_option_market.strike / WETH_MULTIPLIER
            collateral_amount = int(strike * size * USDC_MULTIPLIER)
        else:
            collateral_amount = int(_amount)
        txn = self.web3_client.close_short(
            acceptable_premium=acceptable_premium,
            amount=int(_amount),
            otoken_address=self.web3_client.web3.toChecksumAddress(otoken_address),
            collateral_asset=rysk_option_market.collateral,
            collateral_amount=collateral_amount,
            vault_id=vault_id,
        )
        return self._sign_and_submit(txn)
    def fetch_trades(self):
        """List trades.

        Rebuilds a PnL calculation per position from its subgraph buy/sell
        transactions, marks it to the current mid price when the ticker is
        available, logs a summary, and returns the flat list of trades.
        """
        self._logger.info("Listing trades for every position in the subgraph...")
        positions = self.fetch_positions()
        tickers = {ticker["id"]: ticker for ticker in self.fetch_tickers()}
        all_trades = []
        for position in positions:
            symbol = position["symbol"]
            pnl_manager = PnlCalculator()
            # buys enter as positive amounts/costs, sells as negative
            pnl_manager.add_trades(
                [
                    Trade(
                        int(order["amount"]) / WETH_MULTIPLIER,
                        total_cost=int(order["premium"]) / USDC_MULTIPLIER,
                        market=symbol,
                        trade_id=order["transactionHash"],
                    )
                    for order in position["info"]["optionsBoughtTransactions"]
                ]
            )
            pnl_manager.add_trades(
                [
                    Trade(
                        -int(order["amount"]) / WETH_MULTIPLIER,
                        total_cost=-int(order["premium"]) / USDC_MULTIPLIER,
                        market=symbol,
                        trade_id=order["transactionHash"],
                    )
                    for order in position["info"]["optionsSoldTransactions"]
                ]
            )
            all_trades += pnl_manager.trades
            if symbol not in tickers:
                self._logger.warning(f"Could not find {symbol} in the tickers.")
                _rate = pnl_manager.current_price
            else:
                # mark to the bid/ask midpoint
                _rate = (tickers[symbol]["ask"] + tickers[symbol]["bid"]) / 2
            pnl_manager.update_price(_rate)
            self._logger.info(
                f"Position {position['symbol']} has a realised pnl of {pnl_manager.realised_pnl}"
            )
            self._logger.info(
                f"Position {position['symbol']} has a unrealised pnl of {pnl_manager.unrealised_pnl}"
            )
            self._logger.info(
                f"Position {position['symbol']} has a position size of {pnl_manager.position_size}"
            )
            self._logger.info(
                f"Position {position['symbol']} has an average price of {pnl_manager.average_price}"
            )
            self._logger.info(
                f"Position {position['symbol']} has a current price of {pnl_manager.current_price}"
            )
        return all_trades
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.opyn_option_registry import \
PUBLIC_ID
class OpynOptionRegistry(Contract): # pylint: disable=too-many-public-methods
    """The scaffold contract class for a smart contract."""
    contract_id = PUBLIC_ID
    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        # scaffold hook: intentionally unimplemented
        del ledger_api, contract_address, kwargs
        raise NotImplementedError
    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        # scaffold hook: intentionally unimplemented
        del ledger_api, contract_address, kwargs
        raise NotImplementedError
    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        # scaffold hook: intentionally unimplemented
        del ledger_api, contract_address, kwargs
        raise NotImplementedError
    @classmethod
    def address_book(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'address_book' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.addressBook().call()
        return {"address": result}
    @classmethod
    def authority(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'authority' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.authority().call()
        return {"address": result}
    @classmethod
    def call_lower_health_factor(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'call_lower_health_factor' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.callLowerHealthFactor().call()
        return {"int": result}
    @classmethod
    def call_upper_health_factor(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'call_upper_health_factor' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.callUpperHealthFactor().call()
        return {"int": result}
@classmethod
def check_vault_health(
cls, ledger_api: LedgerApi, contract_address: str, vault_id: int
) -> JSONLike:
"""Handler method for the 'check_vault_health' requests."""
instance = cls.get_instance(ledger_api, contract_address)
result = instance.functions.checkVaultHealth(vault_id=vault_id).call()
return {
"isBelowMin": result,
"isAboveMax": result,
"healthFactor": result,
"upperHealthFactor": result,
"collatRequired": result,
"collatAsset": result,
}
    @classmethod
    def collateral_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'collateral_asset' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.collateralAsset().call()
        return {"address": result}
    @classmethod
    def get_collateral(
        cls, ledger_api: LedgerApi, contract_address: str, series: tuple, amount: int
    ) -> JSONLike:
        """Handler method for the 'get_collateral' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getCollateral(series=series, amount=amount).call()
        return {"int": result}
    @classmethod
    def get_issuance_hash(
        cls, ledger_api: LedgerApi, contract_address: str, _series: tuple
    ) -> JSONLike:
        """Handler method for the 'get_issuance_hash' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getIssuanceHash(_series=_series).call()
        return {"str": result}
    @classmethod
    def get_otoken(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        underlying: Address,
        strike_asset: Address,
        expiration: int,
        is_put: bool,
        strike: int,
        collateral: Address,
    ) -> JSONLike:
        """Handler method for the 'get_otoken' requests (read-only call).

        NOTE(review): keyword names like ``strike_asset``/``is_put`` are
        passed through to the contract ABI — confirm they match the
        Solidity parameter names.
        """
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getOtoken(
            underlying=underlying,
            strike_asset=strike_asset,
            expiration=expiration,
            is_put=is_put,
            strike=strike,
            collateral=collateral,
        ).call()
        return {"address": result}
    @classmethod
    def get_series(
        cls, ledger_api: LedgerApi, contract_address: str, _series: tuple
    ) -> JSONLike:
        """Handler method for the 'get_series' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getSeries(_series=_series).call()
        return {"address": result}
    @classmethod
    def get_series_address(
        cls, ledger_api: LedgerApi, contract_address: str, issuance_hash: str
    ) -> JSONLike:
        """Handler method for the 'get_series_address' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getSeriesAddress(issuance_hash=issuance_hash).call()
        return {"address": result}
    @classmethod
    def get_series_info(
        cls, ledger_api: LedgerApi, contract_address: str, series: Address
    ) -> JSONLike:
        """Handler method for the 'get_series_info' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getSeriesInfo(series=series).call()
        return {"tuple": result}
    @classmethod
    def keeper(
        cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
    ) -> JSONLike:
        """Handler method for the 'keeper' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.keeper(var_0).call()
        return {"bool": result}
    @classmethod
    def liquidity_pool(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'liquidity_pool' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.liquidityPool().call()
        return {"address": result}
    @classmethod
    def put_lower_health_factor(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'put_lower_health_factor' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.putLowerHealthFactor().call()
        return {"int": result}
    @classmethod
    def put_upper_health_factor(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'put_upper_health_factor' requests (read-only call)."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.putUpperHealthFactor().call()
        return {"int": result}
@classmethod
def series_info(
cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
) -> JSONLike:
"""Handler method for the 'series_info' requests."""
instance = cls.get_instance(ledger_api, contract_address)
result = instance.functions.seriesInfo(var_0).call()
return {
"expiration": result,
"strike": result,
"is_put": result,
"underlying": result,
"strike_asset": result,
"collateral": result,
}
@classmethod
def vault_count(
cls,
ledger_api: LedgerApi,
contract_address: str,
) -> JSONLike:
"""Handler method for the 'vault_count' requests."""
instance = cls.get_instance(ledger_api, contract_address)
result = instance.functions.vaultCount().call()
return {"int": result}
@classmethod
def vault_ids(
cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
) -> JSONLike:
"""Handler method for the 'vault_ids' requests."""
instance = cls.get_instance(ledger_api, contract_address)
result = instance.functions.vault_ids(var_0).call()
return {"int": result} | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/packages/eightballer/contracts/opyn_option_registry/contract.py | 0.945336 | 0.194865 | contract.py | pypi |
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.option_exchange import \
PUBLIC_ID
class OptionExchange(Contract):  # pylint: disable=too-many-public-methods
    """The scaffold contract class for a smart contract.

    Read-only handler methods for the Rysk option exchange: each classmethod
    binds a web3 contract instance at ``contract_address`` and forwards one
    view-function call, returning the raw result wrapped in a dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.

        Implement this method in the sub class if you want
        to handle the contract requests manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.

        Implement this method in the sub class if you want
        to handle the contract requests manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.

        Implement this method in the sub class if you want
        to handle the contract requests manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def addressbook(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'addressbook' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.addressbook().call()
        return {"address": result}

    @classmethod
    def approved_collateral(
        cls, ledger_api: LedgerApi, contract_address: str, var_0: Address, var_1: bool
    ) -> JSONLike:
        """Handler method for the 'approved_collateral' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.approvedCollateral(var_0, var_1).call()
        return {"bool": result}

    @classmethod
    def authority(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'authority' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.authority().call()
        return {"address": result}

    @classmethod
    def catalogue(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'catalogue' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.catalogue().call()
        return {"address": result}

    @classmethod
    def check_hash(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        option_series: tuple,
        strike_decimal_converted: int,
        is_sell: bool,
    ) -> JSONLike:
        """Handler method for the 'check_hash' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.checkHash(
            optionSeries=option_series,
            strikeDecimalConverted=strike_decimal_converted,
            isSell=is_sell,
        ).call()
        return {"oHash": result}

    @classmethod
    def collateral_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'collateral_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.collateralAsset().call()
        return {"address": result}

    @classmethod
    def fee_recipient(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'fee_recipient' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.feeRecipient().call()
        return {"address": result}

    @classmethod
    def get_delta(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'get_delta' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getDelta().call()
        return {"delta": result}

    @classmethod
    def get_option_details(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        series_address: Address,
        option_series: tuple,
    ) -> JSONLike:
        """Handler method for the 'get_option_details' requests.

        The view returns (address, series-struct tuple, int); unpack the
        components rather than mapping the whole tuple to every key.
        """
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getOptionDetails(
            seriesAddress=series_address, optionSeries=option_series
        ).call()
        # Previously all three keys pointed at the full 3-tuple; index the
        # components, mirroring OToken.get_otoken_details.
        return {"address": result[0], "tuple": result[1], "int": result[2]}

    @classmethod
    def get_pool_denominated_value(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'get_pool_denominated_value' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getPoolDenominatedValue().call()
        return {"int": result}

    @classmethod
    def held_tokens(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        var_0: Address,
        var_1: Address,
    ) -> JSONLike:
        """Handler method for the 'held_tokens' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.heldTokens(var_0, var_1).call()
        return {"int": result}

    @classmethod
    def liquidity_pool(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'liquidity_pool' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.liquidityPool().call()
        return {"address": result}

    @classmethod
    def max_trade_size(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'max_trade_size' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.maxTradeSize().call()
        return {"int": result}

    @classmethod
    def min_trade_size(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'min_trade_size' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.minTradeSize().call()
        return {"int": result}

    @classmethod
    def paused(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'paused' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.paused().call()
        return {"bool": result}

    @classmethod
    def pool_fees(
        cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
    ) -> JSONLike:
        """Handler method for the 'pool_fees' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.poolFees(var_0).call()
        return {"int": result}

    @classmethod
    def pricer(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'pricer' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.pricer().call()
        return {"address": result}

    @classmethod
    def protocol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'protocol' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.protocol().call()
        return {"address": result}

    @classmethod
    def strike_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'strike_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.strikeAsset().call()
        return {"address": result}

    @classmethod
    def swap_router(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'swap_router' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.swapRouter().call()
        return {"address": result}

    @classmethod
    def underlying_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'underlying_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.underlyingAsset().call()
        return {"address": result}

    @classmethod
    def update(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'update' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.update().call()
        return {"int": result}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.user_position_lens import \
PUBLIC_ID
class UserPositionLens(Contract):
    """Read-only wrapper around the user-position lens contract.

    Each classmethod binds a web3 contract instance at ``contract_address``
    and forwards a single view-function call, returning the raw result in a
    one-entry dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def addressbook(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the lens contract's 'addressbook' address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.addressbook().call()}

    @classmethod
    def get_vaults_for_user(
        cls, ledger_api: LedgerApi, contract_address: str, user: Address
    ) -> JSONLike:
        """Fetch every vault held by ``user`` via 'getVaultsForUser'."""
        contract = cls.get_instance(ledger_api, contract_address)
        vaults = contract.functions.getVaultsForUser(user=user).call()
        return {"tuple[...]": vaults}

    @classmethod
    def get_vaults_for_user_and_otoken(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        user: Address,
        otoken: Address,
    ) -> JSONLike:
        """Fetch ``user``'s vaults filtered to ``otoken`` ('getVaultsForUserAndOtoken')."""
        contract = cls.get_instance(ledger_api, contract_address)
        vaults = contract.functions.getVaultsForUserAndOtoken(
            user=user, otoken=otoken
        ).call()
        return {"int": vaults}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.o_token import PUBLIC_ID
class OToken(Contract):
    """Read-only wrapper around an Opyn oToken (ERC20-like option token).

    Each classmethod binds a web3 contract instance at ``contract_address``
    and forwards a single view-function call, returning the raw result in a
    small dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def domain_separator(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the EIP-712 'DOMAIN_SEPARATOR' value."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.DOMAIN_SEPARATOR().call()}

    @classmethod
    def allowance(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        owner: Address,
        spender: Address,
    ) -> JSONLike:
        """Return the ERC20 allowance granted by ``owner`` to ``spender``."""
        contract = cls.get_instance(ledger_api, contract_address)
        granted = contract.functions.allowance(owner=owner, spender=spender).call()
        return {"int": granted}

    @classmethod
    def balance_of(
        cls, ledger_api: LedgerApi, contract_address: str, account: Address
    ) -> JSONLike:
        """Return the ERC20 token balance of ``account``."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.balanceOf(account=account).call()}

    @classmethod
    def collateral_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the otoken's collateral asset address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.collateralAsset().call()}

    @classmethod
    def controller(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the controller contract address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.controller().call()}

    @classmethod
    def decimals(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 decimals of the otoken."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.decimals().call()}

    @classmethod
    def expiry_timestamp(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the option expiry as a unix timestamp ('expiryTimestamp')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.expiryTimestamp().call()}

    @classmethod
    def get_otoken_details(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Unpack the 6-field tuple returned by 'getOtokenDetails'."""
        contract = cls.get_instance(ledger_api, contract_address)
        details = contract.functions.getOtokenDetails().call()
        # Keep positional unpacking explicit so a short tuple fails loudly.
        return {
            "address_0": details[0],
            "address_1": details[1],
            "address_2": details[2],
            "int_0": details[3],
            "int_1": details[4],
            "bool": details[5],
        }

    @classmethod
    def is_put(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return whether the option is a put ('isPut')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"bool": contract.functions.isPut().call()}

    @classmethod
    def name(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 token name."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.name().call()}

    @classmethod
    def nonces(
        cls, ledger_api: LedgerApi, contract_address: str, owner: Address
    ) -> JSONLike:
        """Return the EIP-2612 permit nonce of ``owner``."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.nonces(owner=owner).call()}

    @classmethod
    def strike_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the strike asset address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.strikeAsset().call()}

    @classmethod
    def strike_price(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the option strike price ('strikePrice')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.strikePrice().call()}

    @classmethod
    def symbol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 token symbol."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.symbol().call()}

    @classmethod
    def total_supply(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 total supply ('totalSupply')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.totalSupply().call()}

    @classmethod
    def underlying_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the underlying asset address ('underlyingAsset')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.underlyingAsset().call()}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.weth import PUBLIC_ID
class Weth(Contract):
    """Read-only wrapper around the (L2-bridged) WETH ERC20 contract.

    Each classmethod binds a web3 contract instance at ``contract_address``
    and forwards a single view-function call, returning the raw result in a
    one-entry dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.

        Not implemented on this scaffold; subclass and override to handle
        the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def domain_separator(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the EIP-712 'DOMAIN_SEPARATOR' value."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.DOMAIN_SEPARATOR().call()}

    @classmethod
    def allowance(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        owner: Address,
        spender: Address,
    ) -> JSONLike:
        """Return the ERC20 allowance granted by ``owner`` to ``spender``."""
        contract = cls.get_instance(ledger_api, contract_address)
        granted = contract.functions.allowance(owner=owner, spender=spender).call()
        return {"int": granted}

    @classmethod
    def balance_of(
        cls, ledger_api: LedgerApi, contract_address: str, account: Address
    ) -> JSONLike:
        """Return the ERC20 token balance of ``account``."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.balanceOf(account=account).call()}

    @classmethod
    def decimals(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 decimals."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.decimals().call()}

    @classmethod
    def l1_address(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the token's L1 counterpart address ('l1Address')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.l1Address().call()}

    @classmethod
    def l2_gateway(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the L2 gateway address ('l2Gateway')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.l2Gateway().call()}

    @classmethod
    def name(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 token name."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.name().call()}

    @classmethod
    def nonces(
        cls, ledger_api: LedgerApi, contract_address: str, owner: Address
    ) -> JSONLike:
        """Return the EIP-2612 permit nonce of ``owner``."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.nonces(owner=owner).call()}

    @classmethod
    def symbol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 token symbol."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"str": contract.functions.symbol().call()}

    @classmethod
    def total_supply(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the ERC20 total supply ('totalSupply')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"int": contract.functions.totalSupply().call()}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import LedgerApi
from rysk_client.packages.eightballer.contracts.d_h_v_lens import PUBLIC_ID
class DHVLens(Contract):
    """Read-only wrapper around the DHV lens contract.

    Each classmethod binds a web3 contract instance at ``contract_address``
    and forwards a single view-function call, returning the raw result in a
    one-entry dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.

        Not implemented on this read-only lens; subclass and override to
        handle the request manually.

        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx  # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs  # unused by design
        raise NotImplementedError

    @classmethod
    def catalogue(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the option catalogue address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.catalogue().call()}

    @classmethod
    def collateral_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the collateral asset address ('collateralAsset')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.collateralAsset().call()}

    @classmethod
    def exchange(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the option exchange address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.exchange().call()}

    @classmethod
    def get_expirations(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the list of tradeable expiries ('getExpirations')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"list[int]": contract.functions.getExpirations().call()}

    @classmethod
    def get_option_chain(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the full option chain struct ('getOptionChain')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"tuple": contract.functions.getOptionChain().call()}

    @classmethod
    def get_option_expiration_drill(
        cls, ledger_api: LedgerApi, contract_address: str, expiration: int
    ) -> JSONLike:
        """Return the per-expiry drill-down for ``expiration``."""
        contract = cls.get_instance(ledger_api, contract_address)
        drill = contract.functions.getOptionExpirationDrill(
            expiration=expiration
        ).call()
        return {"tuple": drill}

    @classmethod
    def pricer(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the pricer contract address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.pricer().call()}

    @classmethod
    def protocol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the protocol contract address."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.protocol().call()}

    @classmethod
    def strike_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the strike asset address ('strikeAsset')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.strikeAsset().call()}

    @classmethod
    def underlying_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Return the underlying asset address ('underlyingAsset')."""
        contract = cls.get_instance(ledger_api, contract_address)
        return {"address": contract.functions.underlyingAsset().call()}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.opyn_controller import \
PUBLIC_ID
class OpynController(Contract):  # pylint: disable=too-many-public-methods
    """Contract wrapper for the Opyn Controller smart contract.

    Each handler is a read-only helper that calls the matching on-chain
    view function and wraps the result in a JSON-like dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def addressbook(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'addressbook' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.addressbook().call()
        return {"address": result}

    @classmethod
    def calculator(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'calculator' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.calculator().call()
        return {"address": result}

    @classmethod
    def call_restricted(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'call_restricted' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.callRestricted().call()
        return {"bool": result}

    @classmethod
    def can_settle_assets(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _underlying: Address,
        _strike: Address,
        _collateral: Address,
        _expiry: int,
    ) -> JSONLike:
        """Handler method for the 'can_settle_assets' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.canSettleAssets(
            _underlying=_underlying,
            _strike=_strike,
            _collateral=_collateral,
            _expiry=_expiry,
        ).call()
        return {"bool": result}

    @classmethod
    def full_pauser(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'full_pauser' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.fullPauser().call()
        return {"address": result}

    @classmethod
    def get_account_vault_counter(
        cls, ledger_api: LedgerApi, contract_address: str, _account_owner: Address
    ) -> JSONLike:
        """Handler method for the 'get_account_vault_counter' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getAccountVaultCounter(
            _accountOwner=_account_owner
        ).call()
        return {"int": result}

    @classmethod
    def get_configuration(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'get_configuration' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getConfiguration().call()
        return {
            "address_0": result[0],
            "address_1": result[1],
            "address_2": result[2],
            "address_3": result[3],
        }

    @classmethod
    def get_naked_cap(
        cls, ledger_api: LedgerApi, contract_address: str, _asset: Address
    ) -> JSONLike:
        """Handler method for the 'get_naked_cap' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getNakedCap(_asset=_asset).call()
        return {"int": result}

    @classmethod
    def get_naked_pool_balance(
        cls, ledger_api: LedgerApi, contract_address: str, _asset: Address
    ) -> JSONLike:
        """Handler method for the 'get_naked_pool_balance' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getNakedPoolBalance(_asset=_asset).call()
        return {"int": result}

    @classmethod
    def get_payout(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _otoken: Address,
        _amount: int,
    ) -> JSONLike:
        """Handler method for the 'get_payout' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getPayout(_otoken=_otoken, _amount=_amount).call()
        return {"int": result}

    @classmethod
    def get_proceed(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _vault_id: int,
    ) -> JSONLike:
        """Handler method for the 'get_proceed' requests."""
        # NOTE(review): the keyword `_vault_id` is passed verbatim to web3 and
        # must match the ABI input name; other handlers use camelCase
        # (e.g. `_accountOwner`) — confirm against the contract ABI.
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getProceed(
            _owner=_owner, _vault_id=_vault_id
        ).call()
        return {"int": result}

    @classmethod
    def get_vault(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _vault_id: int,
    ) -> JSONLike:
        """Handler method for the 'get_vault' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getVault(_owner=_owner, _vault_id=_vault_id).call()
        return {"tuple": result}

    @classmethod
    def get_vault_liquidation_details(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _vault_id: int,
    ) -> JSONLike:
        """Handler method for the 'get_vault_liquidation_details' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getVaultLiquidationDetails(
            _owner=_owner, _vault_id=_vault_id
        ).call()
        # Fix: the call returns a 3-tuple; the original mapped every key to
        # the whole tuple instead of its components.
        return {"address": result[0], "int_0": result[1], "int_1": result[2]}

    @classmethod
    def get_vault_with_details(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _vault_id: int,
    ) -> JSONLike:
        """Handler method for the 'get_vault_with_details' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getVaultWithDetails(
            _owner=_owner, _vault_id=_vault_id
        ).call()
        return {"tuple": result[0], "int_0": result[1], "int_1": result[2]}

    @classmethod
    def has_expired(
        cls, ledger_api: LedgerApi, contract_address: str, _otoken: Address
    ) -> JSONLike:
        """Handler method for the 'has_expired' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.hasExpired(_otoken=_otoken).call()
        return {"bool": result}

    @classmethod
    def is_liquidatable(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _vault_id: int,
    ) -> JSONLike:
        """Handler method for the 'is_liquidatable' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.isLiquidatable(
            _owner=_owner, _vault_id=_vault_id
        ).call()
        # Fix: the call returns a 3-tuple; index its components rather than
        # repeating the whole tuple under every key.
        return {"bool": result[0], "int_1": result[1], "int_2": result[2]}

    @classmethod
    def is_operator(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _owner: Address,
        _operator: Address,
    ) -> JSONLike:
        """Handler method for the 'is_operator' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.isOperator(
            _owner=_owner, _operator=_operator
        ).call()
        return {"bool": result}

    @classmethod
    def is_settlement_allowed(
        cls, ledger_api: LedgerApi, contract_address: str, _otoken: Address
    ) -> JSONLike:
        """Handler method for the 'is_settlement_allowed' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.isSettlementAllowed(_otoken=_otoken).call()
        return {"bool": result}

    @classmethod
    def oracle(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'oracle' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.oracle().call()
        return {"address": result}

    @classmethod
    def owner(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'owner' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.owner().call()
        return {"address": result}

    @classmethod
    def partial_pauser(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'partial_pauser' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.partialPauser().call()
        return {"address": result}

    @classmethod
    def pool(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'pool' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.pool().call()
        return {"address": result}

    @classmethod
    def system_fully_paused(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'system_fully_paused' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.systemFullyPaused().call()
        return {"bool": result}

    @classmethod
    def system_partially_paused(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'system_partially_paused' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.systemPartiallyPaused().call()
        return {"bool": result}

    @classmethod
    def whitelist(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'whitelist' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.whitelist().call()
        # Fix: the original final line carried trailing dataset-metadata
        # residue that broke the syntax.
        return {"address": result}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import LedgerApi
from rysk_client.packages.eightballer.contracts.beyond_pricer import PUBLIC_ID
class BeyondPricer(Contract):  # pylint: disable=too-many-public-methods
    """Contract wrapper for the BeyondPricer smart contract.

    Each handler is a read-only helper that calls the matching on-chain
    view function and wraps the result in a JSON-like dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def address_book(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'address_book' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.addressBook().call()
        return {"address": result}

    @classmethod
    def authority(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'authority' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.authority().call()
        return {"address": result}

    @classmethod
    def bid_ask_i_v_spread(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'bid_ask_i_v_spread' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.bidAskIVSpread().call()
        return {"int": result}

    @classmethod
    def collateral_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'collateral_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.collateralAsset().call()
        return {"address": result}

    @classmethod
    def collateral_lending_rate(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'collateral_lending_rate' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.collateralLendingRate().call()
        return {"int": result}

    @classmethod
    def delta_band_width(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'delta_band_width' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.deltaBandWidth().call()
        return {"int": result}

    @classmethod
    def delta_borrow_rates(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'delta_borrow_rates' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.deltaBorrowRates().call()
        # Fix: the call returns a 4-component struct tuple; the original
        # mapped every key to the whole tuple instead of its components.
        return {
            "sellLong": result[0],
            "sellShort": result[1],
            "buyLong": result[2],
            "buyShort": result[3],
        }

    @classmethod
    def fee_per_contract(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'fee_per_contract' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.feePerContract().call()
        return {"int": result}

    @classmethod
    def get_call_slippage_gradient_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_call_slippage_gradient_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getCallSlippageGradientMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def get_call_spread_collateral_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_call_spread_collateral_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getCallSpreadCollateralMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def get_call_spread_delta_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_call_spread_delta_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getCallSpreadDeltaMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def get_put_slippage_gradient_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_put_slippage_gradient_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getPutSlippageGradientMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def get_put_spread_collateral_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_put_spread_collateral_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getPutSpreadCollateralMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def get_put_spread_delta_multipliers(
        cls, ledger_api: LedgerApi, contract_address: str, _tenor_index: int
    ) -> JSONLike:
        """Handler method for the 'get_put_spread_delta_multipliers' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.getPutSpreadDeltaMultipliers(
            _tenor_index=_tenor_index
        ).call()
        return {"list[int]": result}

    @classmethod
    def liquidity_pool(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'liquidity_pool' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.liquidityPool().call()
        return {"address": result}

    @classmethod
    def low_delta_sell_option_flat_i_v(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'low_delta_sell_option_flat_i_v' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.lowDeltaSellOptionFlatIV().call()
        return {"int": result}

    @classmethod
    def low_delta_threshold(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'low_delta_threshold' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.lowDeltaThreshold().call()
        return {"int": result}

    @classmethod
    def max_tenor_value(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'max_tenor_value' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.maxTenorValue().call()
        return {"int": result}

    @classmethod
    def number_of_tenors(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'number_of_tenors' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.numberOfTenors().call()
        return {"int": result}

    @classmethod
    def protocol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'protocol' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.protocol().call()
        return {"address": result}

    @classmethod
    def quote_option_price(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        _option_series: tuple,
        _amount: int,
        is_sell: bool,
        net_dhv_exposure: int,
    ) -> JSONLike:
        """Handler method for the 'quote_option_price' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.quoteOptionPrice(
            _optionSeries=_option_series,
            _amount=_amount,
            isSell=is_sell,
            netDhvExposure=net_dhv_exposure,
        ).call()
        # Fix: the call returns (totalPremium, totalDelta, totalFees); the
        # original mapped every key to the whole tuple.
        return {
            "totalPremium": result[0],
            "totalDelta": result[1],
            "totalFees": result[2],
        }

    @classmethod
    def risk_free_rate(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'risk_free_rate' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.riskFreeRate().call()
        return {"int": result}

    @classmethod
    def slippage_gradient(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'slippage_gradient' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.slippageGradient().call()
        return {"int": result}

    @classmethod
    def strike_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'strike_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.strikeAsset().call()
        return {"address": result}

    @classmethod
    def underlying_asset(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'underlying_asset' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.underlyingAsset().call()
        # Fix: the original final line carried trailing dataset-metadata
        # residue that broke the syntax.
        return {"address": result}
from typing import Any
from aea.common import JSONLike
from aea.contracts.base import Contract
from aea.crypto.base import Address, LedgerApi
from rysk_client.packages.eightballer.contracts.usdc import PUBLIC_ID
class Usdc(Contract):
    """Contract wrapper for the USDC ERC-20 token.

    Read-only helpers that call the matching on-chain view function and
    wrap the result in a JSON-like dict.
    """

    contract_id = PUBLIC_ID

    @classmethod
    def get_raw_transaction(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_RAW_TRANSACTION' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_raw_message(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> bytes:
        """
        Handler method for the 'GET_RAW_MESSAGE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def get_state(
        cls, ledger_api: LedgerApi, contract_address: str, **kwargs: Any
    ) -> JSONLike:
        """
        Handler method for the 'GET_STATE' requests.
        Implement this method in the sub class if you want
        to handle the contract requests manually.
        :param ledger_api: the ledger apis.
        :param contract_address: the contract address.
        :param kwargs: the keyword arguments.
        :return: the tx # noqa: DAR202
        """
        del ledger_api, contract_address, kwargs
        raise NotImplementedError

    @classmethod
    def domain_separator(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'domain_separator' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.DOMAIN_SEPARATOR().call()
        return {"str": result}

    @classmethod
    def allowance(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
        var_0: Address,
        var_1: Address,
    ) -> JSONLike:
        """Handler method for the 'allowance' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.allowance(var_0, var_1).call()
        return {"int": result}

    @classmethod
    def authority(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'authority' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.authority().call()
        return {"address": result}

    @classmethod
    def balance_of(
        cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
    ) -> JSONLike:
        """Handler method for the 'balance_of' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.balanceOf(var_0).call()
        return {"int": result}

    @classmethod
    def decimals(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'decimals' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.decimals().call()
        return {"int": result}

    @classmethod
    def name(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'name' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.name().call()
        return {"str": result}

    @classmethod
    def nonces(
        cls, ledger_api: LedgerApi, contract_address: str, var_0: Address
    ) -> JSONLike:
        """Handler method for the 'nonces' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.nonces(var_0).call()
        return {"int": result}

    @classmethod
    def symbol(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'symbol' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.symbol().call()
        return {"str": result}

    @classmethod
    def total_supply(
        cls,
        ledger_api: LedgerApi,
        contract_address: str,
    ) -> JSONLike:
        """Handler method for the 'total_supply' requests."""
        instance = cls.get_instance(ledger_api, contract_address)
        result = instance.functions.totalSupply().call()
        # Fix: the original final line carried trailing dataset-metadata
        # residue that broke the syntax.
        return {"int": result}
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional
def from_camel_case_to_snake_case(string: str):
    """Convert a camelCase identifier to snake_case.

    Note: an all-uppercase string is simply lowercased.
    """
    if string.isupper():
        return string.lower()
    pieces = []
    for char in string:
        if char.isupper():
            pieces.append("_")
            pieces.append(char.lower())
        else:
            pieces.append(char)
    return "".join(pieces).lstrip("_")
DEFAULT_TIMEOUT = 10
DEFAULT_ENCODING = "utf-8"
# The zero address, used both as a null address and as null calldata.
NULL_ADDRESS = "0x0000000000000000000000000000000000000000"
NULL_DATA = "0x0000000000000000000000000000000000000000"
SUPPORTED_LEVERAGES = [1, 1.5, 2, 3]
# Placeholder option series used when a request needs a syntactically valid
# (but empty) series tuple.
EMPTY_SERIES = {
    "expiration": 1,
    "strike": 1,
    "isPut": True,
    "collateral": NULL_ADDRESS,
    "underlying": NULL_ADDRESS,
    "strikeAsset": NULL_ADDRESS,
}
# Per-chain deployment addresses. Keys are normalised with
# from_camel_case_to_snake_case before use, so they are written snake_case.
CONTRACT_ADDRESSES = {
    "arbitrum": {
        "opyn_controller": "0x594bD4eC29F7900AE29549c140Ac53b5240d4019",
        "opyn_oracle": "0xBA1880CFFE38DD13771CB03De896460baf7dA1E7",
        "opyn_new_calculator": "0x749a3624ad2a001F935E3319743f53Ecc7466358",
        "opyn_option_registry": "0x8Bc23878981a207860bA4B185fD065f4fd3c7725",
        # Fix: was "priceFeed", inconsistent with the "price_feed" key used
        # for arbitrum-goerli (keys are snake_cased before lookup anyway).
        "price_feed": "0x7f86AC0c38bbc3211c610abE3841847fe19590A4",
        "liquidity_pool": "0x217749d9017cB87712654422a1F5856AAA147b80",
        "portfolio_values_feed": "0x7f9d820CFc109686F2ca096fFA93dd497b91C073",
        "option_handler": "0xc63717c4436043781a63C8c64B02Ff774350e8F8",
        "option_exchange": "0xC117bf3103bd09552F9a721F0B8Bce9843aaE1fa",
        "beyond_pricer": "0xeA5Fb118862876f249Ff0b3e7fb25fEb38158def",
        "d_h_v_lens": "0x10779CAE21C91897a5AdD1831Ffb813803c7fcf1",
        "user_position_lens": "0x02eFd4e61C1883A0FfF1044ACd61c9100859336c",
        "usdc": "0xaf88d065e77c8cC2239327C5EDb3A432268e5831",
        "weth": "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1",
        "o_token": "0x1d96E828e0Aa743783919B24ccDB971504a96C77",
    },
    "arbitrum-goerli": {
        "opyn_controller": "0x11a602a5F5D823c103bb8b7184e22391Aae5F4C2",
        "opyn_oracle": "0x35578F5A49E1f1Cf34ed780B46A0BdABA23D4C0b",
        "opyn_new_calculator": "0xcD270e755C2653e806e16dD3f78E16C89B7a1c9e",
        "opyn_option_registry": "0x4E89cc3215AF050Ceb63Ca62470eeC7C1A66F737",
        "price_feed": "0xf7B1e3a7856067BEcee81FdE0DD38d923b99554D",
        "liquidity_pool": "0x0B1Bf5fb77AA36cD48Baa1395Bc2B5fa0f135d8C",
        "portfolio_values_feed": "0x84fbb7C0a210e5e3A9f7707e1Fb725ADcf0CF528",
        "option_handler": "0x1F63F3B37f818f05ebefaCa11086e5250958e0D8",
        "option_exchange": "0xb672fE86693bF6f3b034730f5d2C77C8844d6b45",
        "beyond_pricer": "0xc939df369C0Fc240C975A6dEEEE77d87bCFaC259",
        "user_position_lens": "0xa6e2ebD13Cbb085659fB8Ce87fAFdF052066017f",
        "d_h_v_lens": "0xFC2245435e38C6EAd5Cb05ac4ef536A6226eCacb",
        "usdc": "0x408c5755b5c7a0a28D851558eA3636CfC5b5b19d",
        "weth": "0x3b3a1dE07439eeb04492Fa64A889eE25A130CDd3",
        "o_token": "0xB19d2eA6f662b13F530CB84B048877E5Ed0bD8FE",
    },
}
@dataclass
class Contract:
    """Contract dataclass.

    A single deployed contract: where its build artifact lives on disk and
    the on-chain address it is deployed at.
    """
    # Filesystem path to the contract's build JSON artifact.
    path: str
    # Hex address of the deployed contract.
    address: str
@dataclass
class Chain:
    """Chain dataclass.

    Identifies an EVM chain and its RPC/websocket endpoints.
    """
    name: str
    chain_id: int
    rpc_url: str
    wss_url: str
    def __hash__(self) -> int:
        """Hash the chain.

        Hashes on chain_id only (the dataclass-generated __eq__ compares all
        fields, so two chains differing only in URLs hash equally).
        """
        return hash(self.chain_id)
@dataclass
class ProtocolDeployment:
    """Protocol deployment dataclass.

    The full set of Rysk protocol contracts deployed on one chain.
    """
    name: str
    # Mapping of snake_case contract name -> Contract (artifact path + address).
    contracts: Dict[str, Contract]
    chain: Chain
    # GraphQL subgraph endpoint for this deployment, if any.
    subgraph_url: Optional[str] = None
# Websocket endpoint for arbitrum-goerli (QuickNode).
WS_URL = "wss://quaint-billowing-morning.arbitrum-goerli.discover.quiknode.pro/def6c4c783fc626cb8a07d38f845b76b458e6e84"
ARBITRUM = Chain(
    name="arbitrum",
    chain_id=42161,
    rpc_url="https://arbitrum.public-rpc.com",
    wss_url="wss://arb1.arbitrum.io/ws",
)
ARBITRUM_GOERLI = Chain(
    name="arbitrum-goerli",
    chain_id=421613,
    rpc_url="https://arbitrum-goerli.rpc.thirdweb.com",
    wss_url=WS_URL,
)
# Local development fork (e.g. anvil/hardhat); not in SUPPORTED_CHAINS.
LOCAL_FORK = Chain(
    name="local-fork",
    chain_id=421611,
    rpc_url="http://localhost:8545",
    wss_url="ws://localhost:8545",
)
# Populated below by iterating SUPPORTED_CHAINS.
PROTOCOL_DEPLOYMENTS = {}
SUPPORTED_CHAINS = [ARBITRUM, ARBITRUM_GOERLI]
CHAINS_TO_SUBGRAPH_URL = {
    ARBITRUM: "https://api.goldsky.com/api/public/project_clhf7zaco0n9j490ce421agn4/subgraphs/arbitrum-one/0.1.17/gn",
    ARBITRUM_GOERLI: "https://api.goldsky.com/api/public/project_clhf7zaco0n9j490ce421agn4/subgraphs/devey/0.1.17/gn",
}
# Build one ProtocolDeployment per supported chain.
# Fix: the original wrapped this in a redundant outer loop over the chain's
# contract items, rebuilding the same deployment once per contract and leaving
# its `name` set to whichever contract happened to iterate last; the
# deployment is named after the chain instead.
for chain in SUPPORTED_CHAINS:
    PROTOCOL_DEPLOYMENTS[chain.name] = ProtocolDeployment(
        name=chain.name,
        contracts={
            from_camel_case_to_snake_case(name): Contract(
                path=Path(os.path.dirname(__file__))
                / ".."
                / "packages"
                / "eightballer"
                / "contracts"
                / from_camel_case_to_snake_case(name)
                / "build"
                / f"{from_camel_case_to_snake_case(name)}.json",
                address=address,
            )
            for name, address in CONTRACT_ADDRESSES[chain.name].items()
        },
        chain=chain,
        subgraph_url=CHAINS_TO_SUBGRAPH_URL[chain],
    )
CHAIN_ID_TO_DEPLOYMENT = {
    deploy.chain.chain_id: deploy for deploy in PROTOCOL_DEPLOYMENTS.values()
}
# Fixed-point unit multipliers: WETH has 18 decimals, USDC has 6.
# (The original last line carried trailing dataset-metadata residue.)
WETH_MULTIPLIER = 1e18
USDC_MULTIPLIER = 1e6
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Dict, List, Optional
from rysk_client.src.collateral import CollateralFactory
from rysk_client.src.constants import USDC_MULTIPLIER, WETH_MULTIPLIER, Chain
# All option series expire at 08:00:00 UTC on the expiry date.
EXPIRATION_TIME = "08:00:00"
@dataclass
class TradingSpec:
    """
    Class to represent the trading spec.

    One side (buy or sell) of a quoted option strike as returned by the
    lens contract.
    """
    # Implied volatility for this side (fixed-point integer).
    iv: int # noqa: C0103
    # Quoted premium (fixed-point integer).
    quote: int
    # Fee charged for the trade (fixed-point integer).
    fee: int
    # True when trading this side is disabled on-chain.
    disabled: bool
    # True when the quoted premium is below the protocol minimum.
    premium_too_small: bool
@dataclass
class OptionStrikeDrill:
    """
    Class to represent the option strike drill.

    One strike row of the option chain: quotes for both sides plus
    greeks/exposure data.
    """
    strike: int
    # Quote for selling the option to the DHV.
    sell: TradingSpec
    # Quote for buying the option from the DHV.
    buy: TradingSpec
    delta: int
    exposure: int
    # Exchange collateral balances; only present on newer (7-field) lens
    # responses — see OptionChain._parse_option_data.
    series_collateral_exchange_balance_usdc: Optional[int] = None
    series_collateral_exchange_balance_weth: Optional[int] = None
class OptionChain:
    """Class to represent the option chain.

    Wraps the raw nested tuple returned by the lens contract:
    ``raw_data[0]`` is the list of expiration timestamps and
    ``raw_data[1]`` the per-expiration drill-down (call rows, put rows,
    underlying price).
    """
    _raw_data: tuple
    expirations: List[int]
    # expiration timestamp -> {"call": [...], "put": [...]} of OptionStrikeDrill.
    # (Annotation corrected: the original declared Dict[int, List[int]].)
    strikes: Dict[int, Dict[str, List[OptionStrikeDrill]]]
    def __init__(self, raw_data: tuple) -> None:
        self._raw_data = raw_data
        self.parse_expirations()
        self.parse_strikes()
    def parse_expirations(self):
        """Parse and set the expirations."""
        self.expirations = self._raw_data[0]
    def parse_strikes(self):
        """Parse and set the strikes."""
        self.strikes = {}
        for index, expiration_1 in enumerate(self.expirations):
            expiration_data = self._raw_data[1][index]
            # Unused components of the per-expiration tuple are kept for
            # positional unpacking only.
            (
                expiration_3,  # noqa: F841
                call_strikes,  # noqa: F841
                call_data,
                put_strikes,  # noqa: F841
                put_data,
                underlying_value,  # noqa: F841
            ) = expiration_data
            call_option_drill = self._parse_option_data(call_data)
            put_option_drill = self._parse_option_data(put_data)
            self.strikes[expiration_1] = {
                "call": call_option_drill,
                "put": put_option_drill,
            }
    def _parse_option_data(self, option_data: tuple) -> List[OptionStrikeDrill]:
        """
        Parse the option data.

        Each row is a 5-tuple (beta deployments) or a 7-tuple (prod, which
        adds the two exchange collateral balances); raises ValueError for
        any other width.
        """
        markets = []
        if len(option_data[0]) == 5:
            cols = [
                "strike",
                "sell_trading_specs",
                "buy_trading_specs",
                "delta",
                "exposure",
            ]
        elif len(option_data[0]) == 7:
            cols = [
                "strike",
                "sell_trading_specs",
                "buy_trading_specs",
                "delta",
                "exposure",
                "series_collateral_exchange_balance_usdc",
                "series_collateral_exchange_balance_weth",
            ]
        else:
            raise ValueError("Option data has unexpected length. is not beta or prod.")
        for row in option_data:
            params = {}
            for k, val in zip(cols, row):
                if k in ["sell_trading_specs", "buy_trading_specs"]:
                    # "sell_trading_specs" -> params["sell"], likewise "buy".
                    params[k.split("_")[0]] = TradingSpec(*val)
                else:
                    params[k] = val
            markets.append(OptionStrikeDrill(**params))
        return markets
    @property
    def active_markets(self):
        """Return the active strikes for the active markets, where option drill is not disabled.

        Yields (expiration, "call"/"put", OptionStrikeDrill) triples for every
        strike where at least one side is tradeable.
        """
        active_markets = []
        for expiration, option_drill in self.strikes.items():
            for option_type, option_type_drill in option_drill.items():
                for option_market in option_type_drill:
                    if (
                        not option_market.sell.disabled
                        or not option_market.buy.disabled
                    ):
                        active_markets.append((expiration, option_type, option_market))
        return active_markets
    @property
    def current_price(self):
        """
        Return the underlying price from the oracle.

        Taken from the last expiration entry's underlying value and scaled
        from 18-decimal fixed point to a float.
        """
        return self._raw_data[1][-1][-1] / WETH_MULTIPLIER
class OptionType(Enum):
    """Option type enum.

    Values match the lower-case strings used in option-chain dicts.
    """
    CALL = "call"
    PUT = "put"
@dataclass
class RyskOptionMarket: # pylint: disable=too-many-instance-attributes
    """Rysk option market."""
    # Strike price in 18-decimal fixed point (wei-style).
    strike: float
    # Expiry as a unix timestamp.
    expiration: int
    is_put: bool
    active: bool = True
    # market data (fixed-point ints; None when no quote is attached)
    bid: Optional[int] = None
    ask: Optional[int] = None
    dhv: Optional[int] = None
    delta: Optional[int] = None
    # Backing field for the `collateral` property; must be set before read.
    _collateral: Optional[str] = None
@classmethod
def from_series(cls, series):
"""Returns a RyskOptionMarket from a series"""
return cls(
strike=series["strike"],
expiration=series["expiration"],
is_put=series["isPut"],
)
def _parse_name(self):
"""Returns the name of the option market into the following format.
NAME = "ETH-30JUN23-2200-C"
"""
_type = "P" if self.is_put else "C"
_expiration = datetime.fromtimestamp(self.expiration).strftime("%d%b%y").upper()
_strike = int(self.strike / WETH_MULTIPLIER)
return f"ETH-{_expiration}-{_strike}-{_type}"
@property
def name(self):
"""Returns the name of the option market"""
return self._parse_name()
def __str__(self):
return f"RyskOptionMarket({self.name})"
@classmethod
def from_option_drill(
cls, expiration: int, option_type: str, option_drill: OptionStrikeDrill
):
"""Returns a RyskOptionMarket from an option drill"""
return cls(
strike=option_drill.strike,
expiration=expiration,
is_put=option_type == "put",
ask=option_drill.buy.quote,
bid=option_drill.sell.quote,
dhv=option_drill.exposure,
delta=option_drill.delta,
)
def to_json(self):
"""Returns the option market as a json"""
market_data = {}
if self.bid and self.ask and self.dhv is not None:
market_data = {
"bid": self.bid / USDC_MULTIPLIER,
"ask": self.ask / USDC_MULTIPLIER,
"dhv": self.dhv / WETH_MULTIPLIER,
}
result = {
"id": self.name,
"strike": self.strike / WETH_MULTIPLIER,
"expiration": self.expiration,
"optionType": "put" if self.is_put else "call",
"active": self.active,
}
if self.delta:
result["delta"] = self.delta / WETH_MULTIPLIER
result.update(**market_data)
return result
@classmethod
def from_json(cls, json):
"""Returns a RyskOptionMarket from a json"""
return cls(
strike=json["strike"] * WETH_MULTIPLIER,
expiration=json["expiration"],
is_put=json["optionType"] == "put",
active=json["active"],
bid=json.get("bid") * USDC_MULTIPLIER,
ask=json.get("ask") * USDC_MULTIPLIER,
dhv=json.get("dhv") * WETH_MULTIPLIER,
delta=json.get("delta"),
)
@classmethod
def from_str(cls, name: str):
"""Returns a RyskOptionMarket from a name"""
_name = name.split("-")
expiration_date = datetime.strptime(
f"{_name[1]}T{EXPIRATION_TIME}+00:00",
"%d%b%yT%H:%M:%S%z",
)
_expiration = int(expiration_date.timestamp())
_strike = int(_name[2]) * WETH_MULTIPLIER
_is_put = _name[3] == "P"
return cls(_strike, _expiration, _is_put)
@property
def collateral(self):
"""Returns the collateral of the option market"""
if self._collateral is None:
raise ValueError("Collateral not set")
return self._collateral
@collateral.setter
def collateral(self, collateral):
"""Sets the collateral of the option market"""
self._collateral = collateral
@dataclass
class MarketFactory:
    """Class to generate option markets."""

    # Target chain; used to resolve the correct collateral token addresses.
    chain: Chain

    # NOTE(review): this explicit __init__ overrides the dataclass-generated
    # one, so @dataclass only contributes the ``chain`` annotation here.
    def __init__(self, chain: Chain, covered: bool = True):
        """
        Markets factory will generate option markets for the given chain.
        If covered is True, it will generate markets for the covered options.
        I.e. for a given strike and expiration, it will generate a call and a put.
        If called with covered=False, it will generate markets for the naked options.
        """
        self.chain = chain
        self.collateral_factory = CollateralFactory(chain)
        # NOTE(review): _covered is stored but not read anywhere in this class
        # as visible here — confirm it is consumed elsewhere.
        self._covered = covered

    def to_series(self, rysk_option_market: RyskOptionMarket):
        """Returns the series of the option market
        effectively, we are adding the appropriate contract addresses to the option market.
        """
        return {
            "strike": int(rysk_option_market.strike),
            "expiration": int(rysk_option_market.expiration),
            "isPut": rysk_option_market.is_put,
            "underlying": self.collateral_factory.WETH,
            "strikeAsset": self.collateral_factory.USDC,
            "collateral": rysk_option_market.collateral,
        } | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/src/rysk_option_market.py | 0.915766 | 0.275308 | rysk_option_market.py | pypi |
class Trade:
    """A single fill.

    Exactly two of ``quantity``, ``price`` and ``total_cost`` must be
    supplied; the third is derived from the other two.
    """

    def __init__(
        self,
        quantity=None,
        price=None,
        total_cost=None,
        market=None,
        side=None,
        fee=None,
        trade_id=None,
    ):
        missing = [quantity is None, price is None, total_cost is None].count(True)
        if missing != 1:
            raise ValueError(
                "2 out of 3 of quantity, price, total_cost must be provided"
            )
        # Derive whichever of the three pricing values was omitted.
        if total_cost is None:
            total_cost = quantity * price
        elif price is None:
            price = total_cost / quantity
        else:
            quantity = total_cost / price
        self._quantity = quantity
        self._price = price
        self._total_cost = total_cost
        self.market = market
        self.side = side
        self.fee = fee
        self.trade_id = trade_id

    @property
    def quantity(self):
        """Return the quantity of the trade."""
        return self._quantity

    @property
    def price(self):
        """Return the price of the trade."""
        return self._price

    @property
    def total_cost(self):
        """Return the total cost of the trade."""
        return self._total_cost

    def __str__(self) -> str:
        """Format the trade as a string."""
        return f"Trade(quantity={self.quantity}, price={self.price})"

    def __repr__(self) -> str:
        """Format the trade as a string."""
        return str(self)
class PnlCalculator:
    """Track realised and unrealised PnL for a single (possibly short) position.

    Trades carry a signed quantity (positive = buy, negative = sell) and may
    be added in any order.
    """

    def __init__(self):
        """A PnlCalculator is initialised with no trades."""
        self.current_price = 0
        self.position_size = 0
        self.total_cost = 0
        self.realised_pnl_total = 0
        self.trades = []

    def update_price(self, price):
        """Update the current mark price used for unrealised PnL."""
        self.current_price = price

    @property
    def average_price(self):
        """Average entry price of the open position (0 when flat)."""
        if self.position_size == 0:
            return 0
        return self.total_cost / self.position_size

    @property
    def unrealised_pnl(self):
        """Mark-to-market PnL of the open position at ``current_price``."""
        return self.position_size * (self.current_price - self.average_price)

    @property
    def realised_pnl(self):
        """PnL locked in by closed (or partially closed) positions."""
        return self.realised_pnl_total

    def add_trades(self, trades):
        """Add a list of trades to the PnlCalculator."""
        for trade in trades:
            self.add_trade(trade)

    def add_trade(self, trade):
        """
        add a trade to the PnlCalculator.
        positions can be short
        trades can be added in any order
        buy_trade = Trade(1, 1000)
        sell_trade = Trade(-1, 1010)
        pnl_calculator.add_trade(buy_trade)
        pnl_calculator.add_trade(sell_trade)
        assert pnl_calculator.position_size == 0
        assert pnl_calculator.realised_pnl == 10
        """
        self.trades.append(trade)
        # open a fresh position
        if self.position_size == 0:
            self.position_size = trade.quantity
            self.total_cost = trade.quantity * trade.price
            return
        # same sign: increase the position at the blended cost
        if self.position_size * trade.quantity > 0:
            self.position_size += trade.quantity
            self.total_cost += trade.quantity * trade.price
            return
        # Opposite sign: the trade closes some, all, or more than all of the
        # position. Realised PnL accrues only on the units actually closed,
        # at the position's average entry price. (The previous implementation
        # realised PnL on the *whole* position even for a partial close, and
        # corrupted the average price by mixing entry and exit cost; its
        # "full close" branch was unreachable.)
        average_price = self.average_price
        closed = min(abs(trade.quantity), abs(self.position_size))
        direction = 1 if self.position_size > 0 else -1
        self.realised_pnl_total += direction * closed * (trade.price - average_price)
        remaining = self.position_size + trade.quantity
        if remaining == 0:
            # full close
            self.position_size = 0
            self.total_cost = 0
        elif remaining * self.position_size > 0:
            # partial close: the leftover units keep their original basis
            self.position_size = remaining
            self.total_cost = remaining * average_price
        else:
            # reversal: the surplus opens a new position at the trade price
            self.position_size = remaining
            self.total_cost = remaining * trade.price
return | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/src/pnl_calculator.py | 0.932184 | 0.424531 | pnl_calculator.py | pypi |
from dataclasses import dataclass
from enum import Enum
from rysk_client.src.action_type import ActionType, RyskActionType
from rysk_client.src.collateral import CollateralFactory
from rysk_client.src.constants import (EMPTY_SERIES, NULL_ADDRESS, NULL_DATA,
Chain)
from rysk_client.src.rysk_option_market import MarketFactory, RyskOptionMarket
from rysk_client.src.utils import from_wei_to_opyn
class OperationType(Enum):
    """Distinguish between operations for the rysk and opyn contracts."""

    # Integer values are used as the ``operation`` field of the payloads built
    # by OperationFactory below.
    OPYN_ACTION = 0
    RYSK_ACTION = 1
@dataclass
class OperationFactory:
    """Create operations for the rysk and opyn contracts."""

    # Target chain; determines which collateral/market addresses are used.
    chain: Chain

    # NOTE(review): this explicit __init__ overrides the dataclass-generated
    # one; @dataclass only contributes the ``chain`` annotation here.
    def __init__(self, chain: Chain):
        self.chain = chain
        self.collateral_factory = CollateralFactory(chain)
        self.market_factory = MarketFactory(chain)

    def buy(
        self,
        acceptable_premium: int,
        owner_address: str,
        amount: int,
        option_market: RyskOptionMarket,
        issuance_required: bool = False,
    ):
        """Create the operation to buy an option.

        Args:
            acceptable_premium: premium bound, forwarded as
                ``indexOrAcceptablePremium`` on the BUY_OPTION action.
            owner_address: address passed as ``secondAddress`` of the buy.
            amount: option amount for the buy action.
            option_market: market whose series is attached to the actions.
            issuance_required: when True, prepend an ISSUE action so the
                series is created before it is bought.

        Returns:
            A single-element list holding one RYSK_ACTION operation whose
            queue contains the (optional) ISSUE action followed by BUY_OPTION.
        """
        operations = []
        if issuance_required:
            operations.append(
                {
                    "actionType": RyskActionType.ISSUE.value,
                    "owner": NULL_ADDRESS,
                    "secondAddress": NULL_ADDRESS,
                    "asset": NULL_ADDRESS,
                    "vaultId": 0,
                    "amount": 0,
                    "optionSeries": self.market_factory.to_series(option_market),
                    "indexOrAcceptablePremium": 0,
                    # NOTE(review): "data" is NULL_ADDRESS here but NULL_DATA in
                    # the other action queues of this class — confirm intended.
                    "data": NULL_ADDRESS,
                }
            )
        operations.append(
            {
                "actionType": RyskActionType.BUY_OPTION.value,
                "owner": NULL_ADDRESS,
                "secondAddress": owner_address,
                "asset": NULL_ADDRESS,
                "vaultId": 0,
                "amount": amount,
                "optionSeries": self.market_factory.to_series(option_market),
                "indexOrAcceptablePremium": acceptable_premium,
                # NOTE(review): NULL_ADDRESS vs NULL_DATA — see note above.
                "data": NULL_ADDRESS,
            }
        )
        return [
            {
                "operation": OperationType.RYSK_ACTION.value,
                "operationQueue": operations,
            }
        ]

    def close_long(
        self,
        acceptable_premium: int,
        owner_address: str,
        otoken_address: str,
        amount: int,
    ):
        """Create the operation to close a long position (sell the held otoken).

        Returns a single RYSK_ACTION operation with one SELL_OPTION action.
        """
        return [
            {
                "operation": OperationType.RYSK_ACTION.value,
                "operationQueue": [
                    {
                        "actionType": RyskActionType.SELL_OPTION.value,
                        "owner": NULL_ADDRESS,
                        "secondAddress": owner_address,
                        "asset": otoken_address,
                        "vaultId": 0,
                        "amount": amount,
                        "optionSeries": EMPTY_SERIES,
                        "indexOrAcceptablePremium": acceptable_premium,
                        "data": NULL_DATA,
                    }
                ],
            }
        ]

    def sell(
        self,
        acceptable_premium: int,
        owner_address: str,
        exchange_address: str,
        otoken_address: str,
        amount: int,
        vault_id: int,
        collateral_amount: int,
        rysk_option_market: RyskOptionMarket,
        issue_new_vault: bool = False,
    ):
        """Create the operation to sell (write) an option.

        Builds two operations: an OPYN_ACTION queue that (optionally opens a
        vault,) deposits collateral and mints the short otoken, followed by a
        RYSK_ACTION queue selling the minted option to the exchange.

        Puts are collateralised in USDC, calls in WETH.
        """
        if rysk_option_market.is_put:
            collateral = self.collateral_factory.USDC
        else:
            collateral = self.collateral_factory.WETH
        required_data = [
            {
                "actionType": ActionType.DEPOSIT_COLLATERAL.value,
                "owner": owner_address,
                "secondAddress": exchange_address,
                "asset": collateral,
                "vaultId": vault_id,
                "amount": collateral_amount,
                "optionSeries": EMPTY_SERIES,
                "indexOrAcceptablePremium": 0,
                "data": NULL_DATA,
            },
            {
                "actionType": ActionType.MINT_SHORT_OPTION.value,
                "owner": owner_address,
                "secondAddress": exchange_address,
                "asset": otoken_address,
                "vaultId": vault_id,
                # opyn uses 8-decimal amounts, hence the wei -> opyn conversion
                "amount": from_wei_to_opyn(amount),
                "optionSeries": EMPTY_SERIES,
                "indexOrAcceptablePremium": 0,
                "data": NULL_DATA,
            },
        ]
        if issue_new_vault:
            # we need to open a vault
            required_data = [
                {
                    "actionType": ActionType.OPEN_VAULT.value,
                    "owner": owner_address,
                    "secondAddress": owner_address,
                    "asset": NULL_ADDRESS,
                    "vaultId": vault_id,
                    "amount": 0,
                    "optionSeries": EMPTY_SERIES,
                    "indexOrAcceptablePremium": 0,
                    "data": NULL_DATA,
                }
            ] + required_data
        return [
            {
                "operation": OperationType.OPYN_ACTION.value,
                "operationQueue": required_data,
            },
            {
                "operation": OperationType.RYSK_ACTION.value,
                "operationQueue": [
                    {
                        "actionType": RyskActionType.SELL_OPTION.value,
                        "owner": NULL_ADDRESS,
                        "secondAddress": owner_address,
                        "asset": NULL_ADDRESS,
                        "vaultId": 0,
                        "amount": amount,
                        "optionSeries": {
                            "expiration": int(rysk_option_market.expiration),
                            "strike": int(rysk_option_market.strike),
                            "isPut": rysk_option_market.is_put,
                            "underlying": self.collateral_factory.WETH,
                            "strikeAsset": self.collateral_factory.USDC,
                            "collateral": self.collateral_factory.WETH
                            if not rysk_option_market.is_put
                            else self.collateral_factory.USDC,
                        },
                        "indexOrAcceptablePremium": int(acceptable_premium),
                        "data": NULL_DATA,
                    }
                ],
            },
        ]

    def close_short(
        self,
        acceptable_premium: int,
        owner_address: str,
        otoken_address: str,
        amount: int,
        collateral_amount: int,
        collateral_asset: str,
        vault_id: int,
    ):
        """
        Create the operation to close a short options

        Buys the option back (RYSK_ACTION), then burns the short otoken and
        withdraws the freed collateral from the vault (OPYN_ACTION).
        """
        tx_data = [
            {
                "operation": OperationType.RYSK_ACTION.value,
                "operationQueue": [
                    {
                        "actionType": RyskActionType.BUY_OPTION.value,
                        "owner": NULL_ADDRESS,
                        "secondAddress": owner_address,
                        "asset": otoken_address,
                        "vaultId": 0,
                        "amount": amount,
                        "optionSeries": EMPTY_SERIES,
                        "indexOrAcceptablePremium": acceptable_premium,
                        "data": NULL_DATA,
                    }
                ],
            },
            {
                "operation": OperationType.OPYN_ACTION.value,
                "operationQueue": [
                    {
                        "actionType": ActionType.BURN_SHORT_OPTION.value,
                        "owner": owner_address,
                        "secondAddress": owner_address,
                        "asset": otoken_address,
                        "vaultId": vault_id,
                        "amount": from_wei_to_opyn(amount),
                        "optionSeries": EMPTY_SERIES,
                        "indexOrAcceptablePremium": 0,
                        "data": NULL_DATA,
                    },
                    {
                        "actionType": ActionType.WITHDRAW_COLLATERAL.value,
                        "owner": owner_address,
                        "secondAddress": owner_address,
                        "asset": collateral_asset,
                        "vaultId": vault_id,
                        "amount": collateral_amount,
                        "optionSeries": EMPTY_SERIES,
                        "indexOrAcceptablePremium": 0,
                        "data": NULL_DATA,
                    },
                ],
            },
        ]
        return tx_data | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/src/operation_factory.py | 0.864439 | 0.289573 | operation_factory.py | pypi |
import json
import logging
import sys
from copy import deepcopy
from dataclasses import asdict
from typing import Any, Dict, List, Optional
from rich import print_json
from rich.console import Console
from rich.logging import RichHandler
from rich.table import Table
from web3 import Web3
from rysk_client.src.constants import (ARBITRUM_GOERLI, DEFAULT_ENCODING,
PROTOCOL_DEPLOYMENTS)
def from_wei_to_opyn(amount: int):
    """Convert amount from wei to opyn units (scale down by 1e10).

    Uses pure integer arithmetic: the previous ``int(amount / 10**10)`` went
    through a float and silently lost precision for amounts above 2**53 —
    i.e. essentially all realistic 18-decimal wei values.
    Truncation toward zero matches ``int()``'s behaviour for negative inputs.
    """
    scale = 10**10
    if amount >= 0:
        return amount // scale
    # floor division rounds toward -inf; mirror int()'s truncation toward 0
    return -((-amount) // scale)
def get_logger():
    """Get the logger."""
    logger = logging.getLogger(__name__)
    # Guard against attaching duplicate handlers on repeated calls.
    if logger.hasHandlers():
        return logger
    if sys.stdout.isatty():
        # Interactive terminal: rich-formatted output with nice tracebacks.
        handler = RichHandler(
            markup=False,
            rich_tracebacks=True,
            locals_max_string=None,
            locals_max_length=None,
        )
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter("%(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    return logger
def get_contract(name, web3, chain, address=None):
    """Returns a web3 contract instance for the given contract name"""
    spec = PROTOCOL_DEPLOYMENTS[chain.name].contracts[name]
    with open(spec.path, "r", encoding=DEFAULT_ENCODING) as abi_file:
        contract_abi = json.load(abi_file)["abi"]
    kwargs = asdict(spec)
    # ``path`` is deployment metadata, not a web3.contract argument.
    kwargs.pop("path")
    kwargs["abi"] = contract_abi
    if address is not None:
        kwargs["address"] = address
    return web3.eth.contract(**kwargs)
def get_web3(chain=ARBITRUM_GOERLI) -> Web3:
    """Returns a web3 instance connected to RPC_URL"""
    return Web3(Web3.HTTPProvider(chain.rpc_url))
def render_table(title: str, data, cols: Optional[List[str]] = None):
    """Render a table from a dynamic input."""
    if cols is None:
        # Derive the column set as the union of row keys, preserving
        # first-seen order.
        columns: List[str] = []
        for row in data:
            for key in row.keys():
                if key not in columns:
                    columns.append(key)
    else:
        columns = cols
    table = Table(title=title)
    for column in columns:
        table.add_column(column)
    # Missing keys render as empty cells.
    for row in data:
        table.add_row(*(str(row.get(column, "")) for column in columns))
    Console().print(table)
def print_operate_tuple(operate_tuple: List[Dict[str, Any]]):
    """
    Ensure that the operate tuple is formatted in a manner compatible with
    tenderly's api.
    print it using rich.
    """
    # Work on a deep copy so the caller's payload is never mutated.
    display_tuple = deepcopy(operate_tuple)
    # These numeric fields are rendered as strings — presumably because they
    # can exceed the JSON-safe integer range; confirm against tenderly's API.
    keys_to_stringify = [
        "strike",
        "amount",
        "indexOrAcceptablePremium",
        "vaultId",
        "actionType",
    ]

    def stringify_json(json_data: Any):
        """
        Recursively stringify json data.
        """
        # Walk dicts and lists in place, converting only the whitelisted keys.
        if isinstance(json_data, dict):
            for key, value in json_data.items():
                if key in keys_to_stringify:
                    json_data[key] = str(value)
                else:
                    stringify_json(value)
        elif isinstance(json_data, list):
            for value in json_data:
                stringify_json(value)

    stringify_json(display_tuple)
    print_json(data=display_tuple) | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/src/utils.py | 0.578805 | 0.183191 | utils.py | pypi |
import json
from dataclasses import dataclass
from typing import Any, Dict, List
import requests
from rysk_client.src.constants import DEFAULT_TIMEOUT
MARKET_SUBGRAPH_QUERY = """
{series (
where: {
or:[
{
isBuyable: true
},
{
isSellable: true
}
]
}
)
{
id
expiration
netDHVExposure
strike
isPut
isBuyable
isSellable
}
}
"""
SHORT_SUBGRAPH_QUERY = """
{
shortPositions(
first: 1000,
where: {
account: "%s",
oToken_: {expiryTimestamp_gte: "1683273600"}
}
){
id
netAmount
buyAmount
sellAmount
active
realizedPnl
oToken {
id
symbol
expiryTimestamp
strikePrice
isPut
underlyingAsset {
id
}
createdAt
}
settleActions {
id
amount
transactionHash
}
optionsBoughtTransactions {
amount
premium
transactionHash
}
optionsSoldTransactions {
amount
premium
transactionHash
}
}
}
"""
LONG_SUBGRAPH_QUERY = """
{
longPositions(
first: 1000,
where: {
account: "%s",
oToken_: {expiryTimestamp_gte: "1683273600"}
}
){
id
netAmount
buyAmount
sellAmount
active
realizedPnl
oToken {
id
symbol
expiryTimestamp
strikePrice
isPut
underlyingAsset {
id
}
createdAt
}
redeemActions {
id
payoutAmount
transactionHash
}
optionsBoughtTransactions {
amount
premium
transactionHash
}
optionsSoldTransactions {
amount
premium
transactionHash
}
}
}
"""
INDEX_QUERY = """
{
stat(id: 0 block: {number: %s}) {
id
}
}
"""
class BlockNotIndexed(Exception):
    """Block not indexed."""
    # Raised by SubgraphClient.query_index when the subgraph has not yet
    # indexed the requested block.
@dataclass
class SubgraphClient:
    """Simple client to interact with the Rysk subgraph."""

    # Fully-qualified GraphQL endpoint of the subgraph.
    url: str

    def _query(self, query):
        """Simple function to call a subgraph query.

        Args:
            query: raw GraphQL query string.

        Returns:
            The ``data`` payload of the GraphQL response.

        Raises:
            ValueError: on a non-200 response or a literal b"404" body.
        """
        headers = {"Content-Type": "application/json"}
        subgraph_query = {"query": query}
        response = requests.post(
            url=self.url,
            headers=headers,
            data=json.dumps(subgraph_query),
            timeout=DEFAULT_TIMEOUT,
        )
        # NOTE(review): the b"404" body check presumably handles a gateway
        # that returns "404" with HTTP 200 for unknown subgraphs — confirm.
        if response.status_code != 200 or response.content == b"404":
            raise ValueError(
                f"Subgraph query failed with status code {response.status_code}."
            )
        return json.loads(response.content)["data"]

    def query_markets(self):
        """Query the subgraph for markets."""
        return self._query(MARKET_SUBGRAPH_QUERY)["series"]

    def query_longs(self, address: str) -> List[Dict[str, Any]]:
        """Query the subgraph for longs."""
        # Subgraph stores addresses lower-cased.
        query = LONG_SUBGRAPH_QUERY % address.lower()
        result = self._query(query)
        return result["longPositions"]

    def query_shorts(self, address: str) -> List[Dict[str, Any]]:
        """Query the subgraph for shorts."""
        query = SHORT_SUBGRAPH_QUERY % address.lower()
        result = self._query(query)
        return result["shortPositions"]

    def query_index(self, block: int) -> Dict[str, Any]:
        """Query the subgraph for index.

        Raises:
            BlockNotIndexed: when the subgraph has no data for ``block``.
        """
        query = INDEX_QUERY % block
        try:
            result = self._query(query)
            return result["stat"]
        except KeyError as err:
            raise BlockNotIndexed(f"Block {block} not indexed.") from err | /rysk_client-0.2.15-py3-none-any.whl/rysk_client/src/subgraph.py | 0.728362 | 0.202207 | subgraph.py | pypi |
from torch import nn
import numpy as np
import torch
from torch.autograd import Function
DEVICE = "cpu"
class CustomLoss(Function):  # pylint: disable=W0223
    """A custom autograd function for the smooth loss component of the RhythmNet loss function."""

    @staticmethod
    def forward(ctx, hr_t, hr_outs, T):  # pylint: disable=W0221
        """Return the absolute deviation of ``hr_t`` from the sequence mean.

        Args:
            ctx: autograd context used to stash values for backward.
            hr_t: scalar tensor — one predicted heart rate.
            hr_outs: tensor of predicted heart rates for all time steps.
            T: number of time steps.

        Returns:
            Scalar tensor |hr_t - mean(hr_outs)|.
        """
        ctx.hr_outs = hr_outs
        ctx.hr_mean = hr_outs.mean()
        ctx.T = T
        ctx.save_for_backward(hr_t)
        # |hr_t - mean| — same value as the original two-branch subtraction.
        return (hr_t - ctx.hr_mean).abs()

    @staticmethod
    def backward(ctx, grad_output):  # pylint: disable=W0221,W0613:
        """Hand-derived (sub)gradient of the smooth loss w.r.t. ``hr_t``.

        Returns:
            A 3-tuple matching forward's inputs; ``hr_outs`` and ``T``
            receive ``None``.
        """
        (hr_t,) = ctx.saved_tensors
        grad = torch.zeros(1).to(DEVICE)
        for rate in ctx.hr_outs:
            # entries equal to hr_t are skipped, matching the derivation
            if rate != hr_t:
                grad = grad + torch.sign(ctx.hr_mean - rate) / ctx.T
        grad = (1 / ctx.T - 1) * torch.sign(ctx.hr_mean - hr_t) + grad
        return grad, None, None
class RhythmNetLoss(nn.Module):
    """RhythmNet criterion: L1 regression loss plus a weighted smoothness term.

    The smoothness term (via ``CustomLoss``) penalises per-step GRU heart-rate
    estimates that deviate from the sequence mean.

    Attributes:
        l1_loss: pointwise L1 criterion for the ResNet HR estimates.
        lambd: weight applied to the smoothness component.
        gru_outputs_considered: flattened GRU outputs from the most recent
            ``smooth_loss`` call (kept for inspection).
        custom_loss: the CustomLoss autograd function applied per time step.
        device: device on which the smoothness accumulator is allocated.
    """

    def __init__(self, weight=100.0):
        """Initialise the criterion.

        Args:
            weight: relative weight of the smoothness loss (default 100.0).
        """
        super().__init__()
        self.l1_loss = nn.L1Loss()
        self.lambd = weight
        self.gru_outputs_considered = None
        self.custom_loss = CustomLoss()
        self.device = "cpu"

    def forward(self, resnet_outputs, gru_outputs, target):
        """Combine the L1 loss and the weighted smoothness loss.

        Args:
            resnet_outputs: ResNet per-clip HR estimates.
            gru_outputs: GRU per-step HR estimates.
            target: ground-truth HR values.

        Returns:
            Scalar tensor: l1(resnet_outputs, target) + lambd * smooth_loss.
        """
        regression_loss = self.l1_loss(resnet_outputs, target)
        smoothness = self.smooth_loss(gru_outputs)
        return regression_loss + self.lambd * smoothness

    def smooth_loss(self, gru_outputs):
        """Average absolute deviation of each GRU output from the sequence mean."""
        self.gru_outputs_considered = gru_outputs.flatten()
        flat_outputs = self.gru_outputs_considered
        accumulated = torch.zeros(1).to(device=self.device)
        for hr_t in flat_outputs:
            hr_variable = torch.autograd.Variable(hr_t, requires_grad=True)
            accumulated = accumulated + self.custom_loss.apply(
                hr_variable, flat_outputs, flat_outputs.shape[0]
            )
        return accumulated / flat_outputs.shape[0]
def rmse(array_1, array_2):
    """Computes the root mean squared error (RMSE) between two arrays.

    Returns:
        float: RMSE between array_1 and array_2."""
    squared_diff = (array_1 - array_2) ** 2
    return np.sqrt(np.mean(squared_diff))
def mae(array_1, array_2):
    """Computes the mean absolute error (MAE) between two arrays.

    Returns:
        float: MAE between the two inputs (elements beyond the shorter
        input are ignored, mirroring ``zip``)."""
    absolute_errors = [abs(left - right) for left, right in zip(array_1, array_2)]
    return np.mean(absolute_errors)
def compute_criteria(target_hr_list, predicted_hr_list):
    """Computes the mean absolute error (MAE) and root mean squared error
    (RMSE) between predicted and target heart rate lists.

    Args:
        target_hr_list (array-like): Target heart rate list.
        predicted_hr_list (array-like): Predicted heart rate list.

    Returns:
        dict: Dictionary containing MAE and RMSE values."""
    hr_mae = mae(np.array(predicted_hr_list), np.array(target_hr_list))
    hr_rmse = rmse(np.array(predicted_hr_list), np.array(target_hr_list))
    # NOTE(review): hr_mae is already a scalar, so np.mean here is a no-op.
    return {"MAE": np.mean(hr_mae), "RMSE": hr_rmse} | /rythmnet_func-0.0.1-py3-none-any.whl/rythmnet_func/utils/rhythmnet_loss.py | 0.963549 | 0.624093 | rhythmnet_loss.py | pypi |
import os
import io
import PIL
import numpy as np
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor
def gt_vs_est(data1, data2, plot_path=None, to_buffer=False):
    """Scatter-plot ground-truth vs estimated heart rate.

    Args:
        data1: ground-truth HR values.
        data2: estimated HR values.
        plot_path: directory in which ``true_vs_est.png`` is saved when not
            buffering.
        to_buffer: when True, return the rendered PNG as an in-memory buffer
            instead of writing to disk.

    Returns:
        io.BytesIO buffer when ``to_buffer`` is True, otherwise None.
    """
    data1 = np.asarray(data1)
    data2 = np.asarray(data2)
    fig, axis = plt.subplots()
    axis.scatter(data1, data2)
    axis.set_title('true labels vs estimated')
    axis.set_ylabel('estimated HR')
    axis.set_xlabel('true HR')
    try:
        if to_buffer:
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            return buf
        fig.savefig(os.path.join(plot_path, 'true_vs_est.png'), dpi=fig.dpi)
        return None
    finally:
        # Close the figure so repeated calls do not accumulate open figures
        # (matplotlib otherwise keeps every figure alive and leaks memory).
        plt.close(fig)
def bland_altman_plot(data1, data2, plot_path=None, to_buffer=False):
    """Bland-Altman agreement plot for two heart-rate series.

    Args:
        data1: first series (e.g. ground-truth HR).
        data2: second series (e.g. estimated HR).
        plot_path: directory in which ``bland-altman_new.png`` is saved when
            not buffering.
        to_buffer: when True, return the rendered PNG as an in-memory buffer
            instead of writing to disk.

    Returns:
        io.BytesIO buffer when ``to_buffer`` is True, otherwise None.
    """
    data1 = np.asarray(data1)
    data2 = np.asarray(data2)
    mean = np.mean([data1, data2], axis=0)
    diff = data1 - data2
    mean_diff = np.mean(diff)
    standard_deviation = np.std(diff, axis=0)
    fig, axis = plt.subplots()
    axis.scatter(mean, diff)
    # Bias line plus the 95% limits of agreement (mean +/- 1.96 SD).
    axis.axhline(mean_diff, color='gray', linestyle='--')
    axis.axhline(mean_diff + 1.96 * standard_deviation, color='gray', linestyle='--')
    axis.axhline(mean_diff - 1.96 * standard_deviation, color='gray', linestyle='--')
    axis.set_title('Bland-Altman Plot')
    axis.set_ylabel('Difference')
    axis.set_xlabel('Mean')
    try:
        if to_buffer:
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            return buf
        return fig.savefig(os.path.join(plot_path, 'bland-altman_new.png'), dpi=fig.dpi)
    finally:
        # Close the figure so repeated calls do not leak open figures.
        plt.close(fig)
def create_tensorboard_plot(plot_name: str, data1, data2) -> ToTensor:
    """Create a plot for Tensorboard.

    Args:
        plot_name (str): The type of plot to create. Valid options are "bland_altman" and "gt_vs_est".
        data1: The first dataset to plot.
        data2: The second dataset to plot.

    Returns:
        ToTensor: The plot image as a PyTorch tensor.
    """
    # Dispatch table mapping plot names to the plotting helpers above.
    plot_funcs = {
        "bland_altman": bland_altman_plot,
        "gt_vs_est": gt_vs_est,
    }
    plot_func = plot_funcs.get(plot_name)
    if plot_func is None:
        raise ValueError(f"Invalid plot name: {plot_name}")
    # Render to an in-memory PNG, then decode it into a tensor via torchvision.
    fig_buf = plot_func(data1, data2, to_buffer=True)
    image = ToTensor()(PIL.Image.open(fig_buf))
    return image | /rythmnet_func-0.0.1-py3-none-any.whl/rythmnet_func/utils/plot_scripts.py | 0.825941 | 0.639905 | plot_scripts.py | pypi |
from tqdm import tqdm
import torch
DEVICE="cpu"
def train_fn(model, data_loader, optimizer, loss_fn, batch_size):
    """Run one training epoch of RhythmNet over ``data_loader``.

    Args:
        model: network returning (per-clip HR estimates, GRU outputs).
        data_loader: iterable of batches; each batch is a list of sample
            dicts carrying "st_maps" and "target" tensors.
        optimizer: optimiser stepped once per sample dict.
        loss_fn: criterion called as ``loss_fn(outputs, gru_outputs, target)``.
        batch_size: used only to normalise the accumulated loss.

    Returns:
        (target_hr_list, predicted_hr_list, mean_loss) for the epoch.
    """
    model.train()
    fin_loss = 0
    target_hr_list = []
    predicted_hr_list = []
    tk_iterator = tqdm(data_loader, total=len(data_loader))
    for batch_data in tk_iterator:
        for data in batch_data:
            # Move every tensor of the sample onto the training device.
            for (key, value) in data.items():
                data[key] = value.to(DEVICE)
            # NOTE(review): reshape (not permute) is used to move the last
            # dim (presumably channels) to the front — this scrambles pixel
            # order if st_maps are truly NHWC; confirm intended layout.
            map_shape = data["st_maps"].shape
            data["st_maps"] = data["st_maps"].reshape((-1, map_shape[3], map_shape[1], map_shape[2]))
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                outputs, gru_outputs = model(**data)
                loss = loss_fn(outputs.squeeze(0), gru_outputs, data["target"])
                loss.backward()
                optimizer.step()
            # For each face video, the avg of all HR (bpm) of individual clips (st_map)
            # are computed as the final HR result
            target_hr_list.append(data["target"].mean().item())
            predicted_hr_list.append(outputs.squeeze(0).mean().item())
            fin_loss += loss.item()
    return target_hr_list, predicted_hr_list, fin_loss / (len(data_loader) * batch_size)
def eval_fn(model, data_loader, loss_fn, batch_size):
    """Run one evaluation pass over ``data_loader`` without gradients.

    Args:
        model: network returning (per-clip HR estimates, GRU outputs).
        data_loader: iterable of batches of sample dicts ("st_maps", "target").
        loss_fn: criterion called as ``loss_fn(outputs, gru_outputs, target)``.
        batch_size: used only to normalise the accumulated loss.

    Returns:
        (target_hr_list, predicted_hr_list, mean_loss) for the pass.
    """
    model.eval()
    fin_loss = 0
    target_hr_list = []
    predicted_hr_list = []
    with torch.no_grad():
        tk_iterator = tqdm(data_loader, total=len(data_loader))
        for batch in tk_iterator:
            for data in batch:
                # Move every tensor of the sample onto the evaluation device.
                for (key, value) in data.items():
                    data[key] = value.to(DEVICE)
                # NOTE(review): same reshape-vs-permute concern as train_fn.
                map_shape = data["st_maps"].shape
                data["st_maps"] = data["st_maps"].reshape((-1, map_shape[3], map_shape[1], map_shape[2]))
                outputs, gru_outputs = model(**data)
                loss = loss_fn(outputs.squeeze(0), gru_outputs, data["target"])
                fin_loss += loss.item()
                # Per-video HR is the mean over its clips.
                target_hr_list.append(data["target"].mean().item())
                predicted_hr_list.append(outputs.squeeze(0).mean().item())
    return target_hr_list, predicted_hr_list, fin_loss / (len(data_loader) * batch_size) | /rythmnet_func-0.0.1-py3-none-any.whl/rythmnet_func/utils/engine_vipl.py | 0.755547 | 0.29931 | engine_vipl.py | pypi |
from torch import nn
import numpy as np
import torch
from torch.autograd import Function
DEVICE = "cpu"
class CustomLoss(Function):  # pylint: disable=W0223
    """A custom autograd function for the smooth loss component of the RhythmNet loss function."""

    @staticmethod
    def forward(ctx, hr_t, hr_outs, T):  # pylint: disable=W0221
        """Return the absolute deviation of ``hr_t`` from the sequence mean.

        Args:
            ctx: autograd context used to stash values for backward.
            hr_t: scalar tensor — one predicted heart rate.
            hr_outs: tensor of predicted heart rates for all time steps.
            T: number of time steps.

        Returns:
            Scalar tensor |hr_t - mean(hr_outs)|.
        """
        ctx.hr_outs = hr_outs
        ctx.hr_mean = hr_outs.mean()
        ctx.T = T
        ctx.save_for_backward(hr_t)
        # |hr_t - mean| — same value as the original two-branch subtraction.
        return (hr_t - ctx.hr_mean).abs()

    @staticmethod
    def backward(ctx, grad_output):  # pylint: disable=W0221,W0613:
        """Hand-derived (sub)gradient of the smooth loss w.r.t. ``hr_t``.

        Returns:
            A 3-tuple matching forward's inputs; ``hr_outs`` and ``T``
            receive ``None``.
        """
        (hr_t,) = ctx.saved_tensors
        grad = torch.zeros(1).to(DEVICE)
        for rate in ctx.hr_outs:
            # entries equal to hr_t are skipped, matching the derivation
            if rate != hr_t:
                grad = grad + torch.sign(ctx.hr_mean - rate) / ctx.T
        grad = (1 / ctx.T - 1) * torch.sign(ctx.hr_mean - hr_t) + grad
        return grad, None, None
class RhythmNetLoss(nn.Module):
""" RhythmNetLoss calculates the loss function for the RhythmNet model.
It uses a combination of L1 loss and a custom smoothness loss to calculate the final loss.
Args:
weight (float): A weight factor to control the relative contribution of the smoothness loss to the final loss.
Default value is 100.0.
Attributes:
l1_loss (nn.L1Loss): L1 loss function provided by PyTorch.
lambd (float): Weight factor for the smoothness loss.
gru_outputs_considered (None or Tensor): A tensor of shape (batch_size, seq_len) containing the output of the
GRU layer of the RhythmNet model, used for computing the smoothness loss. Initialized to None.
custom_loss (CustomLoss): A custom smoothness loss function that penalizes large changes
between consecutive output values.
device (str): The device on which to perform calculations. Default value is 'cpu'.
Methods:
forward(resnet_outputs, gru_outputs, target): Calculates the combined loss using L1 loss and smoothness loss.
smooth_loss(gru_outputs): Calculates the smoothness loss component of the combined loss."""
def __init__(self, weight=100.0):
""" Initializes a new instance of the RhythmNetLoss class.
Args:
weight (float): A weight factor to control the relative contribution of the smoothness loss to the final
loss. Default value is 100.0."""
super().__init__()
self.l1_loss = nn.L1Loss()
self.lambd = weight
self.gru_outputs_considered = None
self.custom_loss = CustomLoss()
self.device = 'cpu'
def forward(self, resnet_outputs, gru_outputs, target):
"""Calculates the combined loss using L1 loss and smoothness loss.
Args:
resnet_outputs (Tensor): A tensor of shape (batch_size, num_classes) containing
the output of the ResNet layer of the RhythmNet model.
gru_outputs (Tensor): A tensor of shape (batch_size, seq_len, hidden_size)
containing the output of the GRU layer of the RhythmNet model.
target (Tensor): A tensor of shape (batch_size, num_classes) containing the target values.
Returns:
loss (Tensor): A scalar tensor representing the combined loss."""
l1_loss = self.l1_loss(resnet_outputs, target)
smooth_loss_component = self.smooth_loss(gru_outputs)
loss = l1_loss + self.lambd * smooth_loss_component
return loss
    def smooth_loss(self, gru_outputs):
        """Calculates the smoothness loss component of the combined loss.
        Args:
            gru_outputs (Tensor): A tensor of shape (batch_size, seq_len, hidden_size)
                containing the output of the GRU layer of the RhythmNet model.
        Returns:
            smooth_loss (Tensor): A scalar tensor representing the smoothness loss."""
        smooth_loss = torch.zeros(1).to(device=self.device)
        # flatten all GRU outputs into a 1-D sequence of per-clip estimates
        self.gru_outputs_considered = gru_outputs.flatten()
        # accumulate the custom penalty of each element against the whole sequence.
        # NOTE(review): wrapping each element in a fresh Variable with
        # requires_grad=True detaches it from the existing graph -- confirm
        # this is intended by CustomLoss's backward implementation.
        for hr_t in self.gru_outputs_considered:
            smooth_loss = smooth_loss + self.custom_loss.apply(torch.autograd.Variable(hr_t, requires_grad=True),
                                                               self.gru_outputs_considered,
                                                               self.gru_outputs_considered.shape[0])
        # average over the number of elements
        return smooth_loss / self.gru_outputs_considered.shape[0]
def rmse(array_1, array_2):
    """Computes the root mean squared error (RMSE) between two arrays.
    Args:
        array_1 (np.ndarray): First array.
        array_2 (np.ndarray): Second array, compared element-wise.
    Returns:
        float: RMSE between array_1 and array_2."""
    squared_errors = (array_1 - array_2) ** 2
    return np.sqrt(np.mean(squared_errors))
def mae(array_1, array_2):
    """Computes the mean absolute error (MAE) between two arrays.
    Args:
        array_1 (np.ndarray): First array.
        array_2 (np.ndarray): Second array, compared element-wise.
    Returns:
        float: MAE between array_1 and array_2."""
    # Fix: docstring referenced non-existent params "l1 and l2"; also replaced
    # the interpreter-level zip/abs loop with vectorized numpy.
    a = np.asarray(array_1)
    b = np.asarray(array_2)
    n = min(a.shape[0], b.shape[0])  # preserve zip()'s silent truncation semantics
    return np.mean(np.abs(a[:n] - b[:n]))
def compute_criteria(target_hr_list, predicted_hr_list):
    """Computes the mean absolute error (MAE) and root mean squared error
    (RMSE) between predicted and target heart rate lists.
    Args:
        target_hr_list (array-like): Target heart rate list.
        predicted_hr_list (array-like): Predicted heart rate list.
    Returns:
        dict: Dictionary containing MAE and RMSE values."""
    hr_mae = mae(np.array(predicted_hr_list), np.array(target_hr_list))
    hr_rmse = rmse(np.array(predicted_hr_list), np.array(target_hr_list))
    # NOTE(review): mae() already returns a scalar, so np.mean(hr_mae) below is a no-op wrapper
    return {"MAE": np.mean(hr_mae), "RMSE": hr_rmse} | /rythmnet_functions-0.0.1-py3-none-any.whl/rythmnet_functions/utils_func/rhythmnet_loss.py | 0.963549 | 0.624093 | rhythmnet_loss.py | pypi |
import os
import io
import PIL
import numpy as np
import matplotlib.pyplot as plt
from torchvision.transforms import ToTensor
def gt_vs_est(data1, data2, plot_path=None, to_buffer=False):
    """Scatter-plots ground-truth vs. estimated HR values.

    Args:
        data1 (array-like): Ground-truth heart-rate values (x-axis).
        data2 (array-like): Estimated heart-rate values (y-axis).
        plot_path (str, optional): Directory the PNG is written to when
            to_buffer is False (required in that case).
        to_buffer (bool): If True, return the rendered PNG as an in-memory
            buffer instead of writing a file.

    Returns:
        io.BytesIO or None: PNG buffer when to_buffer is True, else None.
    """
    data1 = np.asarray(data1)
    data2 = np.asarray(data2)
    fig, axis = plt.subplots()
    axis.scatter(data1, data2)
    axis.set_title('true labels vs estimated')
    axis.set_ylabel('estimated HR')
    axis.set_xlabel('true HR')
    try:
        if to_buffer:
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            return buf
        fig.savefig(os.path.join(plot_path, 'true_vs_est.png'), dpi=fig.dpi)
        return None
    finally:
        # fix: close the figure so repeated calls don't leak open matplotlib figures
        plt.close(fig)
def bland_altman_plot(data1, data2, plot_path=None, to_buffer=False):
    """Renders a Bland-Altman agreement plot for two measurement series.

    Args:
        data1 (array-like): First measurement series.
        data2 (array-like): Second measurement series (same length).
        plot_path (str, optional): Directory the PNG is written to when
            to_buffer is False (required in that case).
        to_buffer (bool): If True, return the rendered PNG as an in-memory
            buffer instead of writing a file.

    Returns:
        io.BytesIO or None: PNG buffer when to_buffer is True, else None.
    """
    data1 = np.asarray(data1)
    data2 = np.asarray(data2)
    mean = np.mean([data1, data2], axis=0)
    diff = data1 - data2
    mean_diff = np.mean(diff)
    standard_deviation = np.std(diff, axis=0)
    fig, axis = plt.subplots()
    axis.scatter(mean, diff)
    axis.axhline(mean_diff, color='gray', linestyle='--')
    # 95% limits of agreement: mean difference +/- 1.96 * SD
    axis.axhline(mean_diff + 1.96 * standard_deviation, color='gray', linestyle='--')
    axis.axhline(mean_diff - 1.96 * standard_deviation, color='gray', linestyle='--')
    axis.set_title('Bland-Altman Plot')
    axis.set_ylabel('Difference')
    axis.set_xlabel('Mean')
    try:
        if to_buffer:
            buf = io.BytesIO()
            fig.savefig(buf, format='png')
            buf.seek(0)
            return buf
        return fig.savefig(os.path.join(plot_path, 'bland-altman_new.png'), dpi=fig.dpi)
    finally:
        # fix: close the figure so repeated calls don't leak open matplotlib figures
        plt.close(fig)
def create_tensorboard_plot(plot_name: str, data1, data2) -> ToTensor:
    """Create a plot for Tensorboard.
    Args:
        plot_name (str): The type of plot to create. Valid options are "bland_altman" and "gt_vs_est".
        data1: The first dataset to plot.
        data2: The second dataset to plot.
    Returns:
        ToTensor: The plot image as a PyTorch tensor.
    """
    # dispatch table: plot name -> plotting function
    plot_funcs = {
        "bland_altman": bland_altman_plot,
        "gt_vs_est": gt_vs_est,
    }
    plot_func = plot_funcs.get(plot_name)
    if plot_func is None:
        raise ValueError(f"Invalid plot name: {plot_name}")
    # render to an in-memory PNG buffer, then decode it into a torch tensor
    fig_buf = plot_func(data1, data2, to_buffer=True)
    image = ToTensor()(PIL.Image.open(fig_buf))
    return image | /rythmnet_functions-0.0.1-py3-none-any.whl/rythmnet_functions/utils_func/plot_scripts.py | 0.825941 | 0.639905 | plot_scripts.py | pypi |
from tqdm import tqdm
import torch
DEVICE="cpu"
def train_fn(model, data_loader, optimizer, loss_fn,batch_size):
    """Runs one training epoch.

    Args:
        model: RhythmNet-style model returning (outputs, gru_outputs).
        data_loader: iterable of batches; each batch is a list of dicts
            with tensor values under keys "st_maps" and "target".
        optimizer: torch optimizer stepping the model's parameters.
        loss_fn: callable(outputs, gru_outputs, target) -> loss tensor.
        batch_size (int): only used to normalize the accumulated loss.

    Returns:
        (target_hr_list, predicted_hr_list, mean_loss)
    """
    model.train()
    fin_loss = 0
    target_hr_list = []
    predicted_hr_list = []
    tk_iterator = tqdm(data_loader, total=len(data_loader))
    for batch_data in tk_iterator:
        for data in batch_data:
            # move every tensor of the sample to the module-level DEVICE
            for (key, value) in data.items():
                data[key] = value.to(DEVICE)
            # flatten clip dim and reorder axes via reshape.
            # NOTE(review): reshape is not a transpose -- confirm the maps'
            # on-disk layout makes this equivalent to channels-first.
            map_shape = data["st_maps"].shape
            data["st_maps"] = data["st_maps"].reshape((-1, map_shape[3], map_shape[1], map_shape[2]))
            optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                outputs, gru_outputs = model(**data)
                loss = loss_fn(outputs.squeeze(0), gru_outputs, data["target"])
                loss.backward()
                optimizer.step()
            # For each face video, the avg of all HR (bpm) of individual clips (st_map)
            # are computed as the final HR result
            target_hr_list.append(data["target"].mean().item())
            predicted_hr_list.append(outputs.squeeze(0).mean().item())
            fin_loss += loss.item()
    return target_hr_list, predicted_hr_list, fin_loss / (len(data_loader) * batch_size)
def eval_fn(model, data_loader, loss_fn,batch_size):
    """Runs evaluation without gradients; mirrors train_fn minus the optimizer.

    Returns:
        (target_hr_list, predicted_hr_list, mean_loss)
    """
    model.eval()
    fin_loss = 0
    target_hr_list = []
    predicted_hr_list = []
    with torch.no_grad():
        tk_iterator = tqdm(data_loader, total=len(data_loader))
        for batch in tk_iterator:
            for data in batch:
                # move every tensor of the sample to the module-level DEVICE
                for (key, value) in data.items():
                    data[key] = value.to(DEVICE)
                # flatten clip dim and reorder axes (same reshape as train_fn)
                map_shape = data["st_maps"].shape
                data["st_maps"] = data["st_maps"].reshape((-1, map_shape[3], map_shape[1], map_shape[2]))
                outputs, gru_outputs = model(**data)
                loss = loss_fn(outputs.squeeze(0), gru_outputs, data["target"])
                fin_loss += loss.item()
                # per-video HR = mean over the video's clips
                target_hr_list.append(data["target"].mean().item())
                predicted_hr_list.append(outputs.squeeze(0).mean().item())
    return target_hr_list, predicted_hr_list, fin_loss / (len(data_loader) * batch_size) | /rythmnet_functions-0.0.1-py3-none-any.whl/rythmnet_functions/utils_func/engine_vipl.py | 0.755547 | 0.29931 | engine_vipl.py | pypi |
import torch
import numpy as np
from PIL import Image
from PIL import ImageFile
from torch.utils.data import Dataset
from utils.signal_utils import read_target_data, calculate_hr, get_hr_data
ImageFile.LOAD_TRUNCATED_IMAGES = True
class DataLoaderRhythmNet(Dataset):
    """
    Dataset class for RhythmNet
    """
    # The data is now the SpatioTemporal Maps instead of videos

    def __init__(self, st_maps_path, target_signal_path):
        # nominal map dimensions/channels; currently unused in __getitem__
        self.H = 180
        self.W = 180
        self.C = 3
        # self.video_path = data_path
        self.st_maps_path = st_maps_path
        # self.resize = resize
        self.target_path = target_signal_path
        self.maps = None
        # ImageNet normalization constants; only referenced by the disabled
        # augmentation pipeline below
        mean = (0.485, 0.456, 0.406)
        std = (0.229, 0.224, 0.225)
        # Maybe add more augmentations
        # self.augmentation_pipeline = albumentations.Compose(
        #     [
        #         albumentations.Normalize(
        #             mean, std, max_pixel_value=255.0, always_apply=True
        #         )
        #     ]
        # )

    def __len__(self):
        # one dataset sample per st-maps file
        return len(self.st_maps_path)

    def __getitem__(self, index):
        # identify the name of the video file so as to get the ground truth signal
        # NOTE(review): splitting on '/' assumes POSIX-style paths -- confirm on Windows
        self.video_file_name = self.st_maps_path[index].split('/')[-1].split('.')[0]
        # targets, timestamps = read_target_data(self.target_path, self.video_file_name)
        # sampling rate is video fps (check)
        # Load the maps for video at 'index'
        self.maps = np.load(self.st_maps_path[index])
        map_shape = self.maps.shape
        # NOTE(review): reshape is not a transpose -- confirm the saved layout
        # makes this equivalent to moving channels first
        self.maps = self.maps.reshape((-1, map_shape[3], map_shape[1], map_shape[2]))
        # target_hr = calculate_hr(targets, timestamps=timestamps)
        # target_hr = calculate_hr_clip_wise(map_shape[0], targets, timestamps=timestamps)
        target_hr = get_hr_data(self.video_file_name)
        # To check the fact that we dont have number of targets greater than the number of maps
        # target_hr = target_hr[:map_shape[0]]
        # truncate maps so maps and HR targets align one-to-one
        self.maps = self.maps[:target_hr.shape[0], :, :, :]
        return {
            "st_maps": torch.tensor(self.maps, dtype=torch.float),
            "target": torch.tensor(target_hr, dtype=torch.float)
        } | /src/utils/dataset.py | 0.763836 | 0.315485 | dataset.py | pypi |
import json
from qtpy.QtCore import QObject, Signal
from qtpy.QtGui import QFontDatabase
from .flows.FlowTheme import FlowTheme, flow_themes
from .GlobalAttributes import Location
class Design(QObject):
    """Design serves as a container for the stylesheet and flow themes, and sends signals to notify GUI elements
    on change of the flow theme. A configuration for the flow themes can be loaded from a json file."""

    global_stylesheet = ''

    # emitted with the new theme name / performance mode
    flow_theme_changed = Signal(str)
    performance_mode_changed = Signal(str)

    def __init__(self):
        super().__init__()

        self.flow_themes = flow_themes
        self.flow_theme: FlowTheme = None
        self.default_flow_size = None
        self.performance_mode: str = None
        self.node_item_shadows_enabled: bool = None
        self.animations_enabled: bool = None
        self.node_selection_stylesheet: str = None

        # load standard default values
        self._default_flow_theme = self.flow_themes[-1]
        self.set_performance_mode('pretty')
        self.set_animations_enabled(True)
        self.default_flow_size = [1000, 700]
        self.set_flow_theme(self._default_flow_theme)

    @staticmethod
    def register_fonts():
        """Registers the fonts shipped in the package resources with Qt."""
        db = QFontDatabase()
        db.addApplicationFont(
            Location.PACKAGE_PATH + '/resources/fonts/poppins/Poppins-Medium.ttf'
        )
        db.addApplicationFont(
            Location.PACKAGE_PATH + '/resources/fonts/source_code_pro/SourceCodePro-Regular.ttf'
        )
        db.addApplicationFont(
            Location.PACKAGE_PATH + '/resources/fonts/asap/Asap-Regular.ttf'
        )

    def load_from_config(self, filepath: str):
        """Loads design configs from a config json file"""
        # fix: context manager guarantees the file is closed even if reading
        # or JSON parsing raises (original used manual open/read/close)
        with open(filepath, 'r') as f:
            IMPORT_DATA = json.loads(f.read())

        if 'flow themes' in IMPORT_DATA:
            # load flow theme configs
            FTID = IMPORT_DATA['flow themes']
            for flow_theme in self.flow_themes:
                flow_theme.load(FTID)

        if 'init flow theme' in IMPORT_DATA:
            # direct indexing for consistency: key presence was already checked
            self._default_flow_theme = self.flow_theme_by_name(IMPORT_DATA['init flow theme'])
            self.set_flow_theme(self._default_flow_theme)

        if 'init performance mode' in IMPORT_DATA:
            self.set_performance_mode(IMPORT_DATA['init performance mode'])

        if 'init animations enabled' in IMPORT_DATA:
            self.set_animations_enabled(IMPORT_DATA['init animations enabled'])

        if 'default flow size' in IMPORT_DATA:
            self.default_flow_size = IMPORT_DATA['default flow size']

    def available_flow_themes(self) -> dict:
        """Returns a name -> FlowTheme mapping of all registered themes."""
        return {theme.name: theme for theme in self.flow_themes}

    def flow_theme_by_name(self, name: str) -> FlowTheme:
        """Returns the theme with the given name (case-insensitive), or None."""
        for theme in self.flow_themes:
            if theme.name.casefold() == name.casefold():
                return theme
        return None

    def set_flow_theme(self, theme: FlowTheme = None, name: str = ''):
        """You can either specify the theme by name, or directly provide a FlowTheme object"""
        if theme:
            self.flow_theme = theme
        elif name and name != '':
            self.flow_theme = self.flow_theme_by_name(name)
        else:
            return

        self.node_selection_stylesheet = self.flow_theme.build_node_selection_stylesheet()
        self.flow_theme_changed.emit(self.flow_theme.name)

    def set_performance_mode(self, new_mode: str):
        """Sets 'pretty' or 'fast' mode; 'fast' disables node item shadows."""
        self.performance_mode = new_mode
        if new_mode == 'fast':
            self.node_item_shadows_enabled = False
        else:
            self.node_item_shadows_enabled = True

        self.performance_mode_changed.emit(self.performance_mode)

    def set_animations_enabled(self, b: bool):
        self.animations_enabled = b

    def set_node_item_shadows(self, b: bool):
        self.node_item_shadows_enabled = b
# default_node_selection_stylesheet = '''
# ''' | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/Design.py | 0.541651 | 0.164852 | Design.py | pypi |
import bisect
import enum
import json
import pathlib
from math import sqrt
from typing import List, Dict
from qtpy.QtCore import QPointF, QByteArray
from ryvencore.utils import serialize, deserialize
from .GlobalAttributes import *
class Container:
    """used for threading; accessed from multiple threads"""

    def __init__(self):
        self.payload = None  # the transferred value
        self.has_been_set = False  # delivery flag, checked via is_set()

    def set(self, val):
        """Stores val, then marks the container as filled.

        The payload is assigned BEFORE the flag is raised, so a reader that
        polls is_set() and then reads payload sees a consistent value."""
        self.payload = val
        self.has_been_set = True

    def is_set(self):
        """Whether set() has been called at least once."""
        return self.has_been_set
def pythagoras(a, b):
    """Return the length of the hypotenuse for a right triangle with legs a and b."""
    squared_sum = a ** 2 + b ** 2
    return sqrt(squared_sum)
def get_longest_line(s: str) -> str:
    """Return the longest line of s (the first one on ties).

    Fix: the original tracked the longest line in `longest_line_found`
    but then returned the loop variable `line`, i.e. it always returned
    the LAST line of the string regardless of length.
    """
    # str.split never returns an empty list, so max() is always safe here;
    # max() keeps the first of equally long lines, matching the old intent
    return max(s.split('\n'), key=len)
def shorten(s: str, max_chars: int, line_break: bool = False):
    """Ensures, that a given string does not exceed a given max length. If it would, its cut in the middle."""
    length = len(s)
    if length <= max_chars:
        return s

    filler = ' . . . '
    if line_break:
        filler = '\n' + filler + '\n'
    filler_len = len(filler)

    # note: round() applied to the full expressions (banker's rounding),
    # matching the historical split positions exactly
    head = s[:round((max_chars - filler_len) / 2)]
    tail = s[round(length - ((max_chars - filler_len) / 2)):]
    return head + filler + tail
def pointF_mapped(p1, p2):
    """adds the floating part of p2 to p1"""
    # keeps p1's coordinates but adds p2's sub-pixel fraction;
    # mutates p2 in place and returns it
    p2.setX(p1.x() + p2.x()%1)
    p2.setY(p1.y() + p2.y()%1)
    return p2
def points_dist(p1, p2):
    """Euclidean distance between two point-like objects exposing x()/y()."""
    dx = p1.x() - p2.x()
    dy = p1.y() - p2.y()
    # squaring makes the redundant abs() of the original unnecessary
    return sqrt(dx ** 2 + dy ** 2)
def middle_point(p1, p2):
    """Returns the midpoint (as a QPointF) of the segment between p1 and p2."""
    return QPointF((p1.x() + p2.x())/2, (p1.y() + p2.y())/2)
class MovementEnum(enum.Enum):
    """Phases of a mouse-driven item movement (press, drag, release)."""
    # this should maybe get removed later
    mouse_clicked = 1
    position_changed = 2
    mouse_released = 3
def get_resource(filepath: str):
    """Returns the Path of a file inside the package's resources folder."""
    return pathlib.Path(Location.PACKAGE_PATH, 'resources', filepath)
def change_svg_color(filepath: str, color_hex: str):
    """Loads an SVG, changes all '#xxxxxx' occurrences to color_hex, renders it into a pixmap and returns it"""
    # https://stackoverflow.com/questions/15123544/change-the-color-of-an-svg-in-qt

    from qtpy.QtSvg import QSvgRenderer
    from qtpy.QtGui import QPixmap, QPainter
    from qtpy.QtCore import Qt

    with open(filepath) as f:
        data = f.read()
    # the source SVGs use the literal placeholder fill '#xxxxxx'
    data = data.replace('fill:#xxxxxx', 'fill:'+color_hex)

    svg_renderer = QSvgRenderer(QByteArray(bytes(data, 'ascii')))

    pix = QPixmap(svg_renderer.defaultSize())
    pix.fill(Qt.transparent)
    pix_painter = QPainter(pix)
    svg_renderer.render(pix_painter)

    return pix
def translate_project(project: Dict) -> Dict:
    """
    Transforms a v3.0 project file into something that can be loaded in v3.1,
    i.e. turns macros into scripts and removes macro nodes from the flows.

    Args:
        project: deserialized v3.0 project dict; must contain the keys
            'scripts' and 'macro scripts'.

    Returns:
        A new project dict; the input project is left unmodified.
    """
    # TODO: this needs to be changed to match ryvencore 0.4 structure

    new_project = project.copy()

    # turn macros into scripts
    fixed_scripts = []

    for script in (project['macro scripts']+project['scripts']):
        new_script = script.copy()
        # fix: also copy the nested 'flow' dict -- the original shallow copy
        # made new_script['flow'] alias script['flow'], so the assignments
        # below mutated the caller's project in place
        new_flow = script['flow'].copy()

        # remove macro nodes
        new_nodes, removed_node_indices = remove_macro_nodes(script['flow']['nodes'])
        new_flow['nodes'] = new_nodes

        # fix connections
        new_flow['connections'] = fix_connections(script['flow']['connections'], removed_node_indices)

        new_script['flow'] = new_flow
        fixed_scripts.append(new_script)

    del new_project['macro scripts']
    new_project['scripts'] = fixed_scripts

    return new_project
def remove_macro_nodes(nodes):
    """
    removes all macro nodes from the nodes list and returns the new list as well as the indices of the removed nodes
    """
    kept_nodes = []
    removed_indices = []
    for index, node in enumerate(nodes):
        identifier = node['identifier']
        is_macro = (
            identifier in ('BUILTIN_MacroInputNode', 'BUILTIN_MacroOutputNode')
            or identifier.startswith('MACRO_NODE_')
        )
        if is_macro:
            removed_indices.append(index)
        else:
            kept_nodes.append(node)
    return kept_nodes, removed_indices
def fix_connections(connections: Dict, removed_node_indices: List) -> List:
    """
    removes connections to removed nodes and fixes node indices of the other ones
    """
    # NOTE(review): redundant -- bisect is already imported at module level
    import bisect

    new_connections = []
    for conn in connections:
        if conn['parent node index'] in removed_node_indices or conn['connected node'] in removed_node_indices:
            # remove connection
            continue
        else:
            # fix node indices
            pni = conn['parent node index']
            cni = conn['connected node']

            # calculate the number of removed nodes with indices < pni | cni
            # (bisect requires removed_node_indices sorted ascending, which
            # remove_macro_nodes() produces by construction)
            num_smaller_removed_pni = bisect.bisect_left(removed_node_indices, pni)
            num_smaller_removed_cni = bisect.bisect_left(removed_node_indices, cni)

            c = conn.copy()
            # decrease indices accordingly
            c['parent node index'] = pni - num_smaller_removed_pni
            c['connected node'] = cni - num_smaller_removed_cni
            new_connections.append(c)

    return new_connections | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/utils.py | 0.518546 | 0.255802 | utils.py | pypi |
from typing import List
from qtpy.QtCore import QObject, Signal, Qt
from qtpy.QtWidgets import QWidget, QApplication
import ryvencore
from .flows.FlowView import FlowView
from .Design import Design
from .GUIBase import GUIBase
class SessionGUI(GUIBase, QObject):
    """
    ryvencore-qt's Session wrapper class, implementing the GUI.
    Any session with a GUI must be created through this class.
    Access the ryvencore session through the :code:`session`
    attribute, and the GUI from the ryvencore session through the
    :code:`gui` attribute. Once instantiated, you can simply use
    the :code:`session` directly to create, rename, delete flows,
    register nodes, etc.
    """

    # Qt signals re-emitting the core session's flow events for GUI listeners
    flow_created = Signal(object)
    flow_deleted = Signal(object)
    flow_renamed = Signal(object, str)

    flow_view_created = Signal(object, object)

    def __init__(self, gui_parent: QWidget):
        GUIBase.__init__(self)
        QObject.__init__(self)

        self.core_session = ryvencore.Session(gui=True, load_addons=True)
        # back-reference so the core session can reach its GUI wrapper
        setattr(self.core_session, 'gui', self)

        self.gui_parent = gui_parent

        # flow views
        self.flow_views = {}  # {Flow : FlowView}

        # register complete_data function
        ryvencore.set_complete_data_func(self.get_complete_data_function(self))

        # load design
        app = QApplication.instance()
        app.setAttribute(Qt.AA_UseHighDpiPixmaps)
        Design.register_fonts()
        self.design = Design()

        # connect to session
        self.core_session.flow_created.sub(self._flow_created)
        self.core_session.flow_deleted.sub(self._flow_deleted)
        self.core_session.flow_renamed.sub(self._flow_renamed)

    def _flow_created(self, flow: ryvencore.Flow):
        """
        Builds the flow view for a newly created flow, saves it in
        self.flow_views, and emits the flow_view_created signal.
        """
        self.flow_created.emit(flow)
        self.flow_views[flow] = FlowView(
            session_gui=self,
            flow=flow,
            parent=self.gui_parent,
        )
        self.flow_view_created.emit(flow, self.flow_views[flow])

        return flow

    def _flow_deleted(self, flow: ryvencore.Flow):
        """
        Removes the flow view for a deleted flow from self.flow_views.
        """
        self.flow_views.pop(flow)
        self.flow_deleted.emit(flow)

    def _flow_renamed(self, flow: ryvencore.Flow, new_name: str):
        """
        Renames the flow view for a renamed flow.
        """
        self.flow_renamed.emit(flow, new_name) | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/SessionGUI.py | 0.803212 | 0.344636 | SessionGUI.py | pypi |
from qtpy.QtCore import QObject, QPointF
from qtpy.QtWidgets import QUndoCommand
from .drawings.DrawingObject import DrawingObject
from .nodes.NodeItem import NodeItem
from typing import Tuple
from ryvencore.NodePort import NodePort, NodeInput, NodeOutput
class FlowUndoCommand(QObject, QUndoCommand):
    """
    The main difference to normal QUndoCommands is the activate feature. This allows the flow widget to add the
    undo command to the undo stack before redo() is called. This is important since some of these commands can cause
    other commands to be added while they are performing redo(), so to prevent those commands to be added to the
    undo stack before the parent command, it is here blocked at first.
    """

    def __init__(self, flow_view):
        self.flow_view = flow_view
        self.flow = flow_view.flow
        self._activated = False  # guards redo() until activate() is called

        QObject.__init__(self)
        QUndoCommand.__init__(self)

    def activate(self):
        """Unblocks the command and performs the initial redo."""
        self._activated = True
        self.redo()

    def redo(self) -> None:
        # no-op while not yet activated; see class docstring
        if not self._activated:
            return
        else:
            self.redo_()

    def undo(self) -> None:
        self.undo_()

    def redo_(self):
        """subclassed"""
        pass

    def undo_(self):
        """subclassed"""
        pass
class MoveComponents_Command(FlowUndoCommand):
    """Undo command for moving a selection of scene items between two positions."""

    def __init__(self, flow_view, items_list, p_from, p_to):
        super(MoveComponents_Command, self).__init__(flow_view)

        self.items_list = items_list
        self.p_from = p_from
        self.p_to = p_to
        self.last_item_group_pos = p_to

    def undo_(self):
        items_group = self.items_group()
        items_group.setPos(self.p_from)
        # remember where the group ended up, used as redo's reference point
        self.last_item_group_pos = items_group.pos()
        self.destroy_items_group(items_group)

    def redo_(self):
        items_group = self.items_group()
        # NOTE(review): moves by the delta to the last recorded group pos
        # rather than to p_to directly -- confirm this offset is intentional
        items_group.setPos(self.p_to - self.last_item_group_pos)
        self.destroy_items_group(items_group)

    def items_group(self):
        # temporarily group the items so they move as one unit
        return self.flow_view.scene().createItemGroup(self.items_list)

    def destroy_items_group(self, items_group):
        self.flow_view.scene().destroyItemGroup(items_group)
class PlaceNode_Command(FlowUndoCommand):
    """Undo command for placing a new node in the flow."""

    def __init__(self, flow_view, node_class, pos):
        super().__init__(flow_view)

        self.node_class = node_class
        self.node = None  # created lazily on first redo
        self.item_pos = pos

    def undo_(self):
        self.flow.remove_node(self.node)

    def redo_(self):
        if self.node:
            # re-add the node created by a previous redo
            self.flow.add_node(self.node)
        else:
            self.node = self.flow.create_node(self.node_class)
class PlaceDrawing_Command(FlowUndoCommand):
    """Undo command for placing a (pen) drawing object in the flow view."""

    def __init__(self, flow_view, posF, drawing):
        super().__init__(flow_view)

        self.drawing = drawing
        self.drawing_obj_place_pos = posF  # initial pen-press position
        self.drawing_obj_pos = self.drawing_obj_place_pos

    def undo_(self):
        # The drawing_obj_pos is not anymore the drawing_obj_place_pos because after the
        # drawing object was completed, its actual position got recalculated according to all points and differs from
        # the initial pen press pos (=drawing_obj_place_pos). See DrawingObject.finished().
        self.drawing_obj_pos = self.drawing.pos()

        self.flow_view.remove_component(self.drawing)

    def redo_(self):
        self.flow_view.add_drawing(self.drawing, self.drawing_obj_pos)
class RemoveComponents_Command(FlowUndoCommand):
    """Undo command for deleting a selection of nodes and drawings.

    At construction time it records which connections are internal to the
    removed node set and which cross its boundary ('broken'), so both can
    be removed on redo and restored on undo.
    """

    def __init__(self, flow_view, items):
        super().__init__(flow_view)

        self.items = items
        self.broken_connections = []  # the connections that go beyond the removed nodes and need to be restored in undo
        self.internal_connections = set()

        # sort the selected items into nodes and drawings
        self.node_items = []
        self.nodes = []
        self.drawings = []
        for i in self.items:
            if isinstance(i, NodeItem):
                self.node_items.append(i)
                self.nodes.append(i.node)
            elif isinstance(i, DrawingObject):
                self.drawings.append(i)

        # classify every connection that touches a removed node
        for n in self.nodes:
            for i in n.inputs:
                cp = n.flow.connected_output(i)
                if cp is not None:
                    cn = cp.node
                    if cn not in self.nodes:
                        self.broken_connections.append((cp, i))
                    else:
                        self.internal_connections.add((cp, i))
            for o in n.outputs:
                for cp in n.flow.connected_inputs(o):
                    cn = cp.node
                    if cn not in self.nodes:
                        self.broken_connections.append((o, cp))
                    else:
                        self.internal_connections.add((o, cp))

    def undo_(self):
        # add nodes
        for n in self.nodes:
            self.flow.add_node(n)

        # add drawings
        for d in self.drawings:
            self.flow_view.add_drawing(d)

        # add connections
        self.restore_broken_connections()
        self.restore_internal_connections()

    def redo_(self):
        # remove connections
        self.remove_broken_connections()
        self.remove_internal_connections()

        # remove nodes
        for n in self.nodes:
            self.flow.remove_node(n)

        # remove drawings
        for d in self.drawings:
            self.flow_view.remove_drawing(d)

    def restore_internal_connections(self):
        for c in self.internal_connections:
            self.flow.add_connection(c)

    def remove_internal_connections(self):
        for c in self.internal_connections:
            self.flow.remove_connection(c)

    def restore_broken_connections(self):
        for c in self.broken_connections:
            self.flow.add_connection(c)

    def remove_broken_connections(self):
        for c in self.broken_connections:
            self.flow.remove_connection(c)
class ConnectPorts_Command(FlowUndoCommand):
    """Undo command toggling a connection between an output and an input port.

    If the two ports are already connected when the command is built,
    redo() disconnects them instead of connecting."""

    def __init__(self, flow_view, out, inp):
        super().__init__(flow_view)

        # CAN ALSO LEAD TO DISCONNECT INSTEAD OF CONNECT!!

        self.out = out
        self.inp = inp
        self.connection = None
        self.connecting = True

        # detect an already existing connection between the two ports
        for i in flow_view.flow.connected_inputs(out):
            if i == self.inp:
                self.connection = (out, i)
                self.connecting = False

    def undo_(self):
        if self.connecting:
            # remove connection
            self.flow.remove_connection(self.connection)
        else:
            # recreate former connection
            self.flow.add_connection(self.connection)

    def redo_(self):
        if self.connecting:
            if self.connection:
                self.flow.add_connection(self.connection)
            else:
                # connection hasn't been created yet
                self.connection = self.flow.connect_nodes(self.out, self.inp)
        else:
            # remove existing connection
            self.flow.remove_connection(self.connection)
class Paste_Command(FlowUndoCommand):
    """Undo command for pasting serialized nodes, connections and drawings."""

    def __init__(self, flow_view, data, offset_for_middle_pos):
        super().__init__(flow_view)

        self.data = data
        self.modify_data_positions(offset_for_middle_pos)
        self.pasted_components = None  # {'nodes': ..., 'connections': ..., 'drawings': ...}

    def modify_data_positions(self, offset):
        """adds the offset to the components' positions in data"""

        for node in self.data['nodes']:
            node['pos x'] = node['pos x'] + offset.x()
            node['pos y'] = node['pos y'] + offset.y()

        for drawing in self.data['drawings']:
            drawing['pos x'] = drawing['pos x'] + offset.x()
            drawing['pos y'] = drawing['pos y'] + offset.y()

    def redo_(self):
        if self.pasted_components is None:
            # first redo: actually deserialize and create the components
            self.pasted_components = {}

            # create components
            self.create_drawings()
            self.pasted_components['nodes'], self.pasted_components['connections'] = \
                self.flow.load_components(
                    nodes_data=self.data['nodes'],
                    conns_data=self.data['connections'],
                    output_data=self.data['output data'],
                )

            self.select_new_components_in_view()
        else:
            # later redos: re-add the previously created components
            self.add_existing_components()

    def undo_(self):
        # remove components and their items from flow
        for c in self.pasted_components['connections']:
            self.flow.remove_connection(c)
        for n in self.pasted_components['nodes']:
            self.flow.remove_node(n)
        for d in self.pasted_components['drawings']:
            self.flow_view.remove_drawing(d)

    def add_existing_components(self):
        # add existing components and items to flow
        for n in self.pasted_components['nodes']:
            self.flow.add_node(n)
        for c in self.pasted_components['connections']:
            self.flow.add_connection(c)
        for d in self.pasted_components['drawings']:
            self.flow_view.add_drawing(d)

        self.select_new_components_in_view()

    def select_new_components_in_view(self):
        # replace the current selection with the freshly pasted items
        self.flow_view.clear_selection()
        for d in self.pasted_components['drawings']:
            d: DrawingObject
            d.setSelected(True)
        for n in self.pasted_components['nodes']:
            n: NodeItem
            ni: NodeItem = self.flow_view.node_items[n]
            ni.setSelected(True)

    def create_drawings(self):
        drawings = []
        for d in self.data['drawings']:
            new_drawing = self.flow_view.create_drawing(d)
            self.flow_view.add_drawing(new_drawing, posF=QPointF(d['pos x'], d['pos y']))
            drawings.append(new_drawing)
        self.pasted_components['drawings'] = drawings | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/flows/FlowCommands.py | 0.580947 | 0.289898 | FlowCommands.py | pypi |
from qtpy.QtCore import QObject, QPropertyAnimation, Property
from qtpy.QtGui import QColor
from qtpy.QtWidgets import QGraphicsItem
class NodeItemAnimator(QObject):
    """Drives the 'activation flash' of a NodeItem by animating its title
    and body colors through two parallel QPropertyAnimations."""

    def __init__(self, node_item):
        super(NodeItemAnimator, self).__init__()

        self.node_item = node_item
        self.animation_running = False

        # the animations write the p_title_color / p_body_color properties below
        self.title_activation_animation = QPropertyAnimation(self, b"p_title_color")
        self.title_activation_animation.setDuration(700)
        self.title_activation_animation.finished.connect(self.finished)
        self.body_activation_animation = QPropertyAnimation(self, b"p_body_color")
        self.body_activation_animation.setDuration(700)

    def start(self):
        self.animation_running = True
        self.title_activation_animation.start()
        self.body_activation_animation.start()

    def stop(self):
        # reset color values. it would just freeze without
        self.title_activation_animation.setCurrentTime(self.title_activation_animation.duration())
        self.body_activation_animation.setCurrentTime(self.body_activation_animation.duration())
        self.title_activation_animation.stop()
        self.body_activation_animation.stop()

    def finished(self):
        self.animation_running = False

    def running(self):
        return self.animation_running

    def reload_values(self):
        """Re-reads the current colors into the animations' keyframes."""
        self.stop()

        # self.node_item.title_label.update_design()

        # flash shape: base color -> lighter peak at 30% -> back to base
        self.title_activation_animation.setKeyValueAt(0, self.get_title_color())
        self.title_activation_animation.setKeyValueAt(0.3, self.get_body_color().lighter().lighter())
        self.title_activation_animation.setKeyValueAt(1, self.get_title_color())

        self.body_activation_animation.setKeyValueAt(0, self.get_body_color())
        self.body_activation_animation.setKeyValueAt(0.3, self.get_body_color().lighter())
        self.body_activation_animation.setKeyValueAt(1, self.get_body_color())

    def fading_out(self):
        # True once the animation has passed its 30% peak
        return self.title_activation_animation.currentTime()/self.title_activation_animation.duration() >= 0.3

    def set_animation_max(self):
        # jump both animations straight to the 30% peak
        self.title_activation_animation.setCurrentTime(0.3*self.title_activation_animation.duration())
        self.body_activation_animation.setCurrentTime(0.3*self.body_activation_animation.duration())

    # animated Qt properties -- setters repaint the node item

    def get_body_color(self):
        return self.node_item.color

    def set_body_color(self, val):
        self.node_item.color = val
        QGraphicsItem.update(self.node_item)

    p_body_color = Property(QColor, get_body_color, set_body_color)

    def get_title_color(self):
        return self.node_item.widget.title_label.color

    def set_title_color(self, val):
        self.node_item.widget.title_label.color = val
        # QGraphicsItem.update(self.node_item)

    p_title_color = Property(QColor, get_title_color, set_title_color) | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/flows/nodes/NodeItemAnimator.py | 0.606732 | 0.175467 | NodeItemAnimator.py | pypi |
from qtpy.QtCore import QRectF, QPointF, QSizeF, Property
from qtpy.QtGui import QFont, QFontMetricsF, QColor
from qtpy.QtWidgets import QGraphicsWidget, QGraphicsLayoutItem, QGraphicsItem
from ...utils import get_longest_line
class TitleLabel(QGraphicsWidget):
    """Graphics widget painting a node item's title; its color is animated
    by NodeItemAnimator through the p_color property."""

    def __init__(self, node_gui, node_item):
        super(TitleLabel, self).__init__(parent=node_item)

        self.setGraphicsItem(self)

        self.node_gui = node_gui
        self.node_item = node_item

        font = QFont('Poppins', 15) if self.node_gui.style == 'normal' else \
            QFont('K2D', 20, QFont.Bold, True)  # should be quite similar to every specific font chosen by the painter
        self.fm = QFontMetricsF(font)

        self.title_str, self.width, self.height = None, None, None
        self.update_shape()

        self.color = QColor(30, 43, 48)
        self.pen_width = 1.5
        self.hovering = False  # whether the mouse is hovering over the parent NI (!)

        # # Design.flow_theme_changed.connect(self.theme_changed)
        # self.update_design()

    def update_shape(self):
        """Recomputes the displayed title string and its pixel dimensions."""
        self.title_str = self.node_gui.display_title

        # approximately!
        self.width = self.fm.width(get_longest_line(self.title_str)+'___')
        self.height = self.fm.height() * 0.7 * (self.title_str.count('\n') + 1)

    def boundingRect(self):
        return QRectF(QPointF(0, 0), self.geometry().size())

    def setGeometry(self, rect):
        self.prepareGeometryChange()
        QGraphicsLayoutItem.setGeometry(self, rect)
        self.setPos(rect.topLeft())

    def sizeHint(self, which, constraint=...):
        return QSizeF(self.width, self.height)

    def paint(self, painter, option, widget=None):
        # actual painting is delegated to the active flow theme
        self.node_item.session_design.flow_theme.paint_NI_title_label(
            self.node_gui, self.node_item.isSelected(), self.hovering, painter, option,
            self.design_style(), self.title_str,
            self.node_item.color, self.boundingRect()
        )

    def design_style(self):
        return self.node_gui.style

    def set_NI_hover_state(self, hovering: bool):
        self.hovering = hovering
        # self.update_design()
        self.update()

    # ANIMATION STUFF

    def get_color(self):
        return self.color

    def set_color(self, val):
        self.color = val
        QGraphicsItem.update(self)

    p_color = Property(QColor, get_color, set_color) | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/flows/nodes/NodeItem_TitleLabel.py | 0.691706 | 0.308281 | NodeItem_TitleLabel.py | pypi |
import json
from qtpy.QtWidgets import QLineEdit, QWidget, QLabel, QGridLayout, QHBoxLayout, QSpacerItem, QSizePolicy, QStyleOption, QStyle
from qtpy.QtGui import QFont, QPainter, QColor, QDrag
from qtpy.QtCore import Signal, Qt, QMimeData
class NodeWidget(QWidget):
    """List-entry widget representing one node type; supports click-to-choose
    and drag & drop of the node's identifier as JSON mime data."""

    chosen = Signal()
    custom_focused_from_inside = Signal()

    def __init__(self, parent, node):
        super(NodeWidget, self).__init__(parent)

        self.custom_focused = False
        self.node = node
        self.left_mouse_pressed_on_me = False

        # UI
        main_layout = QGridLayout()
        main_layout.setContentsMargins(0, 0, 0, 0)

        self_ = self
        # read-only label that forwards all mouse handling to the outer widget
        class NameLabel(QLineEdit):
            def __init__(self, text):
                super().__init__(text)
                self.setReadOnly(True)
                self.setFont(QFont('Source Code Pro', 8))

            def mouseMoveEvent(self, ev):
                self_.custom_focused_from_inside.emit()
                ev.ignore()

            def mousePressEvent(self, ev):
                ev.ignore()

            def mouseReleaseEvent(self, ev):
                ev.ignore()

        name_label = NameLabel(node.title)

        type_layout = QHBoxLayout()
        #type_label = QLabel(node.type_)
        #type_label.setFont(QFont('Segoe UI', 8, italic=True))
        # type_label.setStyleSheet('color: white;')

        main_layout.addWidget(name_label, 0, 0)
        #main_layout.addWidget(type_label, 0, 1)
        self.setLayout(main_layout)
        self.setContentsMargins(0, 0, 0, 0)
        self.setMaximumWidth(250)

        self.setToolTip(node.__doc__)
        self.update_stylesheet()

    def mousePressEvent(self, event):
        self.custom_focused_from_inside.emit()
        if event.button() == Qt.LeftButton:
            self.left_mouse_pressed_on_me = True

    def mouseMoveEvent(self, event):
        if self.left_mouse_pressed_on_me:
            # start a drag carrying the node identifier as JSON mime data
            drag = QDrag(self)
            mime_data = QMimeData()
            mime_data.setData('application/json', bytes(json.dumps(
                {
                    'type': 'node',
                    'node identifier': self.node.identifier,
                }
            ), encoding='utf-8'))
            drag.setMimeData(mime_data)
            drop_action = drag.exec_()

    def mouseReleaseEvent(self, event):
        self.left_mouse_pressed_on_me = False
        # only counts as 'chosen' when the release happens inside the widget
        if self.geometry().contains(self.mapToParent(event.pos())):
            self.chosen.emit()

    def set_custom_focus(self, new_focus):
        self.custom_focused = new_focus
        self.update_stylesheet()

    def update_stylesheet(self):
        # NOTE(review): assumes node.GUI (if present) exposes a .color -- confirm
        color = self.node.GUI.color if hasattr(self.node, 'GUI') else '#888888'
        r, g, b = QColor(color).red(), QColor(color).green(), QColor(color).blue()

        new_style_sheet = f'''
NodeWidget {{
    border: 1px solid rgba(255,255,255,150);
    border-radius: 2px;
    {(
        f'background-color: rgba(255,255,255,80);'
    ) if self.custom_focused else ''}
}}
QLabel {{
    background: transparent;
}}
QLineEdit {{
    color: white;
    background: transparent;
    border: none;
    padding: 2px;
}}
        '''

        self.setStyleSheet(new_style_sheet)

    def paintEvent(self, event):  # just to enable stylesheets
        o = QStyleOption()
        o.initFrom(self)
        p = QPainter(self)
        self.style().drawPrimitive(QStyle.PE_Widget, o, p, self) | /ryvencore_qt-0.4.0a2-py3-none-any.whl/ryvencore_qt/src/flows/node_list_widget/NodeWidget.py | 0.401688 | 0.162646 | NodeWidget.py | pypi |
from ryzom_django_channels.models import (
Publication,
Subscription,
Registration
)
# Registry mapping template names to component classes, filled by the
# @model_template decorator and read by SubscribeComponentMixin.
model_templates = {}


def model_template(name):
    """Decorator registering a component class under ``name``.

    The decorated component is stored in ``model_templates`` and returned
    unchanged, so it can later be looked up to template published model
    instances.

    :param str name: registry key the component is stored under.
    """
    # note: no `global` statement needed — the dict is only mutated via
    # item assignment, never rebound.
    def decorator(component):
        model_templates[name] = component
        return component
    return decorator
class ReactiveBase:
    """Shared plumbing for reactive components: resolves the view they render in."""

    # The view this component is rendered by; set during to_html().
    view = None

    def to_html(self, *content, **context):
        # Resolve self.view before rendering so reactive setup (subscriptions,
        # registrations) can use it.
        self.reactive_setup(**context)
        return super(ReactiveBase, self).to_html(*content, **context)

    def reactive_setup(self, **context):
        """Find the current view, from the context or from an ancestor component."""
        self.view = self.get_view(**context)
        if self.view is None:
            # No view in the rendering context: climb the component tree
            # looking for an ancestor that carries one.
            parent = self.parent or self
            while parent and parent.parent:
                # NOTE(review): ReactiveBase defines a class-level `view`,
                # so hasattr() is always true for reactive ancestors; the
                # walk stops at the first such ancestor even if its view is
                # still None — confirm this is intended.
                if hasattr(parent, 'view'):
                    break
                parent = parent.parent
            try:
                self.view = parent.view
            except AttributeError:
                raise AttributeError('The current view cannot be found')
        if not hasattr(self.view, 'client'):
            # The view must have created its Client (websocket binding)
            # before any reactive component renders.
            raise AttributeError(
                'The current view has no attribute "client".'
                ' Maybe you forgot to call view.get_token()'
                ' in your main component?')

    def get_view(self, **context):
        """Return the view passed in the rendering context, if any."""
        if 'view' in context:
            return context['view']
class SubscribeComponentMixin(ReactiveBase):
    """Mixin for components whose content is fed by a server-side publication."""

    @property
    def model_template(self):
        # Subclasses must set `model_template` to a key registered with
        # @model_template; reaching this property means they did not.
        raise AttributeError(
            f'{self} is missing attribute "model_template"'
        )

    def reactive_setup(self, **context):
        if not hasattr(self, 'subscribe_options'):
            self.subscribe_options = {}
        super().reactive_setup(**context)
        # Only components declaring a `publication` attribute subscribe.
        if hasattr(self, 'publication'):
            self.create_subscription()

    def create_subscription(self):
        """Create the Subscription row for this component and fill its content."""
        publication = Publication.objects.get(name=self.publication)
        subscription = Subscription.objects.create(
            client=self.view.client,
            publication=publication,
            subscriber_id=self.id,
            subscriber_module=self.__module__,
            subscriber_class=self.__class__.__name__,
            options=self.subscribe_options,
        )
        self.get_content(publication, subscription)

    def get_content(self, publication, subscription):
        # Render one registered model template per object in the
        # subscription's queryset and use the results as this component's
        # content.
        template = model_templates[self.model_template]
        content = []
        for obj in subscription.get_queryset():
            content.append(template(obj))
        self.content = content

    @classmethod
    def get_queryset(self, qs, opts):
        """Default pass-through hook letting subclasses narrow the published queryset.

        NOTE(review): declared as a classmethod whose first parameter is
        named `self`; Subscription.get_queryset() calls it as
        get_queryset(user, queryset, opts), which this default signature
        cannot accept — subclasses appear expected to override it with a
        (user, qs, opts) signature. Verify against callers.
        """
        return qs
class ReactiveComponentMixin(ReactiveBase):
    """Mixin allowing a component to be re-rendered by name from the server."""

    # Name under which this component registers; subclasses must set it.
    register = None

    def reactive_setup(self, **context):
        super().reactive_setup(**context)
        # NOTE(review): `register` is a class attribute, so this hasattr()
        # check is always true for instances of this mixin; get_register()
        # then raises when `register` was never set. Kept as-is — confirm
        # whether `self.register is not None` was intended.
        if hasattr(self, 'register'):
            self.create_registration()

    def create_registration(self):
        """Create or re-target the Registration row for this component."""
        registration = Registration.objects.filter(
            name=self.get_register(),
            client=self.view.client
        ).first()
        if registration:
            # Re-point the existing registration at the freshly rendered
            # component ids.
            registration.subscriber_id = self.id
            registration.subscriber_parent = self.parent.id
            registration.save()
        else:
            Registration.objects.create(
                name=self.get_register(),
                client=self.view.client,
                subscriber_id=self.id,
                subscriber_parent=self.parent.id,
            )

    def get_register(self):
        """Return the registration name, raising when it was never defined."""
        if self.register is None:
            raise AttributeError(f'{self}.register is not defined')
        return self.register
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
def send_insert(sub, model, tmpl, id):
    '''
    Send insert message.

    Renders the newly inserted model instance with the subscription's
    template component and pushes an 'inserted' DDP message to the
    subscribed client through the channel layer. Essentially called by
    post_save signal handlers.

    :param Subscriptions sub: The Subscription holding the connection \
            information
    :param Publishable model: The class of the model to insert
    :param Component tmpl: The component subclass that templates \
            the model instance
    :param int id: The id of the model to insert
    '''
    client = sub.client
    if client is None or client.channel == '':
        # No connected websocket for this subscription; nothing to push.
        return

    component = tmpl(model.objects.get(id=id))
    component.parent = sub.subscriber_id
    component.position = sub.queryset.index(id)

    message = {
        'type': 'handle.ddp',
        'params': {
            'type': 'inserted',
            'instance': component.to_obj()
        }
    }
    layer = get_channel_layer()
    async_to_sync(layer.send)(client.channel, message)
def send_change(sub, model, tmpl, id):
    '''
    Send change message.

    Renders the updated model instance with the subscription's template
    component and pushes a 'changed' DDP message to the subscribed client
    through the channel layer. Essentially called by post_save signal
    handlers.

    :param Subscriptions sub: The Subscription holding the connection \
            information
    :param Publishable model: The class of the model to change
    :param Component tmpl: The component subclass that templates \
            the model instance
    :param int id: The id of the model to change
    '''
    client = sub.client
    if client is None or client.channel == '':
        # No connected websocket for this subscription; nothing to push.
        return

    component = tmpl(model.objects.get(id=id))
    component.parent = sub.subscriber_id
    component.position = sub.queryset.index(id)

    message = {
        'type': 'handle.ddp',
        'params': {
            'type': 'changed',
            'instance': component.to_obj()
        }
    }
    layer = get_channel_layer()
    async_to_sync(layer.send)(client.channel, message)
def send_remove(sub, model, tmpl, id):
    '''
    Send remove message.

    Builds a throwaway model instance carrying only the id, renders it with
    the template component to recover the computed component id, then
    pushes a 'removed' DDP message to the subscribed client through the
    channel layer. Essentially called by post_delete signal handlers.

    :param Subscriptions sub: The Subscription holding the connection \
            information
    :param Publishable model: The class of the model to remove
    :param Component tmpl: The component subclass that templates \
            the model instance
    :param int id: The id of the model to remove
    '''
    client = sub.client
    if client is None or client.channel == '':
        # No connected websocket for this subscription; nothing to push.
        return

    # The instance is gone from the database, so fake one with just the
    # id — enough for the template to derive the component id.
    stub = model()
    stub.id = id
    component = tmpl(stub)

    message = {
        'type': 'handle.ddp',
        'params': {
            'type': 'removed',
            'id': component.id,
            'parent': sub.subscriber_id
        }
    }
    layer = get_channel_layer()
    async_to_sync(layer.send)(client.channel, message)
import importlib
import secrets
import uuid
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField, JSONField
from django.db import models
from django.db.models import JSONField
from django.utils import timezone
class Client(models.Model):
    '''
    Clients are the representation of connected Clients
    over websockets. It stores the channel name of a single
    client to communicate over the channel layer.

    The token identifies the client to the browser; the user field
    stores the authenticated user bound to this channel, if any.
    '''
    # Opaque per-client token handed to the browser.
    token = models.CharField(default=secrets.token_urlsafe,
                             max_length=255, unique=True)
    # Channel-layer channel name of this client's websocket consumer.
    channel = models.CharField(max_length=255)
    # Authenticated user bound to this channel; cleared (not cascaded)
    # when the user is deleted.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        models.SET_NULL,
        blank=True,
        null=True
    )
    created = models.DateTimeField(default=timezone.now)
class Registration(models.Model):
    '''
    Named registration of a reactive component for a given client.

    subscriber_id / subscriber_parent hold the ids of the registered
    component and of its parent component — presumably used to locate
    and replace the component in the client's DOM; verify against the
    consumer code.
    '''
    name = models.CharField(max_length=255)
    client = models.ForeignKey(Client, models.CASCADE, blank=True, null=True)
    subscriber_id = models.CharField(max_length=255)
    subscriber_parent = models.CharField(max_length=255)

    class Meta:
        # At most one registration per (name, client) pair.
        unique_together = [('name', 'client')]
class Publication(models.Model):
    '''
    Publications model is used to store the apps publications.

    Each publication has a unique name that matches a publish method
    defined on the published model class. model_module and model_class
    store the dotted import path of that class so the publish function
    can be resolved lazily at run time.
    '''
    name = models.CharField(max_length=255, unique=True)
    model_module = models.CharField(max_length=255)
    model_class = models.CharField(max_length=255)

    @property
    def publish_function(self):
        # Import the published model lazily and return the method on it
        # whose name matches this publication's name.
        model = getattr(
            importlib.import_module(self.model_module),
            self.model_class
        )
        return getattr(model, self.name)
class Subscription(models.Model):
    '''
    Link between a Client and a Publication.

    Stores which component subscribed (subscriber_*) together with the
    list of object ids currently published to that client, so that
    insert/change/remove events can be pushed per subscription. The
    queryset is computed per subscription to permit user-specific sets.
    '''
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    client = models.ForeignKey(Client, models.CASCADE, blank=True, null=True)
    publication = models.ForeignKey(Publication, models.CASCADE)
    subscriber_id = models.CharField(max_length=255)
    subscriber_module = models.CharField(max_length=255)
    subscriber_class = models.CharField(max_length=255)
    queryset = ArrayField(models.IntegerField(), default=list)
    options = JSONField(blank=True, null=True)

    @property
    def subscriber(self):
        '''
        Import and return the component class that owns this subscription.
        '''
        module = importlib.import_module(self.subscriber_module)
        return getattr(module, self.subscriber_class)

    def get_queryset(self, opts=None):
        '''
        Recompute, persist and return this subscription's queryset.

        Runs the publication's publish function for the current user, lets
        the subscribing component narrow the result down with the given
        (or previously stored) options, then caches the resulting ids on
        the subscription.
        '''
        qs = self.publication.publish_function(self.client.user)
        opts = opts or self.options
        qs = self.subscriber.get_queryset(self.client.user, qs, opts)
        self.options = opts
        self.queryset = list(qs.values_list('id', flat=True))
        self.save()
        return qs
class Formater(object):
    """
    A very simple code formater that handles efficient concatenation and
    indentation of lines.

    Written fragments are appended to an internal list and only joined
    when read, keeping writes O(1).
    """

    def __init__(self, indent_string=" "):
        """
        :param indent_string: string prepended once per indentation level.
        """
        self.__buffer = []              # pending fragments, joined lazily
        self.__indentation = 0          # current indentation level
        self.__indent_string = indent_string
        self.__indent_temp = ""         # cached indent_string * indentation
        self.__string_buffer = ""       # already-joined text awaiting read()

    def dedent(self):
        """
        Subtracts one indentation level.
        """
        self.__indentation -= 1
        self.__indent_temp = self.__indent_string * self.__indentation

    def indent(self):
        """
        Adds one indentation level.
        """
        self.__indentation += 1
        self.__indent_temp = self.__indent_string * self.__indentation

    def write(self, text, indent=True, newline=True):
        """
        Writes the string text to the buffer with indentation and a newline
        if not specified otherwise.
        """
        if indent:
            self.__buffer.append(self.__indent_temp)
        self.__buffer.append(text)
        if newline:
            self.__buffer.append("\n")

    def read(self, size=None):
        """
        Consume and return buffered text.

        With size=None the whole buffer is drained; otherwise up to `size`
        characters are returned (possibly fewer, or "", when the buffer
        runs out).
        """
        if size is None:  # bug fix: was `size == None`
            text = self.__string_buffer + "".join(self.__buffer)
            self.__buffer = []
            self.__string_buffer = ""
            return text
        # Top up the joined string only when it cannot satisfy the request.
        if len(self.__string_buffer) < size:
            self.__string_buffer += "".join(self.__buffer)
            self.__buffer = []
        # Slicing handles the short-buffer case: text is whatever is left.
        # (Collapses the original's three duplicated return branches.)
        text = self.__string_buffer[:size]
        self.__string_buffer = self.__string_buffer[size:]
        return text
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.