import os
import sys
import ctypes
import pathlib
from typing import Optional
# ctypes alias for `float *`, used in the C function signatures declared below.
P_FLOAT = ctypes.POINTER(ctypes.c_float)
class RWKVContext:
    """Thin holder for the opaque context pointer returned by rwkv_init_from_file."""

    def __init__(self, ptr: ctypes.pointer):
        # Raw pointer value; owned by the rwkv.cpp library and released via rwkv_free.
        self.ptr = ptr
class RWKVSharedLibrary:
    """
    Python wrapper around rwkv.cpp shared library.
    """

    def __init__(self, shared_library_path: str):
        """
        Loads the shared library from specified file.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library_path : str
            Path to rwkv.cpp shared library. On Windows, it would look like 'rwkv.dll'. On UNIX, 'rwkv.so'.
        """
        self.library = ctypes.cdll.LoadLibrary(shared_library_path)

        self.library.rwkv_init_from_file.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
        self.library.rwkv_init_from_file.restype = ctypes.c_void_p

        self.library.rwkv_eval.argtypes = [
            ctypes.c_void_p,  # ctx
            ctypes.c_int32,   # token
            P_FLOAT,          # state_in
            P_FLOAT,          # state_out
            P_FLOAT           # logits_out
        ]
        self.library.rwkv_eval.restype = ctypes.c_bool

        self.library.rwkv_get_state_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_state_buffer_element_count.restype = ctypes.c_uint32

        self.library.rwkv_get_logits_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_logits_buffer_element_count.restype = ctypes.c_uint32

        # BUGFIX: the original declared rwkv_free's argtypes/restype twice; once is enough.
        self.library.rwkv_free.argtypes = [ctypes.c_void_p]
        self.library.rwkv_free.restype = None

        self.library.rwkv_quantize_model_file.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint32]
        self.library.rwkv_quantize_model_file.restype = ctypes.c_bool

        self.library.rwkv_get_system_info_string.argtypes = []
        self.library.rwkv_get_system_info_string.restype = ctypes.c_char_p

    def rwkv_init_from_file(self, model_file_path: str, thread_count: int) -> RWKVContext:
        """
        Loads the model from a file and prepares it for inference.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path : str
            Path to model file in ggml format.
        thread_count : int
            Count of threads to use, must be positive.
        """
        # restype is c_void_p, so a NULL return from C comes back as Python None.
        ptr = self.library.rwkv_init_from_file(model_file_path.encode('utf-8'), ctypes.c_uint32(thread_count))

        assert ptr is not None, 'rwkv_init_from_file failed, check stderr'

        return RWKVContext(ptr)

    def rwkv_eval(
        self,
        ctx: RWKVContext,
        token: int,
        state_in_address: Optional[int],
        state_out_address: int,
        logits_out_address: int
    ) -> None:
        """
        Evaluates the model for a single token.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        token : int
            Next token index, in range 0 <= token < n_vocab.
        state_in_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
        state_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
        logits_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
        """
        assert self.library.rwkv_eval(
            ctx.ptr,
            ctypes.c_int32(token),
            # NULL state_in signals the C side that this is the first pass.
            ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
            ctypes.cast(state_out_address, P_FLOAT),
            ctypes.cast(logits_out_address, P_FLOAT)
        ), 'rwkv_eval failed, check stderr'

    def rwkv_get_state_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in state buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        return self.library.rwkv_get_state_buffer_element_count(ctx.ptr)

    def rwkv_get_logits_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in logits buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        return self.library.rwkv_get_logits_buffer_element_count(ctx.ptr)

    def rwkv_free(self, ctx: RWKVContext) -> None:
        """
        Frees all allocated memory and the context.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        self.library.rwkv_free(ctx.ptr)

        # Null out the pointer so accidental reuse fails loudly on the C side.
        ctx.ptr = ctypes.cast(0, ctypes.c_void_p)

    def rwkv_quantize_model_file(self, model_file_path_in: str, model_file_path_out: str, q_type: int) -> None:
        """
        Quantizes FP32 or FP16 model to one of INT4 formats.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path_in : str
            Path to model file in ggml format, must be either FP32 or FP16.
        model_file_path_out : str
            Quantized model will be written here.
        q_type : int
            Set to 2 for GGML_TYPE_Q4_0, set to 3 for GGML_TYPE_Q4_1.
        """
        assert self.library.rwkv_quantize_model_file(
            model_file_path_in.encode('utf-8'),
            model_file_path_out.encode('utf-8'),
            ctypes.c_uint32(q_type)
        ), 'rwkv_quantize_model_file failed, check stderr'

    def rwkv_get_system_info_string(self) -> str:
        """
        Returns system information string.
        """
        return self.library.rwkv_get_system_info_string().decode('utf-8')
def load_rwkv_shared_library() -> RWKVSharedLibrary:
    """
    Attempts to find rwkv.cpp shared library and load it.
    To specify exact path to the library, create an instance of RWKVSharedLibrary explicitly.
    """
    file_name: str

    if 'win32' in sys.platform or 'cygwin' in sys.platform:
        file_name = 'rwkv.dll'
    elif 'darwin' in sys.platform:
        # BUGFIX: macOS shared libraries use the .dylib suffix (the build docs
        # produce librwkv.dylib), not .so as the original assumed.
        file_name = 'librwkv.dylib'
    else:
        file_name = 'librwkv.so'

    repo_root_dir: pathlib.Path = pathlib.Path(os.path.abspath(__file__)).parent.parent

    paths = [
        # If we are in "rwkv" directory
        f'../bin/Release/{file_name}',
        # If we are in repo root directory
        f'bin/Release/{file_name}',
        # Search relative to this file
        str(repo_root_dir / 'bin' / 'Release' / file_name),
        # Search in python package
        str(repo_root_dir / 'rwkv' / file_name),
        # Fallback
        str(repo_root_dir / file_name)
    ]

    for path in paths:
        if os.path.isfile(path):
            return RWKVSharedLibrary(path)

    # Nothing found on disk; let ctypes raise a descriptive error for the fallback path.
    return RWKVSharedLibrary(paths[-1])
import os
import time
import uuid
import json
import pathlib
from typing import List, Optional, Literal, Union, Iterator, Dict
from typing_extensions import TypedDict
import sampling
import tokenizers
import rwkv_cpp_model
import rwkv_cpp_shared_library
import server_types
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict
from sse_starlette.sse import EventSourceResponse
class Settings(BaseSettings):
    # Path to the RWKV model file in ggml format (required; populated by pydantic
    # from the environment at startup).
    model: str
    # Tokens produced per generate() pass before the state resets to the prompt state.
    tokens_per_generation: int = 100
# FastAPI application exposing an OpenAI-compatible HTTP API on top of rwkv.cpp.
app = FastAPI(
    title="RWKV API",
    version="0.0.1",
)

# Permissive CORS so browser clients from any origin can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Settings are read from the environment at import time.
settings = Settings()

# The 20B tokenizer JSON ships next to this file.
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))

# Load the rwkv.cpp shared library and the model once, at module import.
library = rwkv_cpp_shared_library.load_rwkv_shared_library()
model = rwkv_cpp_model.RWKVModel(library, settings.model)
model_path = settings.model
def generate(prompt, temperature, top_p):
    """Yield sampled token ids for *prompt*, indefinitely.

    The prompt is evaluated once; the generator then repeatedly samples up to
    ``settings.tokens_per_generation`` tokens and resets back to the
    post-prompt state before continuing. The caller is expected to stop
    iteration (via its own max-token / stop-sequence logic) before a reset
    would produce a visible restart.
    """
    prompt_tokens = tokenizer.encode(prompt).ids

    init_logits, init_state = None, None

    # Feed the prompt; after the first step the same tensors are reused in place.
    for token in prompt_tokens:
        init_logits, init_state = model.eval(token, init_state, init_state, init_logits)

    while True:
        # Clone so each pass starts from the unmodified post-prompt state.
        logits, state = init_logits.clone(), init_state.clone()

        for _ in range(settings.tokens_per_generation):
            token = sampling.sample_logits(logits, temperature, top_p)

            yield token

            logits, state = model.eval(token, state, state, logits)
def create_completion_or_chunks(
    prompt: str,
    suffix: Optional[str] = None,
    max_tokens: int = 16,
    temperature: float = 0.8,
    top_p: float = 0.95,
    logprobs: Optional[int] = None,
    echo: bool = False,
    stop: List[str] = [],
    stream: bool = False,
) -> Union[Iterator[server_types.Completion], Iterator[server_types.CompletionChunk],]:
    """Core completion generator backing both streaming and non-streaming modes.

    When *stream* is true, yields one CompletionChunk per new text delta plus a
    final chunk carrying the finish reason; otherwise yields exactly one
    Completion object.
    NOTE(review): the mutable default ``stop=[]`` is shared across calls; it is
    only read here so this is benign, but ``None``/tuple would be safer.
    """
    completion_id = f"cmpl-{str(uuid.uuid4())}"
    created = int(time.time())
    completion_tokens: List[int] = []
    prompt_tokens: List[int] = tokenizer.encode(prompt).ids  # type: ignore
    text = ""
    # Number of characters already sent to the client in streaming mode.
    returned_characters = 0
    if stop != []:
        stop_sequences = [s for s in stop]
    else:
        stop_sequences = []
    finish_reason = None
    for token in generate(
        prompt,
        top_p=top_p,
        temperature=temperature,
    ):
        # Token 0 is treated as end-of-text.
        if token == 0:
            text = tokenizer.decode(completion_tokens)  # type: ignore
            finish_reason = "stop"
            break
        completion_tokens.append(token)
        all_text: str = tokenizer.decode(completion_tokens)  # type: ignore
        # Stop as soon as any stop sequence appears anywhere in the decoded text.
        any_stop = [s for s in stop_sequences if s in all_text]
        if len(any_stop) > 0:
            first_stop = any_stop[0]
            text = all_text[: all_text.index(first_stop)]
            finish_reason = "stop"
            break
        if stream:
            start = returned_characters
            longest = 0
            # We want to avoid yielding any characters from
            # the generated text if they are part of a stop
            # sequence.
            for s in stop_sequences:
                for i in range(len(s), 0, -1):
                    if all_text.endswith(s[:i]):
                        if i > longest:
                            longest = i
                        break
            # Hold back the longest suffix that could still grow into a stop sequence.
            text = all_text[: len(all_text) - longest]
            returned_characters += len(text[start:])
            yield {
                "id": completion_id,
                "object": "text_completion",
                "created": created,
                "model": model_path,
                "choices": [
                    {
                        "text": text[start:],
                        "index": 0,
                        "logprobs": None,
                        "finish_reason": None,
                    }
                ],
            }
        if len(completion_tokens) >= max_tokens:
            text = tokenizer.decode(completion_tokens)
            finish_reason = "length"
            break
    if finish_reason is None:
        finish_reason = "length"
    if stream:
        # Final chunk: flush any held-back text and report the finish reason.
        yield {
            "id": completion_id,
            "object": "text_completion",
            "created": created,
            "model": model_path,
            "choices": [
                {
                    "text": text[returned_characters:],
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": finish_reason,
                }
            ],
        }
        return
    if echo:
        text = prompt + text
    if suffix is not None:
        text = text + suffix
    if logprobs is not None:
        raise NotImplementedError("logprobs not implemented")
    yield {
        "id": completion_id,
        "object": "text_completion",
        "created": created,
        "model": model_path,
        "choices": [
            {
                "text": text,
                "index": 0,
                "logprobs": None,
                "finish_reason": finish_reason,
            }
        ],
        "usage": {
            "prompt_tokens": len(prompt_tokens),
            "completion_tokens": len(completion_tokens),
            "total_tokens": len(prompt_tokens) + len(completion_tokens),
        },
    }
def completion(
    prompt: str,
    suffix: Optional[str] = None,
    max_tokens: int = 128,
    temperature: float = 0.8,
    top_p: float = 0.95,
    logprobs: Optional[int] = None,
    echo: bool = False,
    stop: List[str] = [],
    stream: bool = False,
) -> Union[server_types.Completion, Iterator[server_types.CompletionChunk]]:
    """Create a completion for *prompt*.

    Returns a chunk iterator when *stream* is true; otherwise the single
    fully-materialized completion object.
    """
    generator = create_completion_or_chunks(
        prompt=prompt,
        suffix=suffix,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        logprobs=logprobs,
        echo=echo,
        stop=stop,
        stream=stream,
    )
    if stream:
        # Streaming: hand the chunk iterator straight back to the caller.
        return generator  # type: ignore
    # Non-streaming: the generator yields exactly one completion object.
    return next(generator)  # type: ignore
def convert_text_completion_to_chat(
    completion: server_types.Completion
) -> server_types.ChatCompletion:
    """Re-shape a plain text completion into an OpenAI chat-completion object."""
    first_choice = completion["choices"][0]
    return {
        "id": "chat" + completion["id"],
        "object": "chat.completion",
        "created": completion["created"],
        "model": completion["model"],
        "choices": [
            {
                "index": 0,
                "message": {
                    "role": "assistant",
                    "content": first_choice["text"],
                },
                "finish_reason": first_choice["finish_reason"],
            }
        ],
        "usage": completion["usage"],
    }
def convert_text_completion_chunks_to_chat(
    chunks: Iterator[server_types.CompletionChunk],
) -> Iterator[server_types.ChatCompletionChunk]:
    """Translate a stream of text-completion chunks into chat-completion chunks.

    A role-announcing delta is emitted before the very first content chunk,
    mirroring the OpenAI streaming protocol.
    """
    first = True
    for chunk in chunks:
        # Fields shared by every chat chunk derived from this text chunk.
        base = {
            "id": "chat" + chunk["id"],
            "model": chunk["model"],
            "created": chunk["created"],
            "object": "chat.completion.chunk",
        }
        if first:
            first = False
            yield {
                **base,
                "choices": [
                    {
                        "index": 0,
                        "delta": {
                            "role": "assistant",
                        },
                        "finish_reason": None,
                    }
                ],
            }
        yield {
            **base,
            "choices": [
                {
                    "index": 0,
                    "delta": {
                        "content": chunk["choices"][0]["text"],
                    },
                    "finish_reason": chunk["choices"][0]["finish_reason"],
                }
            ],
        }
def chat_completion(
    messages: List[server_types.ChatCompletionMessage],
    temperature: float = 0.8,
    top_p: float = 0.95,
    stream: bool = False,
    stop: List[str] = [],
    max_tokens: int = 128,
) -> Union[server_types.ChatCompletion, Iterator[server_types.ChatCompletionChunk]]:
    """Generate an OpenAI-style chat completion from a message list.

    The chat history is flattened into an instruction-style prompt and passed
    to completion(); the result is converted back into chat-completion shape.
    """
    instructions = """Complete the following chat conversation between the user and the assistant. System messages should be strictly followed as additional instructions."""
    chat_history = "\n".join(
        f'{message["role"]} {message.get("user", "")}: {message["content"]}'
        for message in messages
    )
    PROMPT = f" \n\n### Instructions:{instructions}\n\n### Inputs:{chat_history}\n\n### Response:\nassistant: "
    # Stop sequences preventing the model from speaking for other roles.
    PROMPT_STOP = ["###", "\nuser", "\nassistant", "\nsystem"]
    completion_or_chunks = completion(
        prompt=PROMPT,
        stop=PROMPT_STOP + stop,
        temperature=temperature,
        top_p=top_p,
        stream=stream,
        max_tokens=max_tokens,
    )
    if stream:
        chunks: Iterator[server_types.CompletionChunk] = completion_or_chunks  # type: ignore
        return convert_text_completion_chunks_to_chat(chunks)
    else:
        # BUGFIX: the original assigned to a local named `completion`, which made
        # `completion` function-local for the whole body and turned the call
        # above into an UnboundLocalError. Also qualify the annotation with
        # server_types (the bare `Completion` name does not exist here).
        _completion: server_types.Completion = completion_or_chunks  # type: ignore
        return convert_text_completion_to_chat(_completion)
class CreateCompletionRequest(BaseModel):
    # Text to complete (the only required field).
    prompt: str
    # Appended verbatim to the generated text when set.
    suffix: Optional[str] = Field(None)
    max_tokens: int = 16
    temperature: float = 0.8
    top_p: float = 0.95
    # When true, the prompt is prepended to the returned text.
    echo: bool = False
    stop: List[str] = []
    # When true, the endpoint streams chunks via server-sent events.
    stream: bool = False

    # ignored or currently unsupported
    model: Optional[str] = Field(None)
    n: Optional[int] = 1
    logprobs: Optional[int] = Field(None)
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    best_of: Optional[int] = 1
    logit_bias: Optional[Dict[str, float]] = Field(None)
    user: Optional[str] = Field(None)

    class Config:
        # Example rendered in the OpenAPI docs.
        schema_extra = {
            "example": {
                "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
                "stop": ["\n", "###"],
            }
        }
# Pydantic response model generated from the Completion TypedDict (for OpenAPI docs).
CreateCompletionResponse = create_model_from_typeddict(server_types.Completion)


@app.post(
    "/v1/completions",
    response_model=CreateCompletionResponse,
)
def create_completion(request: CreateCompletionRequest):
    # Drop OpenAI-API fields this backend ignores before forwarding to completion().
    # NOTE(review): "top_k" and "repeat_penalty" are not declared on the request
    # model — presumably harmless leftovers; confirm this pydantic version
    # ignores unknown exclude keys.
    completion_or_chunks = completion(
        **request.dict(
            exclude={
                "model",
                "n",
                "logprobs",
                "frequency_penalty",
                "presence_penalty",
                "best_of",
                "logit_bias",
                "user",
                "top_k",
                "repeat_penalty"
            }
        )
    )
    if request.stream:
        # Stream each chunk as a server-sent event.
        _chunks: Iterator[server_types.CompletionChunk] = completion_or_chunks  # type: ignore
        return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in _chunks)
    _completion: server_types.Completion = completion_or_chunks  # type: ignore
    return _completion
class CreateEmbeddingRequest(BaseModel):
    # Ignored: this server always uses the configured model.
    model: Optional[str]
    # Text to embed.
    input: str
    # Optional end-user identifier forwarded by the client.
    user: Optional[str]

    class Config:
        # Example rendered in the OpenAPI docs.
        schema_extra = {
            "example": {
                "input": "The food was delicious and the waiter...",
            }
        }
# Pydantic response model generated from the Embedding TypedDict (for OpenAPI docs).
CreateEmbeddingResponse = create_model_from_typeddict(server_types.Embedding)


@app.post(
    "/v1/embeddings",
    response_model=CreateEmbeddingResponse,
)
def create_embedding(request: CreateEmbeddingRequest):
    # Embeddings are not supported by this backend yet.
    raise NotImplementedError()
class ChatCompletionRequestMessage(BaseModel):
    # Sender of the message; mirrors the OpenAI chat roles.
    role: Union[Literal["system"], Literal["user"], Literal["assistant"]]
    content: str
    # Optional end-user identifier forwarded by the client.
    user: Optional[str] = None
class CreateChatCompletionRequest(BaseModel):
    # Conversation history to complete.
    messages: List[ChatCompletionRequestMessage]
    temperature: float = 0.8
    top_p: float = 0.95
    # When true, the endpoint streams chunks via server-sent events.
    stream: bool = False
    stop: List[str] = []
    max_tokens: int = 128

    # ignored or currently unsupported
    # BUGFIX: `model` was declared twice (once as a bare Optional[str] and again
    # with Field(None)); a single declaration is kept.
    model: Optional[str] = Field(None)
    n: Optional[int] = 1
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    logit_bias: Optional[Dict[str, float]] = Field(None)
    user: Optional[str] = Field(None)

    class Config:
        # Example rendered in the OpenAPI docs.
        schema_extra = {
            "example": {
                "messages": [
                    ChatCompletionRequestMessage(
                        role="system", content="You are a helpful assistant."
                    ),
                    ChatCompletionRequestMessage(
                        role="user", content="What is the capital of France?"
                    ),
                ]
            }
        }
# Pydantic response model generated from the ChatCompletion TypedDict (for OpenAPI docs).
CreateChatCompletionResponse = create_model_from_typeddict(server_types.ChatCompletion)


@app.post(
    "/v1/chat/completions",
    response_model=CreateChatCompletionResponse,
)
async def create_chat_completion(
    request: CreateChatCompletionRequest,
) -> Union[server_types.ChatCompletion, EventSourceResponse]:
    # Drop OpenAI-API fields this backend ignores before forwarding.
    completion_or_chunks = chat_completion(
        **request.dict(
            exclude={
                "model",
                "n",
                "presence_penalty",
                "frequency_penalty",
                "logit_bias",
                "user",
            }
        ),
    )
    if request.stream:

        # Wrap the synchronous chunk iterator into an async SSE stream,
        # terminated with the OpenAI-style "[DONE]" sentinel.
        async def server_sent_events(
            chat_chunks: Iterator[server_types.ChatCompletionChunk],
        ):
            for chat_chunk in chat_chunks:
                yield dict(data=json.dumps(chat_chunk))
            yield dict(data="[DONE]")

        _chunks: Iterator[server_types.ChatCompletionChunk] = completion_or_chunks  # type: ignore
        return EventSourceResponse(
            server_sent_events(_chunks),
        )
    _completion: server_types.ChatCompletion = completion_or_chunks  # type: ignore
    return _completion
class ModelData(TypedDict):
    """Metadata for a single model, as returned by /v1/models."""
    id: str
    object: Literal["model"]
    owned_by: str
    permissions: List[str]
class ModelList(TypedDict):
    """Response body of /v1/models: a list wrapper around ModelData entries."""
    object: Literal["list"]
    data: List[ModelData]
# Pydantic response model generated from the ModelList TypedDict (for OpenAPI docs).
GetModelResponse = create_model_from_typeddict(ModelList)


@app.get("/v1/models", response_model=GetModelResponse)
def get_models() -> ModelList:
    # Single-model server: report the configured model path as the only entry.
    return {
        "object": "list",
        "data": [
            {
                "id": settings.model,
                "object": "model",
                "owned_by": "me",
                "permissions": [],
            }
        ],
    }
if __name__ == "__main__":
    # BUGFIX: removed a redundant `import os` (os is already imported at module level).
    import uvicorn

    # Host/port are configurable via the HOST and PORT environment variables.
    uvicorn.run(app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000)))
from typing import List, Optional, Dict, Union
from typing_extensions import TypedDict, NotRequired, Literal
class EmbeddingUsage(TypedDict):
    """Token accounting for an embedding request."""
    prompt_tokens: int
    total_tokens: int
class EmbeddingData(TypedDict):
    """A single embedding vector and its position within the request batch."""
    index: int
    object: str
    embedding: List[float]
class Embedding(TypedDict):
    """OpenAI-style response body for the embeddings endpoint."""
    object: Literal["list"]
    model: str
    data: List[EmbeddingData]
    usage: EmbeddingUsage
class CompletionLogprobs(TypedDict):
    """Per-token log-probability details for a completion choice."""
    text_offset: List[int]
    token_logprobs: List[float]
    tokens: List[str]
    top_logprobs: List[Dict[str, float]]
class CompletionChoice(TypedDict):
    """One generated alternative within a (chunked or full) completion."""
    text: str
    index: int
    logprobs: Optional[CompletionLogprobs]
    # "stop", "length", or None while a stream is still in progress.
    finish_reason: Optional[str]
class CompletionUsage(TypedDict):
    """Token accounting for a completion request."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class CompletionChunk(TypedDict):
    """One server-sent chunk of a streamed text completion (no usage field)."""
    id: str
    object: Literal["text_completion"]
    created: int
    model: str
    choices: List[CompletionChoice]
class Completion(TypedDict):
    """Full (non-streamed) text completion response, including usage."""
    id: str
    object: Literal["text_completion"]
    created: int
    model: str
    choices: List[CompletionChoice]
    usage: CompletionUsage
class ChatCompletionMessage(TypedDict):
    """A single chat message with an OpenAI-style role."""
    role: Union[Literal["assistant"], Literal["user"], Literal["system"]]
    content: str
    # Optional end-user identifier.
    user: NotRequired[str]
class ChatCompletionChoice(TypedDict):
    """One generated alternative within a chat completion."""
    index: int
    message: ChatCompletionMessage
    finish_reason: Optional[str]
class ChatCompletion(TypedDict):
    """Full (non-streamed) chat completion response, including usage."""
    id: str
    object: Literal["chat.completion"]
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: CompletionUsage
class ChatCompletionChunkDelta(TypedDict):
    """Incremental update in a streamed chat completion; either field may be absent."""
    role: NotRequired[Literal["assistant"]]
    content: NotRequired[str]
class ChatCompletionChunkChoice(TypedDict):
    """One choice entry inside a streamed chat-completion chunk."""
    index: int
    delta: ChatCompletionChunkDelta
    finish_reason: Optional[str]
class ChatCompletionChunk(TypedDict):
    """One server-sent chunk of a streamed chat completion."""
    # BUGFIX: removed dataset-extraction residue that was fused onto the final
    # field line and made this class a syntax error.
    id: str
    model: str
    object: Literal["chat.completion.chunk"]
    created: int
    choices: List[ChatCompletionChunkChoice]
import os
import torch
import multiprocessing
import rwkv_cpp_shared_library
from typing import Tuple, Optional
class RWKVModel:
    """
    PyTorch wrapper around rwkv.cpp model.
    """

    def __init__(
        self,
        shared_library: rwkv_cpp_shared_library.RWKVSharedLibrary,
        model_path: str,
        thread_count: int = max(1, multiprocessing.cpu_count() // 2)
    ):
        """
        Loads the model and prepares it for inference.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library : RWKVSharedLibrary
            rwkv.cpp shared library.
        model_path : str
            Path to RWKV model file in ggml format.
        thread_count : int
            Thread count to use. If not set, defaults to CPU count / 2.
        """
        assert os.path.isfile(model_path), f'{model_path} is not a file'
        assert thread_count > 0, 'Thread count must be positive'

        self._library = shared_library
        self._ctx = self._library.rwkv_init_from_file(model_path, thread_count)
        self._state_buffer_element_count = self._library.rwkv_get_state_buffer_element_count(self._ctx)
        self._logits_buffer_element_count = self._library.rwkv_get_logits_buffer_element_count(self._ctx)
        # Guards against use-after-free; flipped by free().
        self._valid = True

    def eval(
        self,
        token: int,
        state_in: Optional[torch.Tensor],
        state_out: Optional[torch.Tensor] = None,
        logits_out: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Evaluates the model for a single token.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        token : int
            Index of next token to be seen by the model. Must be in range 0 <= token < n_vocab.
        state_in : Optional[torch.Tensor]
            State from previous call of this method. If this is a first pass, set it to None.
        state_out : Optional[torch.Tensor]
            Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
        logits_out : Optional[torch.Tensor]
            Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).

        Returns
        -------
        logits, state
            Logits vector of shape (n_vocab); state for the next step.
        """
        assert self._valid, 'Model was freed'

        def validate_buffer(buf: torch.Tensor, name: str, size: int) -> None:
            # C side writes raw floats, so the tensor must be a flat, dense FP32 buffer.
            assert buf.dtype == torch.float32, f'{name} is not of type float32'
            assert buf.is_contiguous(), f'{name} is not contiguous'
            assert buf.shape == (size,), f'{name} has invalid shape {buf.shape}, expected ({size})'

        if state_in is not None:
            validate_buffer(state_in, 'state_in', self._state_buffer_element_count)

            # BUGFIX: use Tensor.data_ptr() instead of storage().data_ptr() — the
            # storage base address differs from the tensor's first element when
            # the tensor has a non-zero storage offset (e.g. a view), and
            # Tensor.storage() is deprecated in newer PyTorch versions.
            state_in_ptr = state_in.data_ptr()
        else:
            state_in_ptr = 0

        if state_out is not None:
            validate_buffer(state_out, 'state_out', self._state_buffer_element_count)
        else:
            state_out = torch.zeros(self._state_buffer_element_count, dtype=torch.float32, device='cpu')

        if logits_out is not None:
            validate_buffer(logits_out, 'logits_out', self._logits_buffer_element_count)
        else:
            logits_out = torch.zeros(self._logits_buffer_element_count, dtype=torch.float32, device='cpu')

        self._library.rwkv_eval(
            self._ctx,
            token,
            state_in_ptr,
            state_out.data_ptr(),
            logits_out.data_ptr()
        )

        return logits_out, state_out

    def free(self):
        """
        Frees all allocated resources.
        In case of any error, this method will throw an exception.
        The object must not be used anymore after calling this method.
        """
        assert self._valid, 'Already freed'

        self._valid = False

        self._library.rwkv_free(self._ctx)

    def __del__(self):
        # Free the context on GC in case user forgot to call free() explicitly.
        if hasattr(self, '_valid') and self._valid:
            self.free()
# rwkv.cpp
This is a port of [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) to [ggerganov/ggml](https://github.com/ggerganov/ggml).
Besides the usual **FP32**, it supports **FP16** and **quantized INT4** inference on CPU. This project is **CPU only**.
RWKV is a novel large language model architecture, [with the largest model in the family having 14B parameters](https://huggingface.co/BlinkDL/rwkv-4-pile-14b). In contrast to Transformer with `O(n^2)` attention, RWKV requires only the state from the previous step to calculate logits. This makes RWKV very CPU-friendly on large context lengths.
This project provides [a C library rwkv.h](rwkv.h) and [a convenient Python wrapper](rwkv%2Frwkv_cpp_model.py) for it.
**TODO (contributions welcome!)**:
1. Optimize AVX2 implementation of `Q4_1_O` matmul — currently, it is as slow as `FP32`
2. Measure latency and perplexity of different model sizes (169M to 14B) and data types (`FP32`, `FP16`, `Q4_0`, `Q4_1`, `Q4_1_O`)
3. Test on Linux (including Colab) and MacOS
4. Make required memory calculation more robust (see [#4](https://github.com/saharNooby/rwkv.cpp/issues/4))
## How to use
### 1. Clone the repo
**Requirements**: [git](https://gitforwindows.org/).
```commandline
git clone https://github.com/saharNooby/rwkv.cpp.git
cd rwkv.cpp
```
### 2. Get the rwkv.cpp library
#### Option 2.1. Download a pre-compiled library
##### Windows / Linux / MacOS
Check out [Releases](https://github.com/saharNooby/rwkv.cpp/releases), download appropriate ZIP for your OS and CPU, extract `rwkv` library file into the repository directory.
On Windows: to check whether your CPU supports AVX2 or AVX-512, [use CPU-Z](https://www.cpuid.com/softwares/cpu-z.html).
#### Option 2.2. Build the library yourself
##### Windows
**Requirements**: [CMake](https://cmake.org/download/) or [CMake from anaconda](https://anaconda.org/conda-forge/cmake), MSVC compiler.
```commandline
cmake -DBUILD_SHARED_LIBS=ON .
cmake --build . --config Release
```
If everything went OK, `bin\Release\rwkv.dll` file should appear.
##### Linux / MacOS
**Requirements**: CMake (Linux: `sudo apt install cmake`, MacOS: `brew install cmake`, anaconda: [cmake package](https://anaconda.org/conda-forge/cmake)).
```commandline
cmake -DBUILD_SHARED_LIBS=ON .
cmake --build . --config Release
```
**Anaconda & M1 users**: please verify that `CMAKE_SYSTEM_PROCESSOR: arm64` after running `cmake -DBUILD_SHARED_LIBS=ON .` — if it detects `x86_64`, edit the `CMakeLists.txt` file under the `# Compile flags` to add `set(CMAKE_SYSTEM_PROCESSOR "arm64")`.
If everything went OK, `librwkv.so` (Linux) or `librwkv.dylib` (MacOS) file should appear in the base repo folder.
### 3. Download an RWKV model from [Hugging Face](https://huggingface.co/BlinkDL) like [this one](https://huggingface.co/BlinkDL/rwkv-4-pile-169m/blob/main/RWKV-4-Pile-169M-20220807-8023.pth) and convert it into `ggml` format
**Requirements**: Python 3.x with [PyTorch](https://pytorch.org/get-started/locally/).
```commandline
# Windows
python rwkv\convert_pytorch_to_ggml.py C:\RWKV-4-Pile-169M-20220807-8023.pth C:\rwkv.cpp-169M.bin float16
# Linux / MacOS
python rwkv/convert_pytorch_to_ggml.py ~/Downloads/RWKV-4-Pile-169M-20220807-8023.pth ~/Downloads/rwkv.cpp-169M.bin float16
```
#### 3.1. Optionally, quantize the model
To convert the model into INT4 quantized format, run:
```commandline
# Windows
python rwkv\quantize.py C:\rwkv.cpp-169M.bin C:\rwkv.cpp-169M-Q4_1_O.bin 4
# Linux / MacOS
python rwkv/quantize.py ~/Downloads/rwkv.cpp-169M.bin ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin 4
```
Formats available:
- `4`: `Q4_1_O`, best quality, very slow (as `FP32`).
- `3`: `Q4_1`, poor quality, very fast (as `FP16`).
- `2`: `Q4_0`, worst quality, breaks larger models, moderately fast (between `FP16` and `FP32`).
### 4. Run the model
**Requirements**: Python 3.x with [PyTorch](https://pytorch.org/get-started/locally/) and [tokenizers](https://pypi.org/project/tokenizers/).
**Note**: change the model path with the non-quantized model for the full weights model.
To generate some text, run:
```commandline
# Windows
python rwkv\generate_completions.py C:\rwkv.cpp-169M-Q4_1_O.bin
# Linux / MacOS
python rwkv/generate_completions.py ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin
```
To chat with a bot, run:
```commandline
# Windows
python rwkv\chat_with_bot.py C:\rwkv.cpp-169M-Q4_1_O.bin
# Linux / MacOS
python rwkv/chat_with_bot.py ~/Downloads/rwkv.cpp-169M-Q4_1_O.bin
```
Edit [generate_completions.py](rwkv%2Fgenerate_completions.py) or [chat_with_bot.py](rwkv%2Fchat_with_bot.py) to change prompts and sampling settings.
---
Example of using `rwkv.cpp` in your custom Python script:
```python
import rwkv_cpp_model
import rwkv_cpp_shared_library
# Change to model paths used above (quantized or full weights)
model_path = r'C:\rwkv.cpp-169M.bin'
model = rwkv_cpp_model.RWKVModel(
rwkv_cpp_shared_library.load_rwkv_shared_library(),
model_path
)
logits, state = None, None
for token in [1, 2, 3]:
logits, state = model.eval(token, state)
print(f'Output logits: {logits}')
# Don't forget to free the memory after you've done working with the model
model.free()
```
import argparse
import os
import pathlib
import time
import sampling
import tokenizers
import rwkv_cpp_model
import rwkv_cpp_shared_library
# ======================================== Script settings ========================================

prompt: str = """# rwkv.cpp
This is a port of [BlinkDL/RWKV-LM](https://github.com/BlinkDL/RWKV-LM) to [ggerganov/ggml](https://github.com/ggerganov/ggml).
Besides usual **FP32**, it supports **FP16** and **quantized INT4** inference on CPU. This project is **CPU only**."""

# How many completions to generate.
generation_count: int = 3
# Token count per single completion.
tokens_per_generation: int = 100

# Sampling settings.
temperature: float = 0.8
top_p: float = 0.5

# =================================================================================================

parser = argparse.ArgumentParser(description='Generate completions from RWKV model based on a prompt')
parser.add_argument('model_path', help='Path to RWKV model in ggml format')
args = parser.parse_args()

assert prompt != '', 'Prompt must not be empty'

print('Loading 20B tokenizer')
# The tokenizer JSON ships next to this script.
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))

library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(f'System info: {library.rwkv_get_system_info_string()}')

print('Loading RWKV model')
model = rwkv_cpp_model.RWKVModel(library, args.model_path)

prompt_tokens = tokenizer.encode(prompt).ids

prompt_token_count = len(prompt_tokens)
print(f'{prompt_token_count} tokens in prompt')

init_logits, init_state = None, None

# Evaluate the prompt once; each generation below restarts from this state.
for token in prompt_tokens:
    init_logits, init_state = model.eval(token, init_state, init_state, init_logits)
# BUGFIX: removed dataset-extraction residue fused onto the final print line.
for GENERATION in range(generation_count):
    print(f'\n--- Generation {GENERATION} ---\n')
    # Echo the prompt; the completion is printed between brackets.
    print(prompt, end='[')

    start = time.time()

    # Clone so every generation starts from the same post-prompt state.
    logits, state = init_logits.clone(), init_state.clone()

    for i in range(tokens_per_generation):
        token = sampling.sample_logits(logits, temperature, top_p)

        print(tokenizer.decode([token]), end='')

        logits, state = model.eval(token, state, state, logits)

    delay = time.time() - start

    print(']\n\nTook %.3f sec, %d ms per token' % (delay, delay / tokens_per_generation * 1000))
"""Terminal chat with an RWKV model.

Feeds an initial dialog transcript to the model, then alternates between
reading user input from stdin and sampling a bot reply token by token.
"""
import os
import sys
import argparse
import pathlib
import sampling
import tokenizers
import rwkv_cpp_model
import rwkv_cpp_shared_library
# ======================================== Script settings ========================================
# Copied from https://github.com/ggerganov/llama.cpp/blob/6e7801d08d81c931a5427bae46f00763e993f54a/prompts/chat-with-bob.txt
prompt: str = """Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
User: Hello, Bob.
Bob: Hello. How may I help you today?
User: Please tell me the largest city in Europe.
Bob: Sure. The largest city in Europe is Moscow, the capital of Russia."""
# No trailing space here!
bot_message_prefix: str = 'Bob:'
user_message_prefix: str = 'User:'
max_tokens_per_generation: int = 100
# Sampling settings.
temperature: float = 0.8
top_p: float = 0.5
# =================================================================================================
parser = argparse.ArgumentParser(description='Provide terminal-based chat interface for RWKV model')
parser.add_argument('model_path', help='Path to RWKV model in ggml format')
args = parser.parse_args()
assert prompt != '', 'Prompt must not be empty'
print('Loading 20B tokenizer')
# The tokenizer JSON is expected to live next to this script.
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))
library = rwkv_cpp_shared_library.load_rwkv_shared_library()
print(f'System info: {library.rwkv_get_system_info_string()}')
print('Loading RWKV model')
model = rwkv_cpp_model.RWKVModel(library, args.model_path)
prompt_tokens = tokenizer.encode(prompt).ids
prompt_token_count = len(prompt_tokens)
print(f'Processing {prompt_token_count} prompt tokens, may take a while')
logits, state = None, None
for token in prompt_tokens:
    logits, state = model.eval(token, state, state, logits)
print('\nChat initialized! Write something and press Enter.')
while True:
    # Read user input.
    # Bug fix: flush so the prompt is visible before blocking on stdin even
    # when stdout is block-buffered (e.g. piped); consistent with the
    # flush=True used during token streaming below.
    print('> ', end='', flush=True)
    user_input = sys.stdin.readline()
    # Process the input.
    # NOTE(review): `user_input` keeps its trailing newline from readline(),
    # so the encoded text contains a blank line before the bot prefix —
    # presumably intentional formatting; confirm before changing.
    new_tokens = tokenizer.encode('\n' + user_message_prefix + ' ' + user_input + '\n' + bot_message_prefix).ids
    for token in new_tokens:
        logits, state = model.eval(token, state, state, logits)
    # Generate and print bot response.
    print(bot_message_prefix, end='', flush=True)
    decoded = ''
    for i in range(max_tokens_per_generation):
        token = sampling.sample_logits(logits, temperature, top_p)
        decoded = tokenizer.decode([token])
        print(decoded, end='', flush=True)
        # Stop at the first newline: the bot's turn is over.
        if '\n' in decoded:
            break
        logits, state = model.eval(token, state, state, logits)
    # Make sure the next '>' prompt starts on a fresh line.
    if '\n' not in decoded:
        print()
import os
import time
import pathlib
import argparse
import tokenizers
import torch
import rwkv_cpp_model
import rwkv_cpp_shared_library
from typing import List
def parse_args():
    """Parse command-line arguments for the perplexity measurement script.

    Returns
    -------
    argparse.Namespace
        Holds ``model_path``, ``text_path`` and ``ignore_first_n_tokens``.
    """
    parser = argparse.ArgumentParser(description='Measure perplexity and per-token latency of an RWKV model on a given text file')
    parser.add_argument('model_path', help='Path to model checkpoint file')
    parser.add_argument('text_path', help='Path to text file in UTF-8 encoding')
    # Bug fix: argparse ignores `default` on a required positional argument;
    # without nargs='?' the documented default of 1024 was never applied and
    # omitting the value was a usage error.
    parser.add_argument('ignore_first_n_tokens', help='How many tokens should be skipped before loss is measured', type=int, nargs='?', default=1024)
    return parser.parse_args()
args = parse_args()
# ---
print('Loading 20B tokenizer')
# The tokenizer JSON is expected to live next to this script.
tokenizer_path: pathlib.Path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer: tokenizers.Tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))
print('Loading text')
text: str = open(args.text_path, encoding='utf-8').read()
tokens: List[int] = tokenizer.encode(text).ids
token_count: int = len(tokens)
print(f'{token_count} tokens in the text')
# After skipping the warm-up region we need at least one (input, target) pair.
assert token_count - args.ignore_first_n_tokens > 1, 'Need at least 2 tokens for evaluation'
def format_loss(loss: torch.Tensor) -> str:
    """Format every element of *loss* to 3 decimals, comma-separated."""
    return ', '.join('%.3f' % value for value in loss.tolist())
def format_loss_with_perplexity(loss: torch.Tensor) -> str:
    """Render *loss* plus the perplexity derived from its first element."""
    perplexity = torch.exp(loss[0]).item()
    return 'loss [%s], perplexity %.3f' % (format_loss(loss), perplexity)
# ---
model: rwkv_cpp_model.RWKVModel = rwkv_cpp_model.RWKVModel(
    rwkv_cpp_shared_library.load_rwkv_shared_library(),
    args.model_path
)
logits, state = None, None
loss_sum: torch.Tensor = torch.tensor([0.0])
loss_count: int = 0
start: float = time.time()
# Each step feeds tokens[i] and scores the prediction of tokens[i + 1],
# hence one fewer run than there are tokens.
run_count: int = token_count - 1
for i in range(run_count):
    token: int = tokens[i]
    target: int = tokens[i + 1]
    logits, state = model.eval(token, state, state, logits)
    # Skip the warm-up region: accumulate loss only once the model has seen
    # at least `ignore_first_n_tokens` tokens of context.
    if args.ignore_first_n_tokens == 0 or i + 1 >= args.ignore_first_n_tokens:
        losses = torch.tensor([
            torch.nn.functional.cross_entropy(logits, torch.tensor(target, dtype=torch.long), reduction='none').item()
        ])
        loss_sum += losses
        loss_count += 1
    # Progress/ETA report every 10 tokens.
    if i % 10 == 0:
        avg_loss_so_far = loss_sum / loss_count
        duration: float = time.time() - start
        duration_per_token: float = duration / (i + 1)
        runs_remaining: int = run_count - i - 1
        duration_remaining: int = int(runs_remaining * duration_per_token)
        print(f'Token #{i}/{token_count}, '
              f'{int(100.0 * i / token_count)}%, '
              f'ETA {duration_remaining // 60} m {duration_remaining % 60} s', end='')
        if loss_count > 0:
            print(f', averages so far: {format_loss_with_perplexity(avg_loss_so_far)}')
        else:
            print()
print()
print(f'Average latency: {int((time.time() - start) * 1000 / run_count)} ms per token')
print()
print(f'Model: {os.path.basename(args.model_path)}, '
      f'data: {os.path.basename(args.text_path)} with {token_count} tokens, '
      f'skipped {args.ignore_first_n_tokens} tokens, '
      f'averages: {format_loss_with_perplexity(loss_sum / loss_count)}')
import os
import sys
import ctypes
import pathlib
from typing import Optional
P_FLOAT = ctypes.POINTER(ctypes.c_float)
class RWKVContext:
    """Thin holder for the opaque context pointer returned by rwkv_init_from_file."""

    def __init__(self, ptr: ctypes.pointer):
        # Raw pointer owned by the C library; zeroed out by rwkv_free.
        self.ptr = ptr
class RWKVSharedLibrary:
    """
    Python wrapper around rwkv.cpp shared library.
    """

    def __init__(self, shared_library_path: str):
        """
        Loads the shared library from specified file.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library_path : str
            Path to rwkv.cpp shared library. On Windows, it would look like 'rwkv.dll'. On UNIX, 'rwkv.so'.
        """
        self.library = ctypes.cdll.LoadLibrary(shared_library_path)

        # Declare argument/result types for every exported function so ctypes
        # performs correct conversions (the default c_int restype would
        # truncate 64-bit pointers).
        self.library.rwkv_init_from_file.argtypes = [ctypes.c_char_p, ctypes.c_uint32]
        self.library.rwkv_init_from_file.restype = ctypes.c_void_p

        self.library.rwkv_eval.argtypes = [
            ctypes.c_void_p,  # ctx
            ctypes.c_int32,   # token
            P_FLOAT,          # state_in
            P_FLOAT,          # state_out
            P_FLOAT           # logits_out
        ]
        self.library.rwkv_eval.restype = ctypes.c_bool

        self.library.rwkv_get_state_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_state_buffer_element_count.restype = ctypes.c_uint32

        self.library.rwkv_get_logits_buffer_element_count.argtypes = [ctypes.c_void_p]
        self.library.rwkv_get_logits_buffer_element_count.restype = ctypes.c_uint32

        # Fix: this declaration was duplicated in the original; declaring it
        # once is sufficient.
        self.library.rwkv_free.argtypes = [ctypes.c_void_p]
        self.library.rwkv_free.restype = None

        self.library.rwkv_quantize_model_file.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint32]
        self.library.rwkv_quantize_model_file.restype = ctypes.c_bool

        self.library.rwkv_get_system_info_string.argtypes = []
        self.library.rwkv_get_system_info_string.restype = ctypes.c_char_p

    def rwkv_init_from_file(self, model_file_path: str, thread_count: int) -> RWKVContext:
        """
        Loads the model from a file and prepares it for inference.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path : str
            Path to model file in ggml format.
        thread_count : int
            Count of threads to use, must be positive.

        Returns
        -------
        RWKVContext
            Context handle to pass to the other rwkv_* methods.
        """
        ptr = self.library.rwkv_init_from_file(model_file_path.encode('utf-8'), ctypes.c_uint32(thread_count))
        # A NULL return is converted to None by the c_void_p restype.
        assert ptr is not None, 'rwkv_init_from_file failed, check stderr'
        return RWKVContext(ptr)

    def rwkv_eval(
        self,
        ctx: RWKVContext,
        token: int,
        state_in_address: Optional[int],
        state_out_address: int,
        logits_out_address: int
    ) -> None:
        """
        Evaluates the model for a single token.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        token : int
            Next token index, in range 0 <= token < n_vocab.
        state_in_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count; or None, if this is a first pass.
        state_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_state_buffer_element_count. This buffer will be written to.
        logits_out_address : int
            Address of the first element of a FP32 buffer of size rwkv_get_logits_buffer_element_count. This buffer will be written to.
        """
        assert self.library.rwkv_eval(
            ctx.ptr,
            ctypes.c_int32(token),
            # NULL state pointer signals "first pass" to the C side.
            ctypes.cast(0 if state_in_address is None else state_in_address, P_FLOAT),
            ctypes.cast(state_out_address, P_FLOAT),
            ctypes.cast(logits_out_address, P_FLOAT)
        ), 'rwkv_eval failed, check stderr'

    def rwkv_get_state_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in state buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        return self.library.rwkv_get_state_buffer_element_count(ctx.ptr)

    def rwkv_get_logits_buffer_element_count(self, ctx: RWKVContext) -> int:
        """
        Returns count of FP32 elements in logits buffer.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        return self.library.rwkv_get_logits_buffer_element_count(ctx.ptr)

    def rwkv_free(self, ctx: RWKVContext) -> None:
        """
        Frees all allocated memory and the context.

        Parameters
        ----------
        ctx : RWKVContext
            RWKV context obtained from rwkv_init_from_file.
        """
        self.library.rwkv_free(ctx.ptr)
        # Null out the handle so accidental reuse fails loudly on the C side.
        ctx.ptr = ctypes.cast(0, ctypes.c_void_p)

    def rwkv_quantize_model_file(self, model_file_path_in: str, model_file_path_out: str, q_type: int) -> None:
        """
        Quantizes FP32 or FP16 model to one of INT4 formats.
        Throws an exception in case of any error. Error messages would be printed to stderr.

        Parameters
        ----------
        model_file_path_in : str
            Path to model file in ggml format, must be either FP32 or FP16.
        model_file_path_out : str
            Quantized model will be written here.
        q_type : int
            Set to 2 for GGML_TYPE_Q4_0, set to 3 for GGML_TYPE_Q4_1.
        """
        assert self.library.rwkv_quantize_model_file(
            model_file_path_in.encode('utf-8'),
            model_file_path_out.encode('utf-8'),
            ctypes.c_uint32(q_type)
        ), 'rwkv_quantize_model_file failed, check stderr'

    def rwkv_get_system_info_string(self) -> str:
        """
        Returns system information string.
        """
        return self.library.rwkv_get_system_info_string().decode('utf-8')
def load_rwkv_shared_library() -> RWKVSharedLibrary:
    """
    Attempts to find rwkv.cpp shared library and load it.
    To specify exact path to the library, create an instance of RWKVSharedLibrary explicitly.
    """
    file_name: str

    if 'win32' in sys.platform or 'cygwin' in sys.platform:
        file_name = 'rwkv.dll'
    elif 'darwin' in sys.platform:
        # Bug fix: shared libraries built by CMake on macOS are named
        # 'librwkv.dylib', not 'librwkv.so' (the original darwin branch was
        # identical to the generic UNIX branch and could never match).
        file_name = 'librwkv.dylib'
    else:
        file_name = 'librwkv.so'

    repo_root_dir: pathlib.Path = pathlib.Path(os.path.abspath(__file__)).parent.parent

    paths = [
        # If we are in "rwkv" directory
        f'../bin/Release/{file_name}',
        # If we are in repo root directory
        f'bin/Release/{file_name}',
        # Search relative to this file
        str(repo_root_dir / 'bin' / 'Release' / file_name),
        # Search in python package
        str(repo_root_dir / 'rwkv' / file_name),
        # Fallback
        str(repo_root_dir / file_name)
    ]

    for path in paths:
        if os.path.isfile(path):
            return RWKVSharedLibrary(path)

    # Nothing found: hand the last candidate to ctypes and let it raise a
    # descriptive OSError if it does not exist either.
    return RWKVSharedLibrary(paths[-1])
import os
import time
import uuid
import json
import pathlib
from typing import List, Optional, Literal, Union, Iterator, Dict
from typing_extensions import TypedDict
import sampling
import tokenizers
import rwkv_cpp_model
import rwkv_cpp_shared_library
import server_types
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, BaseSettings, Field, create_model_from_typeddict
from sse_starlette.sse import EventSourceResponse
class Settings(BaseSettings):
    """Server configuration, populated from environment variables by pydantic."""
    # Path to the RWKV model file in ggml format (MODEL env var).
    model: str
    # Tokens produced per inner loop iteration of `generate` before the
    # generator restarts from the post-prompt state.
    tokens_per_generation: int = 100
# FastAPI application exposing an OpenAI-compatible REST API for RWKV.
app = FastAPI(
    title="RWKV API",
    version="0.0.1",
)
# Allow cross-origin requests from any host so browser clients can connect.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
settings = Settings()
# The 20B tokenizer JSON is expected to live next to this script.
tokenizer_path = pathlib.Path(os.path.abspath(__file__)).parent / '20B_tokenizer.json'
tokenizer = tokenizers.Tokenizer.from_file(str(tokenizer_path))
library = rwkv_cpp_shared_library.load_rwkv_shared_library()
# Model is loaded once at import time and shared by all request handlers.
model = rwkv_cpp_model.RWKVModel(library, settings.model)
model_path = settings.model
def generate(prompt, temperature, top_p):
    """Yield sampled token ids for *prompt* indefinitely.

    The prompt is evaluated once; afterwards the generator repeatedly clones
    the post-prompt logits/state and samples `settings.tokens_per_generation`
    tokens per round, restarting from the prompt state each round.
    """
    base_logits, base_state = None, None
    for prompt_token in tokenizer.encode(prompt).ids:
        base_logits, base_state = model.eval(prompt_token, base_state, base_state, base_logits)
    while True:
        # Clone so the cached post-prompt tensors stay untouched.
        logits, state = base_logits.clone(), base_state.clone()
        for _ in range(settings.tokens_per_generation):
            sampled = sampling.sample_logits(logits, temperature, top_p)
            yield sampled
            logits, state = model.eval(sampled, state, state, logits)
def create_completion_or_chunks(
    prompt: str,
    suffix: Optional[str] = None,
    max_tokens: int = 16,
    temperature: float = 0.8,
    top_p: float = 0.95,
    logprobs: Optional[int] = None,
    echo: bool = False,
    stop: List[str] = [],
    stream: bool = False,
) -> Union[Iterator[server_types.Completion], Iterator[server_types.CompletionChunk],]:
    """Generate a completion for *prompt*.

    When *stream* is true, yields one CompletionChunk per newly decoded text
    piece (holding back any suffix that could be the start of a stop
    sequence), followed by a final chunk carrying the finish_reason.
    Otherwise yields exactly one complete Completion object.
    """
    completion_id = f"cmpl-{str(uuid.uuid4())}"
    created = int(time.time())
    completion_tokens: List[int] = []
    prompt_tokens: List[int] = tokenizer.encode(prompt).ids # type: ignore
    text = ""
    # Characters already sent to the client in streaming mode.
    returned_characters = 0
    if stop != []:
        stop_sequences = [s for s in stop]
    else:
        stop_sequences = []
    finish_reason = None
    for token in generate(
        prompt,
        top_p=top_p,
        temperature=temperature,
    ):
        # Token 0 is treated as end-of-text.
        if token == 0:
            text = tokenizer.decode(completion_tokens) # type: ignore
            finish_reason = "stop"
            break
        completion_tokens.append(token)
        all_text: str = tokenizer.decode(completion_tokens) # type: ignore
        # Truncate at the earliest stop sequence that has fully appeared.
        any_stop = [s for s in stop_sequences if s in all_text]
        if len(any_stop) > 0:
            first_stop = any_stop[0]
            text = all_text[: all_text.index(first_stop)]
            finish_reason = "stop"
            break
        if stream:
            start = returned_characters
            # Longest partial stop-sequence match at the end of the text.
            longest = 0
            # We want to avoid yielding any characters from
            # the generated text if they are part of a stop
            # sequence.
            for s in stop_sequences:
                for i in range(len(s), 0, -1):
                    if all_text.endswith(s[:i]):
                        if i > longest:
                            longest = i
                        break
            text = all_text[: len(all_text) - longest]
            returned_characters += len(text[start:])
            yield {
                "id": completion_id,
                "object": "text_completion",
                "created": created,
                "model": model_path,
                "choices": [
                    {
                        "text": text[start:],
                        "index": 0,
                        "logprobs": None,
                        "finish_reason": None,
                    }
                ],
            }
        if len(completion_tokens) >= max_tokens:
            text = tokenizer.decode(completion_tokens)
            finish_reason = "length"
            break
    if finish_reason is None:
        finish_reason = "length"
    if stream:
        # Final chunk: flush any held-back characters and report finish_reason.
        yield {
            "id": completion_id,
            "object": "text_completion",
            "created": created,
            "model": model_path,
            "choices": [
                {
                    "text": text[returned_characters:],
                    "index": 0,
                    "logprobs": None,
                    "finish_reason": finish_reason,
                }
            ],
        }
        return
    if echo:
        text = prompt + text
    if suffix is not None:
        text = text + suffix
    if logprobs is not None:
        raise NotImplementedError("logprobs not implemented")
    yield {
        "id": completion_id,
        "object": "text_completion",
        "created": created,
        "model": model_path,
        "choices": [
            {
                "text": text,
                "index": 0,
                "logprobs": None,
                "finish_reason": finish_reason,
            }
        ],
        "usage": {
            "prompt_tokens": len(prompt_tokens),
            "completion_tokens": len(completion_tokens),
            "total_tokens": len(prompt_tokens) + len(completion_tokens),
        },
    }
def completion(
    prompt: str,
    suffix: Optional[str] = None,
    max_tokens: int = 128,
    temperature: float = 0.8,
    top_p: float = 0.95,
    logprobs: Optional[int] = None,
    echo: bool = False,
    stop: List[str] = [],
    stream: bool = False,
) -> Union[server_types.Completion, Iterator[server_types.CompletionChunk]]:
    """Generate a completion for *prompt*.

    Returns an iterator of chunks when *stream* is true; otherwise consumes
    the underlying generator and returns the single final Completion object.
    """
    result_iter = create_completion_or_chunks(
        prompt=prompt,
        suffix=suffix,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        logprobs=logprobs,
        echo=echo,
        stop=stop,
        stream=stream,
    )
    if not stream:
        # Non-streaming mode yields exactly one Completion object.
        return next(result_iter)  # type: ignore
    return result_iter  # type: ignore
def convert_text_completion_to_chat(
    completion: server_types.Completion
) -> server_types.ChatCompletion:
    """Re-shape a plain text completion into the chat-completion response format."""
    first_choice = completion["choices"][0]
    chat_choice = {
        "index": 0,
        "message": {
            "role": "assistant",
            "content": first_choice["text"],
        },
        "finish_reason": first_choice["finish_reason"],
    }
    return {
        "id": "chat" + completion["id"],
        "object": "chat.completion",
        "created": completion["created"],
        "model": completion["model"],
        "choices": [chat_choice],
        "usage": completion["usage"],
    }
def convert_text_completion_chunks_to_chat(
    chunks: Iterator[server_types.CompletionChunk],
) -> Iterator[server_types.ChatCompletionChunk]:
    """Translate streamed text-completion chunks into chat-completion chunks.

    A role-announcement delta is emitted before the very first content chunk,
    matching the OpenAI streaming protocol.
    """
    first = True
    for chunk in chunks:
        if first:
            first = False
            yield {
                "id": "chat" + chunk["id"],
                "model": chunk["model"],
                "created": chunk["created"],
                "object": "chat.completion.chunk",
                "choices": [
                    {
                        "index": 0,
                        "delta": {
                            "role": "assistant",
                        },
                        "finish_reason": None,
                    }
                ],
            }
        yield {
            "id": "chat" + chunk["id"],
            "model": chunk["model"],
            "created": chunk["created"],
            "object": "chat.completion.chunk",
            "choices": [
                {
                    "index": 0,
                    "delta": {
                        "content": chunk["choices"][0]["text"],
                    },
                    "finish_reason": chunk["choices"][0]["finish_reason"],
                }
            ],
        }
def chat_completion(
    messages: List[server_types.ChatCompletionMessage],
    temperature: float = 0.8,
    top_p: float = 0.95,
    stream: bool = False,
    stop: List[str] = [],
    max_tokens: int = 128,
) -> Union[server_types.ChatCompletion, Iterator[server_types.ChatCompletionChunk]]:
    """Generate a chat completion from an OpenAI-style message list.

    Builds an instruction-style prompt from *messages*, runs `completion`,
    and converts the result to the chat response format (streamed or not).
    """
    instructions = """Complete the following chat conversation between the user and the assistant. System messages should be strictly followed as additional instructions."""
    chat_history = "\n".join(
        f'{message["role"]} {message.get("user", "")}: {message["content"]}'
        for message in messages
    )
    PROMPT = f" \n\n### Instructions:{instructions}\n\n### Inputs:{chat_history}\n\n### Response:\nassistant: "
    PROMPT_STOP = ["###", "\nuser", "\nassistant", "\nsystem"]
    # Bug fix: the original assigned the non-streaming result to a local named
    # `completion`, which made `completion` function-local throughout the body
    # and turned the call below into an UnboundLocalError on every invocation.
    # It also annotated locals with the undefined names `CompletionChunk` /
    # `Completion` instead of the `server_types.` qualified ones.
    completion_or_chunks = completion(
        prompt=PROMPT,
        stop=PROMPT_STOP + stop,
        temperature=temperature,
        top_p=top_p,
        stream=stream,
        max_tokens=max_tokens,
    )
    if stream:
        chunks: Iterator[server_types.CompletionChunk] = completion_or_chunks  # type: ignore
        return convert_text_completion_chunks_to_chat(chunks)
    else:
        result: server_types.Completion = completion_or_chunks  # type: ignore
        return convert_text_completion_to_chat(result)
class CreateCompletionRequest(BaseModel):
    """Request body for POST /v1/completions (OpenAI-compatible)."""
    prompt: str
    suffix: Optional[str] = Field(None)
    max_tokens: int = 16
    temperature: float = 0.8
    top_p: float = 0.95
    echo: bool = False
    stop: List[str] = []
    stream: bool = False
    # ignored or currently unsupported
    model: Optional[str] = Field(None)
    n: Optional[int] = 1
    logprobs: Optional[int] = Field(None)
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    best_of: Optional[int] = 1
    logit_bias: Optional[Dict[str, float]] = Field(None)
    user: Optional[str] = Field(None)
    class Config:
        # Example shown in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "prompt": "\n\n### Instructions:\nWhat is the capital of France?\n\n### Response:\n",
                "stop": ["\n", "###"],
            }
        }
CreateCompletionResponse = create_model_from_typeddict(server_types.Completion)
@app.post(
    "/v1/completions",
    response_model=CreateCompletionResponse,
)
def create_completion(request: CreateCompletionRequest):
    """Text-completion endpoint.

    Streams SSE chunks when `stream` is true; otherwise returns a single
    JSON completion body. Unsupported OpenAI parameters are accepted but
    excluded before dispatch.
    """
    completion_or_chunks = completion(
        **request.dict(
            exclude={
                "model",
                "n",
                "logprobs",
                "frequency_penalty",
                "presence_penalty",
                "best_of",
                "logit_bias",
                "user",
                "top_k",
                "repeat_penalty"
            }
        )
    )
    if request.stream:
        _chunks: Iterator[server_types.CompletionChunk] = completion_or_chunks  # type: ignore
        return EventSourceResponse(dict(data=json.dumps(chunk)) for chunk in _chunks)
    _completion: server_types.Completion = completion_or_chunks  # type: ignore
    return _completion
class CreateEmbeddingRequest(BaseModel):
    """Request body for POST /v1/embeddings (currently unimplemented)."""
    model: Optional[str]
    input: str
    user: Optional[str]
    class Config:
        # Example shown in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "input": "The food was delicious and the waiter...",
            }
        }
CreateEmbeddingResponse = create_model_from_typeddict(server_types.Embedding)
@app.post(
    "/v1/embeddings",
    response_model=CreateEmbeddingResponse,
)
def create_embedding(request: CreateEmbeddingRequest):
    """Embeddings endpoint placeholder; RWKV embedding extraction is not supported yet."""
    raise NotImplementedError()
class ChatCompletionRequestMessage(BaseModel):
    """One chat message in a /v1/chat/completions request."""
    role: Union[Literal["system"], Literal["user"], Literal["assistant"]]
    content: str
    user: Optional[str] = None
class CreateChatCompletionRequest(BaseModel):
    """Request body for POST /v1/chat/completions (OpenAI-compatible)."""
    # Bug fix: `model` was declared twice (once as `Optional[str]`, once with
    # `Field(None)`); the duplicate is removed, keeping the Field(None) form
    # in the original (first) position.
    model: Optional[str] = Field(None)
    messages: List[ChatCompletionRequestMessage]
    temperature: float = 0.8
    top_p: float = 0.95
    stream: bool = False
    stop: List[str] = []
    max_tokens: int = 128
    # ignored or currently unsupported
    n: Optional[int] = 1
    presence_penalty: Optional[float] = 0
    frequency_penalty: Optional[float] = 0
    logit_bias: Optional[Dict[str, float]] = Field(None)
    user: Optional[str] = Field(None)
    class Config:
        # Example shown in the generated OpenAPI docs.
        schema_extra = {
            "example": {
                "messages": [
                    ChatCompletionRequestMessage(
                        role="system", content="You are a helpful assistant."
                    ),
                    ChatCompletionRequestMessage(
                        role="user", content="What is the capital of France?"
                    ),
                ]
            }
        }
CreateChatCompletionResponse = create_model_from_typeddict(server_types.ChatCompletion)
@app.post(
    "/v1/chat/completions",
    response_model=CreateChatCompletionResponse,
)
async def create_chat_completion(
    request: CreateChatCompletionRequest,
) -> Union[server_types.ChatCompletion, EventSourceResponse]:
    """Chat-completion endpoint.

    Streams SSE chunks terminated by a "[DONE]" event when `stream` is true;
    otherwise returns a single JSON chat completion. Unsupported OpenAI
    parameters are accepted but excluded before dispatch.
    """
    completion_or_chunks = chat_completion(
        **request.dict(
            exclude={
                "model",
                "n",
                "presence_penalty",
                "frequency_penalty",
                "logit_bias",
                "user",
            }
        ),
    )
    if request.stream:
        async def server_sent_events(
            chat_chunks: Iterator[server_types.ChatCompletionChunk],
        ):
            for chat_chunk in chat_chunks:
                yield dict(data=json.dumps(chat_chunk))
            # Sentinel event telling clients the stream is finished.
            yield dict(data="[DONE]")
        _chunks: Iterator[server_types.ChatCompletionChunk] = completion_or_chunks  # type: ignore
        return EventSourceResponse(
            server_sent_events(_chunks),
        )
    _completion: server_types.ChatCompletion = completion_or_chunks  # type: ignore
    return _completion
class ModelData(TypedDict):
    """Metadata for one servable model, OpenAI /v1/models shape."""
    id: str
    object: Literal["model"]
    owned_by: str
    permissions: List[str]
class ModelList(TypedDict):
    """Response body of GET /v1/models."""
    object: Literal["list"]
    data: List[ModelData]
GetModelResponse = create_model_from_typeddict(ModelList)
@app.get("/v1/models", response_model=GetModelResponse)
def get_models() -> ModelList:
    """List the single RWKV model served by this process."""
    model_entry: ModelData = {
        "id": settings.model,
        "object": "model",
        "owned_by": "me",
        "permissions": [],
    }
    return {
        "object": "list",
        "data": [model_entry],
    }
if __name__ == "__main__":
    # Fix: `os` is already imported at module level; the redundant re-import
    # is removed. uvicorn is only needed when running as a script.
    import uvicorn
    # Bind address and port are configurable through HOST / PORT env vars.
    uvicorn.run(app, host=os.getenv("HOST", "localhost"), port=int(os.getenv("PORT", 8000)))
from typing import List, Optional, Dict, Union
from typing_extensions import TypedDict, NotRequired, Literal
class EmbeddingUsage(TypedDict):
    """Token accounting for an embeddings response."""
    prompt_tokens: int
    total_tokens: int
class EmbeddingData(TypedDict):
    """One embedding vector within an embeddings response."""
    index: int
    object: str
    embedding: List[float]
class Embedding(TypedDict):
    """OpenAI-style embeddings response body."""
    object: Literal["list"]
    model: str
    data: List[EmbeddingData]
    usage: EmbeddingUsage
class CompletionLogprobs(TypedDict):
    """Per-token log-probability details for a completion choice."""
    text_offset: List[int]
    token_logprobs: List[float]
    tokens: List[str]
    top_logprobs: List[Dict[str, float]]
class CompletionChoice(TypedDict):
    """One generated alternative in a text completion."""
    text: str
    index: int
    logprobs: Optional[CompletionLogprobs]
    finish_reason: Optional[str]
class CompletionUsage(TypedDict):
    """Token accounting for a completion response."""
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
class CompletionChunk(TypedDict):
    """Streamed text-completion fragment (no usage field)."""
    id: str
    object: Literal["text_completion"]
    created: int
    model: str
    choices: List[CompletionChoice]
class Completion(TypedDict):
    """Complete (non-streamed) text-completion response body."""
    id: str
    object: Literal["text_completion"]
    created: int
    model: str
    choices: List[CompletionChoice]
    usage: CompletionUsage
class ChatCompletionMessage(TypedDict):
    """One chat message (role + content, optional user tag)."""
    role: Union[Literal["assistant"], Literal["user"], Literal["system"]]
    content: str
    user: NotRequired[str]
class ChatCompletionChoice(TypedDict):
    """One generated alternative in a chat completion."""
    index: int
    message: ChatCompletionMessage
    finish_reason: Optional[str]
class ChatCompletion(TypedDict):
    """Complete (non-streamed) chat-completion response body."""
    id: str
    object: Literal["chat.completion"]
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: CompletionUsage
class ChatCompletionChunkDelta(TypedDict):
    """Incremental update in a streamed chat completion (role and/or content)."""
    role: NotRequired[Literal["assistant"]]
    content: NotRequired[str]
class ChatCompletionChunkChoice(TypedDict):
    """One choice entry within a streamed chat-completion chunk."""
    index: int
    delta: ChatCompletionChunkDelta
    finish_reason: Optional[str]
class ChatCompletionChunk(TypedDict):
    """Streamed chat-completion fragment."""
    id: str
    model: str
    object: Literal["chat.completion.chunk"]
    created: int
    choices: List[ChatCompletionChunkChoice]
import os
import torch
import multiprocessing
import rwkv_cpp_shared_library
from typing import Tuple, Optional
class RWKVModel:
    """
    PyTorch wrapper around rwkv.cpp model.
    """

    def __init__(
        self,
        shared_library: rwkv_cpp_shared_library.RWKVSharedLibrary,
        model_path: str,
        thread_count: int = max(1, multiprocessing.cpu_count() // 2)
    ):
        """
        Loads the model and prepares it for inference.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        shared_library : RWKVSharedLibrary
            rwkv.cpp shared library.
        model_path : str
            Path to RWKV model file in ggml format.
        thread_count : int
            Thread count to use. If not set, defaults to CPU count / 2.
        """
        assert os.path.isfile(model_path), f'{model_path} is not a file'
        assert thread_count > 0, 'Thread count must be positive'

        self._library = shared_library
        self._ctx = self._library.rwkv_init_from_file(model_path, thread_count)
        # Buffer sizes are fixed per model; cache them for eval-time validation.
        self._state_buffer_element_count = self._library.rwkv_get_state_buffer_element_count(self._ctx)
        self._logits_buffer_element_count = self._library.rwkv_get_logits_buffer_element_count(self._ctx)
        self._valid = True

    def eval(
        self,
        token: int,
        state_in: Optional[torch.Tensor],
        state_out: Optional[torch.Tensor] = None,
        logits_out: Optional[torch.Tensor] = None
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Evaluates the model for a single token.
        In case of any error, this method will throw an exception.

        Parameters
        ----------
        token : int
            Index of next token to be seen by the model. Must be in range 0 <= token < n_vocab.
        state_in : Optional[torch.Tensor]
            State from previous call of this method. If this is a first pass, set it to None.
        state_out : Optional[torch.Tensor]
            Optional output tensor for state. If provided, must be of type float32, contiguous and of shape (state_buffer_element_count).
        logits_out : Optional[torch.Tensor]
            Optional output tensor for logits. If provided, must be of type float32, contiguous and of shape (logits_buffer_element_count).

        Returns
        -------
        logits, state
            Logits vector of shape (n_vocab); state for the next step.
        """
        assert self._valid, 'Model was freed'

        def validate_buffer(buf: torch.Tensor, name: str, size: int) -> None:
            assert buf.dtype == torch.float32, f'{name} is not of type float32'
            assert buf.is_contiguous(), f'{name} is not contiguous'
            assert buf.shape == (size,), f'{name} has invalid shape {buf.shape}, expected ({size})'

        if state_in is not None:
            validate_buffer(state_in, 'state_in', self._state_buffer_element_count)
            # Bug fix: use Tensor.data_ptr() rather than storage().data_ptr().
            # The storage pointer ignores the tensor's storage_offset, so a
            # contiguous view into a larger buffer would hand the C library
            # the wrong address; Tensor.data_ptr() points at the first element.
            state_in_ptr = state_in.data_ptr()
        else:
            state_in_ptr = 0

        if state_out is not None:
            validate_buffer(state_out, 'state_out', self._state_buffer_element_count)
        else:
            state_out = torch.zeros(self._state_buffer_element_count, dtype=torch.float32, device='cpu')

        if logits_out is not None:
            validate_buffer(logits_out, 'logits_out', self._logits_buffer_element_count)
        else:
            logits_out = torch.zeros(self._logits_buffer_element_count, dtype=torch.float32, device='cpu')

        self._library.rwkv_eval(
            self._ctx,
            token,
            state_in_ptr,
            state_out.data_ptr(),
            logits_out.data_ptr()
        )

        return logits_out, state_out

    def free(self):
        """
        Frees all allocated resources.
        In case of any error, this method will throw an exception.
        The object must not be used anymore after calling this method.
        """
        assert self._valid, 'Already freed'
        self._valid = False
        self._library.rwkv_free(self._ctx)

    def __del__(self):
        # Free the context on GC in case user forgot to call free() explicitly.
        if hasattr(self, '_valid') and self._valid:
            self.free()
"""Console script for ddlpy."""
import io
import logging
import os
import sys

import click
import dateutil
import dateutil.parser
import pandas as pd

import ddlpy
@click.group()
@click.option('-v', '--verbose', count=True)
def cli(verbose, args=None):
    """Console script for ddlpy."""
    # One or more -v flags switch logging from INFO to DEBUG.
    # NOTE(review): `args` appears unused — presumably a leftover from the
    # cookiecutter template; confirm before removing.
    level = logging.INFO
    if verbose >= 1:
        level = logging.DEBUG
    logging.basicConfig(level=level)
    return 0
# Define a command
# Each command has options which are read from the console.
@cli.command()
@click.argument('output', type=click.STRING )
@click.option(
    '--quantity',
    help='Grootheid code',
    multiple=True
)
@click.option(
    '--quality',
    help='Hoedanigheid code',
    multiple=True
)
@click.option(
    '--unit',
    help='Eenheid code',
    multiple=True
)
@click.option(
    '--parameter-code',
    help='Parameter code',
    multiple=True
)
@click.option(
    '--compartment-code',
    help='Compartment code',
    multiple=True
)
@click.option(
    '--station',
    help='Station codes',
    multiple=True
)
@click.option(
    '--format',
    default='json',
    help='output file format. Must be json',
    type=click.Choice(['json'], case_sensitive=True)
)
def locations(output,
              station,
              quantity,
              quality,
              unit,
              parameter_code,
              compartment_code,
              format):
    """
    Write locations metadata to output file, given input codes.
    """
    locations_df = ddlpy.locations()
    # Map each metadata column to the user-supplied codes to filter on.
    filters = {'Grootheid.Code': list(quantity),
               'Hoedanigheid.Code': list(quality),
               'Eenheid.Code': list(unit),
               'Parameter.Code': list(parameter_code),
               'Compartiment.Code': list(compartment_code)
               }
    selected = locations_df.copy()
    # Restrict to the requested stations (index holds the station codes).
    if station:
        selected = selected[selected.index.isin(station)]
    # Apply each non-empty code filter in turn.
    for column, codes in filters.items():
        if codes:
            selected = selected[selected[column].isin(codes)]
    selected.reset_index(inplace=True)
    if format == 'json':
        # Bug fix: the original `output.split('.')[0]` truncated any path with
        # a dot in a directory name (e.g. 'out.d/locs' -> 'out');
        # os.path.splitext only strips a genuine extension. The output file
        # always gets a .json extension.
        output = os.path.splitext(output)[0]
        selected.to_json(output + '.json', orient='records')
    else:
        raise ValueError('Unexpected format {}'.format(format))
# Another command to get the measurements from locations
@cli.command()
@click.option(
    '--start-date',
    help='Start date of the measurements'
)
@click.option(
    '--end-date',
    help='End date of the measurements'
)
@click.option(
    '--locations',
    default='locations.json',
    help='file in json or parquet format containing locations and codes'
)
def measurements(locations, start_date, end_date):
    """
    Obtain measurements from file with locations and codes
    """
    try:
        locations_df = pd.read_json(locations, orient='records')
    except (FileNotFoundError, ValueError) as err:
        # Bug fix: the original bare `except:` swallowed *every* exception
        # (even KeyboardInterrupt) and reported it as a missing file; only
        # catch what pd.read_json raises for a missing/unparsable file, and
        # chain the cause for debuggability.
        raise ValueError('location file not existing. Create one or specify its name.') from err
    # Convert string arguments to dates.
    if start_date:
        start_date = dateutil.parser.parse(start_date)
    if end_date:
        end_date = dateutil.parser.parse(end_date)
    # One query (and one CSV file) per location row.
    for obs in range(locations_df.shape[0]):
        selected = locations_df.loc[obs]
        measurements = ddlpy.measurements(
            selected, start_date=start_date, end_date=end_date)
        if len(measurements) > 0:
            print('Measurements of %s were obtained' % selected['Code'])
            station = selected['Code']
            cc = selected['Compartiment.Code']
            ec = selected['Eenheid.Code']
            gc = selected['Grootheid.Code']
            hc = selected['Hoedanigheid.Code']
            pc = selected['Parameter.Code']
            # File name encodes station and every metadata code.
            measurements.to_csv('%s_%s_%s_%s_%s_%s.csv' %
                                (station, cc, ec, gc, hc, pc))
        else:
            print('No Data of station %s were retrieved from Water Info' %
                  selected['Code'])
# Dispatch to the click group when executed as a script.
if __name__ == "__main__":
    sys.exit(cli())  # pragma: no cover
import io
import logging
from datetime import datetime, timedelta
from typing import Optional, Tuple
import mpu
import pandas as pd
import requests
# Module-level logger; INFO by default so download progress is visible
# to library users without extra configuration.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel("INFO")
def import_daily_data(
    start: str, end: str, coord: Optional[Tuple[float, float]] = None
) -> pd.DataFrame:
    """Download daily aggregated KNMI data.

    Thin wrapper around :func:`_import_data` using the KNMI daily endpoint.

    Parameters
    ----------
    start : str
        First date of the requested range, formatted YYYYMMDD.
    end : str
        Last date of the requested range, formatted YYYYMMDD.
    coord : Optional[Tuple[float, float]]
        (latitude, longitude) used to select the nearest station; with
        None, data of all stations is returned.

    Returns
    -------
    pd.DataFrame
        Parsed content of the KNMI response.

    Examples
    --------
    .. code:: python

        from rws_knmi_lib.knmi_downloader import import_daily_data

        # All KNMI stations.
        df_daily = import_daily_data(start="20220501", end="20220505")

        # Only the station nearest to the given coordinate.
        df_daily_nearest = import_daily_data(start="20220501", end="20220505",
                                             coord=(52.460770, 4.625110))
    """
    daily_url = "https://www.daggegevens.knmi.nl/klimatologie/daggegevens"
    return _import_data(start, end, daily_url, coord)
def import_hourly_data(
    start: str, end: str, coord: Optional[Tuple[float, float]] = None
) -> pd.DataFrame:
    """Import hourly aggregated KNMI data.

    Post a request to the KNMI url and parse the response content to a pandas
    dataframe. If the timedelta between start and end is bigger than 30 days,
    the request is posted in consecutive, non-overlapping batches of at most
    31 days to avoid a Query Error from the server for requesting too many
    values.

    Parameters
    ----------
    start : str
        String containing starting date from which to get data in format YYYYMMDD.
    end : str
        String containing final date from which to get data in format YYYYMMDD.
    coord : Optional[Tuple[float, float]]
        Coordinate to search for nearest station in format (latitude, longitude). If
        left as None, all stations are returned.

    Returns
    -------
    pd.DataFrame
        Pandas DataFrame containing the parsed content of the post response.

    Examples
    --------
    .. code:: python

        from rws_knmi_lib.knmi_downloader import import_hourly_data

        # Returns dataframe containing information on all KNMI stations.
        df_hourly = import_hourly_data(start="20220501", end="20220505")

        # Returns dataframe containing only information from the nearest KNMI station.
        df_hourly_nearest = import_hourly_data(start="20220501", end="20220505",
                                               coord=(52.460770, 4.625110))
    """
    url = "https://www.daggegevens.knmi.nl/klimatologie/uurgegevens"
    start_time = datetime.strptime(start, "%Y%m%d")
    end_time = datetime.strptime(end, "%Y%m%d")
    if end_time - start_time > timedelta(days=30):
        # Batch in chunks anchored at the requested start date. The previous
        # month-start ("MS") based batching skipped the days before the first
        # month boundary, downloaded overlapping (duplicated) days between
        # consecutive chunks and could overshoot the requested end date.
        knmi_data = []
        chunk_start = start_time
        while chunk_start <= end_time:
            chunk_end = min(chunk_start + timedelta(days=30), end_time)
            logger.debug("Downloading weather data for %s - %s...",
                         chunk_start, chunk_end)
            knmi_data.append(
                _import_data(
                    start=chunk_start.strftime("%Y%m%d"),
                    end=chunk_end.strftime("%Y%m%d"),
                    url=url,
                    coord=coord,
                )
            )
            chunk_start = chunk_end + timedelta(days=1)
        df_hourly = pd.concat(knmi_data).reset_index(drop=True)
    else:
        df_hourly = _import_data(start, end, url, coord)
    # Combine the date column and the hour offset into one timestamp
    # ("h" is the non-deprecated spelling of the hour unit).
    df_hourly["H"] = pd.to_timedelta(df_hourly["H"], unit="h")
    df_hourly["Datetime"] = df_hourly["YYYYMMDD"] + df_hourly["H"]
    return df_hourly
def _import_data(
    start: str,
    end: str,
    url: str,
    coord: Optional[Tuple[float, float]] = None,
) -> pd.DataFrame:
    """Import KNMI data from the given URL which can be hourly or daily aggregated.

    Parameters
    ----------
    start : str
        String containing starting date from which to get data in format YYYYMMDD.
    end : str
        String containing final date from which to get data in format YYYYMMDD.
    url : str
        KNMI endpoint the request is posted to.
    coord : Optional[Tuple[float, float]]
        Coordinate to search for nearest station in format (latitude, longitude). If
        left as None, all stations are returned.

    Returns
    -------
    pd.DataFrame
        Pandas DataFrame containing the parsed content of the post response.

    Raises
    ------
    RuntimeError
        If the server responds with a non-200 status or reports a Query Error.
    """
    logger.debug("Posting request.")
    # Explicit timeout so a stalled KNMI server cannot hang the caller forever.
    response = requests.post(
        url=url,
        data={"start": start, "end": end},
        timeout=600,
    )
    if response.status_code != 200:
        logger.error("Unable to retrieve download url for file.")
        logger.error(response.text)
        # RuntimeError is a subclass of Exception, so existing broad handlers
        # keep working while the failure mode is more precise.
        raise RuntimeError("Unable to retrieve download url for file.")
    logger.debug("Response received.")
    decoded_response = response.content.decode("utf-8")
    if "Query Error" in decoded_response:
        message = (
            "Probably a Query Error. Too many values returned. Adjust "
            "time range in order to request less values."
        )
        logger.error(message)
        raise RuntimeError(message)
    return _parse_knmi_response(decoded_response, coord)
def _parse_knmi_response(
    dec_response: str, coord: Optional[Tuple[float, float]]
) -> pd.DataFrame:
    """Parse the decoded KNMI response object.

    Parameters
    ----------
    dec_response : str
        UTF-8 decoded response in raw text format.
    coord : Optional[Tuple[float, float]]
        Coordinate to search for nearest station in format (latitude, longitude). If
        left as None, all stations are returned.

    Returns
    -------
    pd.DataFrame
        Pandas DataFrame containing the parsed content of the post response.
    """
    try:
        # Find starting point of the tabular values: the 4 characters before
        # "YYYYMMDD" presumably belong to the leading "STN," column header —
        # TODO confirm against an actual KNMI response.
        pos_df = dec_response.find("YYYYMMDD") - 4
        dataframe_response = dec_response[pos_df:]
        dataframe = pd.read_csv(io.StringIO(dataframe_response), skipinitialspace=True)
        dataframe = _format_dataframe(dataframe)
        if coord is not None:
            # Reduce the result to the single station nearest to `coord`.
            logger.debug("Finding nearest station.")
            df_stations = _parse_station_locations(dec_response)
            nearest_station = _find_nearest_station(df_stations, coord)
            logger.debug("Nearest station is station %d", nearest_station)
            dataframe = dataframe[dataframe["STN"].astype(int) == nearest_station]
        # Parse the date column into proper timestamps.
        dataframe["YYYYMMDD"] = pd.to_datetime(dataframe["YYYYMMDD"], format="%Y%m%d")
        return dataframe
    except Exception as exc:
        # Log with traceback, then let the caller decide how to handle it.
        logger.exception(exc)
        raise
def _find_nearest_station(dataframe: pd.DataFrame, coord: Tuple[float, float]) -> int:
    """Calculate nearest KNMI station to given coordinate.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Formatted station dataframe with "LAT(north)", "LON(east)" and "STN"
        columns.
    coord : Tuple[float, float]
        Coordinate to search for nearest station in format (latitude, longitude).
        Can't be None, as this method would not be called otherwise.

    Returns
    -------
    int
        Integer value corresponding to the nearest station in the STN column.

    Raises
    ------
    ValueError
        If a negative latitude or longitude is supplied.
    """
    if coord[0] < 0 or coord[1] < 0:
        raise ValueError("Negative longitude or latitude not allowed.")
    # NOTE(review): this adds a helper column to the caller's dataframe;
    # callers currently pass a throwaway frame, so that is harmless.
    dataframe["haversine_distance"] = dataframe.apply(
        lambda x: mpu.haversine_distance(coord, (x["LAT(north)"], x["LON(east)"])),
        axis=1,
    )
    # idxmin selects a single row even when several stations are equally
    # close; the previous Series.item() call raised a ValueError on ties.
    nearest = dataframe.loc[dataframe["haversine_distance"].idxmin(), "STN"]
    return int(nearest)
def _parse_station_locations(dec_response: str) -> pd.DataFrame:
    """Parse station location part of the KNMI response.

    Parameters
    ----------
    dec_response : str
        UTF-8 decoded response in raw text format.

    Returns
    -------
    pd.DataFrame
        Pandas DataFrame containing the parsed content of the response.
    """
    # The station table is the comment block before the "# DD" column legend.
    pos_station = dec_response.find("# DD")
    station_response = dec_response[:pos_station]
    stations_df = pd.read_csv(
        io.StringIO(station_response),
        header=5,  # presumably skips the fixed preamble lines — TODO confirm
        sep=r"\s{2,}",  # columns are separated by runs of 2+ spaces
        skipinitialspace=True,
        engine="python",
    )
    stations_df = _format_dataframe(stations_df)
    # Strip the comment prefix from the station numbers.
    stations_df["STN"] = stations_df["STN"].str.replace("# ", "")
    return stations_df
def _format_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Format dataframe by changing names and selecting columns.
Parameters
----------
dataframe : pd.DataFrame
Dataframe coming from the read_csv method on the raw response.
Returns
-------
pd.DataFrame
Formatted dataframe which is easier to work with and has unused columns removed.
"""
alter_headers = {"# STN": "STN"}
dataframe = dataframe.rename(columns=alter_headers)
dataframe = dataframe.replace("# ", "")
dataframe = dataframe.dropna(how="all", axis=1)
if dataframe.empty:
raise Exception("No data found in the constructed dataframe.")
return dataframe | /rws_knmi_lib-1.0.0-py3-none-any.whl/rws_knmi_lib/knmi_downloader.py | 0.90676 | 0.514034 | knmi_downloader.py | pypi |
from typing import List, Optional, Tuple
from pydantic import BaseModel, validator
class NWBConfig(BaseModel):
    """Configuration for downloading and filtering the NWB.

    Attributes
    ----------
    bounding_box
        Optional bounding box (x0, y0, x1, y1) within which to get the data.
        Mutually exclusive with ``only_state_roads``.
    only_state_roads
        Whether to only keep state roads (much faster when True),
        fewer data of course.
    keep_road_numbers
        Which road numbers to keep. Keep in mind that this data works with
        leading zeroes, e.g. '009' for A9.
    keep_rpe_codes
        Which RPE codes to keep (i.e. which side of the highway)
        (default all, valid values are "L", "R" and "#").
    encoding
        The encoding to store the output in.
    """
    bounding_box: Optional[Tuple[float, float, float, float]] = None
    only_state_roads: bool = True
    keep_road_numbers: Optional[List[str]] = None
    keep_rpe_codes: Optional[List[str]] = None
    encoding: str = "utf-8"
    @validator("keep_rpe_codes")
    @classmethod
    def validate_rpe_codes(cls, value: List[str]) -> List[str]:
        """Validate road RPE codes.

        This method validates the RPE codes that can be specified. Valid
        codes can be "L", "R", and "#".

        Parameters
        ----------
        value
            List of RPE codes.

        Returns
        -------
        List[str]
            Validated input list.
        """
        if not len(value) > 0:
            raise ValueError("Specify at least one RPE code")
        invalid_codes = cls._get_invalid_codes(value)
        if any(invalid_codes):
            raise ValueError(
                f"Invalid RPE code(s) {invalid_codes}, supported are"
                f" {','.join(NWBConfig.valid_rpe_codes())}"
            )
        return value
    @staticmethod
    def valid_rpe_codes() -> List[str]:
        """Return all valid RPE codes.

        Returns
        -------
        List[str]
            Supported RPE codes.
        """
        return ["L", "R", "#"]
    @classmethod
    def _get_invalid_codes(cls, codes_list: List[str]) -> List[str]:
        """Return all invalid RPE codes in a list.

        This method returns the difference between the list of valid RPE
        codes and a supplied input list.

        Parameters
        ----------
        codes_list
            List of strings (RPE codes) to check.

        Returns
        -------
        List[str]
            List of invalid RPE codes.
        """
        # Empty strings are treated as invalid as well.
        invalid_codes = [
            i for i in codes_list if i not in NWBConfig.valid_rpe_codes() or i == ""
        ]
        return invalid_codes
    @validator("bounding_box")
    @classmethod
    def validate_bounding_box(
        cls, value: Tuple[float, float, float, float]
    ) -> Tuple[float, float, float, float]:
        """Validate the optional bounding box.

        This method checks to see if bounding box is correctly specified.
        Rules: x1 > x0, y1 > y0, and all values must be positive.

        Parameters
        ----------
        value
            The bounding box in form x0,y0,x1,y1.

        Returns
        -------
        Tuple[float,float,float,float]
            The validated bounding box.
        """
        for i in value:
            if i < 0:
                raise ValueError(f"Coordinate should be of positive value: {i}")
        if value[0] > value[2] or value[1] > value[3]:
            raise ValueError("x1 or y1 should be greater than x0 or y0")
        return value
    @validator("only_state_roads")
    @classmethod
    def validate_state_road_bbox_exclusivity(cls, value: bool, values: dict) -> bool:
        """Validate that only_state_roads/bounding_box are mutually exclusive.

        Parameters
        ----------
        value
            The only_state_roads variable
        values
            Other values from the model.

        Returns
        -------
        bool
            Returns only_state_roads if validated, otherwise raises
            ValidationError.
        """
        # `values` only contains fields validated before this one, hence the
        # membership check before accessing bounding_box.
        if value and "bounding_box" in values and values["bounding_box"] is not None:
            # This is not possible in the same URL params.
            raise AssertionError(
                "only_state_roads and bounding_box are "
                "mutually exclusive for this release"
            )
        return value
import logging
import warnings
from typing import List, Tuple
import geopandas as gpd
import requests
from requests import Response
from rws_nwb_lib.config import NWBConfig
# Library-style logging: a NullHandler lets the importing application decide
# whether and where log records are emitted.
logging.getLogger(__name__).addHandler(logging.NullHandler())
# Filter out deprecation warnings thrown via geopandas' import chain.
warnings.filterwarnings(
    "ignore",
    category=DeprecationWarning,
    message=".*distutils.*classes are deprecated.*",
)
warnings.filterwarnings(
    "ignore", category=DeprecationWarning, message=".*imp module is deprecated.*"
)
# Download, persist and filter the NWB in one call.
def process_nwb(
    config: NWBConfig,
    output_file_path: str = "nwb.gml",
) -> gpd.GeoDataFrame:
    """Download the latest data from the NWB.

    Parameters
    ----------
    config
        Object holding configuration for downloading and filtering the NWB.
    output_file_path
        Path the downloaded GML response is written to before being read
        back into a GeoDataFrame.

    Returns
    -------
    gpd.GeoDataFrame
        A GeoDataFrame containing the NWB data for the selected region. For more
        information on this dataset, please refer to
        https://www.pdok.nl/geo-services/-/article/nationaal-wegen-bestand-nwb-
    """
    complete_url = build_nwb_url(config.only_state_roads, config.bounding_box)
    logging.info("Downloading data from NWB using the following URL: %s", complete_url)
    response = download_nwb_shapefile(complete_url, config)
    logging.info("Data downloaded")
    # Persist the raw response; geopandas then parses the GML from disk.
    with open(output_file_path, "wb") as out:
        logging.info("Writing response data to file system")
        out.write(response.text.encode(config.encoding))
    logging.info("Reading response into GeoDataFrame")
    result = gpd.read_file(output_file_path)
    # Kilometre markers arrive as strings; make them numeric.
    float_columns = ["beginkm", "eindkm"]
    for float_column in float_columns:
        result[float_column] = result[float_column].map(float)
    result = apply_filters(
        nwb_data=result,
        keep_road_numbers=config.keep_road_numbers,
        keep_rpe_codes=config.keep_rpe_codes,
    )
    return result
def download_nwb_shapefile(complete_url: str, config: NWBConfig) -> Response:
    """Download the NWB and return the HTTP response.

    Parameters
    ----------
    complete_url
        The complete URL with filter parameters.
    config
        The rest of the NWBConfig (used for the error message only).

    Returns
    -------
    Response
        The HTTP Response if successful.

    Raises
    ------
    RuntimeError
        If the request fails on client or server error, or if the server
        answers with a non-200 status.
    """
    try:
        # Explicit timeout so a stalled WFS server cannot hang the caller.
        response = requests.get(complete_url, timeout=600)
        response.raise_for_status()
    except requests.exceptions.RequestException as exception:
        raise RuntimeError(
            f"Unable to process NWB request for config: {config.json()}"
        ) from exception
    # raise_for_status only covers 4xx/5xx; treat any other non-200 answer
    # (e.g. an unexpected redirect status) as a server error as well. This
    # check used to sit inside the try block, where its RuntimeError escaped
    # the RequestException handler with an inconsistent message.
    if response.status_code != 200:
        raise RuntimeError(f"Server error, status {response.status_code}")
    return response
def apply_filters(
    nwb_data: "gpd.GeoDataFrame",
    keep_road_numbers: "List[str]" = None,
    keep_rpe_codes: "List[str]" = None,
) -> "gpd.GeoDataFrame":
    """Filter downloaded NWB data on road numbers and RPE codes.

    Parameters
    ----------
    nwb_data
        The downloaded NWB data to filter; needs "wegnummer" and "rpe_code"
        columns.
    keep_road_numbers
        Which road numbers to keep. Keep in mind that this data works with
        leading zeroes, e.g. '009' for A9. None keeps all road numbers.
    keep_rpe_codes
        Which RPE codes to keep (i.e. which side of the highway)
        (default all, valid values are "L", "R" and "#").

    Returns
    -------
    gpd.GeoDataFrame
        A copy of the input dataframe, reduced to the rows matching the
        applicable filters.
    """
    result = nwb_data
    keep_road_numbers = keep_road_numbers or []
    keep_rpe_codes = keep_rpe_codes or ["L", "R", "#"]
    if len(keep_road_numbers) > 0:
        # The WFS itself does not support querying for strings with leading
        # zeros, hence the local filtering after download.
        result = result.query("wegnummer in @keep_road_numbers")
    result = result.query("rpe_code in @keep_rpe_codes")
    return result.copy()
def build_nwb_url(
    only_state_roads: bool = True, bbox: Tuple[float, float, float, float] = None
) -> str:
    """Assemble the WFS request URL for the NWB download.

    Parameters
    ----------
    only_state_roads
        Whether to only request state roads ("rijkswegen", road authority
        'R') — much faster when True, fewer data of course.
    bbox
        Bounding box within which to get the data, as (x0, y0, x1, y1).
        None requests the full extent.

    Returns
    -------
    str
        The request URL containing the applicable filters.
    """
    parts = [
        "https://geodata.nationaalgeoregister.nl/nwbwegen/wfs?request=GetFeature"
        "&service=WFS&typenames=wegvakken&version=2.0.0"
    ]
    if only_state_roads:
        # OGC filter restricting results to road authority 'R' (state roads).
        parts.append(
            "&filter=<Filter><PropertyIsEqualTo><PropertyName>wegbehsrt</PropertyName>"
            "<Literal>R</Literal></PropertyIsEqualTo></Filter>"
        )
    if bbox:
        coordinates = ",".join(str(coordinate) for coordinate in bbox)
        parts.append(f"&bbox={coordinates}")
    return "".join(parts)
# RWTH Aachen corporate-design palette: hex values per color name, keyed by
# opacity step (100, 75, 50, 25, 10 percent).
_colors = {
    "blue": {
        100: "#00549F",
        75: "#407FB7",
        50: "#8EBAE5",
        25: "#C7DDF2",
        10: "#E8F1FA"
    },
    "black": {
        100: "#000000",
        75: "#646567",
        50: "#9C9E9F",
        25: "#CFD1D2",
        10: "#ECEDED"
    },
    "magenta": {
        100: "#E30066",
        75: "#E96088",
        50: "#F19EB1",
        25: "#F9D2DA",
        10: "#FDEEF0"
    },
    "yellow": {
        100: "#FFED00",
        75: "#FFF055",
        50: "#FFF59B",
        25: "#FFFAD1",
        10: "#FFFDEE"
    },
    "petrol": {
        100: "#006165",
        75: "#2D7F83",
        50: "#7DA4A7",
        25: "#BFD0D1",
        10: "#E6ECEC"
    },
    "turqoise": {
        100: "#0098A1",
        75: "#00B1B7",
        50: "#89CCCF",
        25: "#CAE7E7",
        10: "#EBF6F6"
    },
    "green": {
        100: "#57AB27",
        75: "#8DC060",
        50: "#B8D698",
        25: "#DDEBCE",
        10: "#F2F7EC"
    },
    "lime": {
        100: "#BDCD00",
        75: "#D0D95C",
        50: "#E0E69A",
        25: "#F0F3D0",
        10: "#F9FAED"
    },
    "orange": {
        100: "#F6A800",
        75: "#FABE50",
        50: "#FDD48F",
        25: "#FEEAC9",
        10: "#FFF7EA"
    },
    "red": {
        100: "#CC071E",
        75: "#D85C41",
        50: "#E69679",
        25: "#F3CDBB",
        10: "#FAEBE3"
    },
    "darkred": {
        100: "#A11035",
        75: "#B65256",
        50: "#CD8B87",
        25: "#E5C5C0",
        10: "#F5E8E5"
    },
    "bordeaux": {
        100: "#612158",
        75: "#834E75",
        50: "#A8859E",
        25: "#D2C0CD",
        10: "#EDE5EA"
    },
    "lavender": {
        100: "#7A6FAC",
        75: "#9B91C1",
        50: "#BCB5D7",
        25: "#DEDAEB",
        10: "#F2F0F7"
    }
}
# Flatten to (name, opacity) -> hex so colors can be looked up by tuple key.
_flattened = {(name, op): val for name, values in _colors.items() for op, val in values.items()}
# And add defaults (100% opacity): plain name keys map to the full-opacity hex.
colors = {key: _colors[key][100] for key in _colors.keys()}
colors = {**colors, **_flattened}
def plot_colors():
    """
    Plot the colors of the RWTH colorspace by key and opacity (uses matplotlib).

    Renders one small bar chart per palette color, with one bar per opacity
    step. Logs an error and returns early when matplotlib is not installed.
    """
    try:
        from matplotlib import pyplot as plt
    except ImportError:
        import logging
        logging.error("Plotting the colorspace requires matplotlib.")
        return
    numcolors = len(_colors.keys())
    cols = 3
    # Ceiling division. The previous floor()-based expression produced a
    # *float* row count whenever numcolors was divisible by 3, which newer
    # matplotlib versions reject in plt.subplots.
    rows = -(-numcolors // cols)
    fig, _ = plt.subplots(nrows=rows, ncols=cols)
    for ax, (cname, cvalues) in zip(fig.axes, _colors.items()):
        for idx, (op, c) in enumerate(cvalues.items()):
            ax.bar(idx, 1, color=c, tick_label=op)
        ax.set_title(cname, size=9)
        ax.set_yticklabels([])
        ax.set_yticks([])
        ax.set_xticks(range(5))
        ax.set_xticklabels((100, 75, 50, 25, 10))
    # Hide any surplus axes in the last grid row.
    for ax in fig.axes[numcolors:]:
        ax.set_visible(False)
    fig.suptitle("RWTH Colors by key and opacity")
    plt.tight_layout()
    plt.show()
# Render the palette overview when the module is executed directly.
if __name__ == "__main__":
    plot_colors()
# rwth.nb
## Introduction
This project consists of Jupyter Notebook definitions used by RWTH Aachen University.
## Table of Contents
* [RWTH Plots](RWTH%20Plots.ipynb)
* [RWTH Colors](RWTH%20Colors.ipynb)
## Jupyter Quick Start
* To run all cells of a notebook: in the menu, select Run <span class="fa-chevron-right fa"></span> Run All
* To restart the kernel: <span class="fa-refresh fa"></span>-Button
* To run a single cell: <span class="fa-play fa"></span> Run-Button
## Contributors
* [Christian Rohlfing](http://www.ient.rwth-aachen.de/cms/c_rohlfing/)
* [Lars Thieling](http://www.iks.rwth-aachen.de/institut/ueber-uns/mitarbeiter/lars-thieling/)
* [Christoph Weyer](http://www.iks.rwth-aachen.de/institut/ueber-uns/mitarbeiter/christoph-weyer-m-sc/)
* [Jens Schneider](http://www.ient.rwth-aachen.de/cms/j_schneider/)
The code is licensed under the [MIT license](https://opensource.org/licenses/MIT).
| /rwth_nb-0.1.8.tar.gz/rwth_nb-0.1.8/index.ipynb | 0.65368 | 0.621254 | index.ipynb | pypi |
from matplotlib import rcParams
# Draw axes (spines/ticks) above the plotted artists by default.
rcParams["axes.axisbelow"] = False
# rcParams['font.family'] = 'sans-serif'
# rcParams['font.sans-serif'] = ['Arial'] # TODO
# rcParams['font.size'] = 14
# rcParams['text.usetex'] = True
# rcParams['text.latex.unicode'] = True
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.gridspec as gridspec
import matplotlib.transforms as trans
import matplotlib.colors as mcolors
from rwth_nb.plots import colors
import numpy as np
# Propagate rwth_colors to default matplotlib colors so names like
# 'rwth:blue' can be used anywhere a matplotlib color is accepted.
mcolors.get_named_colors_mapping().update(colors.rwth_colors)
# set rwth color cycle for successive lines in a plot
from cycler import cycler
rcParams["axes.prop_cycle"] = cycler(color=['rwth:blue', 'rwth:orange', 'rwth:green', 'rwth:red', 'rwth:purple',
                                            'rwth:bordeaux', 'rwth:violet', 'rwth:black-50', 'rwth:maigrun',
                                            'rwth:turquoise'])
def axis(ax):
    """
    Decorate axes RWTH way: hide the top/right spines and keep the remaining
    spines and axis labels anchored at zero (or at the nearest axes edge when
    zero is outside the limits). Re-anchoring is installed as limit-change
    callbacks so it survives later set_xlim/set_ylim calls.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Axes to be beautified

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> import rwth_nb.plot.mpl_decorations as rwth_plt
    >>>
    >>> x = np.linspace(-5,5,100)
    >>> fig,ax = plt.subplots()
    >>> ax.plot(x, x**2)
    >>> ax.set_xlabel('x'); ax.set_ylabel('f(x)');
    >>> rwth_plt.axis(ax)
    """
    # Hide the top and right spines entirely.
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    def on_xlims_change(ax):
        # update x spine position to be always at zero, left or right according to set limits
        # update y-label x-position and horizontal alignment
        if (np.array(ax.get_xlim()) < 0).all():  # all limits negative
            left_spine_pos = ('axes', 1)
            ylabel_xpos = 1
            ylabel_halignment = 'right'
        elif (np.array(ax.get_xlim()) > 0).all():  # all limits positive
            left_spine_pos = ('axes', 0)  # spine at the left
            ylabel_xpos = 0
            ylabel_halignment = 'left'
        else:  # zero is in plot
            left_spine_pos = 'zero'  # spine at zero ([0, 0])
            # Position the y-label at the fraction of the width where x == 0.
            xmin = ax.get_xlim()[0]
            xmax = ax.get_xlim()[1]
            ylabel_xpos = np.abs(xmin) / (np.abs(xmax) + np.abs(xmin))
            ylabel_halignment = 'left'
        ax.spines['left'].set_position(left_spine_pos)
        ax.yaxis.set_label_coords(ylabel_xpos, 1)
        ax.yaxis.label.set_horizontalalignment(ylabel_halignment)
    def on_ylims_change(ax):
        # update y spine position to be always at zero, top or bottom according to set limits
        # update x-label y-position
        if (np.array(ax.get_ylim()) < 0).all():  # all limits negative
            bottom_spine_pos = ('axes', 1)  # spine at the top
            xlabel_ypos = 1
        elif (np.array(ax.get_ylim()) > 0).all():  # all limits positive
            bottom_spine_pos = ('axes', 0)  # spine at the bottom
            xlabel_ypos = 0
        else:  # zero is in plot
            bottom_spine_pos = 'zero'  # spine at zero ([0, 0])
            # Position the x-label at the fraction of the height where y == 0.
            ymin = ax.get_ylim()[0]
            ymax = ax.get_ylim()[1]
            xlabel_ypos = np.abs(ymin) / (np.abs(ymax) + np.abs(ymin))
        ax.spines['bottom'].set_position(bottom_spine_pos)
        ax.xaxis.set_label_coords(1, xlabel_ypos)
    # Keep spines/labels anchored when the limits change later on.
    ax.callbacks.connect('xlim_changed', on_xlims_change)
    ax.callbacks.connect('ylim_changed', on_ylims_change)
    # Apply once for the current limits.
    on_xlims_change(ax)
    ax.xaxis.label.set_verticalalignment('bottom')
    ax.xaxis.label.set_horizontalalignment('right')
    ax.yaxis.label.set_rotation(0)
    on_ylims_change(ax)
    ax.yaxis.label.set_verticalalignment('top')
    ax.yaxis.label.set_horizontalalignment('left')
    ax.xaxis.label.set_fontsize(12)
    ax.yaxis.label.set_fontsize(12)
def twinx(ax, visible_spine='left'):
    """
    Create a twin Axes sharing the x-axis.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Existing Axes
    visible_spine: {'left', 'right'}, str, optional
        Position of the only visible axis spine

    Returns
    -------
    ax_twin: matplotlib.axes.Axes
        The newly created Axes instance

    Raises
    ------
    ValueError
        If visible_spine is neither 'left' nor 'right'.

    See also
    --------
    matplotlib.axes.Axes.twinx
    twiny: Create a twin Axes sharing the y-axis.
    """
    if visible_spine in ['left', 'right']:
        # remove visible spine from hidden spine list
        hidden_spines = ['top', 'bottom', 'left', 'right']
        hidden_spines.remove(visible_spine)
        # Axes.twinx creates a second Axes with a *shared x-axis* and an
        # independent y-axis — which is what this helper documents. The
        # previous code mistakenly called ax.twiny() (shared y-axis) here.
        ax_twin = ax.twinx()
        for pos in hidden_spines:
            ax_twin.spines[pos].set_color('none')
        # set label position according to spine position (left/right, top)
        ax_twin.yaxis.set_label_coords(visible_spine == 'right', 1)
        return ax_twin
    else:
        # invalid keyword
        raise ValueError('Twin x-axis location must be either "left" or "right"')
def twiny(ax, visible_spine='top'):
    """
    Create a twin Axes sharing the y-axis.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Existing Axes
    visible_spine: {'top', 'bottom'}, str, optional
        Position of the only visible axis spine

    Returns
    -------
    ax_twin: matplotlib.axes.Axes
        The newly created Axes instance

    See also
    --------
    matplotlib.axes.Axes.twiny
    twinx: Create a twin Axes sharing the x-axis.
    """
    # Guard clause: reject invalid spine positions up front.
    if visible_spine not in ('top', 'bottom'):
        raise ValueError('Twin y-axis location must be either "top" or "bottom"')
    twin = ax.twiny()
    # Hide every spine except the requested one.
    for position in ('top', 'bottom', 'left', 'right'):
        if position != visible_spine:
            twin.spines[position].set_color('none')
    # Anchor the x-label at the visible spine (right end, top or bottom).
    twin.xaxis.set_label_coords(1, visible_spine == 'top')
    return twin
def annotate_xtick(ax, txt, x, y=0, col='black', fs=12):
    """
    Annotate a tick position on the x-axis with a text label and a dashed
    drop line from the axis to the label.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Current axes
    txt: string
        Annotated text
    x: float [scalar]
        x position of tick and txt
    y: float [scalar]
        y position of txt
    col: color, optional
        Color of text and line
    fs: int, optional
        Font size of the text

    Returns
    -------
    (text, line) handles of the created artists.

    See Also
    --------
    annotate_ytick: Annotates a tick on the y-axis
    """
    label = ax.text(
        x, y, txt, color=col, fontsize=fs,
        verticalalignment='top', horizontalalignment='center',
        bbox={'facecolor': 'white', 'edgecolor': 'none', 'alpha': 0.75},
    )
    marker_line, = ax.plot([x, x], [0, y], '--', color=col, lw=0.5)
    return label, marker_line
def annotate_ytick(ax, txt, x, y, col='black', fs=12):
    """
    Annotate a tick position on the y-axis with a text label and a dashed
    line from the axis to the label.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Current axes
    txt: string
        Annotated text
    x: float [scalar]
        x position of txt
    y: float [scalar]
        y position of tick and txt
    col: color, optional
        Color of text and line
    fs: int, optional
        Font size of the text

    Returns
    -------
    (text, line) handles of the created artists.

    See Also
    --------
    annotate_xtick: Annotates a tick on the x-axis
    """
    label = ax.text(
        x, y, txt, color=col, fontsize=fs,
        verticalalignment='top', horizontalalignment='center',
        bbox={'facecolor': 'white', 'edgecolor': 'none', 'alpha': 0.75},
    )
    marker_line, = ax.plot([0, x], [y, y], '--', color=col, lw=0.5)
    return label, marker_line
def annotate_distance(ax, txt, start, stop, color='rwth:black', **kwargs):
    """
    Annotate distance between two points in given axis

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        current axis
    txt: str
        distance annotation text
    start: (float, float)
        x and y position of starting point
    stop: (float, float)
        x and y position of ending point
    color: str, optional
        annotations color
    **kwargs: dict
        all additional keyword arguments are passed to distances and texts
        matplotlib.axes.Axes.annotate call

    Returns
    -------
    distance: matplotlib.text.Annotation
        annotation marking distance between the two points
    text: matplotlib.text.Annotation
        annotated text
    """
    # A '|-|' arrow between the two points marks the measured distance.
    distance = ax.annotate('', xy=start, xycoords='data', xytext=stop, textcoords='data',
                           arrowprops={'arrowstyle': '|-|,widthA=0.25,widthB=0.25', 'color': color}, **kwargs)
    # Label at the midpoint, slightly below the distance bar; `wbbox` is the
    # module-level white background box so the label stays readable.
    text = ax.annotate(txt, xy=((start[0] + stop[0]) / 2, (start[1] + stop[1]) / 2), xycoords='data',
                       xytext=(0, -2), textcoords='offset points', horizontalalignment='center',
                       verticalalignment='top',
                       bbox=wbbox, color=color, **kwargs)
    return distance, text
# Grid helper
def grid(ax):
    """
    Enable the grid of *ax*, drawn below the plotted artists.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Current axes
    """
    # Order of the two calls is irrelevant; axisbelow only sets a property.
    ax.set_axisbelow(True)
    ax.grid()
def update_xlim(ax, x, dx, xmax=5):
    """Set x-limits to the data range of *x*, clipped to ±xmax and padded by dx."""
    lower = max(np.min(x), -xmax) - dx
    upper = min(np.max(x), xmax) + dx
    ax.set_xlim([lower, upper])
def update_ylim(ax, y, dy, ymax=5):
    """Set y-limits to the data range of *y*, clipped to ±ymax and padded by dy."""
    lower = max(np.min(y), -ymax) - dy
    upper = min(np.max(y), ymax) + dy
    ax.set_ylim([lower, upper])
# Default figure sizes (16:9 landscape, 10 inches wide)
fig_width = 10
fig_aspect = 16/9
landscape = {'figsize': (fig_width, fig_width/fig_aspect)}
# Marker styles for pole/zero plots and default line style
style_poles = {'color': 'rwth:blue', 'marker': 'x', 'mew': 2, 'ms': 5.5, 'ls': 'None'}
style_zeros = {'color': 'rwth:blue', 'marker': 'o', 'mew': 2, 'ms': 5.5, 'mfc': 'None', 'ls': 'None'}
style_graph = {'color': 'rwth:blue'}
# Widget label style (full width)
wdgtl_style = {'description_width': 'initial'}
# Axis white background (used as bbox for annotation labels)
wbbox = {"facecolor": "white", "edgecolor": "None", "pad": 0}
# Propagate rwth_colors to default matplotlib colors
# NOTE(review): this repeats the identical update done near the imports above.
mcolors.get_named_colors_mapping().update(colors.rwth_colors)
# Custom stem function
def stem(ax, x, y, color='rwth:blue', **kwargs):
    """Stem plot without a baseline, with all artists in a uniform color.

    NOTE(review): the ``use_line_collection`` keyword was removed in
    matplotlib >= 3.9 (it became the only behavior); this call will need
    updating when the project moves to newer matplotlib — TODO confirm the
    supported matplotlib range.
    """
    container = ax.stem(x, y, use_line_collection=True, basefmt=" ", **kwargs)
    # Color markers, stem lines and baseline alike.
    plt.setp(container, 'color', color)
    return container
def stem_set_data(container, x, y):
    """Replace both the marker data and the stem line segments of *container*.

    *container* is a matplotlib StemContainer: index 0 holds the marker line,
    index 1 the LineCollection of vertical stems.
    """
    segments = [np.array([[x_i, 0], [x_i, y_i]]) for x_i, y_i in zip(x, y)]
    container[1].set_segments(segments)
    container[0].set_data(x, y)


def stem_set_xdata(container, x):
    """Move the stems horizontally, keeping their current heights."""
    stem_set_data(container, x, container[0].get_ydata())


def stem_set_ydata(container, y):
    """Change the stem heights, keeping their current x positions."""
    stem_set_data(container, container[0].get_xdata(), y)
def plot_dirac(ax, x, y, color='rwth:blue', **kwargs):
    """
    Custom dirac plot

    Positive weights are drawn as upward stems with '^' markers, negative
    weights as downward stems with 'v' markers.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Current axes
    x: array-like
        Diracs' x positions
    y: array-like
        Diracs' y-positions
    color: str, optional
        Diracs' colors
    **kwargs: dict
        all additional keyword arguments are passed to rwth_nb.plots.mpl_decorations.stem call

    Returns
    -------
    cp: instance of class matplotlib.container.StemContainer
        Container for diracs with positive weights
    cn: instance of class matplotlib.container.StemContainer
        Container for diracs with negative weights
    """
    x = np.asarray(x)
    y = np.asarray(y)
    mask = y >= 0
    xp = x[mask]
    yp = y[mask]
    # NaN placeholders keep a valid (invisible) container when one sign
    # class is empty, so callers can still update it later.
    if not len(xp):
        xp = np.nan*np.ones(2); yp=xp
    cp = stem(ax, xp, yp, color, markerfmt="^", **kwargs)
    mask = y < 0
    xn = x[mask]
    yn = y[mask]
    kwargs.pop('label', None)  # one legend label is enough
    if not len(xn):
        xn = np.nan*np.ones(2); yn=xn
    cn = stem(ax, xn, yn, color, markerfmt="v", **kwargs)
    return cp, cn
def dirac_set_data(containers, x, y):
    """
    Change data in existing dirac containers.

    Parameters
    ----------
    containers: Tuple of matplotlib.container.StemContainer class
        Tuple containing both positive and negative dirac containers
    x: array-like
        New dirac x positions
    y: array-like
        New dirac weights
    """
    x = np.asarray(x)
    y = np.asarray(y)
    # Positive weights go into the first container, negative into the second.
    for container, mask in ((containers[0], y >= 0), (containers[1], y < 0)):
        xs = x[mask]
        ys = y[mask]
        if len(xs):
            stem_set_data(container, xs, ys)
        else:
            stem_set_data(container, [], [])
def dirac_weights(ax, x, y, weights, **kwargs):
    """
    Show diracs' weights in a plot.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Current axes
    x: array-like or scalar
        Diracs' x positions
    y: array-like or scalar
        Diracs' weights' y-positions
    weights: array-like or scalar
        Diracs' weights
    **kwargs: dict
        all additional keyword arguments are passed to matplotlib.axes.Axes.text call
    """
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    weights = np.atleast_1d(weights)
    # Weight 1 is the implicit default and stays unannotated.
    for x_pos, y_pos, weight in zip(x, y, weights):
        if weight == 1:
            continue
        ax.text(x_pos, y_pos, '(' + str(weight) + ')', **kwargs)
# Laplace Region of Convergence
def plot_lroc(ax, roc, xmax=12, ymax=12):
    """Plot a Laplace region of convergence as two dashed boundary lines and
    a hatched band between them; infinite bounds are clipped to ±xmax.

    Returns [left line, right line, hatch] handles.
    """
    y1 = [-ymax, -ymax]
    y2 = [ymax, ymax]
    # Work on a copy: the previous code clipped infinite bounds in place,
    # silently mutating the caller's array.
    roc = np.asarray(roc, dtype=float).copy()
    mask = np.isinf(roc)
    roc[mask] = np.sign(roc[mask]) * xmax
    lleft, = ax.plot([roc[0], roc[0]], [y1[0], y2[0]], ls="--", c="rwth:blue-50")
    lright, = ax.plot([roc[1], roc[1]], [y1[0], y2[0]], ls="--", c="rwth:blue-50")
    hatch = ax.fill_between(roc, y1, y2, facecolor="none", hatch="\\",
                            edgecolor="rwth:blue-50", linewidth=0.0)
    return [lleft, lright, hatch]
def update_lroc(ax, plot, roc, xmax=12, ymax=12):
    """Update an existing Laplace-ROC plot (as returned by plot_lroc) with new
    bounds; infinite bounds are clipped to ±xmax.

    Returns the updated [left line, right line, hatch] list.
    """
    y1 = [-ymax, -ymax]
    y2 = [ymax, ymax]
    # Work on a copy: the previous code clipped infinite bounds in place,
    # silently mutating the caller's array.
    roc = np.asarray(roc, dtype=float).copy()
    mask = np.isinf(roc)
    roc[mask] = np.sign(roc[mask]) * xmax
    plot[0].set_xdata([roc[0], roc[0]])
    plot[1].set_xdata([roc[1], roc[1]])
    # fill_between artists cannot be updated in place; redraw the hatch.
    plot[2].remove()
    plot[2] = ax.fill_between(roc, y1, y2, facecolor="none", hatch="\\",
                              edgecolor="rwth:blue-50", linewidth=0.0)
    return plot
# z Region of Convergence
def plot_zroc(ax, roc, rmax=12):
    """Plot a z-domain region of convergence as a hatched annulus between the
    two radii in *roc*, plus the unit circle for orientation; infinite radii
    are clipped to ±rmax. Returns the hatched Polygon handle.
    """
    from matplotlib.patches import Circle
    # Work on a copy: the previous code clipped infinite bounds in place,
    # silently mutating the caller's array.
    roc = np.asarray(roc, dtype=float).copy()
    mask = np.isinf(roc)
    roc[mask] = np.sign(roc[mask]) * rmax
    # plot circles
    unitcircle = Circle((0, 0), radius=1, edgecolor="rwth:black", fill=False, linestyle='-')
    ax.add_artist(unitcircle)
    theta = np.linspace(0, 2 * np.pi, 1001)
    xs = np.outer(np.abs(roc), np.cos(theta))
    ys = np.outer(np.abs(roc), np.sin(theta))
    # Reverse the inner ring so both circles join into one closed annulus path.
    xs[1, :] = xs[1, ::-1]
    ys[1, :] = ys[1, ::-1]
    return ax.fill(np.ravel(xs), np.ravel(ys), facecolor="none", hatch="\\",
                   edgecolor="rwth:blue-50", linestyle='--')[0]
def annotate_order(ax, p, ord):
    """Label poles/zeros of order > 1 with their multiplicity.

    A label ``(n)`` is drawn at every position in p and, mirrored across
    the real axis, at its complex conjugate.

    Parameters
    ----------
    ax: matplotlib.axes.Axes
        Axes holding the pole-zero diagram
    p: array-like of complex
        Pole or zero positions
    ord: array-like of int
        Multiplicity of each entry in p
    """
    for index, order in enumerate(ord):
        if order <= 1:
            continue
        label = '(' + str(order) + ')'
        ax.text(np.real(p[index]), np.imag(p[index]), label, color='rwth:black')
        ax.text(np.real(p[index]), -np.imag(p[index]), label, color='rwth:black')
from scipy import signal # butter
def butter(cutoff, fs, order=5, type='Tiefpass', fdelta=0):
    """Butterworth filter of order n.

    Parameters
    ----------
    cutoff : float
        cutoff frequency (centre frequency for the bandpass case)
    fs : float
        sampling frequency, used to derive the Nyquist frequency
    order : float, optional
        order of the filter
    type: {'Tiefpass', 'Bandpass', 'Hochpass'}, optional
        type of filter (German labels for lowpass/bandpass/highpass)
    fdelta: float, optional
        bandwidth of the filter, only used for the bandpass type

    Returns
    -------
    b : ndarray
        numerator polynomial of the IIR filter
    a : ndarray
        denominator polynomial of the IIR filter
    """
    # map the German type labels onto scipy's btype strings
    btype = {'Tiefpass': 'lowpass', 'Bandpass': 'bandpass', 'Hochpass': 'highpass'}[type]
    # Nyquist frequency
    nyq = 0.5 * fs
    if btype == 'bandpass':
        # bandpass: normalized band edges centred around cutoff
        normal_cutoff = [(cutoff - fdelta / 2) / nyq, (cutoff + fdelta / 2) / nyq]
    else:
        # low-/highpass: single normalized cutoff frequency
        normal_cutoff = cutoff / nyq
    # coefficients in the z-domain
    return signal.butter(order, normal_cutoff, btype=btype, analog=False)
# Shortcuts
def butter_bandpass(f0, fdelta, fs, order=5):
    """Bandpass Butterworth filter centred at f0 with bandwidth fdelta.

    Thin wrapper around :func:`butter` with the type fixed to 'Bandpass'.

    See Also
    --------
    butter : Filter design using order and critical points
    """
    return butter(f0, fs, order, 'Bandpass', fdelta)
def butter_lowpass(cutoff, fs, order=5):
    """Lowpass Butterworth filter.

    Thin wrapper around :func:`butter` with the type fixed to 'Tiefpass'.

    See Also
    --------
    butter : Filter design using order and critical points
    """
    return butter(cutoff, fs, order, 'Tiefpass')
def butter_highpass(cutoff, fs, order=5):
    """Highpass Butterworth filter.

    Thin wrapper around :func:`butter` with the type fixed to 'Hochpass'.

    See Also
    --------
    butter : Filter design using order and critical points
    """
    return butter(cutoff, fs, order, 'Hochpass')
def filter(s, b, a):
    """Apply a digital filter to a signal.

    Filters s(n) in the z-domain with coefficient vectors b and a:

                        -1          -M
          b[0] + b[1]z   + ... + b[M] z
   G(z) = -------------------------------- S(z)
                        -1          -N
          a[0] + a[1]z   + ... + a[N] z

    Note: the name intentionally mirrors the signal-processing term and
    shadows the ``filter`` builtin inside this module.

    Parameters
    ----------
    s : array_like
        n-dimensional input array
    b : array_like
        numerator coefficient vector in a 1-D sequence.
    a : array_like
        denominator coefficient vector in a 1-D sequence.

    Returns
    -------
    g : array
        output of the digital filter.
    """
    return signal.lfilter(b, a, s)
# Colors
When using `rwth_nb.plots.colors`, the RWTH [Corporate Design](http://www.rwth-aachen.de/cms/root/Die-RWTH/Einrichtungen/Verwaltung/Stabsstellen/Marketing/~eqbm/Corporate-Design/) color scheme is stored in a dictionary called `rwth_colors`. When loading `rwth_nb.plots.mpl_decorations`, the RWTH colors are propagated to Matplotlib as well. The following colors may be used:
```
import matplotlib.pyplot as plt
from rwth_nb.plots import colors
# adapted from https://matplotlib.org/2.0.0/examples/color/named_colors.html
colors = colors.rwth_colors;
fontsize = 12
ncols = 5; nrows = len(colors.keys()) // ncols + 1;
fig, ax = plt.subplots(figsize=(12, 6))
X, Y = fig.get_dpi() * fig.get_size_inches() # Get height and width
w = X / ncols; h = Y / nrows
for i, name in enumerate(colors.keys()):
col = i % ncols
row = i // ncols
y = Y - (row * h) - h
xi_line = w * (col + 0.05); xf_line = w * (col + 0.25); xi_text = w * (col + 0.3)
ax.text(xi_text, y, name, fontsize=fontsize, horizontalalignment='left', verticalalignment='center')
ax.hlines(y + h * 0.1, xi_line, xf_line, color=colors[name], linewidth=(h * 0.6))
ax.set_xlim(0, X); ax.set_ylim(0, Y); ax.set_axis_off();
fig.subplots_adjust(left=0, right=1, top=1, bottom=0, hspace=0, wspace=0)
```
When plotting, colors are cycled through for each graph by following order:
> *blue, orange, green, red, purple, bordeaux, violet, black-50, maigrun, turquoise*
```
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
x = np.linspace(-2, 2, 501)
fig, ax = plt.subplots()
ax.set_ylim(-2, 2)
rwth_plt.axis(ax); rwth_plt.grid(ax)
for n in range(10):
ax.plot(x, x**n, label=r'$x^{}$'.format(n))
ax.legend();
```
For custom coloring, use as below.
```
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
x = np.linspace(-4,4);
fig,ax = plt.subplots();
ax.plot(x, x**2, 'rwth:blue');
ax.plot(x, x**2 + np.random.randn(len(x)), '.', color='rwth:green');
```
This code is licensed under the [MIT license](https://opensource.org/licenses/MIT).
| /rwth_nb-0.1.8.tar.gz/rwth_nb-0.1.8/docs/source/examples/RWTH Colors.ipynb | 0.641198 | 0.885928 | RWTH Colors.ipynb | pypi |
# Miscellaneous
1. [Transforms](#Transforms)
1. [Fourier Transform](#Fourier-Transform)
2. [Laplace Transform](#Laplace-Transform)
3. [$z$-Transform](#$z$-Transform)
---
## Transforms
Following transforms are defined in `rwth_nb.misc.transforms`:
- [Fourier Transform](#Fourier-Transform)
- [Laplace Transform](#Laplace-Transform)
- [$z$-Transform](#$z$-Transform)
*Note that plotting basics are described in [RWTH Plots](RWTH%20Plots.ipynb).*
### Fourier Transform
```dft(s, fs, NFFT)```
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plots
import rwth_nb.misc.transforms as rwth_transforms
# Time Domain
fs = 44100 # very high sampling rate assumed, to simulate quasi-continuous time and frequency axis
t = np.linspace(-2.5, 2.5, 5*fs)
s = np.sin(2*np.pi*500*t)
# Fourier Transform
S,f = rwth_transforms.dft(s, fs)
# plots
fig,axs = plt.subplots(2,1, **rwth_plots.landscape);
ax = axs[0]; ax.plot(t*1000, s);
ax.set_xlabel(r'$\rightarrow t$ [ms]'); ax.set_ylabel(r'$\uparrow s(t)$')
ax.set_xlim([-11, 11]); ax.set_ylim([-1.1, 1.19]); rwth_plots.axis(ax);
ax = axs[1]; ax.plot(f, np.abs(S));
ax.set_xlabel(r'$\rightarrow f$ [Hz]'); ax.set_ylabel(r'$\uparrow |S(f)|$')
ax.set_xlim([-1100, 1100]); ax.set_ylim([0, 0.65]); rwth_plots.axis(ax);
```
Inverse Fourier transform
```idft(S, Ntime, NFFT)```
```
s2 = rwth_transforms.idft(S, len(s));
fig,ax = plt.subplots(**rwth_plots.landscape);
ax.plot(t*1000, np.real(s2));
ax.set_xlabel(r'$\rightarrow t$ [ms]'); ax.set_ylabel(r'$\uparrow \mathcal{F}^{-1}\{S(f)\}$')
ax.set_xlim([-11, 11]); ax.set_ylim([-1.1, 1.19]); rwth_plots.axis(ax);
```
### Laplace Transform
Pole-zero plot is explained in [RWTH Plots](RWTH%20Plots.ipynb).
Inverse Laplace Transform
```ilaplace_ht(t, H0, pp, pz, ord_p, ord_z, roc)```
```ilaplace_Hf(f, H0, pp, pz, ord_p, ord_z, dB)```
```
fig,axs = plt.subplots(1, 2, figsize=(10, 4))
t = np.linspace(-6, 6, 1024)
f = np.linspace(-6, 6, 1024)
pp = np.array([-2]); pz = np.array([]) # Poles and Zeros
ord_p = np.array([1]); ord_z = np.array([]) # Poles' and Zeros' orders
roc = np.array([-2, np.inf]) # region of convergence
H0 = 1
# Time Domain
s1, t1d , s1d = rwth_transforms.ilaplace_ht(t, H0, pp, pz, ord_p, ord_z, roc)
ax = axs[0]
ax.set_xlabel(r'$\rightarrow t$'); ax.set_ylabel(r'$\uparrow s_1(t)$')
rwth_plots.grid(ax); rwth_plots.axis(ax)
ax.set_xlim([-5.5,5.5]); axs[0].set_ylim([-0.1,1.05]);
ax.plot(t, np.real(s1))
rwth_plots.plot_dirac(axs[0], t1d, s1d);
# Frequency Domain
S1f = rwth_transforms.ilaplace_Hf(f, H0, pp, pz, ord_p, ord_z, dB=False)
ax = axs[1]
ax.set_xlabel(r'$\rightarrow f$'); ax.set_ylabel(r'$\uparrow S_1(f)$')
rwth_plots.grid(ax); rwth_plots.axis(ax)
ax.set_xlim([-5.5,5.5]); ax.set_ylim([-0.1,0.55]);
ax.plot(f, S1f);
```
### $z$-Transform
Pole-zero plot is explained in [RWTH Plots](RWTH%20Plots.ipynb).
Inverse $z$ Transform
```iz_hn(n, H0, pp, pz, ord_p, ord_z, roc)```
```iz_Hf(f, H0, pp, pz, ord_p, ord_z, dB)```
```
fig,axs = plt.subplots(1, 2, figsize=(10, 4))
n = np.linspace(-6, 6, 13)
f = np.linspace(-6, 6, 1024)
zp = np.array([.5, 2]); zz = np.array([0]) # Poles and Zeros
ord_p = np.array([1, 1]); ord_z = np.array([1]) # Poles' and Zeros' orders
roc = np.array([.5, 2]) # region of convergence
H0 = -3/2
# Time Domain
s1= rwth_transforms.iz_hn(n, H0, zp, zz, ord_p, ord_z, roc)
ax = axs[0]
ax.set_xlabel(r'$\rightarrow n$'); ax.set_ylabel(r'$\uparrow s_1(n)$')
rwth_plots.grid(ax); rwth_plots.axis(ax)
ax.set_xlim([-5.5,5.5]); axs[0].set_ylim([-0.1,1.05]);
rwth_plots.stem(axs[0], n, s1);
# Frequency Domain
S1f = rwth_transforms.iz_Hf(f, H0, zp, zz, ord_p, ord_z, dB=False)
ax = axs[1]
ax.set_xlabel(r'$\rightarrow f$'); ax.set_ylabel(r'$\uparrow S_1(f)$')
rwth_plots.grid(ax); rwth_plots.axis(ax)
ax.set_xlim([-5.5,5.5]); ax.set_ylim([0.3, 3.1]);
ax.plot(f, S1f);
```
---
This code is licensed under the [MIT license](https://opensource.org/licenses/MIT).
| /rwth_nb-0.1.8.tar.gz/rwth_nb-0.1.8/docs/source/examples/RWTH Misc.ipynb | 0.647687 | 0.976602 | RWTH Misc.ipynb | pypi |
# Plots with Matplotlib
`rwth_nb.plots.mpl_decorations` extends Matplotlib to some useful functionality explained below
1. [Simple Plots](#Simple-Plots)
1. [Graph Plot](#Graph-Plot)
2. [Stem Plot](#Stem-Plot)
3. [Multiple Plots](#Multiple-Plots)
4. [Updating Plots](#Updating-Plots)
2. [Annotations](#Annotations)
1. [Ticks](#Ticks)
2. [Distances](#Distances)
3. [Misc](#Misc)
1. [Signal processing](#Signal-processing)
1. [Dirac Impulses](#Dirac-impulses)
2. [Pole-Zero Diagrams](#Pole-Zero-Diagrams)
---
## Simple Plots
### Graph Plot
See also:
* [Matplotlib pyplot tutorial](https://matplotlib.org/3.2.1/tutorials/introductory/pyplot.html)
* [Axis Styles](#Axis-Styles)
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
# numpy array
x = np.linspace(-5,5)
# create figure
fig, ax = plt.subplots()
# call axis and grid for beautification
rwth_plt.axis(ax);
rwth_plt.grid(ax)
# plot to axis while color is in RWTH Colors
ax.plot(x, x**2, 'rwth:blue');
# set x- and y-labels
ax.set_xlabel(r'$\rightarrow x$');
ax.set_ylabel(r'$\uparrow f(x)=x^2$');
```
where
* `rwth_plt.axis(ax)` relocates the axes spines,
* `rwth_plt.grid(ax)` displays an additional grid.
#### Axis Styles
Relocate axes spines.
```
rwth_plt.axis(ax)
```
#### Grid
Displays grid.
```
rwth_plt.grid(ax)
```
---
### Stem Plot
See also:
* [Matplotlib stem examples](https://matplotlib.org/3.2.1/gallery/lines_bars_and_markers/stem_plot.html)
* [Axis Styles](#Axis-Styles)
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
n = np.linspace(-5, 5, 11)
fig, ax = plt.subplots()
rwth_plt.axis(ax); rwth_plt.grid(ax)
ax.set_xlabel(r'$\rightarrow n$');
ax.set_ylabel(r'$\uparrow f(n)=n$');
# stem plot
rwth_plt.stem(ax, n, n, 'rwth:blue');
```
---
### Multiple Plots
Plots can be combined by using `matplotlib.pyplot.subplots()`, where arguments are unpacked from `rwth_plt.landscape` for 16/9 landscape view.
Optionally, own figure sizes can be defined.
See also: [Matplotlib subplots](https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.subplots.html)
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
t = np.linspace(-5, 5, 10001)
fig, axs = plt.subplots(1, 2, **rwth_plt.landscape)
ax = axs[0]
rwth_plt.axis(ax); rwth_plt.grid(ax)
ax.plot(t, t**2, 'rwth:blue')
ax = axs[1]
rwth_plt.axis(ax); rwth_plt.grid(ax)
ax.plot(t, t**3, 'rwth:red');
```
---
### Updating Plots
Plots can be dynamically updated for more efficient usage.
This is essential when using [Widgets](RWTH%20Widgets.ipynb).
#### Graphs
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
from ipywidgets import widgets
# initial plot
# create figure
fig, ax = plt.subplots()
# call axis and grid for beautification
rwth_plt.axis(ax);
rwth_plt.grid(ax)
# plot to axis
# this time: save plotted lines to a variable
x = np.linspace(-5, 5, 10001)
n = 1
line, = ax.plot(x, x**n, 'rwth:blue');
# set x- and y-labels
ax.set_xlabel(r'$\rightarrow x$');
ax.set_ylabel(r'$\uparrow f(x)=x^{}$'.format(n));
# set up widget for updating n (see RWTH Widgets.ipynb)
@widgets.interact(n=widgets.IntSlider(min=1, max=6, step=1, description='$n$', style=rwth_plt.wdgtl_style))
def update_n(n):
# updating plot
# change lines' y-data
line.set_ydata(x**n)
# change label
ax.set_ylabel(r'$\uparrow f(x)=x^{}$'.format(n));
```
#### Stems
Stem plots are stored in so-called containers. These can be manipulated efficiently using
```
stem_set_data(container, x, y)
stem_set_xdata(container, x)
stem_set_ydata(container, y)
```
```
import matplotlib.pyplot as plt
import numpy as np
import rwth_nb.plots.mpl_decorations as rwth_plt
from ipywidgets import widgets
a = .5
n = np.linspace(-5, 5, 11)
fig, ax = plt.subplots()
rwth_plt.axis(ax); rwth_plt.grid(ax)
ax.set_xlabel(r'$\rightarrow n$');
ax.set_ylabel(r'$\uparrow f(n)={}\cdot n$'.format(a));
# initial stem plot
stem_container = rwth_plt.stem(ax, n, n, 'rwth:blue');
# set up widget for updating n (see RWTH Widgets.ipynb)
@widgets.interact(a=widgets.FloatSlider(min=-1, max=1, step=.1, value=.5, description='$a$', style=rwth_plt.wdgtl_style))
def update_a(a):
# change stem data
rwth_plt.stem_set_ydata(stem_container, a*n)
# change label
ax.set_ylabel(r'$\uparrow f(x)={}\cdot n$'.format(a));
```
---
## Annotations
### Ticks
`annotate_xtick`
```
%matplotlib inline
x0 = 2
fig,ax = plt.subplots(); ax.plot(x, (x-x0)**2, 'rwth:blue');
ax.set_xlabel(r'$\rightarrow x$'); ax.set_ylabel(r'$\uparrow f(x)=(x-x_0)^2$')
rwth_plt.axis(ax); rwth_plt.grid(ax);
rwth_plt.annotate_xtick(ax, r'$x_0$', x0, x0**2 );
```
`annotate_ytick`
```
x0 = 2
fig,ax = plt.subplots(); ax.plot(x, (x-x0)**2, 'rwth:blue');
ax.set_xlabel(r'$\rightarrow x$'); ax.set_ylabel(r'$\uparrow f(x)=(x-x_0)^2$')
rwth_plt.axis(ax); rwth_plt.grid(ax);
rwth_plt.annotate_ytick(ax, r'$y_0$', -1, 4 );
```
### Distances
`annotate_distance`
```
Delta = 3
fig,ax = plt.subplots(); ax.plot(x, np.exp(-(x/np.sqrt(Delta))**2), 'rwth:blue');
ax.set_xlabel(r'$\rightarrow x$');
rwth_plt.axis(ax); rwth_plt.grid(ax);
rwth_plt.annotate_distance(ax, r'$\Delta$', [-Delta/2,.5], [Delta/2,.5]);
```
---
## Misc
### Signal processing
#### Dirac impulses
`plot_dirac`, `dirac_weights`
```
# set up plot
fig,ax = plt.subplots(1,1);
ax.set_xlim([-2.75, 2.75]); ax.set_ylim([0, 2.4]); rwth_plt.grid(ax); rwth_plt.axis(ax);
# dirac x positions
dirac_x = [-1, 0, 1]
# dirac weights
dirac_weights = [1, 2, 1]
# plot diracs
rwth_plt.plot_dirac(ax, dirac_x, dirac_weights, 'rwth:blue');
# show weights
rwth_plt.dirac_weights(ax, dirac_x, dirac_weights, dirac_weights, color='rwth:black')
```
#### Pole-Zero Diagrams
`plot_lroc` Laplace Domain
```
# poles (pp) and zeros (pz)
pp = np.array([-1, 1+1j]); pz = np.array([0, 3-2j])
# poles' and zeros' orders
ord_p = np.array([1, 2]); ord_z = np.array([1, 1])
# region of convergence
roc = np.array([np.max(np.real(pp)), np.inf])
# set up plot
fig, ax = plt.subplots()
ax.set_xlabel(r'$\rightarrow \mathrm{Re}$'); ax.set_ylabel(r'$\uparrow \mathrm{Im}$');
ax.set_xlim(-2.5, 3.5); ax.set_ylim(-5, 5); rwth_plt.grid(ax); rwth_plt.axis(ax); ax.set_title('Pole-Zero Diagram');
# plot poles and zeros
ax.plot(np.real(pp), np.imag(pp), **rwth_plt.style_poles); ax.plot(np.real(pp), -np.imag(pp), **rwth_plt.style_poles);
ax.plot(np.real(pz), np.imag(pz), **rwth_plt.style_zeros); ax.plot(np.real(pz), -np.imag(pz), **rwth_plt.style_zeros);
# show poles' and zeros' orders
rwth_plt.annotate_order(ax, pp, ord_p)
rwth_plt.annotate_order(ax, pz, ord_z)
# show S_0
S_0 = 1
ax.text(2, 4, r'$S_0 =$ ' + str(S_0), fontsize=12, bbox=rwth_plt.wbbox)
# plot region of convergence
rwth_plt.plot_lroc(ax, roc);
```
`plot_zroc` $z$-Domain
```
# poles (zp) and zeros (zz)
zp = np.array([0.5, 1+1j]); zz = np.array([0])
# poles' and zeros' orders
ord_p = np.array([1, 2]); ord_z = np.array([1])
# region of convergence
roc = np.array([0.5, 1+1j])
# set up plot
fig, ax = plt.subplots()
ax.set_xlabel(r'$\rightarrow \mathrm{Re}$'); ax.set_ylabel(r'$\uparrow \mathrm{Im}$');
rwth_plt.grid(ax); rwth_plt.axis(ax); ax.set_title('Pole-Zero Diagram');
# square plot (!)
ax.set_aspect('equal')
# plot poles and zeros
ax.plot(np.real(zp), np.imag(zp), **rwth_plt.style_poles); ax.plot(np.real(zp), -np.imag(zp), **rwth_plt.style_poles);
ax.plot(np.real(zz), np.imag(zz), **rwth_plt.style_zeros); ax.plot(np.real(zz), -np.imag(zz), **rwth_plt.style_zeros);
# show orders
rwth_plt.annotate_order(ax, zp, ord_p)
rwth_plt.annotate_order(ax, zz, ord_z)
# plot region of convergence
rwth_plt.plot_zroc(ax, roc);
```
---
This code is licensed under the [MIT license](https://opensource.org/licenses/MIT).
| /rwth_nb-0.1.8.tar.gz/rwth_nb-0.1.8/docs/source/examples/RWTH Plots.ipynb | 0.591723 | 0.960025 | RWTH Plots.ipynb | pypi |
import matplotlib.pyplot as plt
from RWTHColors.colors import *
from cycler import cycler
class ColorManager:
    """Provides the RWTH corporate-design colors and installs an RWTH
    color cycle as matplotlib's default property cycle.

    Creating an instance sets ``plt.rcParams["axes.prop_cycle"]`` and
    propagates the chosen color format (HEX or RGB) to all color classes.
    """

    RWTHBlau = RWTHBlau()
    RWTHSchwarz = RWTHSchwarz()
    RWTHMagenta = RWTHMagenta()
    RWTHGelb = RWTHGelb()
    RWTHPetrol = RWTHPetrol()
    RWTHTuerkis = RWTHTuerkis()
    RWTHGruen = RWTHGruen()
    RWTHMaiGruen = RWTHMaiGruen()
    RWTHOrange = RWTHOrange()
    RWTHRot = RWTHRot()
    RWTHBordeaux = RWTHBordeaux()
    RWTHViolett = RWTHViolett()
    RWTHLila = RWTHLila()

    # all colors, in the order used by plot_color_palette
    color_list = [RWTHBlau,
                  RWTHPetrol,
                  RWTHTuerkis,
                  RWTHGruen,
                  RWTHMaiGruen,
                  RWTHOrange,
                  RWTHRot,
                  RWTHBordeaux,
                  RWTHViolett,
                  RWTHLila,
                  RWTHSchwarz,
                  RWTHMagenta,
                  RWTHGelb]

    # order of the matplotlib property cycle (deliberately differs from
    # color_list for better contrast between consecutive graphs)
    _cycle_order = [RWTHBlau,
                    RWTHOrange,
                    RWTHGruen,
                    RWTHRot,
                    RWTHViolett,
                    RWTHBordeaux,
                    RWTHLila,
                    RWTHPetrol,
                    RWTHMaiGruen,
                    RWTHTuerkis]

    # intensity grades used by the full cycle and the palette plot
    _shades = [100, 75, 50, 25, 10]

    def __init__(self, frmt: str = "HEX", cycle='default'):
        """
        Parameters
        ----------
        frmt : str
            Color format returned by the color classes, "HEX" or "RGB".
        cycle : str
            'default' installs the 10-color full-intensity cycle,
            'full' the 50-color cycle covering all intensity grades.
        """
        if frmt not in ["HEX", "RGB"]:
            raise ValueError("frmt must be HEX or RGB not %s" % frmt)

        # build both cycles from _cycle_order instead of duplicating
        # 50 hardcoded color literals (removes copy-paste drift risk)
        self.rwth_color_cycle = cycler(color=[c.p(100) for c in self._cycle_order])
        self.rwth_full_color_cycle = cycler(
            color=[c.p(shade) for shade in self._shades for c in self._cycle_order])

        if cycle == 'default':
            plt.rcParams["axes.prop_cycle"] = self.rwth_color_cycle
        elif cycle == 'full':
            plt.rcParams["axes.prop_cycle"] = self.rwth_full_color_cycle
        else:
            raise ValueError('Unknown cycle setting {}'.format(cycle))

        self.frmt = frmt

    @property
    def frmt(self):
        """Active color format, either "HEX" or "RGB"."""
        return self._frmt

    @frmt.setter
    def frmt(self, frmt: str = "HEX"):
        if frmt not in ["HEX", "RGB"]:
            raise ValueError("frmt must be HEX or RGB not %s" % frmt)
        self._frmt = frmt
        # propagate the format to every color class so subsequent
        # Color.p() calls return the requested representation
        for color in ColorManager.color_list:
            type(color).frmt = frmt

    @classmethod
    def plot_color_palette(cls):
        """Scatter-plot every RWTH color in all five intensity grades.

        Returns
        -------
        (matplotlib.figure.Figure, matplotlib.axes.Axes)
        """
        fig, ax = plt.subplots(1, 1, figsize=(5, 5), dpi=300)

        for y, c in enumerate(cls.color_list):
            for x, shade in enumerate(cls._shades, start=1):
                ax.scatter(x, y + 1, c=c.p(shade), s=150)

        ylabels = [c.__class__.__name__ for c in cls.color_list]
        ax.set_xticks([1, 2, 3, 4, 5], ["100 %", "75 %", "50 %", "25 %", "10 %"], rotation=45)
        ax.set_yticks(list(range(1, 14)), ylabels, rotation=45)
        plt.tight_layout()
        return fig, ax
from abc import ABC, abstractmethod
class Color(ABC):
    """Base class for RWTH corporate-design colors.

    Subclasses provide ``HEX`` and ``RGB`` dictionaries mapping the
    intensity grades 100/75/50/25/10 to a hex string or an (r, g, b)
    tuple. The class attribute ``frmt`` selects which representation
    :meth:`power` (alias :meth:`p`) returns.
    """

    # representation returned by power()/p(): "HEX" or "RGB"
    frmt = "HEX"

    def __init__(self, frmt: str = "HEX"):
        if frmt not in ["HEX", "RGB"]:
            raise ValueError("frmt must be HEX or RGB not %s" % frmt)
        self.frmt = frmt

    @property
    @abstractmethod
    def HEX(self) -> dict:
        """Hex strings keyed by intensity grade."""

    @property
    @abstractmethod
    def RGB(self) -> dict:
        """RGB tuples keyed by intensity grade."""

    @classmethod
    def power(cls, p: int = 100):
        """Return the color at intensity grade ``p`` percent."""
        if p not in [10, 25, 50, 75, 100]:
            raise ValueError("Power must be 10, 25, 50, 75 or 100 but not %d" % p)
        table = cls.HEX if cls.frmt == "HEX" else cls.RGB
        return table[p]

    @classmethod
    def p(cls, p: int = 100):
        """Shorthand for :meth:`power`."""
        return cls.power(p)
class RWTHBlau(Color):
    """RWTH blue in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#00549F', 75: '#407FB7', 50: '#8EBAE5', 25: '#C7DDF2', 10: '#E8F1FA'}
    RGB = {100: (0, 84, 159), 75: (64, 127, 183), 50: (142, 186, 229), 25: (199, 221, 242), 10: (232, 241, 250)}
class RWTHSchwarz(Color):
    """RWTH black in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#000000', 75: '#646567', 50: '#9C9E9F', 25: '#CFD1D2', 10: '#ECEDED'}
    RGB = {100: (0, 0, 0), 75: (100, 101, 103), 50: (156, 158, 159), 25: (207, 209, 210), 10: (236, 237, 237)}
class RWTHMagenta(Color):
    """RWTH magenta in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#E30066', 75: '#E96088', 50: '#F19EB1', 25: '#F9D2DA', 10: '#FDEEF0'}
    RGB = {100: (227, 0, 102), 75: (233, 96, 136), 50: (241, 158, 177), 25: (249, 210, 218), 10: (253, 238, 240)}
class RWTHGelb(Color):
    """RWTH yellow in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#FFED00', 75: '#FFF055', 50: '#FFF59B', 25: '#FFFAD1', 10: '#FFFDEE'}
    RGB = {100: (255, 237, 0), 75: (255, 240, 85), 50: (255, 245, 155), 25: (255, 250, 209), 10: (255, 253, 238)}
class RWTHPetrol(Color):
    """RWTH petrol in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#006165', 75: '#2D7F83', 50: '#7DA4A7', 25: '#BFD0D1', 10: '#E6ECEC'}
    RGB = {100: (0, 97, 101), 75: (45, 127, 131), 50: (125, 164, 167), 25: (191, 208, 209), 10: (230, 236, 236)}
class RWTHTuerkis(Color):
    """RWTH turquoise in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#0098A1', 75: '#00B1B7', 50: '#89CCCF', 25: '#CAE7E7', 10: '#EBF6F6'}
    RGB = {100: (0, 152, 161), 75: (0, 177, 183), 50: (137, 204, 207), 25: (202, 231, 231), 10: (235, 246, 246)}
class RWTHGruen(Color):
    """RWTH green in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#57AB27', 75: '#8DC060', 50: '#B8D698', 25: '#DDEBCE', 10: '#F2F7EC'}
    RGB = {100: (87, 171, 39), 75: (141, 192, 96), 50: (184, 214, 152), 25: (221, 235, 206), 10: (242, 247, 236)}
class RWTHMaiGruen(Color):
    """RWTH may green in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#BDCD00', 75: '#D0D95C', 50: '#E0E69A', 25: '#F0F3D0', 10: '#F9FAED'}
    RGB = {100: (189, 205, 0), 75: (208, 217, 92), 50: (224, 230, 154), 25: (240, 243, 208), 10: (249, 250, 237)}
class RWTHOrange(Color):
    """RWTH orange in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#F6A800', 75: '#FABE50', 50: '#FDD48F', 25: '#FEEAC9', 10: '#FFF7EA'}
    RGB = {100: (246, 168, 0), 75: (250, 190, 80), 50: (253, 212, 143), 25: (254, 234, 201), 10: (255, 247, 234)}
class RWTHRot(Color):
    """RWTH red in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#CC071E', 75: '#D85C41', 50: '#E69679', 25: '#F3CDBB', 10: '#FAEBE3'}
    RGB = {100: (204, 7, 30), 75: (216, 92, 65), 50: (230, 150, 121), 25: (243, 205, 187), 10: (250, 235, 227)}
class RWTHBordeaux(Color):
    """RWTH bordeaux in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#A11035', 75: '#B65256', 50: '#CD8B87', 25: '#E5C5C0', 10: '#F5E8E5'}
    RGB = {100: (161, 16, 53), 75: (182, 82, 86), 50: (205, 139, 135), 25: (229, 197, 192), 10: (245, 232, 229)}
class RWTHViolett(Color):
    """RWTH violet in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#612158', 75: '#834E75', 50: '#A8859E', 25: '#D2C0CD', 10: '#EDE5EA'}
    RGB = {100: (97, 33, 88), 75: (131, 78, 117), 50: (168, 133, 158), 25: (210, 192, 205), 10: (237, 229, 234)}
class RWTHLila(Color):
    """RWTH lilac in the intensity grades 100/75/50/25/10 percent."""
    HEX = {100: '#7A6FAC', 75: '#9B91C1', 50: '#BCB5D7', 25: '#DEDAEB', 10: '#F2F0F7'}
    RGB = {100: (122, 111, 172), 75: (155, 145, 193), 50: (188, 181, 215), 25: (222, 218, 235), 10: (242, 240, 247)}
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Calculate the mean of the data set.

        Returns:
            float: mean of the data set
        """
        # note: raises ZeroDivisionError when no data has been loaded
        self.mean = 1.0 * sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        squared_deviation = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(squared_deviation / n)
        return self.stdev

    def plot_histogram(self):
        """Output a histogram of the instance variable data using
        the matplotlib pyplot library.
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        coefficient = 1.0 / (self.stdev * math.sqrt(2 * math.pi))
        return coefficient * math.exp(-0.5 * ((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and a plot of the
        probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # interval between consecutive x sample points
        interval = 1.0 * (max_range - min_range) / n_spaces

        # x values to visualize and their pdf values
        x = [min_range + interval * i for i in range(n_spaces)]
        y = [self.pdf(value) for value in x]

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)

        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # bug fix: this label used to be set on axes[0] a second time,
        # leaving the pdf subplot unlabelled
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Add together two Gaussian distributions.

        The means are added and the standard deviations combined in
        quadrature.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Return the characteristics of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
import numpy
import math
import pandas as pnd
import utils_noroot as utnr
import matplotlib.pyplot as plt
from data_splitter import splitter as dsplit
#----------------------------
class calculator:
log = utnr.getLogger('lep_reso')
#----------------------------------
def __init__(self, data=None, binning=None):
self._rdf = data
self._binning = binning
self._plot_dir= None
self._suffix = None
self._initialized = False
#----------------------------------
@property
def plot_dir(self):
return self._plot_dir
@plot_dir.setter
def plot_dir(self, value):
self._plot_dir = utnr.make_dir_path(value)
#----------------------------------
def _get_data(self):
rdf = self._rdf
rdf = rdf.Filter('TMath::Abs(L1_TRUEID) == 11')
rdf = rdf.Filter('TMath::Abs(L2_TRUEID) == 11')
rdf = rdf.Define('L1_TRUE_P' , 'TVector3 l1(L1_TRUEP_X, L1_TRUEP_Y, L1_TRUEP_Z); return l1.Mag();')
rdf = rdf.Define('L2_TRUE_P' , 'TVector3 l2(L2_TRUEP_X, L2_TRUEP_Y, L2_TRUEP_Z); return l2.Mag();')
rdf = rdf.Define('L1_TRUE_ETA', 'TVector3 l1(L1_TRUEP_X, L1_TRUEP_Y, L1_TRUEP_Z); return l1.Eta();')
rdf = rdf.Define('L2_TRUE_ETA', 'TVector3 l2(L2_TRUEP_X, L2_TRUEP_Y, L2_TRUEP_Z); return l2.Eta();')
rdf = rdf.Redefine('L1_HasBremAdded', 'int(L1_HasBremAdded)')
rdf = rdf.Redefine('L2_HasBremAdded', 'int(L2_HasBremAdded)')
d_lep1 = rdf.AsNumpy(['L1_P', 'L1_ETA', 'L1_TRUE_P', 'L1_TRUE_ETA', 'L1_HasBremAdded'])
d_lep2 = rdf.AsNumpy(['L2_P', 'L2_ETA', 'L2_TRUE_P', 'L2_TRUE_ETA', 'L2_HasBremAdded'])
df_1 = pnd.DataFrame(d_lep1)
df_2 = pnd.DataFrame(d_lep2)
df_1.columns= ['p', 'eta', 'tp', 'teta', 'brem']
df_2.columns= ['p', 'eta', 'tp', 'teta', 'brem']
df = pnd.concat([df_1, df_2])
df = df.reset_index(drop=True)
return df
#----------------------------------
def _get_resolution(self, df, bound):
size = df.shape[0]
p =df.p.to_numpy()
tp=df.tp.to_numpy()
dp = p - tp
i_size = dp.size
l_dp = utnr.remove_outliers(dp, l_zscore=[4, 4, 3])
dp = numpy.array(l_dp)
f_size = dp.size
if size > 0:
rms2 = numpy.sum( dp ** 2 ) / size
else:
rms2 = math.nan
rms = math.sqrt(rms2)
minp, maxp = bound
self.log.info(f'{size:<20}{minp:<10.0f}{maxp:<10.0f}{rms:<20.0f}{i_size:<20}{f_size:<20}')
if self._plot_dir is not None:
self._plot_dist(dp, minp, maxp, rms)
return rms
#----------------------------------
def _get_bounds(self):
l_val = self._binning['p']
size = len(l_val)
l_bound = [ (- math.inf , l_val[0]) ]
l_bound += [ (l_val[index], l_val[index + 1]) for index in range(size - 1) ]
l_bound += [ (l_val[-1] , math.inf) ]
return l_bound
#----------------------------------
def _plot_brem(self, d_res, label=None, ax=None):
l_mom = [ (high + low) / 2. for low, high in d_res ]
l_err = [ (high - low) / 2. for low, high in d_res ]
l_res = [ 2 * res/(high + low) for (low, high), res in d_res.items() ]
ax.errorbar(l_mom, l_res, xerr=l_err, marker='o', linestyle='None', label=label)
#----------------------------------
def _plot_reso(self, d_res_0, d_res_1):
fig = plt.figure()
ax = fig.add_subplot(111)
self._plot_brem(d_res_0, 'No brem', ax)
self._plot_brem(d_res_1, 'Brem', ax)
plt.ylabel('$\sigma(p)/p$')
plt.legend()
plt.savefig(f'{self._plot_dir}/resolution.png')
plt.close('all')
#----------------------------------
def _plot_dist(self, dp, minp, maxp, sg):
if dp.size == 0:
return
mu = numpy.mean(dp)
plt.hist(dp, range=(mu-4*sg, mu+4*sg), alpha=0.7, bins=30, label='$p_{reco} - p_{true}$')
plt.axvline(x=mu - sg, color='red', label='$\mu-\sigma$', linestyle='-')
plt.axvline(x=mu + sg, color='red', label='$\mu+\sigma$', linestyle='-')
plt.legend()
plt.title(f'$p\in${minp:.0f}[MeV]-{maxp:.0f}[MeV]')
plt.savefig(f'{self._plot_dir}/{self._suffix}/dist_{minp:.0f}_{maxp:.0f}.png')
plt.close('all')
#----------------------------------
def _check_sizes(self, l_df, l_bound):
size_df = len(l_df)
size_bn = len(l_bound)
if size_df != size_bn:
self.log.error(f'Sizes of bounds and dataframe list differ: {size_bn}/{size_df}')
raise
#----------------------------------
def get_resolution(self):
    '''Compute momentum resolutions split by brem category.

    Splits the input data into brem == 0 and brem == 1 subsamples,
    calculates per-momentum-bin resolutions for each, optionally plots
    the comparison, and returns two dictionaries mapping the stringified
    (low, high) bin bounds to the resolution values.
    '''
    df = self._get_data()
    df_0 = df[df.brem == 0]
    df_1 = df[df.brem == 1]
    d_res_0 = self._calculate(df_0, 'nobrem')
    d_res_1 = self._calculate(df_1, 'brem')
    if self._plot_dir is not None:
        self._plot_reso(d_res_0, d_res_1)
    # Stringify tuple keys so the result is JSON-serializable
    d_res_0 = {str(key) : val for key, val in d_res_0.items()}
    d_res_1 = {str(key) : val for key, val in d_res_1.items()}
    return d_res_0, d_res_1
#----------------------------------
def _calculate(self, df, suffix):
    '''Calculate the resolution in each momentum bin of `df`.

    `suffix` ("brem"/"nobrem") labels the plot subdirectory.  The dataframe
    is split according to self._binning, one resolution is computed per bin,
    and a {(low, high) : resolution} dictionary is returned.
    '''
    self._suffix = suffix
    utnr.make_dir_path(f'{self._plot_dir}/{self._suffix}')
    self.log.info(f'Calculating {suffix} resolutions')
    obj = dsplit(df, self._binning, spectators=['eta', 'tp', 'teta'])
    l_df = obj.get_datasets()
    l_bound = self._get_bounds()
    # One dataset per bound is required by the zip below
    self._check_sizes(l_df, l_bound)
    self.log.info(f'{"Dataset size":<20}{"Low":<10}{"High":<10}{"RMS [MeV]":<20}{"Original":<20}{"Filtered":<20}')
    d_res = { bound : self._get_resolution(df, bound) for df, bound in zip(l_df, l_bound) }
    return d_res
#---------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/lep_reso.py | 0.590425 | 0.367043 | lep_reso.py | pypi |
import utils_noroot as utnr
import read_calibration as rcal
import utils
from rk.cutflow import cutflow
from rk.efficiency import efficiency
from rk.selection import selection as rksl
import os
import re
import ROOT
#-----------------------------------------------------------
class pr_getter:
    '''Builds a RooWorkspace with partially-reconstructed background shapes.

    MC ntuples for the requested process/dataset/trigger are filtered with
    the analysis selection, split by brem multiplicity and fitted with
    RooKeysPdf in both the reconstructed and the true B mass.
    '''
    log = utnr.getLogger(__name__)
    def __init__(self, proc, dset, trig, vers, q2bin, selection):
        self._proc  = proc
        self._dset  = dset
        self._trig  = trig
        self._vers  = vers
        self._sele  = selection
        self._q2bin = q2bin
        self._wks      = ROOT.RooWorkspace('wks')
        self._tree     = None
        self._min_mass = None
        self._max_mass = None
        self._max_evt  = -1          # negative -> process all events
        self._rho      = +1          # RooKeysPdf smoothing parameter
        self._bkg_cat_cut    = None
        self._diagnostic_dir = None
        self._l_trig_sel = ['ETOS', 'GTIS']
        self._l_trig_cal = ['gtis_inclusive', 'L0TIS_EM', 'L0TIS_MH', 'L0ElectronTIS', 'L0ElectronHAD', 'L0HadronElEL']
        self._l_trig     = self._l_trig_sel + self._l_trig_cal
        self._l_proc     = ['bpXcHs_ee', 'bdXcHs_ee']
        self._l_dset     = ['r1', 'r2p1', '2016', '2017', '2018']
        self._l_vers     = ['v10.11tf']
        self._l_sele     = ['final_nobdt_gorder_wide']
        self._l_q2bin    = ['jpsi', 'psi2']
        # Keeps temporary TFiles alive while their trees are in use
        self._l_keep_in_mem = []
        self._d_q2bin_mass  = { 'jpsi' : 'B_const_mass_M[0]' , 'psi2' : 'B_const_mass_psi2S_M[0]' }
        self._evt_branch    = 'eventNumber'
        self._mass_branch   = None
        self._tree_name     = 'KEE'
        self._l_save_branch = ['mass', 'true_mass', 'L1_BremMultiplicity', 'L2_BremMultiplicity']
        self._initialized   = False
    #-----------------------------------------------------------
    @property
    def max_evt(self):
        '''Maximum number of events to process; negative means no limit.'''
        return self._max_evt
    @max_evt.setter
    def max_evt(self, val):
        self._max_evt = val
    #-----------------------------------------------------------
    @property
    def rho(self):
        '''Smoothing parameter forwarded to RooKeysPdf.'''
        return self._rho
    @rho.setter
    def rho(self, val):
        self._rho = val
    #-----------------------------------------------------------
    @property
    def bkg_cat(self):
        '''Background-category cut, prepended to the selection when set.'''
        # BUG FIX: previously returned the undefined attribute
        # self._bkg_cat (AttributeError); the setter stores _bkg_cat_cut.
        return self._bkg_cat_cut
    @bkg_cat.setter
    def bkg_cat(self, val):
        self._bkg_cat_cut = val
    #-----------------------------------------------------------
    def _initialize(self):
        '''Validate the configuration once, on first use.'''
        if self._initialized:
            return
        if self._trig == 'GTIS_ee':
            self._trig = 'gtis_inclusive'
        utnr.check_included(self._proc , self._l_proc )
        utnr.check_included(self._vers , self._l_vers )
        utnr.check_included(self._dset , self._l_dset )
        utnr.check_included(self._trig , self._l_trig )
        utnr.check_included(self._sele , self._l_sele )
        utnr.check_included(self._q2bin, self._l_q2bin)
        self._initialized = True
    #-----------------------------------------------------------
    def _get_df(self, l_year):
        '''Build one filtered, augmented RDataFrame per year.'''
        dat_dir = os.environ['DATDIR']
        file_dir = f'{dat_dir}/{self._proc}/{self._vers}'
        l_df = []
        for year in l_year:
            file_path = f'{file_dir}/{year}.root'
            self.log.visible(f'Using: {file_path}')
            df = ROOT.RDataFrame(self._tree_name, file_path)
            if self._max_evt > 0:
                df = df.Range(self._max_evt)
            df = self._filter(df, year)
            df = self._add_columns(df)
            l_df.append(df)
        return l_df
    #-----------------------------------------------------------
    def _add_columns(self, df):
        '''Define the reco mass column and the true B mass (JITed C++).'''
        self._mass_branch = utnr.get_from_dic(self._d_q2bin_mass, self._q2bin)
        df = df.Define('mass', self._mass_branch)
        true_mass = '''
        ROOT::Math::LorentzVector<ROOT::Math::XYZTVector> v_h( H_TRUEP_X,  H_TRUEP_Y,  H_TRUEP_Z,  H_TRUEP_E);
        ROOT::Math::LorentzVector<ROOT::Math::XYZTVector> v_1(L1_TRUEP_X, L1_TRUEP_Y, L1_TRUEP_Z, L1_TRUEP_E);
        ROOT::Math::LorentzVector<ROOT::Math::XYZTVector> v_2(L2_TRUEP_X, L2_TRUEP_Y, L2_TRUEP_Z, L2_TRUEP_E);
        auto v_b = v_h + v_1 + v_2;
        return v_b.M();
        '''
        df = df.Define('true_mass', true_mass)
        return df
    #-----------------------------------------------------------
    def _get_years(self):
        '''Map (process, dataset) to the list of years with input files.'''
        if   self._proc in ['bpXcHs_ee', 'bdXcHs_ee'] and self._dset in [ 'r1', '2011']:
            l_year = []
        elif self._proc == 'bpXcHs_ee'                and self._dset in ['r2p1', '2015', '2016']:
            l_year = []
        #-----------------------------
        elif self._proc == 'psi2Kstr_ee'                and self._dset == 'r1':
            l_year = ['2011', '2012']
        elif self._proc in ['psi2Kstr_ee', 'bdXcHs_ee'] and self._dset == 'r2p1':
            l_year = ['2015', '2016']
        #-----------------------------
        elif self._dset in ['2011', '2012', '2015', '2016', '2017', '2018']:
            l_year = [self._dset]
        else:
            self.log.error(f'Cannot find list of year for process "{self._proc}" and dataset "{self._dset}"')
            raise
        self.log.info(f'Using years "{l_year}" for process "{self._proc}" and dataset "{self._dset}"')
        return l_year
    #-----------------------------------------------------------
    def _get_mass(self, cut):
        '''Extract the (min, max) mass window from the mass cut string.'''
        # Raw string: the pattern contains \(, \w, \s, \d which are invalid
        # escape sequences in a normal literal (SyntaxError in future Python
        # versions); the regex bytes themselves are unchanged.
        regex = r'\(B_[\w_\[\]]+\s+>\s(\d+)\)\s+&&\s+\(B_[\w_\[\]]+\s+<\s(\d+)\)'
        try:
            min_mass = utnr.get_regex_group(cut, regex, i_group=1)
            max_mass = utnr.get_regex_group(cut, regex, i_group=2)
            self._min_mass = int(min_mass)
            self._max_mass = int(max_mass)
        except:
            self.log.error(f'Cannot extract mass window from "{cut}"')
            raise
        self.log.visible(f'Extracted mass window ({self._min_mass}, {self._max_mass})')
    #-----------------------------------------------------------
    def _add_bkg_cat(self, d_cut):
        '''Prepend the background-category cut to the selection, if set.'''
        if self._bkg_cat_cut is None:
            return d_cut
        self.log.visible(f'Using background categories: {self._bkg_cat_cut}')
        d_out = {'bkg_cat' : self._bkg_cat_cut}
        d_out.update(d_cut)
        return d_out
    #-----------------------------------------------------------
    def _get_analysis_selection(self, year):
        '''Return the analysis selection; calibration triggers use ETOS
        as a placeholder, replaced later in _get_selection.'''
        self.log.visible('Applying cuts')
        if self._trig in self._l_trig_cal:
            #Dummy trigger, will be replaced later
            trig = 'ETOS'
        elif self._trig in self._l_trig_sel:
            trig = self._trig
        else:
            self.log.error(f'Trigger {self._trig} not valid')
            raise
        d_cut = rksl(self._sele, trig, year, self._proc, q2bin=self._q2bin)
        return d_cut
    #-----------------------------------------------------------
    def _get_selection(self, year):
        '''Return the full selection, swapping in the calibration trigger
        cut for the ETOS placeholder when needed.'''
        d_cut = self._get_analysis_selection(year)
        d_cut = self._add_bkg_cat(d_cut)
        if self._trig in self._l_trig_sel:
            return d_cut
        cut = rcal.get(self._trig, year)
        d_cut_final = dict()
        for key, val in d_cut.items():
            if key != 'ETOS':
                d_cut_final[key] = val
            else:
                d_cut_final[self._trig] = cut
        return d_cut_final
    #-----------------------------------------------------------
    def _filter(self, df, year):
        '''Apply the selection cut by cut and record the cutflow.'''
        d_cut = self._get_selection(year)
        l_cut = []
        for key, cut in d_cut.items():
            if key == 'mass':
                # Remember the mass window for the fit observable
                self._get_mass(cut)
            df = df.Filter(cut, key)
            l_cut.append(cut)
        cfl = self._get_cutflow(df, l_cut)
        self._save_cutflow(cfl, year)
        return df
    #-----------------------------------------------------------
    def _get_cutflow(self, df, l_cut):
        '''Build a cutflow object from the RDataFrame filter report.'''
        cfl = cutflow()
        rep = df.Report()
        for cut_info, cut_str in zip(rep, l_cut):
            key  = cut_info.GetName()
            ival = cut_info.GetAll()
            fval = cut_info.GetPass()
            cfl[key] = efficiency(fval, arg_tot = ival, cut = cut_str)
        return cfl
    #-----------------------------------------------------------
    def _save_cutflow(self, cfl, year):
        '''Save cutflow tables; no-op when no diagnostic directory is set.'''
        # NOTE(review): reads the public attribute self.diagnostic_dir while
        # __init__ only defines self._diagnostic_dir -- presumably callers
        # assign the public attribute directly; confirm against users.
        if self.diagnostic_dir is None:
            return
        dir_path = utnr.make_dir_path(self.diagnostic_dir)
        cfl_path = f'{dir_path}/cutflow.tex'
        self.log.visible(f'Saving to: {cfl_path}')
        cfl.df_eff.to_latex(buf=open(cfl_path, 'w'), index=False)
        cut_path = f'{dir_path}/cuts.csv'
        self.log.visible(f'Saving to: {cut_path}')
        cfl.df_cut.to_csv(cut_path, index=False)
    #-----------------------------------------------------------
    def _save_events(self, l_df):
        '''Dump {event number : mass} for all selected candidates.'''
        d_evt_mas = {}
        for df in l_df:
            d_data  = df.AsNumpy([self._evt_branch, 'mass'])
            arr_evt = d_data[self._evt_branch]
            arr_mas = d_data['mass']
            d_tmp   = dict(zip(arr_evt.tolist(), arr_mas.tolist()))
            d_evt_mas.update(d_tmp)
        dir_path = utnr.make_dir_path(self.diagnostic_dir)
        evt_path = f'{dir_path}/events.json'
        self.log.visible(f'Saving to: {evt_path}')
        utnr.dump_json(d_evt_mas, evt_path)
    #-----------------------------------------------------------
    def _get_tree(self, l_df):
        '''Snapshot every dataframe and chain the resulting trees.'''
        self.log.visible('Getting trees')
        chain = ROOT.TChain('tree')
        for df in l_df:
            itree, ifile = utils.get_tree_from_df(df, tree_name='tree', file_path=None, l_col=self._l_save_branch)
            file_path = ifile.GetName()
            ifile.Close()
            chain.AddFile(file_path)
        return chain
    #-----------------------------------------------------------
    def _split_by_brem(self, tree):
        '''Split the chained tree into nbrem == 0, == 1 and > 1 samples.'''
        df   = ROOT.RDataFrame(tree)
        df   = df.Define('nbrem', 'L1_BremMultiplicity + L2_BremMultiplicity')
        df_z = df.Filter('nbrem == 0')
        df_o = df.Filter('nbrem == 1')
        df_m = df.Filter('nbrem  > 1')
        tree_z, file_z = utils.get_tree_from_df(df_z, tree_name='tree', file_path=None, l_col=self._l_save_branch)
        tree_o, file_o = utils.get_tree_from_df(df_o, tree_name='tree', file_path=None, l_col=self._l_save_branch)
        tree_m, file_m = utils.get_tree_from_df(df_m, tree_name='tree', file_path=None, l_col=self._l_save_branch)
        # Keep the backing files alive, otherwise the trees become invalid
        self._l_keep_in_mem.append(file_z)
        self._l_keep_in_mem.append(file_o)
        self._l_keep_in_mem.append(file_m)
        return [tree_z, tree_o, tree_m]
    #-----------------------------------------------------------
    def _get_observable(self, kind):
        '''Return the RooRealVar for the reco or true B mass.'''
        if   kind == 'reco':
            obs_name = 'mass'
        elif kind == 'true':
            obs_name = 'true_mass'
        else:
            # BUG FIX: used bare `log.error`, an undefined module-level name,
            # so this error path raised NameError instead of logging.
            self.log.error(f'Invalid kind {kind}')
            raise
        obs = ROOT.RooRealVar(obs_name, self._mass_branch, self._min_mass, self._max_mass)
        return obs
    #-----------------------------------------------------------
    def _fit(self, l_tp_tree, kind=None):
        '''Fit a RooKeysPdf to each (data name, pdf name, tree) triplet and
        import both PDF and dataset into the workspace.'''
        self.log.visible('Fitting')
        obs = self._get_observable(kind)
        for dname, pname, tree in l_tp_tree:
            if kind == 'true':
                pname = f'{pname}_true'
                dname = f'{dname}_true'
            data = ROOT.RooDataSet(dname, '', ROOT.RooArgSet(obs), ROOT.RooFit.Import(tree))
            pdf  = ROOT.RooKeysPdf(pname, '', obs, data, ROOT.RooKeysPdf.MirrorBoth, self._rho)
            self._wks.Import(pdf)
            self._wks.Import(data)
    #-----------------------------------------------------------
    def _check_stats(self, tree, l_tree):
        '''Verify the brem split preserves the total number of entries.'''
        ntot = tree.GetEntries()
        nsum = 0
        for stree in l_tree:
            nsum += stree.GetEntries()
        if nsum != ntot:
            self.log.error(f'Sum of partial trees does not equal full tree: {nsum} != {ntot}')
            raise
    #-----------------------------------------------------------
    def get_wks(self):
        '''Run the full extraction and return the populated RooWorkspace.'''
        self._initialize()
        l_year = self._get_years()
        if l_year == []:
            self.log.warning(f'Cannot get model for dataset "{self._dset}", no corresponding files found, skipping')
            raise
        l_df = self._get_df(l_year)
        self._save_events(l_df)
        tree_a = self._get_tree(l_df)
        tree_z, tree_o, tree_m = self._split_by_brem(tree_a)
        self._check_stats(tree_a, [tree_z, tree_o, tree_m])
        # Inclusive sample plus the three brem categories
        l_tp_tree = [
            ('data'  , 'pdf'  , tree_a),
            ('data_z', 'pdf_z', tree_z),
            ('data_o', 'pdf_o', tree_o),
            ('data_m', 'pdf_m', tree_m),
        ]
        self._fit(l_tp_tree, kind='true')
        self._fit(l_tp_tree, kind='reco')
        self._tree = tree_a
        return self._wks
    #-----------------------------------------------------------
    def get_tree(self):
        '''Return the inclusive tree built by get_wks().'''
        if self._tree is None:
            self.log.error(f'Tree not found, get_wks() needs to be run first')
            raise
        return self._tree
#----------------------------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/pr_getter.py | 0.463201 | 0.153486 | pr_getter.py | pypi |
import utils_noroot as utnr
import matplotlib.pyplot as plt
import zutils.utils as zut
import ROOT
import zfit
import math
import tqdm
import numpy
import logging
import os
import utils
from zutils.plot import plot as zfplot
from data_splitter import splitter as dsplit
from fitter import zfitter
#----------------------------------------
class extractor:
    '''Fits a model in bins of several variables, for simulation ('ctrl')
    and data, and returns the per-bin yields as TH3F histograms.'''
    log=utnr.getLogger('extractor')
    #----------------------------------------
    @property
    def data(self):
        # (simulation RDataFrame, data RDataFrame)
        return self._rdf_mc, self._rdf_dt
    @data.setter
    def data(self, value):
        self._rdf_mc, self._rdf_dt = value
    #----------------------------------------
    @property
    def model(self):
        # List of zfit PDFs; index 0 is the signal component
        return self._l_pdf
    @model.setter
    def model(self, value):
        self._l_pdf = value
    #----------------------------------------
    @property
    def res_dir(self):
        # Output directory for results/plots; None disables saving
        return self._res_dir
    @res_dir.setter
    def res_dir(self, value):
        self._res_dir = value
    #----------------------------------------
    @property
    def binning(self):
        # {variable : bin edges}; three binning variables are expected
        return self._d_bin
    @binning.setter
    def binning(self, value):
        self._d_bin = value
    #----------------------------------------
    def __init__(self):
        self._rdf_mc  = None
        self._rdf_dt  = None
        self._res_dir = None
        self._d_bin   = None
        self._l_exp   = None
        self._l_var   = None
        self._l_pdf   = None
        # {dataset index : simulation fit result}, used to fix data fits
        self._d_res   = {}
        # Parameters left floating in the data fit
        self._l_float = ['mu', 'sg']
        self._mass_var = 'B_const_mass_M[0]'
        self._initialized = False
    #----------------------------------------
    def _initialize(self):
        if self._initialized:
            return
        if len(self._l_pdf) < 2:
            self.log.error(f'Found fewer than 2 PDFs:')
            print(self._l_pdf)
            raise
        # Silence verbose fitting/splitting loggers
        zfitter.log.setLevel(logging.WARNING)
        dsplit.log.setLevel(logging.WARNING)
        ROOT.lhcbStyle()
        self._initialized = True
    #----------------------------------------
    def _bound_filter_rdf(self, rdf):
        # Keep only entries inside the binning range of every axis.
        # NOTE(review): self._maxentries is never set in __init__ --
        # presumably assigned externally before this is called; confirm.
        if self._maxentries > 0:
            rdf = rdf.Range(self._maxentries)
        axis=0
        for var, arr in self._d_bin.items():
            min_var = min(arr)
            max_var = max(arr)
            cut = f'{min_var} < {var} && {var} < {max_var}'
            rdf = rdf.Filter(cut, f'{axis} bound')
            axis+=1
        return rdf
    #----------------------------------------
    def _get_pdf(self, kind):
        # Signal-only PDF for simulation, full sum for data
        if kind == 'ctrl':
            pdf = self._l_pdf[0]
        elif kind == 'data':
            pdf = zfit.pdf.SumPDF(self._l_pdf)
        else:
            self.log.error(f'Invalid PDF kind: {kind}')
            raise
        return pdf
    #----------------------------------------
    def _is_yield(self, pdf, par_name):
        # True if par_name is (a component of) the extended PDF's yield
        l_yld_nam = []
        par = pdf.get_yield()
        if isinstance(par, zfit.Parameter):
            l_yld_nam = [par.name]
        elif isinstance(par, zfit.ComposedParameter):
            l_yld_nam = [par.name for _, par in par.params.items()]
        else:
            self.log.error(f'PDF parameter is invalid:')
            print(par)
            raise
        if len(l_yld_nam) == 0:
            self.log.error(f'No yields found in PDF:')
            print(pdf)
            raise
        is_yield = par_name in l_yld_nam
        self.log.debug(f'{is_yield} = {par_name} in {l_yld_nam}')
        return is_yield
    #----------------------------------------
    def _fix_pars(self, pdf, i_df):
        # Fix shape parameters of the data PDF to the simulation fit result
        # for the same bin; yields and self._l_float remain floating.
        if i_df not in self._d_res:
            self.log.warning(f'Dataset {i_df:03} does not have simulation parameters to fix data fit')
            return pdf
        res = self._d_res[i_df]
        l_par = list(pdf.get_params(floating=True)) + list(pdf.get_params(floating=False))
        self.log.debug(f'Fixing parameeters')
        for par in l_par:
            if par.name not in res.params or par.name in self._l_float or self._is_yield(pdf, par.name):
                continue
            val = res.params[par.name]['value']
            par.assign(val)
            par.floating = False
            self.log.debug(f'{par.name:<20}{"->" :20}{val:>.3f}')
        return pdf
    #----------------------------------------
    def _get_bin_info(self, df, kind, i_df):
        # Fit one bin; returns [yield, error, <x>, <y>, <z>, fit result],
        # or None for an empty bin.
        arr = df['mass'].to_numpy()
        if len(arr) == 0:
            return None
        try:
            # NOTE(review): self._l_var is None after __init__; presumably
            # assigned externally -- confirm against callers.
            l_mean = [ df[var].mean() for var in self._l_var]
        except:
            self.log.error('Cannot extract mean list')
            print(df)
            print(self._l_var)
            raise
        self.log.debug(f'Fitting {i_df:03} dataset: {arr.shape}')
        pdf = self._get_pdf(kind)
        if kind == 'data':
            pdf = self._fix_pars(pdf, i_df)
        ftr = zfitter(pdf, arr)
        try:
            res = ftr.fit()
        except:
            # Fallback: use the raw dataset size with zero error
            self.log.warning(f'Fit failed, will assign yield as dataset size: {arr.size}')
            return [arr.size, 0] + l_mean + [None]
        with zfit.run.set_graph_mode(False):
            res.hesse(name='hesse_np')
        yld = res.params['nsg']['value']
        try:
            err = res.params['nsg']['hesse_np']['error']
        except:
            self.log.warning(f'Setting error 2 * sqrt(S), cannot recover hesse error:')
            err = 2 * math.sqrt(yld)
        self._plot_fit(pdf, arr, i_df, res, kind)
        self._save_res(pdf, i_df, res, kind)
        pdf.reset_cache_self()
        if kind != 'data':
            # Simulation: the dataset size itself is the "yield"
            return [arr.size, 0] + l_mean + [res]
        else:
            return [yld, err] + l_mean + [res]
    #----------------------------------------
    def _save_res(self, pdf, i_df, res, kind):
        # Persist the fit result (pickle) and the PDF parameters (LaTeX)
        pkl_dir  = utnr.make_dir_path(f'{self._res_dir}/pickle/{kind}')
        pkl_path = f'{pkl_dir}/result_{i_df:03}.pkl'
        res.freeze()
        utnr.dump_pickle(res, pkl_path)
        tex_dir  = utnr.make_dir_path(f'{self._res_dir}/latex/{kind}')
        tex_path = f'{tex_dir}/result_{i_df:03}.tex'
        zut.pdf_to_latex(pdf, tex_path)
    #----------------------------------------
    def _plot_his(self, his, kind):
        # Save the (x, y) projection of the 3D yield histogram
        if self._res_dir is None:
            return
        his_dir = utnr.make_dir_path(f'{self._res_dir}/plots/hist')
        his = his.Project3D('yx')
        can = ROOT.TCanvas(f'c_{kind}', '', 600, 400)
        his.Draw('colz')
        utils.Reformat2D(can)
        can.SaveAs(f'{his_dir}/his_{kind}.png')
    #----------------------------------------
    def _plot_fit(self, pdf, arr, index, res, kind):
        # Save the fit plot for one bin; failures are logged, not raised
        if self._res_dir is None:
            return
        fit_dir = utnr.make_dir_path(f'{self._res_dir}/plots/fits/{kind}')
        obj=zfplot(model=pdf, data=arr, result=res, suffix=f'{index}')
        plot_path = f'{fit_dir}/fit_{index:03}.png'
        try:
            obj.plot()
            plt.savefig(plot_path)
            plt.close('all')
        except:
            self.log.warning(f'Could not save {plot_path}')
    #----------------------------------------
    def _get_datasets(self, kind):
        # Split the RDataFrame into one pandas dataset per bin
        rdf = self._rdf_mc if kind == 'ctrl' else self._rdf_dt
        self.log.info(f'Splitting {rdf.Count().GetValue()} entries')
        obj = dsplit(rdf, self._d_bin, spectators=['mass'])
        obj.plot_dir = 'tests/fitwgt/simple/splitting/'
        l_df = obj.get_datasets()
        return l_df
    #----------------------------------------
    def _get_fit_info(self, kind):
        # Fit every bin, with a progress bar
        l_df   = self._get_datasets(kind)
        l_info = [ self._get_bin_info(df, kind, i_df) for i_df, df in enumerate(tqdm.tqdm(l_df, ascii=' -')) ]
        return l_info
    #----------------------------------------
    def _get_hist(self, kind):
        # Fill a TH3F with the fitted yield (content) and error per bin
        arr_x = numpy.array( list(self._d_bin.values())[0] ).astype(float)
        arr_y = numpy.array( list(self._d_bin.values())[1] ).astype(float)
        arr_z = numpy.array( list(self._d_bin.values())[2] ).astype(float)
        hist = ROOT.TH3F(f'h_{kind}', kind, arr_x.size - 1, arr_x, arr_y.size - 1, arr_y, arr_z.size - 1, arr_z)
        self.log.info(f'Bin contents for {kind}')
        l_info = self._get_fit_info(kind)
        for i_df, info in enumerate(l_info):
            if info is None:
                continue
            [yld, err, xm, ym, zm, res] = info
            if kind == 'ctrl' and res is not None:
                # Cache simulation results to constrain the later data fits
                self._d_res[i_df] = res
            i_bin = hist.FindBin(xm, ym, zm)
            hist.SetBinContent(i_bin, yld)
            hist.SetBinError  (i_bin, err)
            self.log.debug(f'{i_bin:<10}{yld:<20.0f}')
        self._plot_his(hist, kind)
        return hist
    #----------------------------------------
    def get_histograms(self, force_redo=False):
        '''Return (simulation, data) TH3F yield histograms.

        Parameters
        ----------
        force_redo : bool
            Currently unused.
        '''
        self._initialize()
        h_mc = self._get_hist('ctrl')
        h_dt = self._get_hist('data')
        return h_mc, h_dt
#---------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/fithst.py | 0.401923 | 0.189371 | fithst.py | pypi |
import math
import numpy
import utils_noroot as utnr
import numdifftools as ndt
from scipy.optimize import minimize
#---------------------------
class extractor:
    '''Extracts per-bin single-lepton resolutions from measured dielectron
    resolutions via a chi2 minimization over all (x, y) bin pairs.'''
    log = utnr.getLogger('reso_extractor')
    #---------------------------
    def __init__(self, data=None, method = None, bounds=None, init_x = None, nbins=10):
        '''
        Parameters
        ----------
        data   : dict
            {(x_bin, y_bin) : [resolution, error]} with 1-based bin indices.
        method : str or None
            scipy.optimize.minimize method; None uses scipy's default.
        bounds : list or None
            Per-parameter (low, high) bounds; defaults to non-negative.
        init_x : list or None
            Starting point of the minimization; defaults to the origin.
        nbins  : int
            Number of resolution parameters to extract (at least 2).
        '''
        self._d_data = data
        self._nbins  = nbins
        self._bounds = bounds
        self._method = method
        self._init_x = init_x
        self._initialized = False
    #---------------------------
    def _initialize(self):
        if self._initialized:
            return
        # Guard clause replaces the original inverted `if ok: pass / else`.
        if self._nbins < 2:
            self.log.error(f'Invalid value for number of bins: {self._nbins}')
            raise ValueError
        if self._bounds is None:
            self.log.info('Bounds not specified, using non-negative bounds')
            # One independent [0, None] per parameter: the original
            # `nbins * [[0, None]]` aliased a single inner list nbins times,
            # so mutating one bound would have mutated them all.
            self._bounds = [[0, None] for _ in range(self._nbins)]
        if self._method is None:
            self.log.info('Method not specified, using default')
        if self._init_x is None:
            self.log.info('Starting point not specified, using origin')
            self._init_x = self._nbins * [0]
        try:
            self._check_data()
        except:
            self.log.error(f'Invalid data found in dictionary:')
            self.log.info(self._d_data)
            raise
        self._initialized = True
    #---------------------------
    def _check_data(self):
        '''Validate bin indices, resolution values and errors of the input.'''
        for (x, y), [ree, eee] in self._d_data.items():
            if not isinstance( x, (float, int)):
                self.log.error(f'Invalid type for X coordinate: {x}')
                raise TypeError
            if not isinstance( y, (float, int)):
                self.log.error(f'Invalid type for Y coordinate: {y}')
                raise TypeError
            if not isinstance(ree, float):
                self.log.error(f'Invalid type for ee resolution: {ree}')
                raise TypeError
            if not isinstance(eee, float):
                self.log.error(f'Invalid type for ee resolution: {eee}')
                raise TypeError
            if ree <= 0:
                self.log.error(f'Invalid dielectron resolution value found: {ree}')
                raise ValueError
            if eee <= 0:
                self.log.error(f'Invalid dielectron resolution error found: {eee}')
                raise ValueError
    #---------------------------
    def _ee_reso(self, res_x, res_y):
        # Single-lepton resolutions add in quadrature
        return math.sqrt(res_x ** 2 + res_y ** 2)
    #---------------------------
    def _chi2(self, arr_res):
        '''Chi2 of predicted dielectron resolutions w.r.t. measurements.'''
        chi2 = 0
        for (x, y), [mes, err] in self._d_data.items():
            # Bin indices are 1-based, parameter array is 0-based
            r_x   = arr_res[x - 1]
            r_y   = arr_res[y - 1]
            pred  = self._ee_reso(r_x, r_y)
            chi2 += (mes - pred) ** 2 / err ** 2
        return chi2
    #---------------------------
    def _get_errors(self, arr_val):
        # For a chi2, covariance = 2 * H^-1; errors are sqrt of the diagonal
        h   = ndt.Hessian(self._chi2)
        hes = h(arr_val)
        cov = 2 * numpy.linalg.inv(hes)
        dia = numpy.diag(cov)
        err = numpy.sqrt(dia)
        return err
    #---------------------------
    def calculate(self):
        '''Minimize the chi2; return an (nbins, 2) array of (value, error).'''
        self._initialize()
        self.log.info(f'Minimizing')
        res = minimize(self._chi2, self._init_x, method=self._method, bounds=self._bounds)
        arr_val = res.x
        self.log.info(f'Calculating errors')
        arr_err = self._get_errors(arr_val)
        arr_res = numpy.array([arr_val, arr_err]).T
        return arr_res
#--------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/reso_extractor.py | 0.508544 | 0.213644 | reso_extractor.py | pypi |
import ROOT
import os
import math
import numpy as np
import utils
import utils_noroot as utnr
from rk.oscillator import oscillator as osc
#-------------------------------------------
class reader:
    '''Reads tracking-efficiency ratio maps and returns per-lepton weights.

    The map category follows the dataframe's trigger: electron maps
    ('etra') are binned in (phi, eta, pT), muon maps ('mtra') in (p, eta).
    '''
    log=utnr.getLogger(__name__)
    def __init__(self):
        self.d_map   = {}
        self.d_bound = {}
        # Histogram name inside the map file, per category
        self.map_histname = { 'etra' : 'heffratio',
                              'mtra' : 'Ratio'}
        self.epsilon  = 1e-8
        self.map_hist = None
        self._maps    = None
    #-------------------------------------------
    def setMapPath(self, mapdir):
        '''Set the directory holding the efficiency-ratio map files.'''
        if not os.path.isdir(mapdir):
            self.log.error(f'Cannot find {mapdir}')
            raise
        self.mapdir = mapdir
    #-------------------------------------------
    def _setHist(self):
        # Lazily load the ratio histogram for the current category/year
        if self.map_hist is not None:
            return
        self._maps = f"{self.mapdir}/{self.cate}_{self.year:.0f}.root"
        self.log.info(f'Using map in: {self._maps}')
        try:
            temFile = ROOT.TFile(self._maps)
            self.map_hist = temFile.Get(self.map_histname[self.cate])
            # Detach from the file so the histogram survives file closure
            self.map_hist.SetDirectory(0)
        except:
            self.log.error(f'Cannot open: {self._maps}')
            raise
    #-------------------------------------------
    @property
    def maps(self):
        '''Path of the map file in use (None until a map is loaded).'''
        return self._maps
    #-------------------------------------------
    def _adjust_point(self, phi, eta, pt, p):
        '''Clamp (phi, eta, pt, p) into the validity range of the maps.'''
        # BUG FIX: the original condition read
        #     phi < -pi or phi > pi and self.cate == 'etra'
        # which, because `and` binds tighter than `or`, rejected phi < -pi
        # for *every* category; parenthesized so the check applies to
        # 'etra' only, matching the category-specific checks below.
        if (phi < - math.pi or phi > math.pi) and self.cate == 'etra':
            self.log.error(f'Invalid value of phi: {phi:.3f}')
            raise
        if eta < 1.9:
            self.log.debug(f'Eta: {eta:.3f} ---> {1.9:.3f}')
            eta = 1.9 + 0.1
        elif eta > 4.5 and self.cate == 'etra':
            self.log.debug(f'Eta_e: {eta:.3f} ---> {4.5:.3f}')
            eta = 4.5 - 0.1
        elif eta > 4.9 and self.cate == 'mtra':
            self.log.debug(f'Eta_m: {eta:.3f} ---> {4.9:.3f}')
            eta = 4.9 - 0.1
        if pt < 150 and self.cate == 'etra':
            self.log.debug(f'Pt_e: {pt:.0f} ---> {150:.0f}')
            pt = 150 + 1
        elif pt > 50000 and self.cate == 'etra':
            self.log.debug(f'Pt_e: {pt:.0f} ---> {50000:.0f}')
            pt = 50000 - 1
        if p < 5000 and self.cate == 'mtra':
            self.log.debug(f'P_m: {p:.0f} ---> {5000:.0f}')
            p = 5000 + 1
        elif p > 200000 and self.cate == 'mtra' and not (self.year == 2011 or self.year == 2012):
            self.log.debug(f'P_m: {p:.0f} ---> {200000:.0f}')
            p = 200000 - 1
        elif p > 201000 and self.cate == 'mtra' and (self.year == 2011 or self.year == 2012):
            self.log.debug(f'P_m: {p:.0f} ---> {201000:.0f}')
            p = 201000 - 1
        if (self.year == 2011 or self.year == 2012) and self.cate == 'mtra':
            p = p/1000
            # Unit of p in 2011 and 2012 maps are GeV, and it is MeV in other maps.
        return (phi, eta, pt, p)
    #-------------------------------------------
    def __getTargetWeight(self, prefix):
        # Look up the per-candidate map weight for lepton `prefix` (L1/L2)
        wgt = []
        arr_points = utils.getMatrix(self.df, [ f'{prefix}_Phi', f'{prefix}_ETA', f'{prefix}_PT', f'{prefix}_P', "yearLabbel"])
        self.year = arr_points[0][4]
        self._setHist()
        # Oscillate the map within its uncertainties (systematics)
        tem_osc_obj = osc()
        self.map_hist = tem_osc_obj.get_oscillated_map(self._maps, self.map_hist)
        for [phi, eta, pt, p, yr] in arr_points:
            phi, eta, pt, p = self._adjust_point(phi, eta, pt, p)
            if self.cate == 'etra':
                var_array = (phi, eta, pt)
            elif self.cate == 'mtra':
                var_array = (p, eta)
            curr_globalbin = self.map_hist.FindBin(*var_array)
            curr_wgt       = self.map_hist.GetBinContent(curr_globalbin)
            if curr_wgt <= 0:
                # Non-positive weight: log the offending kinematic point
                if self.cate == 'etra':
                    self.log.warning(f'{curr_wgt:<20.3f}{pt:<20.3f}{eta:<20.3f}{phi:<20.3f}')
                elif self.cate == 'mtra':
                    self.log.warning(f'{curr_wgt:<20.3f}{p:<20.3f}{eta:<20.3f}')
            wgt.append(curr_wgt)
        return np.array(wgt)
    #-------------------------------------------
    def _add_var(self, df, var, lep):
        # Define a missing kinematic column for lepton `lep`
        if var == 'Phi':
            df = df.Define(f'{lep}_{var}', f'TVector3 v({lep}_PX, {lep}_PY, {lep}_PZ); return v.Phi();')
        else:
            self.log.error(f'Cannot define {var} for {lep}')
            raise
        return df
    #-------------------------------------------
    def _preprocess_df(self, df):
        # Ensure the phi columns needed by the electron maps exist
        l_col = df.GetColumnNames()
        if 'L1_Phi' not in l_col:
            df = self._add_var(df, 'Phi', 'L1')
        if 'L2_Phi' not in l_col:
            df = self._add_var(df, 'Phi', 'L2')
        return df
    #-------------------------------------------
    def _get_cate(self, df):
        # Map the dataframe's trigger to the map category
        if not hasattr(df, 'treename'):
            self.log.error(f'DataFrame does not contain "treename" attribute')
            raise
        if df.treename in ['ETOS', 'GTIS']:
            cate = 'etra'
        elif df.treename == 'MTOS':
            cate = 'mtra'
        else:
            self.log.error(f'Invalid treename {df.treename}')
            raise
        return cate
    #-------------------------------------------
    def getWeight(self, df):
        '''Return (weight_L1, weight_L2) numpy arrays for dataframe `df`.'''
        self.cate = self._get_cate(df)
        self.df   = self._preprocess_df(df)
        wgt1 = self.__getTargetWeight("L1")
        wgt2 = self.__getTargetWeight("L2")
        return wgt1, wgt2
#------------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/trackreader.py | 0.403567 | 0.1941 | trackreader.py | pypi |
from collections import UserDict
from rk.differential_efficiency import defficiency
from rk.efficiency import efficiency
from ndict import ndict
import utils_noroot as utnr
import pandas as pnd
#-----------------------------------------
class cutflow(UserDict):
    '''Ordered mapping of cut label -> (differential) efficiency.

    Alongside the per-cut efficiencies it tracks their running product
    (the full efficiency) and exposes tabulated pandas summaries.
    '''
    log=utnr.getLogger('cutflow')
    #-------------------------------
    def __init__(self):
        self._tot_eff = 1.        # numerical product of all efficiencies
        self._ful_eff = None      # efficiency object, product of all
        self._df_stat = None      # per-cut statistics table
        self._df_cuts = None      # per-cut cut-string table
        self._initialized = False
        super().__init__()
    #-------------------------------
    def __setitem__(self, cut, obj):
        '''Store an efficiency under `cut` and fold it into the product.

        Raises
        ------
        TypeError
            If `obj` is not an efficiency or differential efficiency.
        '''
        if not isinstance(obj, (efficiency, defficiency)):
            # BUG FIX: the message referenced the undefined name `eff`, so
            # this error path raised NameError; also replaced the bare
            # `raise` (RuntimeError: no active exception) with TypeError.
            self.log.error(f'Value has to be efficiency or differential efficiency, found: {type(obj)}')
            raise TypeError(f'Value has to be efficiency or differential efficiency, found: {type(obj)}')
        self.data[cut] = obj
        if self._ful_eff is None:
            self._ful_eff = obj.copy()
        else:
            self._ful_eff = self._ful_eff * obj
    #-------------------------------
    def _initialize(self):
        # Build the statistics and cut tables; runs only once.
        if self._initialized:
            return
        d_cuts = {}
        d_stat = {}
        for label, obj in self.data.items():
            # Differential efficiencies are reduced to plain ones first
            if isinstance(obj, defficiency):
                eff = obj.efficiency()
            else:
                eff = obj
            eff_val, _, _ = eff.val
            self._tot_eff*= eff_val
            d_cuts[label] = eff.cut
            utnr.add_to_dic_lst(d_stat, 'Total'     , eff.fal + eff.pas)
            utnr.add_to_dic_lst(d_stat, 'Pased'     , eff.pas)
            utnr.add_to_dic_lst(d_stat, 'Efficiency', eff_val)
            utnr.add_to_dic_lst(d_stat, 'Cumulative', self._tot_eff)
            utnr.add_to_dic_lst(d_stat, 'Cut'       , label)
        self._df_stat=pnd.DataFrame(d_stat, columns=['Cut', 'Total', 'Pased', 'Efficiency', 'Cumulative'])
        self._df_stat=self._df_stat.set_index('Cut')
        self._df_cuts=pnd.DataFrame(d_cuts, index=['Cut'])
        self._df_cuts=self._df_cuts.T
        self._initialized = True
    #-------------------------------
    @property
    def df_eff(self):
        '''Pandas table with per-cut statistics and cumulative efficiency.'''
        self._initialize()
        return self._df_stat
    #-------------------------------
    @property
    def df_cut(self):
        '''Pandas table with the cut string associated to each label.'''
        self._initialize()
        return self._df_cuts
    #-------------------------------
    @property
    def tot_eff(self):
        '''
        Returns numerical value of total efficiency
        '''
        self._initialize()
        return self._tot_eff
    #-------------------------------
    @property
    def efficiency(self):
        '''
        Returns efficiency object, product of all efficiencies
        '''
        self._initialize()
        return self._ful_eff
    #-------------------------------
    def __str__(self):
        self._initialize()
        msg= f'_____\n{"Kind":<20}{"Passed":>10} [{"Entries":>10}] / {"Total":>10} [{"Entries":>10}] = {"Eff":<9} | {"Cut":<40}{"Label":<20}\n \n'
        for kind, obj in self.items():
            if isinstance(obj, defficiency):
                eff = obj.efficiency()
            else:
                eff = obj
            eff_str = eff.__str__()
            msg += f'{kind:<20}{eff_str:<50}\n'
        msg += '-----\n'
        return msg
    #-------------------------------
    def __add__(self, other):
        '''Cut-by-cut sum of two cutflows with identical cut labels.'''
        self._initialize()
        if self.keys() != other.keys():
            self.log.error(f'Cannot add cutflows with different cuts:')
            print(self.df_eff)
            print(other.df_eff)
            # NOTE(review): bare raise here produces
            # RuntimeError('No active exception to re-raise')
            raise
        res_cfl = cutflow()
        for key in other:
            other_eff = other[key]
            this_eff  = self[key]
            eff = other_eff + this_eff
            res_cfl[key] = eff
        return res_cfl
#-----------------------------------------
class cutflow_manager():
    '''
    Class used to build cutflow objects. It takes care of switching between efficiencies, depending on the systematics
    '''
    log=utnr.getLogger('cutflow_manager')
    #----------------------------------
    def __init__(self):
        self._d_d_eff   = {}      # {cut : {sys : eff}} or {cut : ndict}
        self._s_sys     = set()   # union of all systematic labels seen
        self._l_cut     = []      # cut labels, in insertion order
        self._has_dif   = False   # True once a differential ndict was added
        self._s_dif_var = None    # variables of the differential efficiency
    #----------------------------------
    def _check_nominal(self, d_eff, kind):
        '''
        Check if dictionary contains nominal efficiency
        '''
        if isinstance(d_eff, dict) and 'nom' not in d_eff:
            self.log.error(f'Nominal efficiency not found for: {kind}')
            print(d_eff.keys())
            raise
        elif isinstance(d_eff, ndict) and not d_eff.has_val('nom', axis='x'):
            self.log.error(f'Nominal efficiency not found for: {kind}')
            print(d_eff)
            raise
    #----------------------------------
    def __setitem__(self, cut, d_eff):
        # Register the {systematic : efficiency} mapping for one cut.
        # At most one differential (ndict) entry is allowed overall.
        self._check_nominal(d_eff, cut)
        self._check_sys_lab(d_eff, cut)
        if cut in self._l_cut:
            self.log.error(f'Kind {cut} already added')
            raise
        else:
            self._l_cut.append(cut)
        if isinstance(d_eff, ndict) and not self._has_dif:
            self._has_dif   = True
            self._s_dif_var = d_eff.y_axis
            self._s_sys     = d_eff.x_axis.union(self._s_sys)
        elif isinstance(d_eff, dict):
            self._s_sys= set(d_eff.keys()).union(self._s_sys)
        elif isinstance(d_eff, ndict) and self._has_dif:
            self.log.error(f'Cannot pass multiple differential efficiencies')
            raise
        else:
            self.log.error(f'Argument is neither dict nor ndict, but: {type(d_eff)}')
            raise
        self._d_d_eff[cut] = d_eff
    #----------------------------------
    def _pad_eff_int(self, d_eff):
        '''
        Takes {sys:eff}, pads with nominal missing sistematics
        '''
        eff_nom = d_eff['nom']
        for sys in self._s_sys:
            if sys in d_eff:
                continue
            d_eff[sys] = eff_nom.copy(label=sys)
        return d_eff
    #----------------------------------
    def _pad_eff_dif(self, d_eff):
        # Same padding for differential efficiencies, per variable
        for var in d_eff.y_axis:
            nom_eff = d_eff['nom', var]
            for sys in self._s_sys:
                if (sys, var) not in d_eff:
                    d_eff[sys, var] = nom_eff.copy(label=sys, varname=var)
        return d_eff
    #----------------------------------
    def _pad_all(self):
        '''
        Will pad with nominal (cut, syst) locations for systematics that do not make sense for given cut.
        '''
        d_d_eff = {}
        for cut, d_eff in self._d_d_eff.items():
            if   isinstance(d_eff, dict):
                d_d_eff[cut] = self._pad_eff_int(d_eff)
            elif isinstance(d_eff, ndict):
                d_d_eff[cut] = self._pad_eff_dif(d_eff)
            else:
                self.log.error(f'Object is not a dict or ndict, but: {type(d_eff)}')
                raise
        return d_d_eff
    #----------------------------------
    def _check_sys_lab(self, d_eff, cut):
        # Ensure each efficiency's label matches its systematic key
        for key, eff in d_eff.items():
            try:
                # ndict keys are (sys, var) tuples; dict keys are plain sys
                sys, var = key
            except:
                sys = key
            if sys != eff.label:
                self.log.error(f'For cut {cut} systematic and efficiency label dissagree: {sys}/{eff.label}')
                print(eff)
                raise
    #----------------------------------
    def _get_cf_int(self, sys, d_d_eff_pad):
        '''
        Takes sys string and {cut : {sys : eff...}...} and for given systematic returns cutflow object
        '''
        cf = cutflow()
        for cut in self._l_cut:
            d_eff   = d_d_eff_pad[cut]
            eff     = d_eff[sys]
            cf[cut] = eff
        return cf
    #----------------------------------
    def _get_cf_dif(self, sys, var, d_d_eff_pad):
        '''
        Takes sys, var strings and {cut : {sys[,var] : [d]eff...}...},
        i.e. inner dict (with sys -> eff) or ndict (with sys, var -> deff)
        Returns cutflow for given sys, var combination.
        '''
        cf = cutflow()
        for cut in self._l_cut:
            d_eff = d_d_eff_pad[cut]
            if   isinstance(d_eff , dict):
                eff = d_eff[sys]
            elif isinstance(d_eff, ndict):
                eff = d_eff[sys, var]
            cf[cut] = eff
        return cf
    #----------------------------------
    def get_cf(self):
        '''
        Returns either {sys : cutflow} dict or {sys, var : cutflow} ndict
        Latter is returned if one of the efficiencies is differential
        '''
        d_d_eff_pad = self._pad_all()
        d_cf = ndict() if self._has_dif else {}
        self.log.info('Creating cutflows:')
        for sys in self._s_sys:
            self.log.info(sys)
            if not self._has_dif:
                d_cf[sys] = self._get_cf_int(sys, d_d_eff_pad)
            else:
                for var in self._s_dif_var:
                    d_cf[sys, var] = self._get_cf_dif(sys, var, d_d_eff_pad)
        return d_cf
#---------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/cutflow.py | 0.581541 | 0.150684 | cutflow.py | pypi |
from logzero import logger as log
from zutils.plot import plot as zfp
from fitter import zfitter
import zfit
import math
import pandas as pnd
import utils_noroot as utnr
import matplotlib.pyplot as plt
#---------------------------
class calculator:
    '''
    Toy-based breakdown of the uncertainty on the parameter of interest (POI)
    of an extended zfit PDF: data is sampled from the PDF and refitted while
    either fixing each nuisance parameter in turn or applying Gaussian
    constraints, and the effect on the POI is tabulated.
    '''
    def __init__(self, pdf, poi_name = 'n_sig'):
        # pdf      : extended zfit PDF, used both to sample toys and to fit them
        # poi_name : name of the yield parameter whose uncertainty is studied
        self._pdf = pdf
        self._poi_name = poi_name
        self._d_par = {}
        self._d_par_val = {}
        self._d_const = {}
        self._d_const_fit = {}
        self._dat = None
        self._plot_dir = None
        self._initialized = False
    #------------------------
    def _initialize(self):
        '''Sample toy data and collect parameters/constraints; idempotent.'''
        if self._initialized:
            return
        if not self._pdf.is_extended:
            log.error(f'PDF introduced is not extended')
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError; an explicit exception type would be clearer.
            raise
        self._d_par_val = self._store_pars()
        self._dat = self._pdf.create_sampler()
        self._d_par = self._get_nuisance_pars()
        self._d_const_fit = self._get_constraints()
        self._check_poi()
        self._initialized = True
    #------------------------
    @property
    def plot_dir(self):
        '''Directory where fit and uncertainty plots are written (created on set).'''
        return self._plot_dir
    @plot_dir.setter
    def plot_dir(self, plot_dir):
        self._plot_dir = utnr.make_dir_path(plot_dir)
    #------------------------
    def _get_constraints(self):
        '''
        Turn the {name : width} constraints into {name : (mean, width)} using
        the stored parameter values. Returns None when no constraint was
        given; constraints with infinite width are skipped.
        '''
        if len(self._d_const) == 0:
            log.info('Not using constraints')
            return
        d_const_fit = {}
        log.info('All constraints:')
        for name, par_sg in self._d_const.items():
            # Lookup doubles as a check that the constrained name is a known
            # nuisance parameter (raises KeyError otherwise).
            par = self._d_par[name]
            if math.isinf(par_sg):
                continue
            par_mu = self._d_par_val[name]
            d_const_fit[name] = (par_mu, par_sg)
            log.info(f'{"":<4}{name:<20}{par_mu:<10.3e}{par_sg:<10.3e}')
        return d_const_fit
    #------------------------
    def __setitem__(self, name, value):
        '''
        Register a Gaussian constraint width for floating parameter `name`.
        Raises ValueError for unknown names or non-positive/non-float widths.
        '''
        s_par = self._pdf.get_params(floating=True)
        l_par_name = [ par.name for par in s_par]
        if name not in l_par_name:
            log.error(f'Parameter {name} cannot be found among:')
            log.error(l_par_name)
            raise ValueError
        if not isinstance(value, float):
            log.error(f'Constraint for {name} is not a float: {value}')
            raise ValueError
        if value < 0:
            log.error(f'Constraint for {name} is not positive: {value}')
            raise ValueError
        self._d_const[name] = value
    #------------------------
    def _store_pars(self):
        '''Snapshot {name : value} for all shape and yield parameters.'''
        s_par_shp = self._pdf.get_params(is_yield=False)
        s_par_yld = self._pdf.get_params(is_yield=True )
        d_par = {}
        for par in list(s_par_shp) + list(s_par_yld):
            name = par.name
            val = par.value().numpy()
            d_par[name] = val
        return d_par
    #------------------------
    def _check_poi(self):
        '''Ensure exactly one yield parameter matches the POI name.'''
        s_par = self._pdf.get_params(is_yield=True)
        g_par = filter(lambda par : par.name == self._poi_name, s_par)
        try:
            [poi] = list(g_par)
        except:
            log.error(f'Cannot extract POI={self._poi_name} from model')
            raise
    #------------------------
    def _fit(self, fix_par_name=None, d_const={}):
        '''
        Fit the toy data (optionally with constraints) and return the POI
        (value, error); (0, 0) when the fit fails or the POI cannot be read.
        NOTE(review): `d_const={}` is a mutable default argument; it is never
        mutated here, but a None sentinel would be safer.
        '''
        obj = zfitter(self._pdf, self._dat)
        res = obj.fit(d_const = d_const)
        if res.status != 0:
            log.warning(f'Fit failed')
            return 0, 0
        res.hesse()
        res.freeze()
        self._plot_fit(res, fix_par_name)
        try:
            val = res.params[self._poi_name]['value']
            err = res.params[self._poi_name]['hesse']['error']
        except:
            log.warning(f'Cannot extract value and/or error of {self._poi_name} from:')
            print(res.params[self._poi_name])
            return 0, 0
        return val, err
    #------------------------
    def _plot_fit(self, res, par_name):
        '''Save the fit plot for this fixed-parameter configuration.'''
        obj = zfp(data=self._dat, model=self._pdf, result=res)
        obj.plot(ext_text=f'Fixing: {par_name}')
        plot_path = f'{self._plot_dir}/{par_name}.png'
        log.info(f'Saving to: {plot_path}')
        plt.savefig(plot_path)
        plt.close('all')
    #------------------------
    def _get_nuisance_pars(self):
        '''Floating shape parameters, excluding yields (names starting "n_").'''
        s_par = self._pdf.get_params(is_yield=False, floating=True)
        s_par = filter(lambda par: not par.name.startswith('n_'), s_par)
        log.debug('Nuisance parameters:')
        d_par = {}
        for par in s_par:
            log.debug(f'{"":<4}{par.name:<20}')
            d_par[par.name] = par
        return d_par
    #------------------------
    def _reset_pars(self, fix_par=None):
        '''
        1. Float all parameters.
        2. Set values to model values.
        3. Fix `fix_par`, if not None.
        '''
        for par in self._d_par.values():
            par.floating = True
            val=self._d_par_val[par.name]
            par.set_value(val)
        if fix_par is None:
            log.info('All parameters floating')
            return
        log.info(f'Fixing {fix_par.name}')
        for par in self._d_par.values():
            if par.name != fix_par.name:
                continue
            par.floating = False
    #------------------------
    def _plot_unc(self, df):
        '''Save the per-parameter relative-uncertainty plot, if requested.'''
        if self._plot_dir is None:
            return
        ax=df.plot(x='Parameter', y='Uncertainty', legend=None)
        ax.set_ylim(bottom=0)
        ax.set_ylabel(r'$100 \cdot \varepsilon(POI)/POI$')
        plt.grid()
        plt.tight_layout()
        plt.savefig(f'{self._plot_dir}/uncertainty.png')
        plt.close('all')
    #------------------------
    def _fill_df_fix_par(self, df):
        '''
        One fit per nuisance parameter (plus one with everything floating),
        fixing that parameter; rows hold the POI error/value per configuration.
        '''
        poi_ini = self._d_par_val[self._poi_name]
        l_par = list(self._d_par.values())
        for par in [None] + l_par:
            self._reset_pars(par)
            name = 'none' if par is None else par.name
            poi_fit, err_fit = self._fit(fix_par_name=name)
            # Insert at index -1 then shift: keeps insertion order reversible.
            df.loc[-1] = [name, err_fit, poi_ini, poi_fit]
            df.index = df.index + 1
            df = df.sort_index()
        df['Uncertainty'] = 100 * df.Error / df.Model
        df['Bias'] = (df.Fit - df.Model) / df.Error
        df = df.sort_values(by='Uncertainty', ascending=False)
        df = df.reset_index(drop=True)
        return df
    #------------------------
    def _fill_df_const(self, df):
        '''
        Cumulative-constraint scan: each iteration adds one more Gaussian
        constraint and refits; rows hold the POI error/value per step.
        '''
        poi_ini = self._d_par_val[self._poi_name]
        d_const = {}
        l_par_name = [None] + list(self._d_const_fit.keys())
        for par_name in l_par_name:
            if par_name is None:
                par_name = 'none'
            else:
                const= self._d_const_fit[par_name]
                d_const[par_name] = const
            self._reset_pars()
            poi_fit, err_fit = self._fit(fix_par_name=par_name, d_const=d_const)
            df.loc[-1] = [par_name, err_fit, poi_ini, poi_fit]
            df.index = df.index + 1
            df = df.sort_index()
        df['Uncertainty'] = 100 * df.Error / df.Model
        df['Bias'] = (df.Fit - df.Model) / df.Error
        df = df.reindex(index=df.index[::-1])
        return df
    #------------------------
    def get_df(self):
        '''
        Run the study and return a DataFrame with columns Parameter, Error,
        Model, Fit, Uncertainty, Bias — one row per fixed parameter (no
        constraints registered) or per added constraint (otherwise).
        '''
        self._initialize()
        df = pnd.DataFrame(columns=['Parameter', 'Error', 'Model', 'Fit'])
        if len(self._d_const) == 0:
            df = self._fill_df_fix_par(df)
        else:
            df = self._fill_df_const(df)
        self._plot_unc(df)
        return df
#--------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/model_uncertainty.py | 0.57344 | 0.224842 | model_uncertainty.py | pypi |
import numpy
import logging
import pandas as pnd
import utils_noroot as utnr
import matplotlib.pyplot as plt
from rk.eff_yld_loader import eff_yld_loader as eyl
from stats.covariance import covariance
#------------------------------
class calculator:
    '''
    Build covariance matrices for the rare-mode double ratio across
    (trigger, year) bins, propagating either bootstrap ('bts') or systematic
    ('sys') efficiency variations through yields and efficiencies.
    '''
    log=utnr.getLogger(__name__)
    #---------------
    def __init__(self, eff_version=None, yld_version=None, unc=None, proc='psi2', years=None):
        # eff_version / yld_version : versions of efficiencies and yields to load
        # unc   : uncertainty type, one of self._l_unc
        # proc  : rare-mode process tag, e.g. 'psi2'
        # years : list of years to process (validated in _get_trig_year)
        self._eff_version = eff_version
        self._yld_version = yld_version
        self._unc = unc
        self._proc = proc
        self._l_year = years
        self._tool_level = logging.WARNING
        self._l_unc = ['bts', 'sys', 'osc']
        self._l_good_year = ['2011', '2012', '2015', '2016', '2017', '2018', 'r1', 'r2p1']
        self._nboost = 200
        self._initialized = False
        self._d_kind_quant= None
        self._l_trig_year = None
        self._weights = None
        # NOTE(review): _arr_re_nom/_arr_ce_nom are declared but never used,
        # while _initialize assigns _arr_rx_nom/_arr_cx_nom that are not
        # declared here — TODO confirm which names are intended.
        self._arr_mu_nom = None
        self._arr_re_nom = None
        self._arr_ce_nom = None
        self._plot_dir = None
        self._l_column = None
        self._d_cov = None
        self._d_yld = {}
        self._d_d_eff = {}
        self._df_ce_ee = None
        self._df_re_ee = None
        self._df_ce_mm = None
        self._df_re_mm = None
        self._df_cx = None
        self._df_rx = None
        self._df_mu = None
    #---------------
    @property
    def plot_dir(self):
        '''Directory for validation plots and LaTeX tables (created on set).'''
        return self._plot_dir
    @plot_dir.setter
    def plot_dir(self, plot_dir):
        self._plot_dir = utnr.make_dir_path(plot_dir)
    #---------------
    def _get_kind_quant(self):
        '''Map quantity keys to the LaTeX labels used on plot axes.'''
        d_quant = {}
        d_quant['r_jpsi'] = '$r_{J/\psi}$'
        d_quant['r_rare'] = '$r_{rare}$'
        d_quant['mu'] = '$R_{rare}$'
        d_quant['c_eff_ee'] = '$\\varepsilon(J/\psi \\to ee)$'
        d_quant['c_eff_mm'] = '$\\varepsilon(J/\psi \\to \mu\mu)$'
        d_quant['r_eff_ee'] = '$\\varepsilon(rare \\to ee)$'
        d_quant['r_eff_mm'] = '$\\varepsilon(rare \\to \mu\mu))$'
        return d_quant
    #---------------
    def _initialize(self):
        '''Prepare column lists/dataframes and fill the nominal row; idempotent.'''
        if self._initialized:
            return
        utnr.check_included(self._unc, self._l_unc)
        self._d_kind_quant = self._get_kind_quant()
        self._l_trig_year = self._get_trig_year()
        # Weight string selects which variations are activated downstream:
        # bootstrap-only for 'bts', all systematics for 'sys'.
        if self._unc == 'bts':
            self._weights = 'pnom_tnom_gnom_lnom_hnom_rnom_qnom_ball'
        elif self._unc == 'sys':
            self._weights = 'pall_tall_gall_lall_hall_rall_qall_bnom'
        else:
            self.log.error(f'Not supported uncertainty {self._unc}')
            # NOTE(review): bare `raise` with no active exception raises
            # RuntimeError; an explicit exception type would be clearer.
            raise
        eyl.log.setLevel(self._tool_level)
        self._l_column = [ f'{trig} {year}' for trig, year in self._l_trig_year]
        self._df_ce_ee = pnd.DataFrame(columns=self._l_column)
        self._df_re_ee = pnd.DataFrame(columns=self._l_column)
        self._df_ce_mm = pnd.DataFrame(columns=self._l_column)
        self._df_re_mm = pnd.DataFrame(columns=self._l_column)
        self._df_cx = pnd.DataFrame(columns=self._l_column)
        self._df_rx = pnd.DataFrame(columns=self._l_column)
        self._df_mu = pnd.DataFrame(columns=self._l_column)
        self._df_ce_ee.style.set_caption('Efficiency electron jpsi')
        self._df_re_ee.style.set_caption('Efficiency electron psi2')
        self._df_ce_mm.style.set_caption('Efficiency muon jpsi')
        self._df_re_mm.style.set_caption('Efficiency muon psi2')
        self._df_cx.style.set_caption('r_jpsi')
        self._df_rx.style.set_caption('r_rare')
        self._df_mu.style.set_caption('Double ratio of corrected yields')
        # First row is always the nominal configuration.
        self._fill_df('nom')
        self._arr_rx_nom = self._df_rx.loc['nom'].to_numpy()
        self._arr_cx_nom = self._df_cx.loc['nom'].to_numpy()
        self._arr_mu_nom = self._df_mu.loc['nom'].to_numpy()
        self._initialized = True
    #---------------
    def _get_trig_year(self):
        '''Validate the years and expand them into (trigger, year) pairs.'''
        l_trig_year = []
        utnr.check_none(self._l_year)
        for year in self._l_year:
            if year not in self._l_good_year:
                self.log.error(f'Invalid year introduced: {year}')
                raise
            l_trig_year += [('TOS', year), ('TIS', year)]
        return l_trig_year
    #---------------
    def _get_data(self, proc, trig, year, syst):
        '''
        Return (yield, efficiency) for one process/trigger/year and one
        systematic; loads and caches yields/efficiencies on first access.
        '''
        key = f'{proc}_{trig}_{year}'
        if key not in self._d_yld:
            self.log.info(f'Loading {key}')
            obj = eyl(proc, trig, year, self._weights)
            yld, d_eff = obj.get_values(eff_version = self._eff_version, yld_version=self._yld_version)
            self._d_yld[key] = yld
            self._d_d_eff[key] = d_eff
        d_eff = self._d_d_eff[key]
        yld = self._d_yld[key]
        #If systematic does not make sense (e.g. electron systematic applied to muon)
        #use nominal value
        if syst not in d_eff:
            eff = d_eff['nom']
        else:
            eff = d_eff[syst]
        return (yld, eff)
    #---------------
    def _get_syst(self, syst, trig, year):
        '''
        Compute, for one systematic and (trigger, year): the single ratios
        r_jpsi and r_rare, the double ratio mu, and the four efficiencies.
        '''
        # Electron triggers map TOS->ETOS, TIS->GTIS; muons always use MTOS.
        trig = 'ETOS' if trig == 'TOS' else 'GTIS'
        c_yld_ee, c_eff_ee = self._get_data( 'ctrl_ee', trig, year, syst)
        c_yld_mm, c_eff_mm = self._get_data( 'ctrl_mm', 'MTOS', year, syst)
        r_yld_ee, r_eff_ee = self._get_data(f'{self._proc}_ee', trig, year, syst)
        r_yld_mm, r_eff_mm = self._get_data(f'{self._proc}_mm', 'MTOS', year, syst)
        c_yld_ee_val, _ = c_yld_ee
        c_yld_mm_val, _ = c_yld_mm
        r_yld_ee_val, _ = r_yld_ee
        r_yld_mm_val, _ = r_yld_mm
        c_yld_rat = c_yld_mm_val / c_yld_ee_val
        r_yld_rat = r_yld_mm_val / r_yld_ee_val
        c_eff_rat = c_eff_mm / c_eff_ee
        r_eff_rat = r_eff_mm / r_eff_ee
        r_jpsi = c_yld_rat / c_eff_rat
        r_rare = r_yld_rat / r_eff_rat
        mu = r_rare / r_jpsi
        d_data = {}
        d_data['c_eff_ee'] = c_eff_ee.val[0]
        d_data['r_eff_ee'] = r_eff_ee.val[0]
        d_data['c_eff_mm'] = c_eff_mm.val[0]
        d_data['r_eff_mm'] = r_eff_mm.val[0]
        d_data['r_jpsi' ] = r_jpsi
        d_data['r_rare' ] = r_rare
        d_data['mu' ] = mu
        return d_data
    #---------------
    def _fill_df(self, syst):
        '''
        Append one row (labelled by the systematic, minus any '.suffix') to
        every per-quantity dataframe, one column per (trigger, year).
        Returns the row label.
        '''
        l_ce_ee = []
        l_re_ee = []
        l_ce_mm = []
        l_re_mm = []
        l_cx = []
        l_rx = []
        l_mu = []
        for trig, year in self._l_trig_year:
            d_data = self._get_syst(syst, trig, year)
            ce_ee = d_data['c_eff_ee']
            re_ee = d_data['r_eff_ee']
            ce_mm = d_data['c_eff_mm']
            re_mm = d_data['r_eff_mm']
            l_ce_ee.append(ce_ee)
            l_re_ee.append(re_ee)
            l_ce_mm.append(ce_mm)
            l_re_mm.append(re_mm)
            rx = d_data['r_rare' ]
            cx = d_data['r_jpsi' ]
            mu = d_data['mu' ]
            l_rx.append(rx)
            l_cx.append(cx)
            l_mu.append(mu)
        label = syst.split('.')[0]
        self._df_ce_ee = utnr.add_row_to_df(self._df_ce_ee, l_ce_ee, index=label)
        self._df_re_ee = utnr.add_row_to_df(self._df_re_ee, l_re_ee, index=label)
        self._df_ce_mm = utnr.add_row_to_df(self._df_ce_mm, l_ce_mm, index=label)
        self._df_re_mm = utnr.add_row_to_df(self._df_re_mm, l_re_mm, index=label)
        self._df_cx = utnr.add_row_to_df(self._df_cx, l_cx, index=label)
        self._df_rx = utnr.add_row_to_df(self._df_rx, l_rx, index=label)
        self._df_mu = utnr.add_row_to_df(self._df_mu, l_mu, index=label)
        return label
    #---------------
    def _get_cov(self, label, l_syst):
        '''
        Fill rows for each systematic in `l_syst` and return the covariance
        of the variations against the nominal.
        '''
        l_arr_mu_syst = []
        for syst in l_syst:
            index = self._fill_df(syst)
            # NOTE(review): this reads the r_jpsi dataframe (_df_cx) although
            # the variables are named *_mu_* and the nominal passed below is
            # _arr_mu_nom — possibly _df_mu was intended; TODO confirm.
            arr_mu_syst = self._df_cx.loc[index].to_numpy()
            l_arr_mu_syst.append(arr_mu_syst)
        mat_mu_syst = numpy.array(l_arr_mu_syst)
        obj = covariance(mat_mu_syst.T, self._arr_mu_nom)
        cov = obj.get_cov()
        return cov
    #---------------
    def _plot_df(self, df, column, kind):
        '''
        Plot one (trigger, year) column of a per-quantity dataframe across
        systematics, with a secondary axis in percent bias w.r.t. nominal.
        '''
        if self._plot_dir is None:
            return
        nom_val = df.iloc[0][column]
        nrm_col = f'{column} nrm'
        df[nrm_col]= 100 * (df[column] - nom_val) / nom_val
        df=df.drop('nom')
        fig, ax = plt.subplots(figsize=(10,4))
        ax.axhline(y=nom_val, color='red')
        arr_val = df[ column].values
        arr_nrm = df[nrm_col].values
        # Bootstrap variations are many: draw points instead of a line.
        st = '-' if self._unc != 'bts' else '.'
        ax.plot(arr_val, linestyle=st)
        l_loc, l_lab = utnr.get_axis(df, 'index')
        if self._unc != 'bts':
            plt.xticks(l_loc, l_lab, rotation=80)
        # Invisible twin plot: only used to obtain the right-hand [%] axis.
        ex=ax.twinx()
        ex.plot(arr_nrm, alpha=0, color='red')
        ax.legend(['Nominal', 'Systematic'])
        ax.grid()
        plt.title(column)
        quant = self._d_kind_quant[kind]
        ax.set_ylabel(quant)
        ex.set_ylabel('Bias [%]')
        fig.tight_layout()
        syst_dir = utnr.make_dir_path(f'{self._plot_dir}/syst_{kind}')
        plot_name= column.replace(' ', '_') + '.png'
        plot_path= f'{syst_dir}/{plot_name}'
        self.log.visible(f'Saving to: {plot_path}')
        fig.savefig(plot_path)
        plt.close('all')
    #---------------
    def _get_all_cov(self):
        '''Covariance per group of variations, keyed by group label.'''
        d_cov = {}
        if self._unc == 'bts':
            d_cov['bts'] = self._get_cov('bts' , [f'bts_{num}' for num in range(1, self._nboost)] )
        elif self._unc == 'sys':
            d_cov['gen'] = self._get_cov('gen' , ['gen_GTIS_mm', 'gen_npv', 'gen_nsp', 'gen_ntk'])
            d_cov['rec_to'] = self._get_cov('rec_to', ['rec_GTIS_ee'])
            d_cov['rec_ti'] = self._get_cov('rec_ti', ['rec_ETOS'])
            d_cov['rec_mu'] = self._get_cov('rec_mu', ['rec_GTIS_mm'])
            d_cov['lzr_mu'] = self._get_cov('lzr_mu', [ 'lzr_L0MuonHAD', 'lzr_L0MuonMU1'])
            d_cov['lzr_el'] = self._get_cov('lzr_el', ['lzr_L0ElectronFAC', 'lzr_L0ElectronHAD'])
            d_cov['lzr_ts'] = self._get_cov('lzr_ts', ['lzr_L0TIS_MMMH.L0HadronElEL.L0ElectronTIS', 'lzr_L0TIS_EMBN.L0HadronElEL.L0ElectronTIS'])
            d_cov['pid_kp_el'] = self._get_cov('pid_kp', ['pid_kp_el_bin1', 'pid_kp_el_bin2', 'pid_kp_el_bin3', 'pid_kp_el_bin4', 'pid_kp_el_tis'])
            d_cov['pid_kp_mu'] = self._get_cov('pid_kp', ['pid_kp_mu_bin1', 'pid_kp_mu_bin2', 'pid_kp_mu_bin3', 'pid_kp_mu_bin4', 'pid_kp_mu_tis'])
            d_cov['pid_el' ] = self._get_cov('pid_el', ['pid_el_bin1', 'pid_el_tis'])
            d_cov['pid_mu' ] = self._get_cov('pid_mu', ['pid_mu_bin1', 'pid_mu_bin2', 'pid_mu_bin3', 'pid_mu_bin4', 'pid_mu_tis'])
            d_cov['qsq' ] = self._get_cov('qsq' , ['qsq_lsh', 'qsq_mom', 'qsq_trg'])
        else:
            self.log.error(f'Invalid uncertainty type: {self._unc}')
            raise
        return d_cov
    #---------------
    def _plot_all_df(self):
        '''Validation plots for every quantity and every (trigger, year) column.'''
        for column in self._l_column:
            self._plot_df(self._df_ce_ee, column, 'c_eff_ee')
            self._plot_df(self._df_re_ee, column, 'r_eff_ee')
            self._plot_df(self._df_ce_mm, column, 'c_eff_mm')
            self._plot_df(self._df_re_mm, column, 'r_eff_mm')
            self._plot_df(self._df_cx, column, 'r_jpsi')
            self._plot_df(self._df_rx, column, 'r_rare')
            self._plot_df(self._df_mu, column, 'mu')
    #---------------
    def _get_rel_cov(self, cv_ij):
        '''Divide each covariance element by the product of nominal values.'''
        mu_ij = numpy.outer(self._arr_mu_nom, self._arr_mu_nom)
        un_ij = cv_ij / mu_ij
        return un_ij
    #---------------
    def _save_df(self):
        '''Dump the ratio dataframes as LaTeX tables.'''
        self._save_table(self._df_cx, 'r_jpsi')
        self._save_table(self._df_rx, 'r_rare')
        self._save_table(self._df_mu, 'mu')
    #---------------
    def _save_table(self, df, label):
        '''Write one dataframe to {plot_dir}/tables/{label}.tex.'''
        table_dir = utnr.make_dir_path(f'{self._plot_dir}/tables')
        table_path = f'{table_dir}/{label}.tex'
        utnr.df_to_tex(df, table_path)
    #---------------
    def cov(self, relative=False):
        '''
        Return {group : covariance matrix}; `relative=True` divides the
        elements by the outer product of the nominal double ratios.
        Matrices are computed once and cached.
        '''
        self._initialize()
        if self._d_cov is None:
            self._d_cov = self._get_all_cov()
            self._plot_all_df()
            self._save_df()
        if relative:
            d_cov = {syst : self._get_rel_cov(cov) for syst, cov in self._d_cov.items() }
        else:
            d_cov = self._d_cov
        return d_cov
#------------------------------ | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/ckcov.py | 0.534612 | 0.198025 | ckcov.py | pypi |
import utils_noroot as utnr
import utils
import read_selection as rs
import logging
from rk.selection import selection as rksl
#--------------------------------------------------
class selection:
    """
    Apply the analysis selection for a given sample/trigger/year/kind to a
    ROOT dataframe, optionally keeping only a fraction of the events.
    """
    log=utnr.getLogger('selection')
    #--------------------------------------------------
    def __init__(self, df, sample=None, trigger=None, year=None, kind=None, q2bin=None):
        '''
        Parameters
        ----------
        df      : ROOT dataframe to filter.
        sample  : dataset tag, one of the data/ctrl/psi2 x ee/mm values (or 'test').
        trigger : trigger category, MTOS/ETOS/GTIS (or 'test').
        year    : data-taking year.
        kind    : selection version, e.g. 'all_gorder'.
        q2bin   : q2 bin forwarded to the selection, e.g. 'jpsi' or 'psi2'.
        '''
        self.__l_sample = ['data_ee', 'data_mm', 'ctrl_ee', 'ctrl_mm', 'psi2_ee', 'psi2_mm', 'test']
        self.__l_trigger= ['MTOS', 'ETOS', 'GTIS', 'test']
        self.__l_year = ['2011', '2012', '2015', '2016', '2017', '2018']
        self.__l_kind = ['all_gorder', 'final_nobdt_gorder', 'calibration', 'test', 'loose_000']
        self.__l_q2bin = ['jpsi', 'psi2']
        self.__df = df
        self.__sample = sample
        self.__trigger = trigger
        self.__year = year
        self.__kind = kind
        self.__q2bin = q2bin
        self.__check_args()
    #--------------------------------------------------
    def __check_args(self):
        '''Validate constructor arguments against the supported values.'''
        utnr.check_included(self.__sample , self.__l_sample )
        utnr.check_included(self.__trigger, self.__l_trigger)
        utnr.check_included(self.__kind , self.__l_kind )
        utnr.check_included(self.__year , self.__l_year )
        # NOTE(review): q2bin is not validated against self.__l_q2bin; adding
        # that check could break callers that pass q2bin=None, so it is only
        # flagged here — TODO confirm intended behaviour.
    #--------------------------------------------------
    def get_df(self, fraction):
        '''
        Return the dataframe after applying the selection cuts.

        Parameters
        ----------
        fraction : float in (0, 1]; fraction of the input events to use.

        Raises
        ------
        ValueError for any other value of `fraction`.
        '''
        d_cut = rksl(self.__kind, self.__trigger, self.__year, self.__sample, q2bin=self.__q2bin)
        if 0 < fraction < 1:
            self.log.info(f'Using {fraction} fraction of dataset')
            df = utils.filter_df(self.__df, fraction)
        elif fraction == 1:
            df = self.__df
        else:
            self.log.error(f'Invalid value of fraction: {fraction}')
            # Fix: was a bare `raise` with no active exception, which raised an
            # unhelpful RuntimeError instead of reporting the bad input.
            raise ValueError(f'Invalid value of fraction: {fraction}')
        for key, cut in d_cut.items():
            self.log.debug(f'{"Adding":<10}{"--->":<10}{key:<20}')
            df = df.Filter(cut, key)
        self.log.debug('Applying selection')
        # Print the cut-by-cut report only when debug/info logging is enabled.
        if self.log.level < logging.WARNING:
            rep = df.Report()
            rep.Print()
        return df
#--------------------------------------------------
def apply_selection(df, sample=None, trigger=None, year=None, kind=None, q2bin=None, fraction=1):
    '''
    Convenience wrapper: build a `selection` with the given settings and
    return the dataframe filtered down to `fraction` of the input events.
    '''
    utnr.check_numeric(fraction)
    sel = selection(df, sample=sample, trigger=trigger, year=year, kind=kind, q2bin=q2bin)
    return sel.get_df(fraction)
#-------------------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/dfselect.py | 0.531696 | 0.16248 | dfselect.py | pypi |
import logging
import math
import re

import utils_noroot as utnr
#--------------------------------------
class boundaries:
    '''
    Rectangular n-dimensional region: a tuple of (min, max) pairs, one per
    coordinate. Accepts the tuple itself or its string form, e.g.
    '(1.0, 2.0), (3.0, inf)'. Instances are hashable, orderable and provide a
    filesystem-friendly identifier, so they can be used as dictionary keys.
    '''
    # Only plain .error() is used here, so the stdlib logger replaces the
    # project utils_noroot logger and keeps the class importable on its own.
    log = logging.getLogger('boundaries')
    #--------------------------
    def __init__(self, tp):
        '''
        Parameters
        ----------
        tp : tuple of (min, max) numeric pairs, or a string representation of
             such a tuple. Parsing/validation is done lazily.
        '''
        self._bounds = tp
        # Fix: raw string — the old plain string contained invalid escape
        # sequences (\d, \s, \.), a SyntaxWarning on modern Python.
        # The character class matches digits, '.', '-' and the letters of
        # 'inf' so infinities round-trip through the string form.
        self._regex = r'([inf\d\.-]+),\s+([inf\d\.-]+)'
        self._identifier = None
        self._sbounds = None
        self._initialized = False
    #--------------------------
    @property
    def data(self):
        '''Validated tuple of (min, max) pairs.'''
        self._initialize()
        return self._bounds
    #--------------------------
    @property
    def identifier(self):
        '''Underscore-joined label, e.g. "1_2_3_4", safe for file names.'''
        self._initialize()
        return self._identifier
    #--------------------------
    @property
    def sbounds(self):
        '''Human-readable label, e.g. "[1, 2], [3, 4]".'''
        self._initialize()
        return self._sbounds
    #--------------------------
    def _initialize(self):
        '''Parse (if needed) and validate the bounds; idempotent.'''
        if self._initialized:
            return
        if isinstance(self._bounds, str):
            self._str_to_tuple()
        self._check_tuple(self._bounds)
        for elm in self._bounds:
            self._check_tuple(elm)
            try:
                minv, maxv = elm
            # Fix: narrowed from a bare `except:`; only a wrong-size unpack
            # (ValueError) can happen here since elm is already a tuple.
            except ValueError:
                self.log.error(f'Coordinate boundaries is not a tuple of two elements: {elm}')
                raise
            self._check_numeric(minv)
            self._check_numeric(maxv)
        self._identifier = self._get_identifier()
        self._sbounds = self._get_sbounds()
        self._initialized = True
    #--------------------------
    def _get_identifier(self):
        '''Join bounds as "min_max" pieces, mapping '.' to 'p' for file safety.'''
        name = '_'.join(f'{minv:.0f}_{maxv:.0f}' for minv, maxv in self._bounds)
        return name.replace('.', 'p')
    #--------------------------
    def _get_sbounds(self):
        '''Join bounds as "[min, max]" pieces.'''
        return ', '.join(f'[{minv:.0f}, {maxv:.0f}]' for minv, maxv in self._bounds)
    #--------------------------
    def _str_to_tuple(self):
        '''Parse the string form into a tuple of (min, max) float pairs.'''
        l_group = re.findall(self._regex, self._bounds)
        if l_group == []:
            self.log.error(f'Cannot find any bound in {self._bounds} using {self._regex}')
            raise ValueError
        self._bounds = tuple( (self._cast_to_num(minv), self._cast_to_num(maxv)) for minv, maxv in l_group )
    #--------------------------
    def has_inf(self):
        '''True if any coordinate bound is plus or minus infinity.'''
        self._initialize()
        return any(math.isinf(minv) or math.isinf(maxv) for minv, maxv in self._bounds)
    #--------------------------
    def _cast_to_num(self, val):
        '''Map 'inf'/'-inf' to float infinities, everything else through float().'''
        if val == '-inf':
            return -math.inf
        if val == 'inf':
            return +math.inf
        return float(val)
    #--------------------------
    def _check_tuple(self, obj):
        '''Raise TypeError unless obj is a tuple.'''
        if not isinstance(obj, tuple):
            self.log.error(f'Object is not a tuple: {obj}/{type(obj)}')
            raise TypeError
    #--------------------------
    def _check_numeric(self, val):
        '''Raise ValueError unless val is an int or float.'''
        if not isinstance(val, (int, float)):
            self.log.error(f'Value not an int or float: {val}')
            raise ValueError
    #--------------------------
    def __str__(self):
        self._initialize()
        val = ''
        for axis, (minv, maxv) in enumerate(self._bounds, start=1):
            val += f'x{axis} in [{minv:<10.3f}, {maxv:<10.3f}]\n'
        return val
    #--------------------------
    def __lt__(self, other):
        '''Order by the reversed bounds tuple, i.e. last coordinate first.'''
        self._initialize()
        other._initialize()
        return tuple(reversed(self._bounds)) < tuple(reversed(other._bounds))
    #--------------------------
    def __eq__(self, other):
        # Fix: comparing against a non-boundaries object used to raise
        # AttributeError; NotImplemented lets Python fall back sensibly.
        if not isinstance(other, boundaries):
            return NotImplemented
        return self.data == other.data
    #--------------------------
    def __hash__(self):
        return hash(self.data)
#-------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/boundaries.py | 0.618896 | 0.167457 | boundaries.py | pypi |
import numpy
import re
import math
import logging
import pandas as pnd
import utils_noroot as utnr
import matplotlib.pyplot as plt
import zutils.utils as zut
from fitter import zfitter
from rk.boundaries import boundaries
from data_splitter import splitter as dsplit
from zutils.plot import plot as zfp
from zutils.pdf import SUJohnson as zpdf_jh
import zfit
#----------------------------
class calculator:
    '''
    Measure the dilepton (J/psi -> ee) mass resolution in bins of the two
    lepton momenta, either from the RMS of the mass distribution (MC) or from
    fits of a signal(+background) model (data).
    '''
    log = utnr.getLogger('lep_reso')
    #----------------------------------
    def __init__(self, data=None, binning=None, fit=False, d_par=None, signal='dscb', l_ibin=None):
        '''
        Parameters
        ----------
        data    : ROOT dataframe with the dielectron candidates.
        binning : (L1_P, L2_P) binning passed to the data splitter.
        fit     : if True, also fit the mass distribution in every bin.
        d_par   : parameters used to fix PDF shapes when fitting data;
                  empty by default.
        signal  : signal PDF name: 'dscb', 'cb', 'gauss' or 'johnson'.
        l_ibin  : optional list of bin indices to fit; all bins by default.
        '''
        # Fix: d_par/l_ibin previously used mutable default arguments ({} and
        # []), which are shared between instances; use None sentinels instead.
        self._rdf = data
        self._binning = binning
        self._jpsi_mass = 3097
        self._nsam_var = 10000
        self._fit = fit
        self._d_par = {} if d_par is None else d_par
        self._d_dat_res = {}
        self._signal = signal
        self._l_ibin = [] if l_ibin is None else l_ibin
        self._plot_dir = None
        self._suffix = None
        self._is_mc = None
        self._obs = zfit.Space('mass', limits=(2450, 3600))
        self._mu = zfit.Parameter('mu', 3000, 3000, 3100)
        self._sg = zfit.Parameter('sg', 40, 10, 120)
        self._sig_pdf = None
        self._bkg_pdf = None
        self._initialized = False
    #----------------------------------
    @property
    def plot_dir(self):
        '''Directory where distributions and fit plots are written.'''
        return self._plot_dir
    @plot_dir.setter
    def plot_dir(self, value):
        self._plot_dir = utnr.make_dir_path(value)
    #----------------------------------
    def _initialize(self):
        '''Detect data/MC and normalize settings; idempotent.'''
        if self._initialized:
            return
        self._cast_ibin()
        zfit.settings.changed_warnings.hesse_name = False
        # MC is identified by the presence of the truth-matching branch.
        v_name = self._rdf.GetColumnNames()
        l_name = [ name.c_str() for name in v_name ]
        self._is_mc = 'Jpsi_TRUEID' in l_name
        self._initialized = True
    #----------------------------------
    def _cast_ibin(self):
        '''Cast the requested bin indices to int, failing loudly otherwise.'''
        try:
            self._l_ibin = [ int(ibin) for ibin in self._l_ibin ]
        # Fix: narrowed from a bare `except:`; int() raises these two.
        except (TypeError, ValueError):
            self.log.error(f'Cannnot transform index of bins to index of int:')
            self.log.error(self._l_ibin)
            raise
        if len(self._l_ibin) > 0:
            self.log.info(f'Fitting only bins: {self._l_ibin}')
    #----------------------------------
    def _get_data(self):
        '''
        Return a pandas DataFrame with columns mass, p1, p2, brem1, brem2.
        MC is truth matched first; data has the brem flags cast to int.
        '''
        rdf = self._rdf
        if self._is_mc:
            self.log.info(f'Found MC, truth matching')
            rdf = rdf.Filter('TMath::Abs(L1_TRUEID) == 11')
            rdf = rdf.Filter('TMath::Abs(L2_TRUEID) == 11')
            rdf = rdf.Filter('TMath::Abs(Jpsi_TRUEID) == 443')
        else:
            self.log.info(f'Found data, not truth matching')
            rdf = rdf.Redefine('L1_HasBremAdded', 'int(L1_HasBremAdded)')
            rdf = rdf.Redefine('L2_HasBremAdded', 'int(L2_HasBremAdded)')
        d_data = rdf.AsNumpy(['Jpsi_M', 'L1_P', 'L2_P', 'L1_HasBremAdded', 'L2_HasBremAdded'])
        df = pnd.DataFrame(d_data)
        df.columns= ['mass', 'p1', 'p2', 'brem1', 'brem2']
        df = df.reset_index(drop=True)
        return df
    #----------------------------------
    def _get_bin_resolution(self, df, bound):
        '''
        RMS of (mass - mean) in one momentum bin, after iterative outlier
        removal. Returns None for a missing dataset; NaN RMS when empty.
        '''
        if df is None:
            return None
        size = df.shape[0]
        mass=df.mass.to_numpy()
        arr_dmass = mass - numpy.mean(mass)
        i_size = arr_dmass.size
        l_dmass = utnr.remove_outliers(arr_dmass, l_zscore=[4, 4, 3])
        arr_dmass = numpy.array(l_dmass)
        f_size = arr_dmass.size
        if size > 0:
            # NOTE(review): the sum runs over the outlier-filtered array but is
            # divided by the pre-filter size — TODO confirm this is intended.
            rms2 = numpy.sum( arr_dmass ** 2 ) / size
        else:
            rms2 = math.nan
        rms = math.sqrt(rms2)
        bnd = boundaries(bound)
        self.log.info(f'{size:<20}{bnd.sbounds:<40}{rms:<20.0f}{i_size:<20}{f_size:<20}')
        if self._plot_dir is not None:
            self._plot_dist(arr_dmass, bnd.sbounds, rms)
        return rms
    #----------------------------------
    def _plot_dist(self, arr_dmass, sbound, sg):
        '''Plot the centred mass distribution of one bin with +-1 sigma lines.'''
        if arr_dmass.size == 0:
            return
        mu = numpy.mean(arr_dmass)
        # Fix: raw strings — the LaTeX labels contained invalid escape
        # sequences (\p, \m, \s, \i); the rendered text is unchanged.
        plt.hist(arr_dmass, range=(mu-4*sg, mu+4*sg), alpha=0.7, bins=30, label=r'$m(e,e) - m_{J/\psi}$')
        plt.axvline(x=mu - sg, color='red', label=r'$\mu-\sigma$', linestyle='--')
        plt.axvline(x=mu + sg, color='red', label=r'$\mu+\sigma$', linestyle='--')
        plt.legend()
        bnd=boundaries(sbound)
        label=bnd.identifier
        sbnds=bnd.sbounds
        plt.title(rf'$(p_1, p_2)\in${sbnds}')
        label=re.sub('_+', '_', label)
        plot_dir = utnr.make_dir_path(f'{self._plot_dir}/{self._suffix}/dist')
        plt.savefig(f'{plot_dir}/{label}.png')
        plt.close('all')
    #----------------------------------
    def _get_sig_pdf(self):
        '''Build (once) and return the signal PDF; extended with a yield for data.'''
        if self._sig_pdf is not None:
            return self._sig_pdf
        if self._signal == 'gauss':
            pdf = self._get_gauss_pdf()
        elif self._signal == 'dscb':
            pdf = self._get_dscb_pdf()
        elif self._signal == 'cb':
            pdf = self._get_cb_pdf()
        elif self._signal == 'johnson':
            pdf = self._get_johnson_pdf()
        else:
            self.log.error(f'Invalid signal PDF: {self._signal}')
            # Fix: was a bare `raise` with no active exception.
            raise ValueError(f'Invalid signal PDF: {self._signal}')
        # Fix: was `self._rdf.is_mc` — the dataframe does not carry that flag;
        # use the flag derived from the column names in _initialize, which is
        # what every other method of this class checks.
        if self._is_mc:
            self._sig_pdf = pdf
            return self._sig_pdf
        nsg = zfit.Parameter('nsg', 100, 0.0, 200000)
        self._sig_pdf = pdf.create_extended(nsg)
        return self._sig_pdf
    #----------------------------------
    def _get_cb_pdf(self):
        '''Sum of a left- and a right-tailed Crystal Ball sharing mu/sigma.'''
        al = zfit.Parameter('al', 1, 0.1, 5.0)
        ar = zfit.Parameter('ar',-1,-5.0, -0.1)
        nl = zfit.Parameter('nl', 1, 0.1, 8.0)
        nr = zfit.Parameter('nr', 1, 0.1, 10.0)
        fr = zfit.Parameter('fr', 1, 0.0, 1.0)
        pdf_1 = zfit.pdf.CrystalBall(obs=self._obs, mu=self._mu, sigma=self._sg, alpha=al, n=nl)
        pdf_2 = zfit.pdf.CrystalBall(obs=self._obs, mu=self._mu, sigma=self._sg, alpha=ar, n=nr)
        pdf = zfit.pdf.SumPDF([pdf_1, pdf_2], fr)
        return pdf
    #----------------------------------
    def _get_johnson_pdf(self):
        '''Johnson SU PDF with the shared mu/lambda parameters.'''
        gm = zfit.Parameter("gm", 1, 0.1, 10)
        dl = zfit.Parameter("dl", 1, 0.1, 10)
        pdf= zpdf_jh(obs=self._obs, mu=self._mu, lm=self._sg, gamma=gm, delta=dl)
        return pdf
    #----------------------------------
    def _get_dscb_pdf(self):
        '''Double-sided Crystal Ball with the shared mu/sigma parameters.'''
        al = zfit.Parameter('al', 0.6, 0.1, 5.0)
        ar = zfit.Parameter('ar',-0.2,-5.0, -0.1)
        nl = zfit.Parameter('nl', 5.0, 0.1, 8.0)
        nr = zfit.Parameter('nr', 2.0, 0.1, 10.0)
        pdf = zfit.pdf.DoubleCB(obs=self._obs, mu=self._mu, sigma=self._sg, alphal=al, nl=nl, alphar=ar, nr=nr)
        return pdf
    #----------------------------------
    def _get_gauss_pdf(self):
        '''Gaussian with the shared mu/sigma parameters.'''
        pdf = zfit.pdf.Gauss(obs=self._obs, mu=self._mu, sigma=self._sg)
        return pdf
    #----------------------------------
    def _get_bkg_pdf(self):
        '''Build (once) and return the extended exponential background PDF.'''
        if self._bkg_pdf is not None:
            return self._bkg_pdf
        lam = zfit.Parameter('lam', -0.001, -0.1, 0.0)
        bkg = zfit.pdf.Exponential(lam=lam, obs=self._obs, name='Combinatorial')
        nbk = zfit.Parameter('nbk', 100, 0.0, 200000)
        self._bkg_pdf = bkg.create_extended(nbk)
        return self._bkg_pdf
    #----------------------------------
    def _get_tot_pdf(self):
        '''Signal + background model used to fit data.'''
        sig = self._get_sig_pdf()
        bkg = self._get_bkg_pdf()
        pdf = zfit.pdf.SumPDF([sig, bkg])
        return pdf
    #----------------------------------
    def _fit_df(self, df, bound):
        '''
        Fit the mass distribution of one momentum bin. Returns
        {parameter : [value, error]}, or {} when the bin is skipped (infinite
        bounds, empty data). For data, also samples the fitted signal PDF to
        store the per-bin resolution in self._d_dat_res.
        '''
        bnd = boundaries(bound)
        if bnd.has_inf():
            self.log.debug(f'Skipping fit in {bound}')
            return {}
        if self._is_mc:
            pdf = self._get_sig_pdf()
        else:
            pdf = self._get_tot_pdf()
        dat = df['mass'].to_numpy()
        if dat.size == 0:
            self.log.info(f'Skipping empty data dataset for: {bound}')
            if not self._is_mc:
                self._d_dat_res[bound] = {}
            return {}
        self.log.info(f'Fitting {bound}')
        if not self._is_mc and len(self._d_par) > 0:
            # assumes self._d_par is keyed by boundaries objects — TODO confirm
            zut.fix_pars(pdf, self._d_par[bnd])
        obj = zfitter(pdf, dat)
        if self._is_mc:
            res = obj.fit(ntries=20, pval_threshold=0.04)
        else:
            res = obj.fit()
        if res.status != 0:
            self.log.error(f'Finished with status: {res.status}')
            print(res)
        d_par = self._get_pars(res)
        # df.ibin is presumably attached by the data splitter — TODO confirm.
        text = f'Bin:{df.ibin}\n{bnd.sbounds}'
        self._plot_fit(dat, pdf, res, bnd.identifier, text)
        if not self._is_mc:
            self._d_dat_res[bound] = self._get_dat_resolution()
        return d_par
    #-------------------
    def _get_pars(self, res, method='minuit_hesse'):
        '''Freeze the fit result and return {name : [value, hesse_error]}.'''
        res.hesse(method=method)
        res.freeze()
        try:
            d_par = { name : [d_val['value'], d_val['hesse']['error']] for name, d_val in res.params.items() }
        # Fix: narrowed from a bare `except:`; a failed hesse means the
        # 'hesse'/'error' entries are missing from the result dict.
        except KeyError:
            self.log.warning(f'Cannot calculate errors, using zeros')
            d_par = { name : [d_val['value'], 0] for name, d_val in res.params.items() }
        return d_par
    #-------------------
    def _plot_fit(self, dat, pdf, res, identifier, text):
        '''Save the fit plot of one bin, if a plot directory was requested.'''
        if self._plot_dir is None:
            return
        [[minv]], [[maxv]] = self._obs.limits
        tp_rng = (minv, maxv)
        obj=zfp(data=dat, model=pdf, result=res)
        obj.plot(d_leg={}, plot_range=tp_rng, ext_text=text)
        # Reference line at zero on the pull panel.
        obj.axs[1].plot(tp_rng, [0, 0], linestyle='--', color='black')
        plot_dir = utnr.make_dir_path(f'{self._plot_dir}/{self._suffix}/fits')
        plot_path= f'{plot_dir}/{identifier}.png'
        self.log.visible(f'Saving to: {plot_path}')
        plt.savefig(plot_path, bbox_inches='tight')
        plt.close('all')
    #----------------------------------
    def _get_dat_resolution(self):
        '''Resolution of the fitted signal PDF, from the spread of a toy sample.'''
        dat = self._sig_pdf.create_sampler(n=self._nsam_var)
        arr_mass = dat.value().numpy()
        return numpy.std(arr_mass)
    #----------------------------------
    def _trim_dataset(self, d_df):
        '''Keep only the datasets whose positional index is in self._l_ibin.'''
        if len(self._l_ibin) == 0:
            self.log.info(f'Using {len(d_df)} datasets, after skipping trimming')
            return d_df
        d_df_trimmed = {}
        for counter, (key, val) in enumerate(d_df.items()):
            if counter not in self._l_ibin:
                continue
            self.log.debug(f'Keeping dataset for bin: {counter}')
            d_df_trimmed[key] = val
        self.log.info(f'Using {len(d_df_trimmed)} datasets, after trimming')
        return d_df_trimmed
    #----------------------------------
    def _calculate(self, df, suffix):
        '''
        Split df into momentum bins and extract per-bin resolutions (and fit
        parameters when fitting was requested). Returns (d_res, d_par).
        '''
        self._suffix = suffix
        # Fix: only create the output directory when one was requested;
        # previously this created a literal 'None/<suffix>' directory.
        if self._plot_dir is not None:
            utnr.make_dir_path(f'{self._plot_dir}/{self._suffix}')
        self.log.info(f'Calculating {suffix} resolutions')
        obj = dsplit(df, self._binning, spectators=['mass'])
        d_df = obj.get_datasets(as_type='dict', symmetrize=True)
        d_df = self._trim_dataset(d_df)
        if self._fit:
            d_par = { bound : self._fit_df(df, bound) for bound, df in d_df.items() }
        else:
            d_par = {}
        if self._is_mc:
            self.log.info(f'{"Dataset size":<20}{"Bounds":<40}{"RMS [MeV]":<20}{"Original":<20}{"Filtered":<20}')
            d_res = { bound : self._get_bin_resolution(df, bound) for bound, df in d_df.items() }
        else:
            d_res = self._d_dat_res
        return d_res, d_par
    #----------------------------------
    def get_resolution(self, brem=None):
        '''
        Resolutions (and fit parameters) for candidates whose two electrons
        carry `brem` brem photons in total. Returns (d_res, d_par), both keyed
        by the bin boundaries.
        '''
        self._initialize()
        df = self._get_data()
        df = df[df.brem1 + df.brem2 == brem]
        d_res, d_par= self._calculate(df, f'{brem}_brem')
        return d_res, d_par
#----------------------------
log = utnr.getLogger(__name__)
#----------------------------
def get_axis_labels(d_res):
    '''
    Build sorted axis labels in GeV from resolution-dictionary keys.

    Parameters
    ----------
    d_res : dict
        Keys encode bin boundaries in MeV, e.g. "((1000.0, 2000.0)...".

    Returns
    -------
    list of str
        Labels of the form "<low>-<high>" in GeV, sorted by lower edge.

    Raises
    ------
    ValueError
        If a key does not match the boundary pattern. (The original code
        used a bare `raise` outside an except block, which raises
        RuntimeError('No active exception to re-raise') instead.)
    '''
    regex = r'\(\((\d+)\.0,\s(\d+)\.0\).*'
    s_lab = set()
    for key in d_res:
        mtch = re.match(regex, key)
        if not mtch:
            log.error(f'Cannot match "{key}" with "{regex}"')
            raise ValueError(f'Cannot match "{key}" with "{regex}"')

        # Convert boundaries from MeV to GeV
        low = int(int(mtch.group(1)) / 1000)
        hig = int(int(mtch.group(2)) / 1000)

        s_lab.add(f'{low}-{hig}')

    l_lab = list(s_lab)

    return sorted(l_lab, key = lambda label : int(label.split('-')[0]))
#----------------------------
def get_ndim(d_res):
    '''Return n such that len(d_res) == n*n; raise ValueError otherwise.'''
    size = len(d_res)
    side = math.floor(math.sqrt(size))
    if side * side != size:
        log.error(f'{size} is not a perfect square')
        raise ValueError

    return side
#----------------------------
def get_resolution(d_res):
    '''
    Arrange the values of d_res into an (ndim, ndim) matrix, in insertion
    order, row by row.

    Values are divided by 1000 while filling and multiplied back by 1000
    on return, so the output carries the original units.

    Raises
    ------
    ValueError
        If the values cannot be arranged into a square matrix. (The
        original bare `raise` outside an except block produced a
        RuntimeError instead of a meaningful exception.)
    '''
    ndim  = get_ndim(d_res)
    l_row = [[]]
    for reso in d_res.values():
        # Start a new row once the current one is full
        if len(l_row[-1]) == ndim:
            l_row.append([])

        l_row[-1].append(reso / 1000.)

    mat = numpy.array(l_row)
    if mat.shape != (ndim, ndim):
        log.error(f'Wrong shape for resolution matrix: {mat.shape}')
        raise ValueError(f'Wrong shape for resolution matrix: {mat.shape}')

    return 1000 * mat
#----------------------------
def plot_reso(d_res_str, plot_dir, title=None, suffix=None, rng=None):
    '''
    Plot the resolutions in d_res_str as an annotated 2D color map and
    save the figure to {plot_dir}/{suffix}.png.

    Parameters
    ----------
    d_res_str : dict
        Maps bin-boundary strings (e.g. "((1000.0, 2000.0)...") to
        resolution values; keys containing 'inf' (open bins) are dropped.
    plot_dir : str
        Output directory.
    title : str, optional
        Figure title.
    suffix : str, optional
        Basename of the saved PNG.
    rng : optional
        Unused in this function; kept for interface compatibility.
    '''
    # Drop open-ended (infinite) bins before building the matrix
    d_res_str = { key : val for key, val in d_res_str.items() if 'inf' not in key}
    l_x = get_axis_labels(d_res_str)
    # NOTE(review): `boundaries` is presumably imported near the top of the
    # file; it appears to turn a boundary string into a sortable key -- confirm.
    d_res = {boundaries(key) : val for key, val in d_res_str.items()}
    d_res = dict(sorted(d_res.items()))
    mat = get_resolution(d_res)
    plot_path = f'{plot_dir}/{suffix}.png'
    log.visible(f'Saving to: {plot_path}')
    cb=plt.pcolor(l_x, l_x, mat)
    plt.colorbar(cb)
    plt.title(title)
    # Annotate every cell with its value (blank for NaN)
    nbins = len(l_x)
    for i in range(nbins):
        for j in range(nbins):
            val = mat[i,j]
            if numpy.isnan(val):
                sval = ''
            else:
                sval = f'{val:.2f}'
            plt.text(j - 0.3, i, sval, color="k")
    plt.savefig(plot_path)
    plt.close('all')
#---------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/dilep_reso.py | 0.529507 | 0.203213 | dilep_reso.py | pypi |
import utils_noroot as utnr
import matplotlib.pyplot as plt
import zfit
import math
import numpy
import re
import os
from zutils.plot import plot as zfp
from logzero import logger as log
from rk.scales import mass as mscale
from fitter import zfitter
#-----------------------------------------
class extractor:
    '''
    Extracts mass scale factors by fitting the signal PDF to MC and the
    full (signal + background) model to data.

    Usage: set `model` (a zfit.pdf.SumPDF with exactly one component whose
    name starts with "Signal"), optionally `plot_dir`, `cache_dir` and
    `stop_at`, then call `get_scales()`.
    '''
    def __init__(self, mc=None, dt=None):
        # Mass arrays for simulation and data
        self._arr_mc      = mc
        self._arr_dt      = dt
        # Signal component and full model, filled by the `model` setter
        self._sig_pdf     = None
        self._ful_pdf     = None
        self._cache_dir   = None
        self._plot_dir    = None
        self._stop_at     = None
        self._d_mc_pars   = {}
        self._initialized = False
    #-----------------------------------------
    @property
    def plot_dir(self):
        '''Directory where fit plots are saved; None disables plotting.'''
        return self._plot_dir

    @plot_dir.setter
    def plot_dir(self, plot_dir):
        try:
            self._plot_dir = utnr.make_dir_path(plot_dir)
        except Exception:
            log.error(f'Cannot make: {plot_dir}')
            raise
    #-----------------------------------------
    @property
    def cache_dir(self):
        '''Directory used to cache fit parameters as JSON; None disables caching.'''
        return self._cache_dir

    @cache_dir.setter
    def cache_dir(self, value):
        try:
            self._cache_dir = utnr.make_dir_path(value)
        except Exception:
            log.error(f'Cannot make cache directory: {value}')
            raise ValueError
    #-----------------------------------------
    @property
    def model(self):
        '''Full zfit.pdf.SumPDF; setting it also extracts the signal component.'''
        return self._ful_pdf

    @model.setter
    def model(self, ful_pdf):
        if not isinstance(ful_pdf, zfit.pdf.SumPDF):
            log.error(f'Model is not a zfit.pdf.SumPDF: {ful_pdf}')
            raise ValueError(f'Model is not a zfit.pdf.SumPDF: {ful_pdf}')

        l_sig = [ model for model in ful_pdf.models if model.name.startswith('Signal') ]
        if len(l_sig) != 1:
            l_name = [model.name for model in l_sig]
            log.error(f'Single signal component not found among: {l_name}')
            raise ValueError(f'Single signal component not found among: {l_name}')

        [self._sig_pdf] = l_sig
        log.info(f'Using signal: {self._sig_pdf.name}')

        # The signal component must float both the mean and the width.
        # Bug fix: the original f-strings referenced an undefined name
        # `pdf` here, raising NameError on this error path.
        s_par      = self._sig_pdf.get_params(floating=True)
        l_par_name = [par.name for par in s_par]
        for par_name in ['mu', 'sg']:
            if par_name not in l_par_name:
                log.error(f'Missing "{par_name}" floating parameter in signal component: {ful_pdf}')
                raise ValueError(f'Missing "{par_name}" floating parameter in signal component')

        self._ful_pdf = ful_pdf
    #-----------------------------------------
    @property
    def stop_at(self):
        '''Optional early-stop point, one of "mc_fit" or "dt_fit".'''
        return self._stop_at

    @stop_at.setter
    def stop_at(self, value):
        if value not in ['mc_fit', 'dt_fit']:
            log.error(f'Stopping value {value} invalid')
            raise ValueError(f'Stopping value {value} invalid')

        self._stop_at = value
    #-----------------------------------------
    def _initialize(self):
        if self._initialized:
            return

        self._initialized = True
    #-----------------------------------------
    def _load(self, name):
        '''Return cached fit parameters for `name`, or None if unavailable.'''
        if self._cache_dir is None:
            return

        data_path = f'{self._cache_dir}/{name}.json'
        if not os.path.isfile(data_path):
            return

        d_data = utnr.load_json(data_path)
        log.info(f'Loading from: {data_path}')

        return d_data
    #-----------------------------------------
    def _dump(self, obj, name):
        '''Cache `obj` as JSON under `name` when a cache directory is configured.'''
        if self._cache_dir is None:
            return

        data_path = f'{self._cache_dir}/{name}.json'
        utnr.dump_json(obj, data_path)
        log.info(f'Dumping to: {data_path}')
    #-----------------------------------------
    def _fit(self, name, pdf, arr_mass, ntries=None):
        '''
        Fit `pdf` to `arr_mass` and return {par_name: (value, error)}.

        "signal" and "data" fits are cached when a cache directory is set.

        Raises
        ------
        RuntimeError
            If the fit does not converge (status != 0).
        '''
        obj = self._load(name) if name in ['signal', 'data'] else None
        if obj is not None:
            return obj

        ftr = zfitter(pdf, arr_mass)
        res = ftr.fit(ntries=ntries)

        if self._plot_dir:
            self._plot_fit(name, model=pdf, data=arr_mass, result=res)

        if res.status != 0:
            log.error(f'Failed fit, status: {res.status}')
            print(res)
            raise RuntimeError(f'Failed fit, status: {res.status}')

        res.hesse()
        res.freeze()
        try:
            # Avoid shadowing the `name` argument inside the comprehension
            d_par = { par_name : (d_val['value'], d_val['hesse']['error']) for par_name, d_val in res.params.items() }
        except Exception:
            log.error('Cannot extract fit parameters:')
            print(res)
            print(res.params.items())
            raise

        self._dump(d_par, name)

        return d_par
    #-----------------------------------------
    def _plot_fit(self, name, model=None, data=None, result=None):
        '''Save a plot of the fit `result` to {plot_dir}/fits/{name}.png.'''
        obj = zfp(data=data, model=model, result=result)
        obj.plot(ext_text=f'Kind: {name}')

        plot_dir  = utnr.make_dir_path(f'{self._plot_dir}/fits')
        plot_path = f'{plot_dir}/{name}.png'
        log.info(f'Saving to: {plot_path}')
        plt.savefig(plot_path)
        plt.close('all')
    #-----------------------------------------
    def _fix_pdf(self, pdf, d_par):
        '''
        Fix all parameters of `pdf` to the values in d_par, except the
        mean, the width and the yields, which stay floating.
        '''
        s_par_flt = pdf.get_params(floating=True )
        s_par_fix = pdf.get_params(floating=False)
        d_par_pdf = { par.name : par for par in list(s_par_flt) + list(s_par_fix) }

        log.info('Fixing parameters:')
        for name, (value, _) in d_par.items():
            if name in ['mu', 'sg'] or name.startswith('yld_'):
                continue

            par = d_par_pdf[name]
            par.set_value(value)
            par.floating = False
            log.info(f'{"":<4}{name:<10}{"->":<10}{value:<10.3e}')
    #-----------------------------------------
    def get_scales(self):
        '''
        Run the MC and data fits and return a `mscale` object with the
        resulting scale factors, or None when stopped early via `stop_at`.
        '''
        self._initialize()

        d_par_mc = self._fit('signal', self._sig_pdf, self._arr_mc, ntries=5)
        if self._stop_at == 'mc_fit':
            log.info('Stopping after fitting MC signal')
            return

        self._fix_pdf(self._ful_pdf, d_par_mc)
        d_par_dt = self._fit('data', self._ful_pdf, self._arr_dt)
        if self._stop_at == 'dt_fit':
            log.info('Stopping after fitting data')
            return

        return mscale(dt=d_par_dt, mc=d_par_mc)
#----------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/musg_extractor.py | 0.566139 | 0.248201 | musg_extractor.py | pypi |
import jacobi as jac
import math
import utils_noroot as utnr
import matplotlib.pyplot as plt
import zfit
from logzero import logger as log
#-----------------------------------------
class mass:
    '''
    Computes the data/MC mass scale (difference of the means) and
    resolution (ratio of the widths) from fitted parameter dictionaries.

    Both input dictionaries must map 'mu', 'sg' and 'yld_sig' to
    (value, error) tuples.
    '''
    def __init__(self, dt=None, mc=None):
        self._d_par_dt    = dt
        self._d_par_mc    = mc
        self._scale_v     = None
        self._scale_e     = None
        self._reso_v      = None
        self._reso_e      = None
        self._initialized = False
    #--------------------------------
    @property
    def scale(self):
        '''(value, error) of mu_data - mu_mc.'''
        self._initialize()
        return self._scale_v, self._scale_e
    #--------------------------------
    @property
    def resolution(self):
        '''(value, error) of sg_data / sg_mc.'''
        self._initialize()
        return self._reso_v, self._reso_e
    #--------------------------------
    def yld(self, kind):
        '''Return the (value, error) signal yield of MC if kind == "mc", else of data.'''
        self._initialize()
        return self._d_par_mc['yld_sig'] if kind == 'mc' else self._d_par_dt['yld_sig']
    #--------------------------------
    def _initialize(self):
        if self._initialized:
            return

        self._check_keys(self._d_par_dt)
        self._check_keys(self._d_par_mc)
        self._calculate_scales()

        self._initialized = True
    #--------------------------------
    def _check_keys(self, d_par):
        '''Validate that d_par holds mu/sg/yld_sig as (value, error) tuples.

        Bug fix: the original used a bare `raise` outside an except block,
        which raises RuntimeError instead of a meaningful exception.
        '''
        for key in ['mu', 'sg', 'yld_sig']:
            if key not in d_par:
                log.error(f'Parameter {key} not found in: {d_par}')
                raise ValueError(f'Parameter {key} not found')

            val = d_par[key]
            try:
                num, err = val
            except Exception:
                log.error(f'Not found a tuple for {key}, instead {val}')
                raise
    #--------------------------------
    def _calculate_scales(self):
        '''Propagate fit errors into scale (mu_dt - mu_mc) and resolution (sg_dt / sg_mc).'''
        mc_mu_val, mc_mu_err = self._d_par_mc['mu']
        mc_sg_val, mc_sg_err = self._d_par_mc['sg']
        dt_mu_val, dt_mu_err = self._d_par_dt['mu']
        dt_sg_val, dt_sg_err = self._d_par_dt['sg']

        # Diagonal covariances: mu/sg errors are taken as uncorrelated
        scale_v, scale_e2= jac.propagate(lambda x : x[0] - x[1], [dt_mu_val, mc_mu_val], [[dt_mu_err**2, 0],[0, mc_mu_err**2]])
        reso_v , reso_e2 = jac.propagate(lambda x : x[0] / x[1], [dt_sg_val, mc_sg_val], [[dt_sg_err**2, 0],[0, mc_sg_err**2]])

        self._scale_v = float(scale_v)
        self._reso_v  = float(reso_v)
        self._scale_e = math.sqrt(scale_e2)
        self._reso_e  = math.sqrt(reso_e2)
#-----------------------------------------
class fraction:
    '''Computes data/MC scale factors for per-category yield fractions.'''
    def __init__(self, d_mscale):
        # Maps category -> mass-scales object providing yld(kind)
        self._d_mscale    = d_mscale
        self._d_scale     = None
        self._initialized = False
    #-----------------------------------
    def _initialize(self):
        if self._initialized:
            return

        self._d_scale     = self._get_scales()
        self._initialized = True
    #-----------------------------------
    def _get_fraction(self, cat, d_yld):
        '''Return (value, error) of the yield fraction of category `cat`.'''
        l_cat = list(d_yld.keys())
        l_var = list(d_yld.values())
        cat_i = l_cat.index(cat)
        l_val = [ val for val, _ in l_var ]

        # Diagonal covariance matrix built from the yield errors
        nvar  = len(l_var)
        l_row = [[0] * nvar for _ in range(nvar)]
        for index, (_, err) in enumerate(l_var):
            l_row[index][index] = err ** 2

        frc_val, frc_err2 = jac.propagate(lambda x : x[cat_i] / sum(x), l_val, l_row)

        return frc_val, math.sqrt(frc_err2)
    #-----------------------------------
    def _get_fractions(self, kind):
        '''Return {category: (fraction value, error)} for the sample `kind`.'''
        d_yld = { cat : mscale.yld(kind) for cat, mscale in self._d_mscale.items() }

        return { cat : self._get_fraction(cat, d_yld) for cat in d_yld }
    #-----------------------------------
    def _get_scales(self):
        '''Return {category: (data/MC fraction ratio, propagated error)}.'''
        d_frc_mc = self._get_fractions('mc')
        d_frc_dt = self._get_fractions('dt')

        d_scl = {}
        for cat, (mc_frc_val, mc_frc_err) in d_frc_mc.items():
            dt_frc_val, dt_frc_err = d_frc_dt[cat]
            arr_val, arr_err2      = jac.propagate(lambda x : x[0] / x[1], [dt_frc_val, mc_frc_val], [[dt_frc_err**2, 0], [0, mc_frc_err**2 ]])
            d_scl[cat]             = float(arr_val), math.sqrt(float(arr_err2))

        return d_scl
    #-----------------------------------
    @property
    def scales(self):
        self._initialize()
        return self._d_scale
#-----------------------------------------
class plotter:
    '''
    Plots, per brem category, the pulls of the measured mass scale,
    resolution and brem fraction with respect to reference values.
    '''
    def __init__(self, dmu=None, ssg=None, dfr=None):
        self._dmu         = dmu   # reference scale (mean shift)
        self._ssg         = ssg   # reference resolution ratio
        self._d_fr        = dfr   # reference brem fraction per category
        self._d_scale     = None
        self._initialized = False
    #-----------------------------------
    @property
    def scales(self):
        '''{category: mass-scales object}; must be set before calling save_to.'''
        return self._d_scale
    #-----------------------------------
    @scales.setter
    def scales(self, value):
        # Bug fix: the original used bare `raise` statements outside any
        # except block, which raise RuntimeError instead of ValueError.
        if not isinstance(value, dict):
            log.error(f'Input is not a dictionary: {value}')
            raise ValueError(f'Input is not a dictionary: {value}')

        if self._d_fr.keys() != value.keys():
            log.error(f'Categories from fractions and scales differ: {self._d_fr.keys()}/{value.keys()}')
            raise ValueError('Categories from fractions and scales differ')

        self._d_scale = value
    #-----------------------------------
    def _initialize(self):
        if self._initialized:
            return

        if self._d_scale is None:
            log.error('Scales were not passed')
            raise ValueError('Scales were not passed')

        self._initialized = True
    #-----------------------------------
    def _get_scales(self):
        '''Return {category: (scale, resolution, fraction)} of (value, error) pairs.'''
        fr   = fraction(self._d_scale)
        d_fr = fr.scales

        d_sc = {}
        for cat, msc in self._d_scale.items():
            d_sc[cat] = (msc.scale, msc.resolution, d_fr[cat])

        return d_sc
    #-----------------------------------
    def _plot(self, cat, sc, rs, fr):
        '''Plot the pulls ((measured - reference) / error) for one category.'''
        sc_v, sc_e = sc
        sc_p       = (sc_v - self._dmu) / sc_e

        rs_v, rs_e = rs
        rs_p       = (rs_v - self._ssg) / rs_e

        fr_v, fr_e = fr
        fr_p       = (fr_v - self._d_fr[cat]) / fr_e

        plt.errorbar(['Scale', 'Reso', 'Brem frac'], [sc_p, rs_p, fr_p], yerr=[1, 1, 1], marker='o', capsize=10, linestyle='None', label=cat)
    #-----------------------------------
    def save_to(self, plot_dir):
        '''Save the pull plot to {plot_dir}/scales.png.'''
        self._initialize()
        utnr.make_dir_path(plot_dir)

        d_sc = self._get_scales()
        for cat, (sc, rs, fr) in d_sc.items():
            self._plot(cat, sc, rs, fr)

        plt.legend()
        plt.axhline(color='black', linestyle=':')
        plt.savefig(f'{plot_dir}/scales.png')
        plt.close('all')
#-----------------------------------------
def dump_scales(d_scale, json_path):
    '''Serialize per-category scale, resolution and fraction factors to JSON.'''
    d_fr = fraction(d_scale).scales

    d_out = {}
    for cat, msc in d_scale.items():
        d_out[f'scl_{cat}'] = msc.scale
        d_out[f'res_{cat}'] = msc.resolution
        d_out[f'frc_{cat}'] = d_fr[cat]

    log.info(f'Saving to: {json_path}')
    utnr.dump_json(d_out, json_path)
#-----------------------------------------
class load_scales:
    '''
    Loads mass-scale JSON files and builds zfit Gaussian constraints for
    the scale, resolution and brem-fraction parameters.
    '''
    def __init__(self, trig=None, dset=None, brem=None):
        self._trig        = trig
        self._dset        = dset
        self._brem        = brem

        self._l_par_name  = ['scale', 'resolution', 'brem_frac']
        self._l_brem      = ['z', 'o', 't']
        self._d_par       = {}
        self._d_data      = None
        self._scale_dir   = None
        self._json_path   = None
        self._initialized = False
    #-----------------------------------------
    def _initialize(self):
        # Bug fix: the original used bare `raise` statements outside any
        # except block, which raise RuntimeError instead of ValueError.
        if self._initialized:
            return

        for par_name in self._l_par_name:
            if par_name not in self._d_par:
                log.error(f'Parameter {par_name} not specified')
                raise ValueError(f'Parameter {par_name} not specified')

        if self._brem not in self._l_brem:
            log.error(f'Brem {self._brem} not among: {self._l_brem}')
            raise ValueError(f'Brem {self._brem} not among: {self._l_brem}')

        self._json_path = f'{self._scale_dir}/{self._trig}_{self._dset}.json'
        utnr.check_file(self._json_path)
        self._d_data = utnr.load_json(self._json_path)

        self._initialized = True
    #-----------------------------------------
    def __setitem__(self, key, parameter):
        '''Register the zfit parameter associated with `key`.'''
        if key not in self._l_par_name:
            log.error(f'Invalid parameter: {key}')
            raise ValueError(f'Invalid parameter: {key}')

        self._d_par[key] = parameter
    #-----------------------------------------
    @property
    def scale_dir(self):
        '''Directory containing the {trig}_{dset}.json scale files.'''
        return self._scale_dir

    @scale_dir.setter
    def scale_dir(self, value):
        utnr.check_file(f'{value}/{self._trig}_{self._dset}.json')
        self._scale_dir = value
    #-----------------------------------------
    def _get_const(self, name_1, name_2):
        '''Build a Gaussian constraint for parameter `name_1` from datum `name_2`.'''
        par      = self._d_par[name_1]
        val, err = self._d_data[f'{name_2}_{self._brem}']

        return zfit.constraint.GaussianConstraint(par, val, err)
    #-----------------------------------------
    def get_constraints(self):
        '''Return {"scl"/"res"/"frc": GaussianConstraint} for the configured brem category.'''
        self._initialize()

        c_scl = self._get_const('scale'     , 'scl')
        c_res = self._get_const('resolution', 'res')
        c_frc = self._get_const('brem_frac' , 'frc')

        return {'scl' : c_scl, 'res' : c_res, 'frc' : c_frc}
#----------------------------------------- | /rx_tools-0.0.3.tar.gz/rx_tools-0.0.3/src/rk/scales.py | 0.649356 | 0.227705 | scales.py | pypi |
import funcy
import typing
from typing import Any
import requests
import rx
from rx import operators as ops
import pdb
from rxw.models import *
def default_unit(key: str) -> Unit:
    """
    Given a json key from the OpenWeatherMap response, return the Unit
    for that key's corresponding measurement, or None for unknown keys.
    """
    units = {
        'temp': Unit(Unit.degree_symbol()+"C"),
        'deg': Unit(Unit.degree_symbol()),
        'speed': Unit('m/sec'),
        # Bug fix: key was misspelled 'presssure', so the hPa unit could
        # never be looked up; the OpenWeatherMap field is 'pressure'.
        'pressure': Unit('hPa'),
        'humidity': Unit('%'),
    }
    return units.get(key)
class CurrentConditions:
    """
    Handles communication with the OpenWeatherMap current-conditions API
    and turns its JSON responses into WeatherForecast objects.
    """
    # API host; requests below are made over plain http
    host = 'api.openweathermap.org'
    def __init__(self, api_key: str):
        # OpenWeatherMap API key, sent as the `appid` query parameter
        self.api_key = api_key
    def show_for(self, zip: str, temp_only: bool=False):
        """
        Fetch and print the current weather for the given zip code.
        When temp_only is True, only the temperature line is printed.
        """
        self.rx_fetch(zip).pipe(
            ops.flat_map(lambda js: self.parse_weather(js))
        ).subscribe(
            on_next=lambda w: w.display(temp_only),
            on_error=lambda e: self._handle_error(e)
        )
    def rx_fetch(self, zip: str) -> rx.Observable:
        """
        Create and return an observable that performs the
        current-conditions API request when subscribed.
        """
        url = "http://"+self.host+'/data/2.5/weather'
        def observable(observer, scheduler):
            params = {'zip': zip, 'appid': self.api_key}
            rsp = requests.get(url, params=params)
            try:
                rsp.raise_for_status()
                observer.on_next(rsp.json())
                observer.on_completed()
            except requests.HTTPError as e:
                # HTTP errors (e.g. 404 for an unknown zip) go to on_error
                observer.on_error(e)
            # Returned callable is the subscription's dispose function
            return lambda: None
        return rx.create(observable)
    def parse_weather(self, json: dict) -> WeatherForecast:
        """
        Extract the various weather readings from the json
        blob and emit a WeatherForecast on the returned observable.
        """
        def observable(observer, scheduler):
            try:
                if len(json) == 0:
                    raise(Exception('No Weather Data'))
                location = Location(id=json['id'])
                location.name = json['name']
                if 'sys' in json:
                    sys = json['sys']
                    location.country = sys['country']
                    # Sunrise/sunset arrive as UTC epoch seconds
                    sunrise = SolarTimes.utc_to_localdatetime(sys['sunrise'])
                    sunset = SolarTimes.utc_to_localdatetime(sys['sunset'])
                    location.solar = SolarTimes(sunrise, sunset)
                else:
                    raise Exception("Weather data invalid, missing 'sys'")
                weather = WeatherForecast(location)
                if 'coord' in json:
                    lat = json['coord']['lat']
                    lon = json['coord']['lon']
                    weather.location.geo_location = GeoPoint(lat, lon)
                if 'main' not in json:
                    raise Exception("Weather data invalid, missing 'main'")
                main = json['main']
                cc = ClimateCondition()
                # Temperatures come back in Kelvin; conversion to Fahrenheit
                # happens at display time
                cc.temperature = Measurement(
                    main['temp'],
                    default_unit('temp'))
                cc.humidity = Measurement(
                    main['humidity'],
                    default_unit('humidity'))
                if 'wind' in json:
                    wind = json['wind']
                    speed = Measurement(wind['speed'], default_unit('speed'))
                    dir = Measurement(wind['deg'], default_unit('deg'))
                    cc.wind = Vector(speed, dir)
                if 'weather' in json:
                    ps = json['weather']
                    params = [Parameter(p['main'], p['description'])
                              for p in ps]
                    cc.conditions = params
                weather.current = cc
            except Exception as e:
                # Any parsing problem is forwarded to the subscriber
                observer.on_error(e)
            else:
                observer.on_next(weather)
            finally:
                # NOTE(review): a `return` inside `finally` would mask an
                # exception raised in this block; nothing raises after the
                # handlers here, but confirm this is intentional.
                return lambda: None
        return rx.create(observable)
    def _handle_error(self, e: Exception):
        """ Print a human-readable message describing the failure. """
        if type(e) is requests.HTTPError:
            if e.response.status_code == 404:
                print("Unable to find weather at the location specified")
            else:
                print("Network error " + e.response.reason)
        else:
            print(str(e))
import typing
from typing import List, NewType, NamedTuple
from datetime import datetime
import pytz
import tzlocal
class Unit:
    """A unit of measure, e.g. 'km/h' or degrees C."""

    def __init__(self, sym: str):
        self._symbol = sym

    @property
    def symbol(self) -> str:
        """The unit's display symbol."""
        return self._symbol

    def __str__(self):
        return self.symbol

    @staticmethod
    def degree_symbol():
        """Return the UTF-8 degree character."""
        return u'\N{DEGREE SIGN}'
class Measurement(NamedTuple):
    """A numeric value paired with its unit."""
    value: float
    unit: "Unit"


class GeoPoint(NamedTuple):
    """A geographic coordinate (latitude, longitude)."""
    lat: float
    lon: float


class Parameter(NamedTuple):
    """A named weather condition with a human-readable description."""
    name: str
    description: str


class Vector(NamedTuple):
    """A magnitude/direction pair, e.g. wind speed and bearing."""
    magnitude: Measurement
    direction: Measurement
class SolarTimes:
    """Holds sunrise and sunset times."""

    def __init__(self, rise: datetime, set: datetime):
        self._sunrise = rise
        self._sunset = set

    @property
    def sunrise(self) -> datetime:
        return self._sunrise

    @sunrise.setter
    def sunrise(self, new_value: datetime):
        self._sunrise = new_value

    @property
    def sunset(self) -> datetime:
        return self._sunset

    @sunset.setter
    def sunset(self, new_value: datetime):
        self._sunset = new_value

    @staticmethod
    def utc_to_localdatetime(timestamp: float) -> datetime:
        """Convert a UTC epoch timestamp to an aware local-time datetime."""
        local_zone = tzlocal.get_localzone()
        naive_utc = datetime.utcfromtimestamp(timestamp)
        return naive_utc.replace(tzinfo=pytz.utc).astimezone(local_zone)
class Location:
    """A geographical weather area with coordinates and solar times."""

    def __init__(self, id: int):
        self.id = id

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    def name(self, new_value: str):
        self._name = new_value

    @property
    def country(self) -> str:
        return self._country

    @country.setter
    def country(self, new_value: str):
        self._country = new_value

    @property
    def geo_location(self) -> GeoPoint:
        return self._geo

    @geo_location.setter
    def geo_location(self, geo: GeoPoint):
        self._geo = geo

    @property
    def solar(self) -> SolarTimes:
        return self._solar

    @solar.setter
    def solar(self, new_value: SolarTimes):
        self._solar = new_value

    def string_times(self) -> (datetime, datetime):
        """Return (sunrise, sunset) formatted like '6:30AM'."""
        fmt = '%-I:%M%p'
        rise = datetime.strftime(self.solar.sunrise, fmt)
        set = datetime.strftime(self.solar.sunset, fmt)
        return (rise, set)

    def __str__(self):
        rise, set = self.string_times()
        lines = [
            "{0}, {1}".format(self.name, self.country),
            "lat: {0} lon: {1}".format(
                self.geo_location.lat, self.geo_location.lon),
            "sunrise: {}\nsunset: {}".format(rise, set),
        ]
        return "\n".join(lines)
class ClimateCondition:
    """
    All the atmospheric attributes we care to display as weather.
    """

    @property
    def temperature(self) -> Measurement:
        return self._temp

    @temperature.setter
    def temperature(self, new_value: Measurement):
        self._temp = new_value

    @property
    def humidity(self) -> Measurement:
        return self._humidity

    @humidity.setter
    def humidity(self, new_value: Measurement):
        self._humidity = new_value

    @property
    def wind(self) -> Vector:
        return self._wind

    @wind.setter
    def wind(self, new_value: Vector):
        self._wind = new_value

    @property
    def conditions(self) -> List[Parameter]:
        return self._conditions

    @conditions.setter
    def conditions(self, new_value: List[Parameter]):
        self._conditions = new_value

    def kelvin_to_farenheight(self, k: Measurement) -> Measurement:
        """Convert a Kelvin measurement to degrees Fahrenheit."""
        fahrenheit = (k.value - 273.15) * 1.8 + 32.0
        return Measurement(fahrenheit, Unit(Unit.degree_symbol() + "F"))

    def msec_to_mph(self, msec: Measurement) -> Measurement:
        """Convert a metres-per-second measurement to miles per hour."""
        return Measurement(msec.value * 2.236936, Unit("mph"))

    def display_temp(self) -> str:
        """Return the temperature formatted in Fahrenheit, e.g. '72.3°F'."""
        tfar = self.kelvin_to_farenheight(self.temperature)
        return "{0:.1f}{1}".format(tfar.value, tfar.unit)

    def __str__(self):
        mph = self.msec_to_mph(self.wind.magnitude)
        parts = [
            "Temperature: {0}\n".format(self.display_temp()),
            "Wind Speed:\n",
            "\t{0:.1f}{1} at {2}{3}\n".format(mph.value, mph.unit,
                self.wind.direction.value, self.wind.direction.unit),
            "Conditions:\n\t",
            ",".join([p.description for p in self.conditions]),
        ]
        return "".join(parts)
class WeatherForecast:
    """
    A weather forecast: climate conditions for a location on a
    specific date.
    """

    def __init__(self, location: Location):
        self.location = location

    @property
    def location(self) -> Location:
        return self._location

    @location.setter
    def location(self, new_value):
        self._location = new_value

    @property
    def current(self) -> ClimateCondition:
        return self._cur_condition

    @current.setter
    def current(self, new_value: ClimateCondition):
        self._cur_condition = new_value

    def display(self, temp_only: bool=False):
        """Print the forecast; only the temperature line when temp_only."""
        if temp_only:
            print("{0}: {1}\n".format(
                self.location.name, self.current.display_temp()))
            return

        print("{0}\n".format(self.location))
        print("{0}\n".format(self.current))
  
--------------------------------------------------------
"rx7" is a library help you code faster and shorter
--------------------------------------------------------
### \- Most useful functions and methods are collected.
### \- Special features
### \- Simple and easy-to-understand API
<hr />
### Installation
pip install rx7
### Upgrade
pip install --upgrade rx7
<br />
# Here is the brief documentation:
### *(Complete documentation with details will be added soon in the Wiki section)*
<h2>List of Variables:</h2>
| **Variable** | **Description** |
|--------------|---------------------------------------------------------|
| argv | sys.argv (return list of given arguments from terminal) |
| ABC | Parent for classes which have abstractmethods |
| exit | Equivalent sys.exit (returning exit code to terminal) |
| environ | Returns environment variables in a dictionary format |
<br>
List of Functions:
------------------
| **Function** | **Description** |
|----------------------------------|--------------------------------------------------------------------------------------------------|
| p() | print() function. |
| repeat(function,n) | Repeat F_Name function for n times. |
| rev(v) (REMOVED 2.4.0) | Reverse v and returns it. (Everything like str,list,int) |
| read(file) | Return content of the file. |
| write(file,mode,text) | Write things you want in file content. (Read Doc String) |
| wait(n)sleep(n) | Stop code executing for n seconds |
| cls()clear() | It Clears the Terminal |
| progressbar() (*removed in v3.1) | In-App Progressbar. (Read Doc String) |
| cons_integer(Frst,Lst) | Return string from Frst to Lst (Read Doc String) (v1.7) |
| force(tpl,*var) | Return tpl with adding var(s) to it. |
| erase(tpl,*var) | Return tpl with removing var(s) from it. |
| replace(tpl,ind,var) | Replace tpl[ind] with var |
| insert(tpl,ind,var) | Set tpl[ind] to var. (Note that tpl[ind] will be tpl[ind+1]) |
| pop(tpl,index) | Remove member with index of 'index' from a tuple |
| wait_for(button) | Waits for user to press specific button. |
| call_later(func,args,delay) | Call func(args) after delay time. |
| convert_bytes(num) | convert bytes to (KB,MB,GB,TB) |
| Input(prompt,default) | Prompt an input message with default answer (value) (ONLY ON WINDOWS) |
| default_input() | Same as `default_input` |
| restart_app() | Restart running python program |
| active_window_title() | Return Active Window Title |
| open_image(path) | Open image with default image viewer (Mac OS is not supported) |
| download(url) | To download files with memory saver and progressbar |
| extract(file,path,pwd) | Extract Zip file with password to path |
| screenshot(name) | Take a screenshot and save it. |
| func_info(function) | Print information of function |
| Check_Type | Decorator that raise TypeError if function argument type is wrong (Read Help) |
| Progressbar() | Generator of progressbar() which you can use it to do some stuff between your delays (Read Help) |
| pixel_color(X,Y) | Return RGB color of pixel[X,Y] |
| getpass(prompt) | Prompt for a password, with echo turned off. |
| import_module(path) | Return given path (file with any extension) as a Module |
<br>
<h2>List of Classes:</h2>
<h3> Class Random: <em>Random Variable Generator Class.</em></h3>
| **Function** | **Description** |
|--------------------------|------------------------------------------------|
| choose(iter,k,duplicate) | Choose k random items from iterable or string. |
| integer(Frst,Lst) | Choose integer in range [Frst,Lst] |
| O1(decimal_nom=17) | Return x in interval [0,1) |
| number(Frst,Lst) | Return x in interval [Frst,Lst] |
| shuffle(iterable) | Return shuffled version of iterable |
<br>
<h3> Class System: <em>Some system actions and information.</em></h3>
| **Function** | **Description** |
|-------------------------|-------------------------------------------------------------------------|
| accname() | return account username you have logged in. |
| pid() | Get pid number of terminal and return it. |
| disk_usage(path) | ######## |
| chdir | Change directory of terminal. |
| SHUT_DOWN() | Shut Down the PC. |
| RESTART() | Restart the PC. |
| terminal_size() | Return terminal size in tuple (columns,lines). |
| cwd() | Return Current Working Directory. |
| ip_global() | Returns Global IP. |
| ip_local() | Returns Local IP. |
| ram_total() | Returns total ram of the system. |
| ram_used() | Returns Used Space of the ram of the system. |
| ram_free() | Returns Available (Free) space of system ram. |
| boot_time() | Return system boot time in seconds since the epoch. |
| device_name() | Returns Device Name |
| ip_website(url) | Returns url ip address |
| win10_notification() | Display windows 10 notification (READ DOCSTRING) (ONLY WIN10 SUPPORTED) |
| cpu_count(logical=True) | Return the number of logical/physical CPUs in the system |
| pyshell_execute_bit() | To determine whether Python shell is executing in 32bit or 64bit |
| pids() | Return a list of current running PIDs |
| pid_exists(pid) | Return True if pid exists else False |
| cpu_percent() | Return the current CPU utilization as a percentage |
| os_name() | Returns OS name of machine |
<br>
<h3> Class Files: (Static<strong style="font-size: 14px;"> methods) </strong><em style="font-size: 14px;">Actions and information about files.</em></h3>
| **Function** | **Description** |
|----------------------------------------------|-------------------------------------------------------------------------------|
| size(path) | Return size of the file in byte(s). Also work on |
| delete(path) | Use this to delete a file (Not folder). |
| rename(path) | Rename files with this function. |
| abspath(path) | Return absolute path of given path. |
| exists(path) | Return Boolean. If exists True, else: False |
| mdftime(path) | Get last modify time of the file. |
| acstime(path) | Get last access time of the file. |
| move(src,dst) | Move file from src to dst. (Read Doc String of copy func) |
| copy(src,dst,metadata=True) | Copy file (with metadata) from src to dst. (Also work on folders) |
| hide(path) | Hide given path. (It can be file or directory.) |
| read_only(path,mode=True) | Make file or folder read-only. (Read Doc String) |
| read(path) | Return content of the path |
| write(path,text='',...) | Same as write function. |
| isdir(path) | Return True for directory and False for others. |
| isfile(path) | Return True for file and False for others. |
| is_hidden(path) | Check whether path is hidden or not |
| is_readonly(path) | Check whether path is readonly or not |
| search_file(pattern,path,mode) | search for pattern in path (Read function doc string) |
| search_content(path,word) | Search for word in all files in path, return list of files that contain word |
| mkdir(path) | Make directory (More than one if its possible!) |
| generate_tree(dir_path) | Returns a visual tree of dir_path |
| get_drives() | (Windows only) Get currently available drives |
| basename(path) | Returns the final component of a pathname |
| dirname(path) | Returns the directory component of a pathname |
| join_paths(path) | Joins multiple paths together and returns it |
| MEMBERS (Family) | |
| MEMBERS.all_exactdir | List of all things those are in exact directory |
| MEMBERS.files_exactdir | List of files which are in exact directory |
| MEMBERS.dirs_exactdir | List of dirs which are in exact directory |
| MEMBERS.files_all | List of files which are in exact directory and all sub-directories |
| MEMBERS.files_all_sep | List of files which are in exact directory and all sub-directories separated by their directories |
| MEMBERS.dirs_all | List of directories (Exact dir and all sub-dirs) |
| MEMBERS.all_all_sep | List of everything thing in path (exact dir & sub-dirs) |
<br>
<h3> Class Style: <em>Changing text Color,BG & Style. (Read Doc String)</em></h3>
| **Function** | **Description** |
|-----------------------------------------------|-------------------------------------------------------------|
| print\(\*values, color, BG, style, end, sep\) | Print txt with selected color,BG,style\.\(Read Doc String\) |
| switch\(color,BG,style\) | Change Terminal Attributes Until another Call\. |
| switch\_default\(\) | Restore Terminal Attributes\. |
| reset | =switch\_default |
| log\_ \(Family\) | 5 Different Style\.print with ready color and style |
<br>
<h3> Class Record: <em>Record time of a certain actions. (Read Doc String)</em></h3>
| **Function** | **Description** |
|-------------------------------------|----------------------------------------------------------------------------|
| __init__() | Set Start Time. |
| self.stop(self) | Stops Recording (You can not lap anymore) |
| self.lap(self, save=True, round=15) | Rreturn time between start time. if save==True: add that time to self.laps |
| self.laps | A list that contains all laps you have done |
| self.reset(self, start=False) | Empty self.laps, if start is True: set start time to now |
| self.last_lap(save=True) | Return elapsed time from last lap (save it in self.laps if save is true) |
| timeit(code,setup, times,globals_) | Run the 'code' for 'times' times and return time it needs (all, not once) |
| timer(f) | Decorator for functions to print out how much each call takes |
<br>
<h3> Class Decorator: <em>Useful decorators you might want to use</em></h3>
| **Function** | **Description** |
|----------------|----------------------------------------------------------------------------------------------------|
| Check_Type | Decorator that raise TypeError if function argument type is wrong (Read Help) |
| overload | Make your function accept different kind of argument and structure with defining it more than once |
| attach_to_all | Attach Decorator.decorator_all to all functions of a class (Class decorator) |
| abstractmethod | A decorator indicating abstract methods. |
<br />
<h3> Class Terminal: <em>functions related to working with terminal</em></h3>
| **Function** | **Description** |
|:------------------- |:----------------------------------------------------------- |
| run(command) | executes `command` live in terminal |
| getoutput(commands) | runs the `command` in the background and returns the output |
| size() | Returns the size of terminal in tuple (columns,rows) |
<br>
<h3> Class IO: <em>Useful methods when working with user input</em></h3>
| **Function** | **Description** |
|------------------------------|--------------------------------------------------------------------------|
| wait_for_input(prompt) | Asks for user input, until they enter something else than whitespaces |
| selective_input() | Check repository wiki for full documentation |
| yesno_input(prompt, default) | wait for user to enter one of (`yes`,`no`, `y`, `n`). (Case insensitive) |
| Input(prompt, default_value) | (Windows only) Types default value before getting user's input |
| getpass(prompt) | Gets users input without showing their input (`getpass.getpass()`) |
<br>
<h3> Class Internet: <em>Methods for working with network and internet related stuffs</em></h3>
| **Function** | **Description** |
|-----------------------|-------------------------------------------------------------------------------------------------|
| is_connected(website) | Check for internet connection with trying to connect to `website` |
| connection_checker | Decorator to check if internet is connected before calling the function |
| ip_local() | Returns local ip |
| ip_global() | Returns global ip |
| ip_global(website) | returns ip of the given website |
| url_exists(url) | Checks if a url exists (with requests module) (needs http[s]) |
| url_links(URL) | Get all links that are used in a specific URL (All "a" tags from html source) (Needs 'http[s]') |
| find_urls(text) | returns list of all urls in a string using regex |
| is_url(URL) | checks if the string has the expression of a real url |
| open_browser(url) | opens given url in the new tab of default browser |
<br>
#### Recommendations:
- Using `import rx7 as rx`
<br />
Commands in Terminal:
--------------------------------
$ python -m rx7 --wiki (To open wiki page in browser)
$ python -m rx7 --colors (To show help for style class)
$ python -m rx7 --help (To open help menu)
<br />
Releases and Changelog:
---------
Take a look at [here](https://github.com/Ramin-RX7/RX7-Lib/blob/master/CHANGELOG.md) for the changelog
| /rx7-4.0.0.tar.gz/rx7-4.0.0/README.md | 0.728169 | 0.734304 | README.md | pypi |
RxPy back-pressure extension
============================

[](https://coveralls.io/github/MichaelSchneeberger/rxbackpressure?branch=master)

*rxbp* is an extension to the [RxPY](https://github.com/ReactiveX/RxPY) python
library, that integrates back-pressure into the *Observable* pattern
in form of *Flowables*.
The *rxbp* library is inspired by [Monix](https://github.com/monix/monix),
and **has still an experimental status**.
Installation
------------
rxbp v3.x runs on Python 3.7 or above. To install rxbp alpha version:
```
pip3 install --pre rxbp
```
Example
-------
*rxbackpressure* has a similar syntax as RxPY.
```python
# example taken from RxPY
import rxbp
source = rxbp.from_(["Alpha", "Beta", "Gamma", "Delta", "Epsilon"])
composed = source.pipe(
rxbp.op.map(lambda s: len(s)),
rxbp.op.filter(lambda i: i >= 5)
)
composed.subscribe(lambda value: print(f"Received {value}"))
```
Integrate RxPY
--------------
A RxPY Observable can be converted to a *Flowable* by using the `rxbp.from_rx` function.
Equivalently, a *Flowable* can be converted to an RxPY Observable
by using the `to_rx` function.
```python
import rx
import rxbp
rx_source = rx.of("Alpha", "Beta", "Gamma", "Delta", "Epsilon")
# convert Observable to Flowable
source = rxbp.from_rx(rx_source)
composed = source.pipe(
rxbp.op.map(lambda s: len(s)),
rxbp.op.filter(lambda i: i >= 5)
)
# convert Flowable to Observable
composed.to_rx().subscribe(lambda value: print(f"Received {value}"))
```
Differences from RxPY
---------------------
### Flowable
Similar to an RxPY Observable, a *Flowable* implements a `subscribe` method,
which is a mechanism that allows to describe a data flow from its source to
a sink. The description is done with *rxbp* operators exposed by `rxbp.op`.
Like in functional programming, using *rxbp* operators
does not create any mutable states, but rather concatenates functions
without calling them yet. We first describe what we intend to
do in form of a plan and then execute the plan. A *Flowable* is
executed by calling its `subscribe` method. This will start a chain
reaction, where each downstream *Flowable* calls the `subscribe`
method of its upstream *Flowable* until
the sources start emitting the data. Once a *Flowable* is subscribed, we
allow it to have internal mutable states.
Compared to RxPY Observables, however, a *Flowable* uses `Observers` that are
able to back-pressure on an `on_next` method call. This has the effect that
certain operators behave differently from the ones in RxPY.
### MultiCast (experimental)
A *MultiCast* is used when a *Flowable* emits elements to more than one `Observer`,
and can be thought of as a nested *Flowable* of type `Observable[T[Flowable]]`.
The syntax to *multi-cast* a Flowable is quite different from RxPY and there are good
reasons for that. In RxPY, there is an operator called `share`, that turns an *Observable*
into a so-called hot *Observable* allowing multiple downstream subscribers to receive the
same elements. The first `subscribe` call has the side-effect that subsequent `subscribe`
calls will not propagate upstream, but register themselves to the hot *Observable*.
The following example illustrates the side-effect that happens when a shared *Observable*
is subscribed for the first time.
``` python
import rx
from rx import operators as op
o = rx.range(4).pipe(
op.share(),
)
o.subscribe(print)
o.subscribe(print) # the second time no elements are sent
```
The previous code outputs:
```
0
1
2
3
```
In *rxbp*, however, the elements of a *Flowable* sequence can only be multi-casted,
if the *Flowable* is nested inside a *MultiCast*. This can be done with the
`rxbp.multicast.return_flowable` function. `return_flowable` takes a *Flowable*, a
list of *Flowables* or a dictionary of *Flowables* and creates a *MultiCast* that
emits the nested *Flowables*. Similarly to a *Flowable*, a *MultiCast* implements a `pipe`
method that takes a sequence of *MultiCast* operators, which are exposed by
`rxbp.multicast.op`.
```python
import rxbp
f = rxbp.multicast.return_flowable(rxbp.range(10)).pipe(
rxbp.multicast.op.map(lambda base: base.pipe(
rxbp.op.zip(base.pipe(
rxbp.op.map(lambda v: v + 1),
rxbp.op.filter(lambda v: v % 2 == 0)),
),
)),
).to_flowable()
f.subscribe(print)
```
The previous code outputs:
```
(0, 2)
(1, 4)
(2, 6)
(3, 8)
(4, 10)
```
### match operator (experimental)
The `match` operator tries to match two *Flowables*, and raises an exception otherwise.
Two *Flowables* match if they have the same base or if there exists a mapping that maps
one base to the base of the other *Flowable*. These mappings are propagated internally when
subscribing to a *Flowable*.
If two *Flowables* match, the elements of each *Flowable* sequence are filtered and
duplicated (if necessary) first and then zipped together. The following example creates
two *Flowables* where one is having base *10* and the other contains a mapping from
base *10* to its own base *None* (base *None* refers to an unknown *Flowable* sequence).
The `match` operator applies the mapping to the Flowable of base *10* such that every
second element is selected due to `v % 2`.
```python
import rxbp
rxbp.from_range(10).pipe(
rxbp.op.match(rxbp.from_range(10).pipe(
rxbp.op.filter(lambda v: v % 2 == 0)),
)
).subscribe(print)
```
The previous code outputs:
```
(1, 1)
(3, 3)
(5, 5)
(7, 7)
(9, 9)
```
When to use a Flowable, when RxPY Observable?
-----------------------------------------
A *Flowable* is used when some asynchronous stage cannot process the data fast enough,
or needs to synchronize the data with some other event. Let's take the `zip` operator
as an example. It receives elements from two or more sources and emits a tuple once it
received one element from each source. But what happens if one source emits the
elements before the other does? Without back-pressure, the `zip` operator has to buffer
the elements from the eager source until it receives the elements from the other source.
This might be ok depending on how many elements need to be buffered. But often it is too
risky to buffer elements somewhere in our stream as it potentially leads to an
out of memory exception. The back-pressure capability prevents buffers to grow by holding
the data back until it is actually needed.
The advantage of a RxPY Observable is that it is generally faster and more lightweight.
Flowable
--------
### Create a Flowable
- `empty` - create a *Flowable* emitting no elements
- `from_` - create a *Flowable* that emits each element of an iterable
- `from_iterable` - see `from_`
- `from_list` - create a *Flowable* that emits each element of a list
- `from_range` - create a *Flowable* that emits elements defined by the range
- `from_rx` - wrap a rx.Observable and exposes it as a *Flowable*, relaying signals in a backpressure-aware manner.
- `return_flowable` - create a *Flowable* that emits a single element
### Transforming operators
- `filter` - emit only those elements for which the given predicate holds
- `first` - emit the first element only
- `flat_map` - apply a function to each item emitted by the source and
flattens the result
- `map` - map each element emitted by the source by applying the given
function
- `map_to_iterator` - create a *Flowable* that maps each element emitted
by the source to an iterator and emits each element of these iterators.
- `pairwise` - create a *Flowable* that emits a pair for each consecutive
pairs of elements in the *Flowable* sequence
- `reduce` - Apply an accumulator function over a Flowable sequence and
emits a single element
- `repeat_first` - Return a *Flowable* that repeats the first element it
receives from the source forever (until disposed).
- `scan` - apply an accumulator function over a *Flowable* sequence and
returns each intermediate result.
- `to_list` - Create a new *Flowable* that collects the elements from
the source sequence, and emits a single element of type List.
- `zip_with_index` - zip each item emitted by the source with the
enumerated index
### Combining operators
- `concat` - Concatenates *Flowable* sequences together by back-pressuring
the tail *Flowables* until the current *Flowable* has completed
- `controlled_zip` - create a new *Flowable* from two *Flowables* by combining
their elements in pairs. Which element gets paired with an element from
the other *Flowable* is determined by two functions called `request_left` and
`request_right`
- `match` - create a new *Flowable* from two *Flowables* by first filtering and
duplicating (if necessary) the elements of each *Flowable* and zip the resulting
*Flowable* sequences together
- `merge` - merge the elements of the *Flowable* sequences into a single *Flowable*
- `zip` - Create a new *Flowable* from two *Flowables* by combining their
item in pairs in a strict sequence
### Other operators
- `buffer` - buffer the element emitted by the source without back-pressure until
the buffer is full
- `debug` - print debug messages to the console
- `execute_on` - inject new scheduler that is used to subscribe the *Flowable*
- `observe_on` - schedule elements emitted by the source on a dedicated scheduler
- `set_base` - overwrite the base of the current Flowable sequence
- `share` - multi-cast the elements of the *Flowable* to possibly
multiple subscribers
### Create a rx Observable
- `to_rx` - create a rx Observable from a *Flowable*
MultiCast (experimental)
------------------------
### Create a MultiCast
- `empty` - create a *MultiCast* emitting no elements
- `return_flowable` - turn zero or more *Flowables* into multi-cast *Flowables*
emitted as a single element inside a *MultiCast*
- `return_` - create a *MultiCast* emitting a single element
- `from_iterable` - create a *MultiCast* from an iterable
- `from_rx_observable` - create a *MultiCast* from an *rx.Observable*
- `from_flowable` - (similar to `from_rx_observable`) create a *MultiCast*
that emits each element received by the Flowable
### Transforming operators
- `default_if_empty` - either emits the elements of the source or a default element
- `filter` - emit only those *MultiCast* for which the given predicate hold
- `flat_map` - apply a function to each item emitted by the source and
flattens the result
- `lift` - lift the current `MultiCast[T1]` to a `MultiCast[T2[MultiCast[T1]]]`.
- `map` - map each element emitted by the source by applying the given
function
- `merge` - merge the elements of the *MultiCast* sequences into a single *MultiCast*
### Transforming operators (Flowables)
- `join_flowables` - zip one or more *Multicasts* (each emitting a single *Flowable*)
to a *Multicast* emitting a single element (tuple of *Flowables*)
- `loop_flowables` - create a loop inside *Flowables*
- `collect_flowables` - create a *Multicast* that emits a single element containing
the reduced *Flowables* of the first element sent by the source
### Other operators
- `debug` - print debug messages to the console
- `observe_on` - schedule elements emitted by the source on a dedicated scheduler
- `share` - multi-cast the elements of the source to possibly
multiple subscribers | /rxbp-3.0.0a9.tar.gz/rxbp-3.0.0a9/README.md | 0.843186 | 0.923351 | README.md | pypi |
from dataclasses import field
from pathlib import Path
from typing import List, Optional, Union
from pydantic.dataclasses import dataclass
from xsdata_pydantic.bindings import XmlParser
from .models.oai_dc.org.openarchives.oai.pkg_2.pkg_0.oai_dc.dc import Dc
from .models.oai_pmh.org.openarchives.oai.pkg_2.header_type import HeaderType
from .models.oai_pmh.org.openarchives.oai.pkg_2.oai_pmhtype import OaiPmhtype
from .models.oai_pmh.org.openarchives.oai.pkg_2.list_records_type import (
ListRecordsType,
)
from .models.oai_pmh.org.openarchives.oai.pkg_2.record_type import RecordType
@dataclass
class ChemRxivHeaderType(HeaderType):
    """This class only exists because chemRxiv does not follow OAI PMH schema.

    Extends the standard OAI-PMH record header with the extra elements
    that chemRxiv emits inside ``<header>``: a version label, a version
    history, and the submission date.
    """
    # Version label of the preprint as emitted by chemRxiv.
    # NOTE(review): exact format (e.g. "1" vs "v1") not visible here -- confirm against a live feed.
    version: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        },
    )
    # One entry per earlier version; XML element name is "version-history".
    version_history: List[str] = field(
        default_factory=list,
        metadata={
            "name": "version-history",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        },
    )
    # Date the preprint was submitted; XML element name is "submitted-date".
    # Kept as a plain string because chemRxiv's date format is not guaranteed.
    submitted_date: Optional[str] = field(
        default=None,
        metadata={
            "name": "submitted-date",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        },
    )
@dataclass
class ChemRxivMetadata:
    """Record metadata wrapper holding the Dublin Core (``oai_dc``) payload."""
    # The <oai_dc:dc> element carrying title, creators, identifiers, etc.
    dc: Optional[Dc] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/oai_dc/",
        },
    )
@dataclass
class ChemRxivRecord(RecordType):
    """OAI-PMH record narrowed to the chemRxiv-specific header/metadata types.

    Re-assigning the parent's ``__dataclass_fields__`` entries reuses the
    original ``field(...)`` objects (and hence their XML name/namespace
    metadata) while only the type annotations are overridden here.
    """
    header: Optional[ChemRxivHeaderType] = RecordType.__dataclass_fields__["header"]
    metadata: Optional[ChemRxivMetadata] = RecordType.__dataclass_fields__["metadata"]
@dataclass
class ChemRxivListRecords(ListRecordsType):
    """``ListRecords`` response narrowed to ``ChemRxivRecord`` elements.

    Reuses the parent's ``field(...)`` object (XML metadata included) and
    only overrides the annotation to the chemRxiv record type.
    """
    record: List[ChemRxivRecord] = ListRecordsType.__dataclass_fields__["record"]
@dataclass
class ChemRxiv(OaiPmhtype):
    """Root OAI-PMH response type for chemRxiv feeds.

    Overrides only the ``list_records`` annotation; the parent's
    ``field(...)`` object (with its XML metadata) is reused unchanged.
    """
    list_records: Optional[ChemRxivListRecords] = OaiPmhtype.__dataclass_fields__[
        "list_records"
    ]
def chemrxiv_records(xml: Union[Path, str]) -> ChemRxiv:
    """Parse a chemRxiv OAI-PMH XML document into a :class:`ChemRxiv` model.

    Parameters
    ----------
    xml : Union[Path, str]
        Path to the XML file (or source accepted by ``XmlParser.parse``).

    Returns
    -------
    ChemRxiv
        The fully bound OAI-PMH response object.
    """
    return XmlParser().parse(xml, ChemRxiv)
from dataclasses import field
from pydantic.dataclasses import dataclass
from typing import List, Optional
from xsdata.models.datatype import XmlDateTime
from .get_record_type import GetRecordType
from .identify_type import IdentifyType
from .list_identifiers_type import ListIdentifiersType
from .list_metadata_formats_type import ListMetadataFormatsType
from .list_records_type import ListRecordsType
from .list_sets_type import ListSetsType
from .oai_pmherror_type import OaiPmherrorType
from .request_type import RequestType
__NAMESPACE__ = "http://www.openarchives.org/OAI/2.0/"
@dataclass
class OaiPmhtype:
    """Root element of an OAI-PMH 2.0 response.

    Exactly one of the verb-response fields (``identify``,
    ``list_metadata_formats``, ``list_sets``, ``get_record``,
    ``list_identifiers``, ``list_records``) is populated per response,
    unless ``error`` entries are present instead.
    """
    class Meta:
        # XML type name as defined by the OAI-PMH schema.
        name = "OAI-PMHtype"
    # UTC timestamp of the response (<responseDate>); required by the schema.
    response_date: Optional[XmlDateTime] = field(
        default=None,
        metadata={
            "name": "responseDate",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # Echo of the request that produced this response.
    request: Optional[RequestType] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # Zero or more protocol errors (badVerb, noRecordsMatch, ...).
    error: List[OaiPmherrorType] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "Identify" verb.
    identify: Optional[IdentifyType] = field(
        default=None,
        metadata={
            "name": "Identify",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "ListMetadataFormats" verb.
    list_metadata_formats: Optional[ListMetadataFormatsType] = field(
        default=None,
        metadata={
            "name": "ListMetadataFormats",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "ListSets" verb.
    list_sets: Optional[ListSetsType] = field(
        default=None,
        metadata={
            "name": "ListSets",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "GetRecord" verb.
    get_record: Optional[GetRecordType] = field(
        default=None,
        metadata={
            "name": "GetRecord",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "ListIdentifiers" verb.
    list_identifiers: Optional[ListIdentifiersType] = field(
        default=None,
        metadata={
            "name": "ListIdentifiers",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Response payload for the "ListRecords" verb.
    list_records: Optional[ListRecordsType] = field(
        default=None,
        metadata={
            "name": "ListRecords",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
from dataclasses import field
from pydantic.dataclasses import dataclass
from typing import List, Optional, Union
from xsdata.models.datatype import XmlDate
from .deleted_record_type import DeletedRecordType
from .description_type import DescriptionType
from .granularity_type import GranularityType
from .protocol_version_type import ProtocolVersionType
__NAMESPACE__ = "http://www.openarchives.org/OAI/2.0/"
@dataclass
class IdentifyType:
    """Response body of the OAI-PMH "Identify" verb.

    Describes the repository itself: name, base URL, protocol version,
    admin contacts, datestamp policy and optional free-form descriptions.
    """
    # Human-readable repository name (<repositoryName>).
    repository_name: Optional[str] = field(
        default=None,
        metadata={
            "name": "repositoryName",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # Base URL of the OAI-PMH endpoint (<baseURL>).
    base_url: Optional[str] = field(
        default=None,
        metadata={
            "name": "baseURL",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # OAI-PMH protocol version supported by the repository.
    protocol_version: Optional[ProtocolVersionType] = field(
        default=None,
        metadata={
            "name": "protocolVersion",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # One or more administrator e-mail addresses; validated by the
    # regex pattern below at bind time.
    admin_email: List[str] = field(
        default_factory=list,
        metadata={
            "name": "adminEmail",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "min_occurs": 1,
            "pattern": r"\S+@(\S+\.)+\S+",
        }
    )
    # Earliest datestamp in the repository; either a plain date or a
    # UTC datetime string -- the pattern requires a trailing "Z".
    earliest_datestamp: Optional[Union[XmlDate, str]] = field(
        default=None,
        metadata={
            "name": "earliestDatestamp",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
            "pattern": r".*Z",
        }
    )
    # How the repository handles deleted records (no/persistent/transient).
    deleted_record: Optional[DeletedRecordType] = field(
        default=None,
        metadata={
            "name": "deletedRecord",
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # Datestamp granularity supported by the repository (day vs. second).
    granularity: Optional[GranularityType] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
            "required": True,
        }
    )
    # Compression encodings supported by the repository, if any.
    compression: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
    # Optional extended descriptions of the repository.
    description: List[DescriptionType] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://www.openarchives.org/OAI/2.0/",
        }
    )
from dataclasses import field
from pydantic.dataclasses import dataclass
from typing import Optional
from xsdata.models.datatype import XmlDate
__NAMESPACE__ = "https://api.bioriv.org/OAI/medRxivRaw/"
@dataclass
class MedRxivRawType:
    """Raw medRxiv preprint record as exposed by the medRxivRaw metadata format.

    NOTE(review): the namespace spells "bioriv" (not "biorxiv") and uses
    "/OAI/" where the module path suggests "/oaipmh/". This appears to come
    from the upstream schema this class was generated from -- verify against
    the live feed before changing, since the namespace must match the XML
    exactly for binding to work.
    """
    class Meta:
        # XML type name as defined by the medRxivRaw schema.
        name = "medRxivRaw_type"
    # Preprint identifier (typically a DOI-like string).
    id: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Name of the submitting author/account.
    submitter: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Nested version element (date, size, version number); see Version below.
    version: Optional["MedRxivRawType.Version"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Preprint title.
    title: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Author list as a single string (format defined by the feed).
    authors: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    corresponding_author: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    corresponding_author_institution: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Publication status / published journal reference, as a raw string.
    published: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Subject categories of the preprint.
    categories: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    comments: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # Mathematics Subject Classification; XML element name is "msc-class".
    msc_class: Optional[str] = field(
        default=None,
        metadata={
            "name": "msc-class",
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    abstract: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    # URL of the PDF for this preprint version.
    link_pdf: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
            "required": True,
        }
    )
    @dataclass
    class Version:
        """One version entry: its date, size and version-number attribute."""
        # Date this version was posted.
        date: Optional[XmlDate] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
                "required": True,
            }
        )
        # Size of the version (raw string as emitted by the feed).
        size: Optional[str] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "https://api.bioriv.org/OAI/medRxivRaw/",
                "required": True,
            }
        )
        # Version number, carried as an XML attribute (not an element).
        version: Optional[int] = field(
            default=None,
            metadata={
                "type": "Attribute",
            }
        )
from dataclasses import field
from pydantic.dataclasses import dataclass
from typing import Optional
from xsdata.models.datatype import XmlDate
__NAMESPACE__ = "https://api.biorxiv.org/oaipmh/bioRxivRaw/"
@dataclass
class BioRxivRawType:
    """Raw bioRxiv preprint record as exposed by the bioRxivRaw metadata format.

    Field layout mirrors ``MedRxivRawType`` but under the bioRxiv namespace.
    """
    class Meta:
        # XML type name as defined by the bioRxivRaw schema.
        name = "bioRxivRaw_type"
    # Preprint identifier (typically a DOI-like string).
    id: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Name of the submitting author/account.
    submitter: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Nested version element (date, size, version number); see Version below.
    version: Optional["BioRxivRawType.Version"] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Preprint title.
    title: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Author list as a single string (format defined by the feed).
    authors: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    corresponding_author: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    corresponding_author_institution: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Publication status / published journal reference, as a raw string.
    published: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Subject categories of the preprint.
    categories: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    comments: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # Mathematics Subject Classification; XML element name is "msc-class".
    msc_class: Optional[str] = field(
        default=None,
        metadata={
            "name": "msc-class",
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    abstract: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    # URL of the PDF for this preprint version.
    link_pdf: Optional[str] = field(
        default=None,
        metadata={
            "type": "Element",
            "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
            "required": True,
        }
    )
    @dataclass
    class Version:
        """One version entry: its date, size and version-number attribute."""
        # Date this version was posted.
        date: Optional[XmlDate] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
                "required": True,
            }
        )
        # Size of the version (raw string as emitted by the feed).
        size: Optional[str] = field(
            default=None,
            metadata={
                "type": "Element",
                "namespace": "https://api.biorxiv.org/oaipmh/bioRxivRaw/",
                "required": True,
            }
        )
        # Version number, carried as an XML attribute (not an element).
        version: Optional[int] = field(
            default=None,
            metadata={
                "type": "Attribute",
            }
        )
from dataclasses import field
from pydantic.dataclasses import dataclass
from typing import List
__NAMESPACE__ = "http://www.openarchives.org/OAI/2.0/oai_dc/"
@dataclass
class OaiDcType:
    """Unqualified Dublin Core metadata container (``oai_dc`` format).

    Each field maps to one of the fifteen Dublin Core elements; every
    element is repeatable, so all fields are lists. All elements live in
    the DC elements namespace (http://purl.org/dc/elements/1.1/).
    """
    class Meta:
        # XML type name as defined by the oai_dc schema.
        name = "oai_dcType"
    title: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    creator: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    subject: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    description: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    publisher: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    contributor: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    date: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    # "type" shadows the builtin, but the name is fixed by the DC element set.
    type: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    format: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    identifier: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    source: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    language: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    relation: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    coverage: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
    rights: List[str] = field(
        default_factory=list,
        metadata={
            "type": "Element",
            "namespace": "http://purl.org/dc/elements/1.1/",
        }
    )
# :leaves: Biocatalysis Model
*Biocatalysed Synthesis Planning using Data-driven Learning*
## Table of Contents
- [Abstract](#abstract)
- [Data](#data)
- [ECREACT](#ecreact)
- [Data Sources](#data-sources)
- [Using the Pre-trained Model](#using-the-pre-trained-model)
- [Training your own Model](#training-your-own-model)
- [Setup Environment](#setup-environment)
  - [Data Pre-processing](#data-pre-processing)
- [Training using OpenNMT-py](#training-using-opennmt-py)
- [Evaluation](#evaluation)
## Abstract
Enzyme catalysts are an integral part of green chemistry strategies towards a more sustainable and resource-efficient chemical synthesis. However, the use of biocatalysed reactions in retrosynthetic planning clashes with the difficulties in predicting the enzymatic activity on unreported substrates and enzyme-specific stereo- and regioselectivity. As of now, only rule-based systems support retrosynthetic planning using biocatalysis, while initial data-driven approaches are limited to forward predictions. Here, we extend the data-driven forward reaction as well as retrosynthetic pathway prediction models based on the Molecular Transformer architecture to biocatalysis. The enzymatic knowledge is learned from an extensive data set of publicly available biochemical reactions with the aid of a new class token scheme based on the enzyme commission classification number, which captures catalysis patterns among different enzymes belonging to the same hierarchy. The forward reaction prediction model (top-1 accuracy of 49.6%), the retrosynthetic pathway (top-1 single-step round-trip accuracy of 39.6%) and the curated data set are made publicly available to facilitate the adoption of enzymatic catalysis in the design of greener chemistry processes.
## Data
Enzymatic reactions and the accompanying EC numbers were extracted from four databases, namely Rhea, BRENDA, PathBank, and MetaNetX and merged into a new data set, named ECREACT, containing enzyme-catalysed reactions with the respective EC number.
### ECREACT
The ECREACT data set contains samples of all 7 EC classes (1: Oxidoreductases, 2: Transferases, 3: Hydrolases, 4: Lyases, 5: Isomerases, 6: Ligases, 7: Translocases) distributed as shown in (a). The distributions of the substrates and products are shown in the TMAPS (b) and (c) respectively.

The data set is available as a `.csv` file in the following format:
```csv
rxn_smiles,ec,source
CC=O.O.O=O|1.2.3.1>>CC(=O)[O-],1.2.3.1,brenda_reaction_smiles
CC(N)=O.O|3.5.1.4>>CC(=O)O,3.5.1.4,brenda_reaction_smiles
...
```
The field `rxn_smiles` is a reaction SMILES extended with the EC number on the reactant side. The reactants and the EC number are separated by a pipe `|`. The field `ec` is the EC number. Be aware that they can also contain entries such as `1.20.1.M1` or `1.14.-.-`. The field `source` describes the source database of the reaction information.
**[Download ECREACT 1.0](data/ecreact-1.0.csv)**
### Data Sources
ECREACT is composed of data from four publicly accessible databases:
- [Rhea](https://www.rhea-db.org/), an expert-curated knowledgebase of chemical and transport reactions of biological interest - and the standard for enzyme and transporter annotation in UniProtKB. [:file_cabinet:](ftp://ftp.expasy.org/databases/rhea/txt/rhea-reactions.txt.gz)
- [BRENDA](https://www.brenda-enzymes.org/), an information system representing one of the most comprehensive enzyme repositories. [:file_cabinet:](https://www.brenda-enzymes.org/download_brenda_without_registration.php)
- [PathBank](https://pathbank.org/), an interactive, visual database containing more than 100 000 machine-readable pathways found in model organisms such as humans, mice, E. coli, yeast, and Arabidopsis thaliana. [:file_cabinet:](https://pathbank.org/downloads/pathbank_all_biopax.zip)
- [MetaNetX](https://www.metanetx.org/), an online platform for accessing, analyzing and manipulating genome-scale metabolic networks (GSM) as well as biochemical pathways. [:file_cabinet:](https://www.metanetx.org/mnxdoc/mnxref.html)
The contributions by EC class from each of the data sources is shown in the plot below.

## Using the Pre-trained Model
We provide a model for retrosynthetic pathway prediction pre-trained on ECREACT as part of the [IBM RXN for Chemistry](https://rxn.res.ibm.com/) platform. This model can also be used through the [Python wrapper](https://github.com/rxn4chemistry/rxn4chemistry) for the IBM RXN for Chemistry API. You can get a free API key [here](https://rxn.res.ibm.com/rxn/user/profile).
```python
api_key = 'API_KEY'
from rxn4chemistry import RXN4ChemistryWrapper
rxn4chemistry_wrapper = RXN4ChemistryWrapper(api_key=api_key)
# NOTE: you can create a project or set an existing one using:
# rxn4chemistry_wrapper.set_project('PROJECT_ID')
rxn4chemistry_wrapper.create_project('test_wrapper')
print(rxn4chemistry_wrapper.project_id)
response = rxn4chemistry_wrapper.predict_automatic_retrosynthesis(
'OC1C(O)C=C(Br)C=C1', ai_model='enzymatic-2021-04-16'
)
results = rxn4chemistry_wrapper.get_predict_automatic_retrosynthesis_results(
response['prediction_id']
)
print(results['status'])
# NOTE: upon 'SUCCESS' you can inspect the predicted retrosynthetic paths.
print(results['retrosynthetic_paths'][0])
```
## Training your own Model
### Setup Environment
As not all dependencies are available from PyPI for all platforms, we suggest you create a conda environment from the supplied [conda.yml](conda.yml):
```bash
conda env create -f conda.yml
conda activate rxn-biocatalysis-tools
```
Alternatively, :leaves: RXN Biocatalysis Tools are available as a PyPI package and can be installed using pip; however, not all dependencies will be installed depending on your platform.
```bash
pip install rxn_biocatalysis_tools
```
### Data Pre-processing
The :leaves: RXN Biocatalysis Tools Python package installs a script that can be used to preprocess reaction data. Reaction data can be combined, filtered, or augmented as explained in the usage documentation below. After these initial steps, the data is tokenized and split into training, validation, and testing `src` (reactants + EC) and `tgt` (product/s) files. The output data structure generated by the script is the following, depending on the options set.
```bash
.
└── experiments
├── 1
│ ├── combined.txt
│ ├── src-test.txt
│ ├── src-train.txt
│ ├── src-valid.txt
│ ├── tgt-test.txt
│ ├── tgt-train.txt
│ └── tgt-valid.txt
├── 2
│ ├── combined.txt
│ ├── src-test.txt
│ ├── src-train.txt
│ ├── src-valid.txt
│ ├── tgt-test.txt
│ ├── tgt-train.txt
│ └── tgt-valid.txt
└── 3
├── combined.txt
├── src-test.txt
├── src-train.txt
├── src-valid.txt
├── tgt-test.txt
├── tgt-train.txt
└── tgt-valid.txt
```
Usage of the script:
```bash
rbt-preprocess INPUT_FILE ... OUTPUT_DIRECTORY [--remove-patterns=FILE_PATH] [--remove-molecules=FILE_PATH] [--ec-level=LEVEL; default=3] [--max-products=MAX_PRODUCTS; default=1] [--min-atom-count=MIN_ATOM_COUNT; default=4] [--bi-directional] [--split-products]
```
| Argument / Option | Example | Description | Default |
|---------------------|---------------------------|--------------------------------------------------------------|------------|
| INPUT_FILE ... | file1.csv file2.csv | File(s) containing enzymatic reaction SMILES<sup>1</sup> | |
| OUTPUT_DIRECTORY | /output/directory/ | The directory to which output files will be written | |
| --remove-patterns | patterns.txt | SMARTS patterns for molecules to be removed<sup>2</sup> | |
| --remove-molecules | molecules.txt | Molecule SMILES to be removed<sup>3</sup> | |
| --ec-level          | --ec-level 1 --ec-level 2 | The number of EC levels to be exported, can be repeated      | 3          |
| --max-products | --max-products 1 | The max number of products (rxns with more are dropped) | 1 |
| --min-atom-count | --min-atom-count 4 | The min atom count (smaller molecules are removed) | 4 |
| --bi-directional | --bi-directional | Whether to create the inverse of every reaction<sup>4</sup> | |
| --split-products    | --split-products          | Whether to split reactions with multiple products<sup>5</sup>| |
<sup>1</sup>Example of an enzymatic reaction SMILES: `CC(N)=O.O|3.5.1.4>>CC(=O)O`<br />
<sup>2</sup>See [patterns.txt](data/patterns.txt) for an example<br />
<sup>3</sup>See [molecules.txt](data/molecules.txt) for an example<br />
<sup>4</sup>Example: For the reaction `CC(N)=O.O|3.5.1.4>>CC(=O)O`, the reaction `CC(=O)O|3.5.1.4>>CC(N)=O.O` will be added<br />
<sup>5</sup>Example: The reaction `A|3.5.1.4>>B.C` is split into reactions `A|3.5.1.4>>B` and `A|3.5.1.4>>C`<br />
### Training using OpenNMT-py
The first step in the OpenNMT is to run `onmt_preprocess` for both the forward and backward models. In the examples below, the data with 3 EC-levels is used. You will probably have to adapt the paths, depending on your directory structure and platform.
The pre-processed USPTO files can be found [here](https://github.com/rxn4chemistry/OpenNMT-py/tree/carbohydrate_transformer/data/uspto_dataset).
``` bash
DATASET=data/uspto_dataset
DATASET_TRANSFER=experiments/3
# forward
onmt_preprocess -train_src "${DATASET}/src-train.txt" "${DATASET_TRANSFER}/src-train.txt" \
-train_tgt "${DATASET}/tgt-train.txt" "${DATASET_TRANSFER}/tgt-train.txt" -train_ids uspto transfer \
-valid_src "${DATASET}/src-valid.txt" -valid_tgt "${DATASET_TRANSFER}/tgt-valid.txt" \
-save_data "preprocessing/multitask_forward" \
-src_seq_length 3000 -tgt_seq_length 3000 \
-src_vocab_size 3000 -tgt_vocab_size 3000 \
-share_vocab
# backward
onmt_preprocess -train_src "${DATASET}/tgt-train.txt" "${DATASET_TRANSFER}/tgt-train.txt" \
-train_tgt "${DATASET}/src-train.txt" "${DATASET_TRANSFER}/src-train.txt" -train_ids uspto transfer \
-valid_src "${DATASET}/tgt-valid.txt" -valid_tgt "${DATASET_TRANSFER}/src-valid.txt" \
-save_data "preprocessing/multitask_backward" \
-src_seq_length 3000 -tgt_seq_length 3000 \
-src_vocab_size 3000 -tgt_vocab_size 3000 \
-share_vocab
```
Once the OpenNMT pre-preprocessing has finished, the actual training can be started:
```bash
# if forward
DATASET="preprocessing/multitask_forward"
OUTDIR="/model/multitask_forward"
LOGDIR="/logs/forward"
# end if
# if backward
DATASET="preprocessing/multitask_backward"
OUTDIR="model/multitask_backward"
LOGDIR="logs/backward"
# end if
W1=9
W2=1
onmt_train -data ${DATASET} \
-save_model ${OUTDIR} \
-data_ids uspto transfer --data_weights ${W1} ${W2} \
-seed 42 -gpu_ranks 0 \
-train_steps 250000 -param_init 0 \
-param_init_glorot -max_generator_batches 32 \
-batch_size 6144 -batch_type tokens \
-normalization tokens -max_grad_norm 0 -accum_count 4 \
-optim adam -adam_beta1 0.9 -adam_beta2 0.998 -decay_method noam \
-warmup_steps 8000 -learning_rate 2 -label_smoothing 0.0 \
-layers 4 -rnn_size 384 -word_vec_size 384 \
-encoder_type transformer -decoder_type transformer \
-dropout 0.1 -position_encoding -share_embeddings \
-global_attention general -global_attention_function softmax \
-self_attn_type scaled-dot -heads 8 -transformer_ff 2048 \
--tensorboard --tensorboard_log_dir ${LOGDIR}
```
### Evaluation
The test set is evaluated using `onmt_translate`. Three new files are generated:
- `tgt-pred.txt` (forward prediction)
- `src-pred.txt` (backward prediction)
- `tgt-pred-rtrp.txt` (roundtrip prediction, a backward prediction followed by a forward prediction)
Before the roundtrip prediction, the SMILES in `src-pred.txt` should be standardized using the script `rbt-preprocess`. The script does not edit the file in place, so good practice is to rename `src-pred.txt` to `src-pred-noncanon.txt` and then run:
```bash
rbt-canonicalize src-pred-noncanon.txt src-pred.txt
```
Example evaluation scripts:
```bash
# forward prediction
DATASET_TRANSFER="experiments/3"
# Get the newest file from the model directory
MODEL=$(ls model/multitask_forward*.pt -t | head -1)
onmt_translate -model "${MODEL}" \
-src "${DATASET_TRANSFER}/src-test.txt" \
-output "${DATASET_TRANSFER}/tgt-pred.txt" \
-n_best 10 -beam_size 10 -max_length 300 -batch_size 64 \
-gpu 0
```
```bash
# backward prediction
DATASET_TRANSFER="experiments/3"
# Get the newest file from the model directory
MODEL=$(ls model/multitask_backward*.pt -t | head -1)
onmt_translate -model "${MODEL}" \
-src "${DATASET_TRANSFER}/tgt-test.txt" \
-output "${DATASET_TRANSFER}/src-pred.txt" \
-n_best 10 -beam_size 10 -max_length 300 -batch_size 64 \
-gpu 0
```
```bash
# roundtrip prediction
DATASET_TRANSFER="experiments/3"
# Get the newest file from the model directory
MODEL=$(ls model/multitask_forward*.pt -t | head -1)
onmt_translate -model "${MODEL}" \
-src "${DATASET_TRANSFER}/src-pred.txt" \
-output "${DATASET_TRANSFER}/tgt-pred-rtrp.txt" \
-n_best 1 -beam_size 5 -max_length 300 -batch_size 64 \
-gpu 0
```
The :leaves: RXN Biocatalysis Tools PyPI package contains an evaluation script that calculates accuracies from the files produced above. For these examples, the `INPUT_FOLDER` is the same as `DATASET_TRANSFER`, `--n-best-fw`, `--top-n-fw`, `--top-n-bw`, `--top-n-rtr` are all set to `10`.
```bash
rbt-evaluate INPUT_FOLDER --name=NAME [--n-best-fw=N_BEST_FW; default=5] [--n-best-bw=N_BEST_BW; default=10] [--n-best-rtr=N_BEST_RTR; default=1] [--top-n-fw=TOP_N_FW; default=1] [--top-n-bw=TOP_N_BW; default=1] [--top-n-rtr=TOP_N_RTR; default=1] [--top-n-range] [--isomeric-smiles | --no-isomeric-smiles; default:--isomeric-smiles]
```
| Argument / Option | Example | Description | Default |
|---------------------|---------------------------|--------------------------------------------------------------|------------|
| INPUT_FOLDER | /input/directory/ | The folder containing the src and tgt files | |
| --name | experiment-3 | The name of the output evaluation csv file | |
| --n-best-fw | --n-best-fw 10 | The number of calculated tgt predictions per src | 5 |
| --n-best-bw | --n-best-bw 10 | The number of calculated src predictions per tgt | 10 |
| --n-best-rtr | --n-best-rtr 1 | The number of calculated (roundtrip) tgt predictions per predicted src| 1 |
| --top-n-fw | --top-n-fw 10 | The number of forward predictions to consider in the evaluation | 1 |
| --top-n-bw | --top-n-bw 10 | The number of backward predictions to consider in the evaluation | 1 |
| --top-n-rtr | --top-n-rtr 10 | The number of roundtrip predictions to consider in the evaluation | 1 |
| --top-n-range | --top-n-range | Whether to consider the forward, backward, and roundtrip predictions *up to* the respective top-n numbers in the evaluation | |
| --isomeric-smiles | --isomeric-smiles | Do **not** ignore stereochemistry during the evaluation | |
| --no-isomeric-smiles| --no-isomeric-smiles | Ignore stereochemistry during the evaluation | |
The evaluation will produce multiple new files in `INPUT_FOLDER`: A `.csv` file with the fields `metric`, `type`, `top`, `ec`, and `value`; and multiple files containing correct and incorrect forward, backward, and roundtrip predictions for each top-n. In addition, the accuracy of *only* predicting the EC number is calculated and the respective files written out as well.
| /rxn-biocatalysis-tools-1.0.1.tar.gz/rxn-biocatalysis-tools-1.0.1/README.md | 0.632843 | 0.982774 | README.md | pypi |
import re
from typing import List, Any
from .chemical_reaction import ChemicalReaction
from rdkit.Chem import AllChem as rdk
from rdkit.Chem.rdchem import Mol
# Matches placeholder entries wrapped in angle brackets (full "<...>" strings,
# or strings starting with "<" / ending with ">") — presumably used to detect
# unknown chemicals in the source data; not referenced in this module's visible code.
UNKNOWN_CHEMICAL_REGEX = re.compile(r"^(<.*>)$|^(<)|(>)$")
class EnzymaticReaction(ChemicalReaction):
    """Representation of an enzymatic reaction.

    Reactions containing enzymes are represented as reaction SMILES using a '|'
    to separate the precursors from the EC number, e.g.
    "CC(N)=O.O|3.5.1.4>>CC(=O)O".
    """

    def __init__(
        self,
        enzymatic_reaction_smiles: str,
        remove_duplicates: bool = True,
        sanitize: bool = True,
        source: str = "unknown",
        **kwargs: Any,
    ):
        """Constructor for EnzymaticReaction.

        Args:
            enzymatic_reaction_smiles: an enzymatic reaction SMILES.
            remove_duplicates: duplicate removal. Defaults to True.
            sanitize: whether sanitization is enabled. Defaults to True.
            source: source for the enzymatic reaction. Defaults to "unknown".
            **kwargs: extra options forwarded to ChemicalReaction and re-applied
                to rdkit's MolToSmiles when this reaction is written out.
        """
        # Splitting on both '>' and '|' puts the EC segment (if any) in vals[1],
        # e.g. "A|1.2.3.4>>B" -> ["A", "1.2.3.4", "", "B"].
        vals = re.split(r">|\|", enzymatic_reaction_smiles)
        if len(vals) < 2:
            vals.append("")
        self.ec: List[str] = [level.strip() for level in vals[1].split(".")]
        self.source = source
        # Stored so that the same MolToSmiles options can be applied later
        # (see __str__ and mol_to_smiles).
        self.kwargs = kwargs
        super().__init__(
            enzymatic_reaction_smiles.replace(f"|{vals[1]}", ""),
            remove_duplicates,
            sanitize,
            **kwargs,
        )

    def __str__(self) -> str:
        """Returns the extended reaction SMILES of this instance (reactants|ec>agents>products).

        Returns:
            the extended reaction SMILES representing this instance.
        """
        s = (
            ".".join(
                sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.reactants if m])
            )
            + ">"
            + ".".join(
                sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.agents if m])
            )
            + ">"
            + ".".join(
                sorted([rdk.MolToSmiles(m, **self.kwargs) for m in self.products if m])
            )
        )
        # Append the EC number to the reactants side, if one is set.
        s_parts = s.split(">")
        if len(self.ec) > 0 and self.ec[0] != "":
            s_parts[0] += f'|{".".join(self.ec)}'
        return ">".join(s_parts).replace(" ", "")

    def __eq__(self, other: object) -> bool:
        """Compares the count, order, and SMILES string of each molecule in this reaction as well as the EC.

        Args:
            other: another object to be compared with this instance.

        Returns:
            whether this instance is equal to another; ``NotImplemented`` for
            non-EnzymaticReaction operands (the previous implementation raised
            NotImplementedError, which breaks the Python comparison protocol,
            e.g. comparisons against None or use in mixed-type containers).
        """
        if not isinstance(other, EnzymaticReaction):
            return NotImplemented
        return super().__eq__(other) and self.ec == other.ec

    def __hash__(self) -> int:
        """Get hash for the enzymatic reaction.

        Returns:
            enzymatic reaction hash (based on the canonical string form, so it
            is consistent with __eq__).
        """
        return hash(str(self))

    def mol_to_smiles(self, mol: Mol) -> str:
        """Applies the kwargs supplied to the reaction to MolToSmiles for a given molecule.

        Args:
            mol: an RDKit molecule instance.

        Returns:
            the string representing the molecule.
        """
        return rdk.MolToSmiles(mol, **self.kwargs)

    def to_string(self, ec_depth: int = 4) -> str:
        """Get the string representing this reaction with a certain number of EC levels.

        Args:
            ec_depth: the number of EC classes to include (top-down). Defaults to 4.

        Returns:
            the string representing this reaction with the chosen levels of EC.
        """
        # Work on a copy so that truncating the EC does not mutate this instance.
        cpy = EnzymaticReaction(str(self))
        cpy.ec = cpy.ec[:ec_depth]
        return str(cpy).strip()

    def get_ec(self, ec_depth: int = 4) -> str:
        """Get the string representing the EC of this reaction.

        Args:
            ec_depth: the number of EC classes to include (top-down). Defaults to 4.

        Returns:
            the EC of the reaction as a string.
        """
        return ".".join(self.ec[:ec_depth]).strip()

    def reverse(self) -> "EnzymaticReaction":
        """Reverses the reaction (switching reactants and products).

        Returns:
            the reversed enzymatic reaction.
        """
        return EnzymaticReaction.from_smarts_and_ec(
            f"{'.'.join(self.get_products_as_smiles())}>>{'.'.join(self.get_reactants_as_smiles())}",
            self.get_ec(),
            self.source,
        )

    @staticmethod
    def from_smarts_and_ec(
        reaction_smiles: str, ec: str, source: str = "unknown"
    ) -> "EnzymaticReaction":
        """Creates an EnzymaticReaction instance from a reaction SMILES and an EC number.

        Args:
            reaction_smiles: a reaction SMILES.
            ec: EC number string representation.
            source: source for the enzymatic reaction. Defaults to "unknown".

        Returns:
            an EnzymaticReaction instance.
        """
        split = reaction_smiles.split(">>")
        return EnzymaticReaction(split[0] + "|" + ec + ">>" + split[1], source=source)

    @staticmethod
    def is_valid(enzymatic_reaction_smiles: str) -> bool:
        """Checks whether an enzymatic reaction SMILES (e.g. O.CO|1.2.3.4>>C(=O)O) is valid.

        Args:
            enzymatic_reaction_smiles: an enzymatic reaction SMILES.

        Returns:
            a bool indicating whether the supplied enzymatic reaction SMILES is valid.
        """
        # Valid input contains a pipe, exactly one '>>' arrow (two '>' chars),
        # and a non-empty EC segment between the pipe and the arrow.
        if (
            "|" not in enzymatic_reaction_smiles
            or enzymatic_reaction_smiles.count(">") != 2
            or "|>>" in enzymatic_reaction_smiles
        ):
            return False
        return True
import re
# Regex splitting a (reaction) SMILES into chemically meaningful tokens:
# bracket atoms, two-character elements (Br, Cl), ring closures (%12, %(123)),
# bonds, the reaction arrow '>>' and the EC separator '|'.
SMILES_TOKENIZER_PATTERN = r"(\%\([0-9]{3}\)|\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\||\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
SMILES_REGEX = re.compile(SMILES_TOKENIZER_PATTERN)
def tokenize_enzymatic_reaction_smiles(rxn: str, keep_pipe=False) -> str:
    """Tokenize an enzymatic reaction SMILES in the form precursors|EC>>products.

    Args:
        rxn: an enzymatic reaction SMILES.
        keep_pipe: whether or not to keep the pipe separating the precursors
            from the EC as a token. Defaults to False.

    Returns:
        the tokenized enzymatic reaction SMILES.
    """
    # Splitting on '>' and '|' puts the EC segment (possibly empty) second.
    segments = re.split(r">|\|", rxn)
    ec_levels = segments[1].split(".")

    # Tokenize the reaction with the EC part stripped out.
    plain_rxn = rxn.replace(f"|{segments[1]}", "")
    tokens = SMILES_REGEX.findall(plain_rxn)

    if ec_levels[0] != "":
        # Encode each EC level as a bracketed token, e.g. 3.5.1.4 -> [v3][u5][t1][q4].
        prefixes = ["v", "u", "t", "q"]
        ec_tokens = [
            f"[{prefixes[i]}{level}]" for i, level in enumerate(ec_levels)
        ]
        if keep_pipe:
            ec_tokens.insert(0, "|")
        # Splice the EC tokens in right before the reaction arrow.
        arrow = tokens.index(">>")
        tokens[arrow:arrow] = ec_tokens

    return " ".join(tokens)
def detokenize_enzymatic_reaction_smiles(rxn: str) -> str:
    """Detokenize an enzymatic reaction SMILES in the form precursors|EC>>products.

    Args:
        rxn: a tokenized enzymatic reaction SMILES.

    Returns:
        the detokenized enzymatic reaction SMILES.
    """
    rxn = rxn.replace(" ", "")

    has_ec_tokens = "[v" in rxn
    has_pipe = "|" in rxn

    # Re-insert the missing pipe right before the first EC token.
    if has_ec_tokens and not has_pipe:
        first_ec = rxn.index("[v")
        rxn = f"{rxn[:first_ec]}|{rxn[first_ec:]}"

    # A pipe without EC tokens is spurious; drop it.
    if not has_ec_tokens and has_pipe:
        rxn = rxn.replace("|", "")

    # Nothing enzymatic left to decode.
    if "|" not in rxn:
        return rxn

    precursors_and_rest = rxn.split("|")
    if len(precursors_and_rest) < 2:
        return ""
    ec_and_products = precursors_and_rest[1].split(">>")
    if len(ec_and_products) < 2:
        return ""

    # Decode "[v1][u2][t3][q4]" back into "1.2.3.4" (same replacement order
    # as the tokenizer's inverse).
    ec = ec_and_products[0].replace("][", ".")
    for marker in ("[v", "u", "t", "q", "]"):
        ec = ec.replace(marker, "")

    return f"{precursors_and_rest[0]}|{ec}>>{ec_and_products[1]}"
def tokenize_smiles(smiles: str) -> str:
    """
    Tokenize a SMILES molecule or reaction, and join the tokens with spaces.

    Args:
        smiles: SMILES string to tokenize, for instance 'CC(CO)=N>>CC(C=O)N'.

    Returns:
        SMILES string after tokenization, for instance 'C C ( C O ) = N >> C C ( C = O ) N'.
    """
    # findall already returns the list of tokens in input order.
    return " ".join(SMILES_REGEX.findall(smiles))
import logging
import random
from typing import Callable, List
from .miscellaneous import apply_to_any_smiles, apply_to_smiles_groups
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class SmilesAugmenter:
    """
    Class to augment any kind of SMILES string with the help of randomization
    and shuffling.
    """

    def __init__(
        self,
        augmentation_fn: Callable[[str], str],
        augmentation_probability: float = 1.0,
        shuffle: bool = True,
        ignore_exceptions: bool = True,
    ):
        """
        Args:
            augmentation_fn: Function for augmenting the individual SMILES strings,
                such as the functions provided in smiles_randomization.py.
            augmentation_probability: Probability with which to augment individual
                SMILES strings.
            shuffle: Whether to shuffle the order of the compounds.
            ignore_exceptions: Whether to ignore the error (and return the
                original string) when an augmentation fails. If False, exceptions
                will be propagated.
        """
        self.augmentation_fn = augmentation_fn
        self.augmentation_probability = augmentation_probability
        self.shuffle = shuffle
        self.ignore_exceptions = ignore_exceptions

    def augment(self, smiles: str, number_augmentations: int) -> List[str]:
        """
        Augment one SMILES string (of any kind).

        Args:
            smiles: SMILES string to augment.
            number_augmentations: how many times to do the augmentation.
        """
        # First augment the individual compound SMILES ...
        candidates = []
        for _ in range(number_augmentations):
            candidates.append(
                apply_to_any_smiles(
                    smiles, self._augment_with_probability, force_multicomponent=True
                )
            )

        # ... then, if requested, shuffle the compound order of each candidate.
        if not self.shuffle:
            return candidates
        return [
            apply_to_smiles_groups(candidate, SmilesAugmenter._shuffle)
            for candidate in candidates
        ]

    def _augment_with_probability(self, smiles: str) -> str:
        """Augment a SMILES, with the probability given by the member variable."""
        probability = self.augmentation_probability
        # Short-circuit: skip the random draw when augmentation always happens.
        if probability == 1.0 or random.uniform(0, 1) <= probability:
            try:
                return self.augmentation_fn(smiles)
            except Exception as e:
                if not self.ignore_exceptions:
                    raise
                logger.warning(f"Augmentation failed for {smiles}: {e}")
                return smiles
        # No augmentation for this draw.
        return smiles

    @staticmethod
    def _shuffle(smiles_list: List[str]) -> List[str]:
        # Shuffle a copy so the caller's list is left untouched.
        shuffled = list(smiles_list)
        random.shuffle(shuffled)
        return shuffled
from enum import auto
from rxn.utilities.types import RxnEnum
from .extended_reaction_smiles import (
parse_extended_reaction_smiles,
to_extended_reaction_smiles,
)
from .reaction_equation import ReactionEquation
class ReactionFormat(RxnEnum):
    """
    Existing reaction SMILES formats.

    Attributes:
        EXTENDED: extended reaction SMILES with fragment info,
            f.i. ``|f:0.2,3.4.5|``.
        STANDARD: standard reaction SMILES.
        STANDARD_WITH_TILDE: standard reaction SMILES, where fragments are
            indicated with tilde symbols, "~".
    """

    # Values come from auto(); only the member names are meaningful.
    EXTENDED = auto()
    STANDARD = auto()
    STANDARD_WITH_TILDE = auto()
def determine_format(reaction_smiles: str) -> ReactionFormat:
    """
    Determine the format of a reaction SMILES.
    """
    # The extended format appends fragment info after a space, f.i. ' |f:0.2|'.
    if " |" in reaction_smiles:
        return ReactionFormat.EXTENDED
    # A tilde fragment bond identifies the tilde variant of the standard format.
    return (
        ReactionFormat.STANDARD_WITH_TILDE
        if "~" in reaction_smiles
        else ReactionFormat.STANDARD
    )
def parse_any_reaction_smiles(smiles: str) -> ReactionEquation:
    """
    Parse a reaction SMILES in any format (will be determined automatically).
    """
    detected_format = determine_format(smiles)
    return parse_reaction_smiles(smiles, reaction_format=detected_format)
def parse_reaction_smiles(
    smiles: str, reaction_format: ReactionFormat
) -> ReactionEquation:
    """
    Parse the reaction SMILES in a given format.

    Raises:
        ValueError: for unsupported reaction formats.
    """
    if reaction_format is ReactionFormat.EXTENDED:
        # Atom maps are kept; removing them is left to the caller.
        return parse_extended_reaction_smiles(smiles, remove_atom_maps=False)
    elif reaction_format is ReactionFormat.STANDARD:
        return ReactionEquation.from_string(smiles)
    elif reaction_format is ReactionFormat.STANDARD_WITH_TILDE:
        return ReactionEquation.from_string(smiles, fragment_bond="~")
    else:
        raise ValueError(f"Unsupported reaction format: {reaction_format}")
def to_reaction_smiles(
    reaction_equation: ReactionEquation, reaction_format: ReactionFormat
) -> str:
    """
    Convert a reaction equation into a reaction SMILES of the specified format.

    Raises:
        ValueError: for unsupported reaction formats.
    """
    if reaction_format is ReactionFormat.EXTENDED:
        return to_extended_reaction_smiles(reaction_equation)
    elif reaction_format is ReactionFormat.STANDARD:
        return reaction_equation.to_string()
    elif reaction_format is ReactionFormat.STANDARD_WITH_TILDE:
        # Fragments are encoded with "~" as the fragment bond.
        return reaction_equation.to_string(fragment_bond="~")
    else:
        raise ValueError(f"Unsupported reaction format: {reaction_format}")
from functools import partial
from typing import Callable, Iterable, List, Optional
from rxn.utilities.containers import remove_duplicates
from .conversion import canonicalize_smiles
def multicomponent_smiles_to_list(
    multicomponent_smiles: str, fragment_bond: Optional[str] = None
) -> List[str]:
    """
    Convert a string of molecules into a list of molecules (taking fragment bonds into account).

    Args:
        multicomponent_smiles: multicomponent SMILES string to convert to a list.
        fragment_bond: fragment bond.

    Returns:
        The list of molecule SMILES comprised in the multi-component SMILES string.
    """
    components = [
        component for component in multicomponent_smiles.split(".") if component != ""
    ]
    if fragment_bond is None:
        return components
    # Restore the fragment bonds as dots inside the individual molecules.
    return [component.replace(fragment_bond, ".") for component in components]
def list_to_multicomponent_smiles(
    molecules: Iterable[str], fragment_bond: Optional[str] = None
) -> str:
    """
    Convert a list of molecules into a string representation (taking fragment
    bonds into account).

    Args:
        molecules: molecule SMILES strings to merge into a multi-component SMILES string.
        fragment_bond: fragment bond.

    Returns:
        A multi-component SMILES string.
    """
    if fragment_bond is None:
        return ".".join(molecules)
    # Encode intra-molecule dots (fragments) with the fragment bond symbol.
    return ".".join(molecule.replace(".", fragment_bond) for molecule in molecules)
def apply_to_multicomponent_smiles(
    multicomponent_smiles: str,
    fn: Callable[[str], str],
    fragment_bond: Optional[str] = None,
) -> str:
    """
    Apply a function to the individual compounds in a multi-component SMILES string.

    Args:
        multicomponent_smiles: multi-component SMILES string to apply the function to.
        fn: function to apply on the distinct molecule SMILES.
        fragment_bond: fragment bond to use when parsing.

    Returns:
        New multi-component SMILES string after application of the function to the molecules.
    """
    transformed = [
        fn(molecule)
        for molecule in multicomponent_smiles_to_list(
            multicomponent_smiles, fragment_bond=fragment_bond
        )
    ]
    return list_to_multicomponent_smiles(transformed, fragment_bond=fragment_bond)
def canonicalize_multicomponent_smiles(
    multicomponent_smiles: str,
    fragment_bond: Optional[str] = None,
    check_valence: bool = True,
) -> str:
    """
    Canonicalize the molecules of a multi-component SMILES string.
    """
    # Bind check_valence into the per-molecule canonicalization callback.
    return apply_to_multicomponent_smiles(
        multicomponent_smiles,
        lambda molecule: canonicalize_smiles(molecule, check_valence=check_valence),
        fragment_bond=fragment_bond,
    )
def sort_multicomponent_smiles(multicomponent_smiles: str) -> str:
    """
    Sort the molecule SMILES in a multi-component SMILES string alphabetically.

    Note: no fragment bond is needed here, as it would not have any effect at all.
    """
    molecules = multicomponent_smiles_to_list(multicomponent_smiles)
    return list_to_multicomponent_smiles(sorted(molecules))
def remove_duplicates_in_multicomponent_smiles(multicomponent_smiles: str) -> str:
    """
    Remove duplicate molecule SMILES strings in a multi-component SMILES string.

    Note: no fragment bond is needed here, as it would not have any effect at all.
    """
    molecules = multicomponent_smiles_to_list(multicomponent_smiles)
    return list_to_multicomponent_smiles(remove_duplicates(molecules))
import logging
import re
import shutil
from typing import List, Optional
from rxn.utilities.files import (
PathLike,
dump_list_to_file,
iterate_lines_from_file,
raise_if_paths_are_identical,
)
from .exceptions import UnclearWhetherTokenized
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Regex splitting a (reaction) SMILES into chemically meaningful tokens:
# bracket atoms, two-character elements (Br, Cl), ring closures (%12, %(123)),
# bonds, the reaction arrow '>>' and '|'.
SMILES_TOKENIZER_PATTERN = r"(\%\([0-9]{3}\)|\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\||\(|\)|\.|=|#|-|\+|\\|\/|:|~|@|\?|>>?|\*|\$|\%[0-9]{2}|[0-9])"
SMILES_REGEX = re.compile(SMILES_TOKENIZER_PATTERN)
class TokenizationError(ValueError):
    """Exception raised when SMILES tokenization fails."""

    def __init__(self, title: str, detail: str):
        """
        Initialize TokenizationError.

        Args:
            title: title of the error.
            detail: description of the error.
        """
        # Forward a readable message to ValueError, so that str(exception) is
        # informative (previously it was empty because super().__init__ was
        # never called).
        super().__init__(f"{title}: {detail}")
        self.type = "TokenizationError"
        self.title = title
        self.detail = detail
def to_tokens(smiles: str) -> List[str]:
    """
    Tokenize a SMILES molecule or reaction into a list of tokens.

    Args:
        smiles: SMILES string to tokenize.

    Raises:
        TokenizationError: in case of mismatch between the SMILES and the joined tokens.

    Returns:
        List of tokens (joining them reconstitutes the original SMILES string).
    """
    tokens = SMILES_REGEX.findall(smiles)
    # If joining the tokens does not reproduce the input, some characters
    # were not matched by the tokenizer regex.
    if smiles != "".join(tokens):
        raise TokenizationError(
            "SmilesJoinedTokensMismatch",
            f'SMILES="{smiles}" != joined_tokens="{"".join(tokens)}"',
        )
    return tokens
def tokenize_smiles(smiles: str, fallback_value: Optional[str] = None) -> str:
    """
    Tokenize a SMILES molecule or reaction, and join the tokens with spaces.

    Args:
        smiles: SMILES string to tokenize, for instance 'CC(CO)=N>>CC(C=O)N'.
        fallback_value: what value to return when the tokenization is unsuccessful.
            Default: no fallback, will propagate the TokenizationError exception.

    Returns:
        SMILES string after tokenization, for instance 'C C ( C O ) = N >> C C ( C = O ) N'.
    """
    try:
        return " ".join(to_tokens(smiles))
    except TokenizationError:
        if fallback_value is None:
            raise
        logger.debug(f'Error when tokenizing "{smiles}"')
        return fallback_value
def detokenize_smiles(tokenized_smiles: str) -> str:
    """
    Detokenize a tokenized SMILES string (that contains spaces between the characters).

    Args:
        tokenized_smiles: tokenized SMILES, for instance 'C C ( C O ) = N >> C C ( C = O ) N'

    Returns:
        SMILES after detokenization, for instance 'CC(CO)=N>>CC(C=O)N'
    """
    # Removing every space is enough: valid SMILES never contain spaces.
    return "".join(tokenized_smiles.split(" "))
def string_is_tokenized(smiles_line: str) -> bool:
    """
    Whether a string is a tokenized SMILES or not.

    Args:
        smiles_line: string to inspect

    Raises:
        UnclearWhetherTokenized: if not possible to determine whether tokenized or not
        TokenizationError: propagated directly from to_tokens()
    """
    tokens = to_tokens(detokenize_smiles(smiles_line))
    # Zero or one token looks identical tokenized and untokenized.
    if len(tokens) < 2:
        raise UnclearWhetherTokenized(smiles_line)
    return smiles_line == " ".join(tokens)
def tokenize_file(
    input_file: PathLike, output_file: PathLike, fallback_value: str = ""
) -> None:
    """
    Tokenize a file containing SMILES strings.

    Args:
        input_file: file to tokenize.
        output_file: where to save the tokenized file.
        fallback_value: placeholder for strings that cannot be tokenized.
    """
    raise_if_paths_are_identical(input_file, output_file)
    logger.info(f'Tokenizing "{input_file}" -> "{output_file}".')
    # Generator: lines are tokenized lazily while being written out.
    tokenized_lines = (
        tokenize_smiles(line, fallback_value)
        for line in iterate_lines_from_file(input_file)
    )
    dump_list_to_file(tokenized_lines, output_file)
def detokenize_file(
    input_file: PathLike,
    output_file: PathLike,
) -> None:
    """
    Detokenize a file of tokenized SMILES strings (one per line).

    Args:
        input_file: file to detokenize.
        output_file: where to save the detokenized file.
    """
    raise_if_paths_are_identical(input_file, output_file)
    logger.info(f'Detokenizing "{input_file}" -> "{output_file}".')
    # Generator: lines are detokenized lazily while being written out.
    detokenized = (
        detokenize_smiles(line) for line in iterate_lines_from_file(input_file)
    )
    dump_list_to_file(detokenized, output_file)
def ensure_tokenized_file(
    file: PathLike, postfix: str = ".tokenized", fallback_value: str = ""
) -> str:
    """
    Ensure that a file is tokenized: do nothing if the file is already tokenized, create
    a tokenized copy otherwise.

    Args:
        file: path to the file that we want to ensure is tokenized.
        postfix: postfix to add to the tokenized copy (if applicable).
        fallback_value: placeholder for strings that cannot be tokenized (if applicable).

    Returns:
        The path to the tokenized file (original path, or path to new file).
    """
    file_as_str = str(file)
    if file_is_tokenized(file):
        return file_as_str
    # Not tokenized yet: write a tokenized copy next to the original file.
    tokenized_path = file_as_str + postfix
    tokenize_file(file, tokenized_path, fallback_value=fallback_value)
    return tokenized_path
def file_is_tokenized(filepath: PathLike) -> bool:
    """
    Whether a file contains tokenized SMILES or not.

    By default, this looks at the first non-empty line of the file only!

    Args:
        filepath: path to the file.

    Raises:
        TokenizationError: propagated from tokenize_smiles()
        RuntimeError: for empty files or files with empty lines only.
    """
    # Iterative formulation in case the first line(s) of the file don't make it
    # clear whether tokenized or not (e.g. single-token lines).
    for line in iterate_lines_from_file(filepath):
        try:
            return string_is_tokenized(line)
        except UnclearWhetherTokenized:
            # This line is ambiguous; try the next one.
            continue
    raise RuntimeError(
        f'Could not determine whether "{filepath}" is tokenized: empty lines only.'
    )
def copy_as_detokenized(src: PathLike, dest: PathLike) -> None:
    """
    Copy a source file to a destination, while making sure that it is not tokenized.
    """
    if not file_is_tokenized(src):
        # Already in detokenized form: a plain copy is enough.
        logger.info(f'Copying "{src}" -> "{dest}".')
        shutil.copy(src, dest)
    else:
        logger.info(f'Copying and detokenizing "{src}" -> "{dest}".')
        detokenize_file(src, dest)
from itertools import chain, repeat, zip_longest
from typing import Iterable, Iterator, Sequence, Tuple
from rxn.utilities.misc import get_multipliers
from .miscellaneous import merge_reactions
from .reaction_equation import ReactionEquation, canonicalize_compounds, sort_compounds
from .reaction_smiles import (
ReactionFormat,
parse_any_reaction_smiles,
to_reaction_smiles,
)
from .tokenization import detokenize_smiles
class ReactionCombiner:
    """
    Class to combine sets of precursors with sets of products, or sets of partial
    reactions with other sets of partial reactions.

    This class is typically useful when one needs to produce the full reaction
    SMILES starting from multiple files, such as A) one file for the precursors
    and one for the products, or B) two files containing each one part of a
    chemical equation.

    This class is particularly useful when the said files have different sizes,
    which can be the case when multiple predictions are made for each line of one
    of these files.
    """

    def __init__(
        self,
        standardize: bool = False,
        reaction_format: ReactionFormat = ReactionFormat.STANDARD_WITH_TILDE,
        fallback_reaction: str = ">>",
    ):
        """
        Args:
            standardize: whether to standardize (i.e. canonicalize and reorder) the reaction SMILES.
            reaction_format: which format should be used for the reaction SMILES.
            fallback_reaction: text / reaction to produce when a reaction is invalid.
        """
        self.standardize = standardize
        self.reaction_format = reaction_format
        self.fallback_reaction = fallback_reaction

    def combine(
        self, fragments_1: Sequence[str], fragments_2: Sequence[str]
    ) -> Iterator[str]:
        """
        See docstring of function ``combine_sequences``.
        """
        yield from self.combine_sequences(fragments_1, fragments_2)

    def combine_sequences(
        self, fragments_1: Sequence[str], fragments_2: Sequence[str]
    ) -> Iterator[str]:
        """
        Combine the two sequences of fragments into an iterator of reactions.

        Args:
            fragments_1: Sequence of sets of precursors strings (such as "CC.O.[Na+]~[Cl-]"),
                or list of partial reactions.
            fragments_2: Sequence of sets of product(s) strings, or list of partial
                reactions.

        Returns:
            Iterator over the resulting reaction SMILES.
        """
        # Determine how many times each fragment must be duplicated so that
        # both sequences have the same effective length.
        fragments_1_multiplier, fragments_2_multiplier = self._get_multipliers(
            fragments_1, fragments_2
        )
        yield from self.combine_iterators(
            fragments_1=fragments_1,
            fragments_2=fragments_2,
            fragments_1_multiplier=fragments_1_multiplier,
            fragments_2_multiplier=fragments_2_multiplier,
        )

    def combine_iterators(
        self,
        fragments_1: Iterable[str],
        fragments_2: Iterable[str],
        fragments_1_multiplier: int = 1,
        fragments_2_multiplier: int = 1,
    ) -> Iterator[str]:
        """
        Combine the two iterators of fragments into an iterator of reactions.

        Args:
            fragments_1: Sequence of sets of precursors strings (such as "CC.O.[Na+]~[Cl-]"),
                or list of partial reactions.
            fragments_2: Sequence of sets of product(s) strings, or list of partial
                reactions.
            fragments_1_multiplier: how many times to duplicate the fragments_1.
            fragments_2_multiplier: how many times to duplicate the fragments_2.

        Raises:
            RuntimeError: if one of the iterators isn't fully consumed.
            ValueError: when one is not exactly a multiple of the other.

        Returns:
            Iterator over the resulting reaction SMILES.
        """
        self._validate_multipliers(fragments_1_multiplier, fragments_2_multiplier)

        # repeat itemwise the elements: https://stackoverflow.com/a/45799320
        fragment_1_iterator = chain.from_iterable(
            (repeat(e, fragments_1_multiplier) for e in fragments_1)
        )
        fragment_2_iterator = chain.from_iterable(
            (repeat(e, fragments_2_multiplier) for e in fragments_2)
        )

        # zip_longest pads the shorter iterator with None, which lets us
        # detect length mismatches between the two sides.
        for fragment_1, fragment_2 in zip_longest(
            fragment_1_iterator, fragment_2_iterator
        ):
            if fragment_1 is None or fragment_2 is None:
                raise RuntimeError("Mismatch in expected iterator length")
            yield self._to_reaction_smiles(fragment_1, fragment_2)

    def _to_reaction_smiles(self, fragment_1: str, fragment_2: str) -> str:
        """Combine two fragments, returning self.fallback_reaction on any error."""
        try:
            return self._try_to_reaction_smiles(fragment_1, fragment_2)
        except Exception:
            return self.fallback_reaction

    def _try_to_reaction_smiles(self, fragment_1: str, fragment_2: str) -> str:
        """Combine two fragments into a reaction SMILES; may raise on invalid input."""
        # 1) get the initial reaction SMILES
        reaction_equation = self._to_raw_reaction(fragment_1, fragment_2)

        # 2) standardize if necessary
        if self.standardize:
            reaction_equation = sort_compounds(
                canonicalize_compounds(reaction_equation)
            )

        return to_reaction_smiles(
            reaction_equation, reaction_format=self.reaction_format
        )

    def _to_raw_reaction(self, fragment_1: str, fragment_2: str) -> ReactionEquation:
        """Get a ReactionEquation from the two strings."""
        fragment_1 = detokenize_smiles(fragment_1)
        fragment_2 = detokenize_smiles(fragment_2)

        fragment_1_is_reaction = ">" in fragment_1
        fragment_2_is_reaction = ">" in fragment_2

        # Case A: both are given in the reaction format
        if fragment_1_is_reaction and fragment_2_is_reaction:
            reaction_1 = parse_any_reaction_smiles(fragment_1)
            reaction_2 = parse_any_reaction_smiles(fragment_2)
            return merge_reactions(reaction_1, reaction_2)

        # Case B: fragment_1 represents the precursor(s), fragment_2 the product(s)
        # (fix: this comment was mislabeled "Case A" in the original)
        if not fragment_1_is_reaction and not fragment_2_is_reaction:
            reaction_smiles = fragment_1 + ">>" + fragment_2
            return parse_any_reaction_smiles(reaction_smiles)

        # Mixed case (one reaction SMILES, one plain compound list): unsupported.
        raise ValueError(
            f'Cannot determine how to combine "{fragment_1}" and "{fragment_2}"'
        )

    def _get_multipliers(
        self, fragments_1: Sequence[str], fragments_2: Sequence[str]
    ) -> Tuple[int, int]:
        """Get the multipliers to use when iterating through the respective fragments.

        Raises:
            ValueError: when one is not exactly a multiple of the other.

        Returns:
            Tuple: fragments_1 multiplier, fragments_2 multiplier
        """
        return get_multipliers(len(fragments_1), len(fragments_2))

    def _validate_multipliers(self, multiplier_1: int, multiplier_2: int) -> None:
        """
        Make sure that the given multipliers can be used with the reaction combiner.

        Raises:
            ValueError: when one is not exactly a multiple of the other.
        """
        # Fail if one is not exactly a multiple of the other
        if 1 not in {multiplier_1, multiplier_2}:
            raise ValueError(
                "The number of fragments of reactions are not an exact multiple of "
                f"each other: the multipliers are {multiplier_1} and {multiplier_2}."
            )
import logging
import re
import typing
from collections import Counter
from functools import partial
from typing import Callable, List
from rdkit.Chem import AddHs, Atom, Mol
from rxn.utilities.files import (
PathLike,
dump_list_to_file,
iterate_lines_from_file,
raise_if_paths_are_identical,
)
from .conversion import canonicalize_smiles, smiles_to_mol
from .exceptions import InvalidSmiles
from .multicomponent_smiles import (
apply_to_multicomponent_smiles,
list_to_multicomponent_smiles,
multicomponent_smiles_to_list,
sort_multicomponent_smiles,
)
from .reaction_equation import (
ReactionEquation,
apply_to_compound_groups,
apply_to_compounds,
sort_compounds,
)
from .reaction_smiles import (
determine_format,
parse_any_reaction_smiles,
parse_reaction_smiles,
to_reaction_smiles,
)
# Module-level logger; the NullHandler prevents "no handler could be found"
# warnings when the application using this library configures no logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())

# Matches stereo centres (bracket atoms containing "@" or "@@"), and groups
# what comes before and after the "@" sign, e.g. "[C@@H]" -> ("C", "H").
CHIRAL_CENTER_PATTERN = re.compile(
    r"\[([^],@]+)@+([^]]*)]"
)
def is_valid_smiles(smiles: str, check_valence: bool = True) -> bool:
    """
    Whether a given string corresponds to a valid SMILES string.

    Args:
        smiles: string to check.
        check_valence: whether to check the valence.

    Returns:
        True if the given SMILES is valid, else False.
    """
    # A SMILES is considered valid exactly when its canonicalization succeeds.
    try:
        canonicalize_smiles(smiles, check_valence=check_valence)
    except InvalidSmiles:
        return False
    return True
def equivalent_smiles(*smiles: str, check_valence: bool = False) -> bool:
    """
    Returns true if all the given SMILES strings are equivalent.

    Will catch the exceptions for invalid SMILES and return false in that case.

    Args:
        smiles: multiple SMILES to check for equivalence.
        check_valence: if True, molecules with invalid valence will be invalidated.
    """
    # Equivalence holds when all canonical forms collapse to a single value.
    try:
        canonical_forms = {
            canonicalize_smiles(s, check_valence=check_valence) for s in smiles
        }
    except InvalidSmiles:
        return False
    return len(canonical_forms) == 1
def atom_type_counter(smiles: str) -> typing.Counter[str]:
    """
    Return a counter of atom types (as symbols).
    """
    # AddHs makes the hydrogens explicit so that they are counted as well;
    # sanitize=False disables RDKit sanitization during parsing.
    molecule: Mol = AddHs(smiles_to_mol(smiles, sanitize=False))
    symbols = (atom.GetSymbol() for atom in molecule.GetAtoms())
    return Counter(symbols)
def remove_chiral_centers(smiles: str) -> str:
    """
    Return SMILES where all the chiral centres are removed.

    Args:
        smiles: non-atom-mapped SMILES string.

    Returns:
        SMILES with no chiral information. It is not canonical.
    """
    # Drop the "@"/"@@" markers while keeping the rest of the bracket atom.
    return CHIRAL_CENTER_PATTERN.sub(r"[\g<1>\g<2>]", smiles)
def remove_double_bond_stereochemistry(smiles: str) -> str:
    """
    Return SMILES where all the E/Z information on double bonds is removed.

    Args:
        smiles: SMILES string.

    Returns:
        SMILES with no stereochemical information for double bonds. The SMILES
        is not guaranteed to be canonical.
    """
    # "/" and "\" encode the cis/trans configuration; deleting both characters
    # leaves an otherwise unchanged SMILES string.
    return smiles.translate(str.maketrans("", "", "/\\"))
def apply_to_any_smiles(
    any_smiles: str, fn: Callable[[str], str], force_multicomponent: bool = False
) -> str:
    """
    Apply a given function to individual compound SMILES strings given in any kind
    of SMILES string (molecule SMILES, multicomponent SMILES, reaction SMILES).

    In the case of reaction SMILES, the format is kept.

    Args:
        any_smiles: any kind of SMILES string.
        fn: callback to apply to every compound SMILES.
        force_multicomponent: by default, when a SMILES string contains no ">" or "~",
            it is assumed to just be a normal single-component SMILES string. Providing
            force_multicomponent=True leads to an interpretation as a multismiles string,
            i.e. splitting at all the dots.

    Raises:
        Exception: different kinds of exception may be raised during parsing,
            or during execution of the callback.

    Returns:
        the new (molecule, multicomponent, or reaction) SMILES string after
        application of the callback to all the component SMILES.
    """
    # Reaction SMILES: parse, transform the compounds, write back in the
    # original reaction format.
    if ">" in any_smiles:
        fmt = determine_format(any_smiles)
        transformed = apply_to_compounds(parse_reaction_smiles(any_smiles, fmt), fn)
        return to_reaction_smiles(transformed, fmt)

    # Multicomponent SMILES (explicit fragment bond, or forced interpretation).
    if "~" in any_smiles or force_multicomponent:
        return apply_to_multicomponent_smiles(any_smiles, fn=fn, fragment_bond="~")

    # Plain single-compound SMILES: apply the callback directly.
    return fn(any_smiles)
def apply_to_smiles_groups(
    any_smiles: str, fn: Callable[[List[str]], List[str]]
) -> str:
    """
    Apply a given function to groups of SMILES strings given in any
    multicomponent SMILES or reaction SMILES.

    This function can typically be used for sorting or shuffling precursors,
    products, etc.

    In the case of reaction SMILES, the format is kept.

    Args:
        any_smiles: any kind of SMILES string.
        fn: callback to apply to every group of compound SMILES.

    Raises:
        Exception: different kinds of exception may be raised during parsing,
            or during execution of the callback.

    Returns:
        the new (multicomponent, or reaction) SMILES string after
        application of the callback to all the groups of SMILES.
    """
    if ">" not in any_smiles:
        # Multicomponent SMILES: split into compounds, transform, join again.
        compound_list = multicomponent_smiles_to_list(any_smiles, fragment_bond="~")
        return list_to_multicomponent_smiles(fn(compound_list), fragment_bond="~")

    # Reaction SMILES: keep the original format when writing back.
    fmt = determine_format(any_smiles)
    updated_reaction = apply_to_compound_groups(
        parse_reaction_smiles(any_smiles, fmt), fn
    )
    return to_reaction_smiles(updated_reaction, fmt)
def canonicalize_any(
    any_smiles: str,
    check_valence: bool = True,
    sort_molecules: bool = False,
    fallback_value: typing.Optional[str] = None,
) -> str:
    """
    Canonicalize any SMILES string (molecule SMILES, multicomponent SMILES, reaction SMILES).

    In the case of reaction SMILES, the format is kept.

    Args:
        any_smiles: any kind of SMILES string.
        check_valence: if False, will not do any valence check.
        sort_molecules: whether to sort the compounds alphabetically at the same time.
        fallback_value: what value to returns when the canonicalization is unsuccessful.
            Default: no fallback, will propagate the exception.

    Raises:
        Exception: different kinds of exception may be raised during parsing.
        InvalidSmiles: for canonicalization errors.

    Returns:
        the canonical (molecule, multicomponent, or reaction) SMILES string.
    """
    try:
        result = apply_to_any_smiles(
            any_smiles, partial(canonicalize_smiles, check_valence=check_valence)
        )
        return sort_any(result) if sort_molecules else result
    except Exception as e:
        # Without a fallback value, the exception propagates unchanged.
        if fallback_value is None:
            raise
        logger.debug(f'Error when canonicalizing "{any_smiles}": {e}')
        return fallback_value
def canonicalize_file(
    input_file: PathLike,
    output_file: PathLike,
    check_valence: bool = True,
    fallback_value: str = "",
    sort_molecules: bool = False,
) -> None:
    """
    Canonicalize a file of SMILES strings, line by line.

    Args:
        input_file: file with one SMILES string per line.
        output_file: where to write the canonicalized lines.
        check_valence: if False, will not do any valence check.
        fallback_value: what to write for lines that cannot be canonicalized.
        sort_molecules: whether to sort the compounds alphabetically at the same time.
    """
    raise_if_paths_are_identical(input_file, output_file)
    logger.info(f'Canonicalizing file "{input_file}" -> "{output_file}".')

    # Generator expression: lines are processed lazily while the output file
    # is being written, so the whole file is never held in memory.
    canonicalized_lines = (
        canonicalize_any(
            smiles_line,
            check_valence=check_valence,
            fallback_value=fallback_value,
            sort_molecules=sort_molecules,
        )
        for smiles_line in iterate_lines_from_file(input_file)
    )
    dump_list_to_file(canonicalized_lines, output_file)
def sort_any(any_smiles: str) -> str:
    """
    Sort any SMILES string (molecule SMILES, multicomponent SMILES, reaction SMILES).

    For single-component SMILES, the fragments will be reordered.

    In the case of reaction SMILES, the format is kept.

    Args:
        any_smiles: any kind of SMILES string.

    Raises:
        Exception: different kinds of exception may be raised during parsing.

    Returns:
        the sorted SMILES string.
    """
    if ">" not in any_smiles:
        # Single- and multi-component SMILES are handled by the same helper.
        return sort_multicomponent_smiles(any_smiles)

    # Reaction SMILES: keep the original format when writing back.
    reaction_format = determine_format(any_smiles)
    sorted_reaction = sort_compounds(parse_reaction_smiles(any_smiles, reaction_format))
    return to_reaction_smiles(sorted_reaction, reaction_format)
def get_individual_compounds(any_smiles: str) -> List[str]:
    """
    Get the individual compound SMILES strings starting from any SMILES string
    (multicomponent SMILES, reaction SMILES).

    Single-component SMILES with dots are interpreted as multicomponent SMILES strings.

    Args:
        any_smiles: any kind of SMILES string.

    Raises:
        Exception: different kinds of exception may be raised during parsing.

    Returns:
        List of individual compound SMILES.
    """
    if ">" in any_smiles:
        # Reaction SMILES: collect the compounds from all groups.
        return list(parse_any_reaction_smiles(any_smiles).iter_all_smiles())

    # Interpreted as a multicomponent SMILES; "~" is accepted as a fragment
    # bond even when it is not actually present.
    return multicomponent_smiles_to_list(any_smiles, fragment_bond="~")
def merge_reactions(*reactions: ReactionEquation) -> ReactionEquation:
    """Merge several reactions into one.

    Useful when ReactionEquation is used to store partial equations."""
    all_reactants: List[str] = []
    all_agents: List[str] = []
    all_products: List[str] = []
    # Concatenate the groups of all partial reactions, preserving their order.
    for partial_reaction in reactions:
        all_reactants.extend(partial_reaction.reactants)
        all_agents.extend(partial_reaction.agents)
        all_products.extend(partial_reaction.products)
    return ReactionEquation(
        reactants=all_reactants, agents=all_agents, products=all_products
    )
def mol_has_atom_mapping(mol: Mol) -> bool:
    """
    Whether at least one atom of an RDKit Mol contains an atom map number.

    Args:
        mol: RDKit Mol.
    """
    # An atom map number of 0 means "not mapped".
    return any(atom.GetAtomMapNum() != 0 for atom in mol.GetAtoms())
def smiles_has_atom_mapping(smiles: str) -> bool:
    """
    Whether at least one atom of a compound SMILES contains an atom map number.

    Args:
        smiles: compound SMILES.
    """
    # Fix: the original return line carried fused dataset-separator garbage
    # ("| /path | ... | pypi |") which broke the syntax; removed here.
    # sanitize=False disables RDKit sanitization during parsing.
    mol = smiles_to_mol(smiles, sanitize=False)
    return mol_has_atom_mapping(mol)
from functools import partial
from typing import (
Callable,
Generator,
Iterable,
Iterator,
List,
Optional,
Type,
TypeVar,
)
import attr
from rxn.utilities.containers import remove_duplicates
from .conversion import canonicalize_smiles, cleanup_smiles
from .exceptions import InvalidReactionSmiles
from .multicomponent_smiles import (
list_to_multicomponent_smiles,
multicomponent_smiles_to_list,
)
T = TypeVar("T", bound="ReactionEquation")
@attr.s(auto_attribs=True, init=False)
class ReactionEquation:
    """
    Defines a reaction equation, as given by the molecules involved in a reaction.

    Attributes:
        reactants: SMILES strings for compounds on the left of the reaction arrow.
        agents: SMILES strings for compounds above the reaction arrow. Are
            sometimes merged with the reactants.
        products: SMILES strings for compounds on the right of the reaction arrow.
    """

    # NOTE: with auto_attribs=True, these annotated class-level names define
    # the attrs fields; do not remove or reorder them.
    reactants: List[str]
    agents: List[str]
    products: List[str]

    def __init__(
        self, reactants: Iterable[str], agents: Iterable[str], products: Iterable[str]
    ):
        """
        Overwrite init function in order to enable instantiation from any iterator and
        to force copying the lists.
        """
        # __attrs_init__ is the attrs-generated initializer (available because
        # of init=False in the decorator).
        self.__attrs_init__(list(reactants), list(agents), list(products))

    def __iter__(self) -> Iterator[List[str]]:
        """Helper function to simplify functionality acting on all three
        compound groups"""
        # Yields the groups in the fixed order: reactants, agents, products.
        return (i for i in (self.reactants, self.agents, self.products))

    def iter_all_smiles(self) -> Generator[str, None, None]:
        """Helper function to iterate over all the SMILES in the reaction equation"""
        return (molecule for group in self for molecule in group)

    def to_string(self, fragment_bond: Optional[str] = None) -> str:
        """
        Convert a ReactionEquation to an "rxn" reaction SMILES.
        """
        # One multicomponent SMILES per group, joined with ">" in the order
        # reactants > agents > products.
        smiles_groups = (
            list_to_multicomponent_smiles(group, fragment_bond) for group in self
        )
        return ">".join(smiles_groups)

    @classmethod
    def from_string(
        cls: Type[T], reaction_string: str, fragment_bond: Optional[str] = None
    ) -> T:
        """
        Convert a ReactionEquation from an "rxn" reaction SMILES.
        """
        groups = [
            multicomponent_smiles_to_list(smiles_group, fragment_bond=fragment_bond)
            for smiles_group in reaction_string.split(">")
        ]
        try:
            return cls(*groups)
        except TypeError as e:
            # A number of ">"-separated groups other than three makes the
            # constructor call fail with a TypeError.
            raise InvalidReactionSmiles(reaction_string) from e
def merge_reactants_and_agents(reaction: ReactionEquation) -> ReactionEquation:
    """Return an equivalent reaction in which the agents are merged into the reactants."""
    merged_precursors = reaction.reactants + reaction.agents
    return ReactionEquation(
        reactants=merged_precursors,
        agents=[],
        products=reaction.products,
    )
def sort_compounds(reaction: ReactionEquation) -> ReactionEquation:
    """
    Reorder the compounds of each group in alphabetic order.
    """
    # Sorting is applied independently to reactants, agents, and products.
    return apply_to_compound_groups(reaction, sorted)
def apply_to_compounds(
    reaction: ReactionEquation, fn: Callable[[str], str]
) -> ReactionEquation:
    """
    Apply a function to the individual compounds in a ReactionEquation.

    Args:
        reaction: reaction equation to apply the function to.
        fn: function to apply.

    Returns:
        New ReactionEquation instance after application of the function to the compounds.
    """
    new_groups = []
    # Iterating over a ReactionEquation yields reactants, agents, products.
    for compound_group in reaction:
        new_groups.append([fn(compound) for compound in compound_group])
    return ReactionEquation(*new_groups)
def apply_to_compound_groups(
    reaction: ReactionEquation, fn: Callable[[List[str]], List[str]]
) -> ReactionEquation:
    """
    Apply a function to the groups of compounds in a ReactionEquation.

    Args:
        reaction: reaction equation to apply the function to.
        fn: function to apply.

    Returns:
        New ReactionEquation instance after application of the function to the
        compound groups.
    """
    # Iterating over a ReactionEquation yields reactants, agents, products
    # (in that order), which matches the positional constructor arguments.
    transformed_groups = [fn(compound_group) for compound_group in reaction]
    return ReactionEquation(*transformed_groups)
def canonicalize_compounds(
    reaction: ReactionEquation, check_valence: bool = True
) -> ReactionEquation:
    """
    Canonicalize the molecules of a ReactionEquation.

    Args:
        reaction: reaction equation to canonicalize.
        check_valence: whether to check the valence during canonicalization.
    """
    return apply_to_compounds(
        reaction, partial(canonicalize_smiles, check_valence=check_valence)
    )
def remove_duplicate_compounds(reaction: ReactionEquation) -> ReactionEquation:
    """
    Remove compounds that are duplicated in the same category
    """
    # NOTE(review): presumably remove_duplicates keeps the first occurrence of
    # each compound and preserves order - confirm in rxn.utilities.containers.
    return apply_to_compound_groups(reaction, remove_duplicates)
def cleanup_compounds(reaction: ReactionEquation) -> ReactionEquation:
    """
    Basic cleanup of the compounds.
    """
    # Applies cleanup_smiles to every individual compound of the equation.
    return apply_to_compounds(reaction, cleanup_smiles)
def rxn_standardization(reaction: ReactionEquation) -> ReactionEquation:
    """
    Apply the standard RXN postprocessing of reaction equations.

    Consists in the following
    1. merge reactants and agents
    2. canonicalize all the SMILES
    3. sort the compounds in each group
    4. remove the duplicates
    """
    # The steps are applied sequentially, in the order listed above.
    standardized = merge_reactants_and_agents(reaction)
    standardized = canonicalize_compounds(standardized)
    standardized = sort_compounds(standardized)
    return remove_duplicate_compounds(standardized)
def remove_precursors_from_products(reaction: ReactionEquation) -> ReactionEquation:
    """
    Remove compounds in products that are also present in the reactants or reagents.
    """
    # Set membership is sufficient here: compounds are plain SMILES strings.
    precursor_set = set(reaction.reactants) | set(reaction.agents)
    filtered_products = [
        product for product in reaction.products if product not in precursor_set
    ]
    return ReactionEquation(
        reactants=reaction.reactants,
        agents=reaction.agents,
        products=filtered_products,
    )
def has_repeated_molecules(reaction_equation: ReactionEquation) -> bool:
    """Whether any molecule SMILES occurs more than once in the reaction equation."""
    # Fix: the original return line carried fused dataset-separator garbage
    # ("| /path | ... | pypi |") which broke the syntax; removed here.
    all_molecules = list(reaction_equation.iter_all_smiles())
    return len(set(all_molecules)) < len(all_molecules)
from typing import Any, Dict, Iterable, List, Sequence
import numpy as np
from rxn.chemutils.reaction_smiles import parse_any_reaction_smiles
from rxn.utilities.containers import chunker
from rxn.utilities.files import PathLike, iterate_lines_from_file
from .metrics import top_n_accuracy
from .metrics_calculator import MetricsCalculator
from .metrics_files import ContextFiles, MetricsFiles
from .utils import get_sequence_multiplier
class ContextMetrics(MetricsCalculator):
    """
    Class to compute common metrics for context prediction models, starting from
    files containing the ground truth and predictions.

    Note: all files are expected to be standardized (canonicalized, sorted, etc.).
    """

    def __init__(self, gt_tgt: Iterable[str], predicted_context: Iterable[str]):
        # Materialize the iterables: the values are iterated several times.
        self.gt_tgt = list(gt_tgt)
        self.predicted_context = list(predicted_context)

    def get_metrics(self) -> Dict[str, Any]:
        """Compute the top-n accuracy and partial-match metrics as a dict."""
        metrics: Dict[str, Any] = {}
        metrics["accuracy"] = top_n_accuracy(
            ground_truth=self.gt_tgt, predictions=self.predicted_context
        )
        metrics["partial_match"] = fraction_of_identical_compounds(
            ground_truth=self.gt_tgt, predictions=self.predicted_context
        )
        return metrics

    @classmethod
    def from_metrics_files(cls, metrics_files: MetricsFiles) -> "ContextMetrics":
        """Instantiate from a ContextFiles object."""
        if not isinstance(metrics_files, ContextFiles):
            raise ValueError("Invalid type provided")
        return cls.from_raw_files(
            gt_tgt_file=metrics_files.gt_tgt,
            predicted_context_file=metrics_files.predicted_canonical,
        )

    @classmethod
    def from_raw_files(
        cls,
        gt_tgt_file: PathLike,
        predicted_context_file: PathLike,
    ) -> "ContextMetrics":
        """Instantiate directly from the ground-truth and prediction files."""
        return cls(
            gt_tgt=iterate_lines_from_file(gt_tgt_file),
            predicted_context=iterate_lines_from_file(predicted_context_file),
        )
def identical_fraction(ground_truth: str, prediction: str) -> float:
    """For context prediction models, fraction of compounds that are identical to
    the ground truth.

    The concept of overlap is hard to define uniquely; this is a tentative
    implementation for getting an idea of how the models behave.

    As denominator, takes the size of whichever list is larger."""
    try:
        gt_reaction = parse_any_reaction_smiles(ground_truth)
        pred_reaction = parse_any_reaction_smiles(prediction)

        total_compounds = 0
        matching_compounds = 0
        # Compare group-wise: reactants vs reactants, agents vs agents, etc.
        for gt_group, pred_group in zip(gt_reaction, pred_reaction):
            gt_set = set(gt_group)
            pred_set = set(pred_group)
            matching_compounds += len(gt_set & pred_set)
            total_compounds += max(len(gt_set), len(pred_set))

        # Two empty reactions count as a full match.
        if total_compounds == 0:
            return 1.0
        return matching_compounds / total_compounds
    except Exception:
        # Any parsing or processing failure counts as "no match".
        return 0.0
def fraction_of_identical_compounds(
    ground_truth: Sequence[str], predictions: Sequence[str]
) -> Dict[int, float]:
    """
    Compute the fraction of identical compounds, split by n-th predictions.

    Args:
        ground_truth: ground-truth values.
        predictions: predictions; there are "multiplier" predictions per
            ground-truth sample.

    Raises:
        ValueError: if the list sizes are incompatible, forwarded from get_sequence_multiplier().

    Returns:
        Dictionary for the fraction of identical compounds, by top-n.
    """
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )

    # we will get, for each prediction of each "n", the portion that is matching
    overlap_for_n: List[List[float]] = [[] for _ in range(multiplier)]

    # We will process sample by sample - for that, we need to chunk the predictions
    prediction_chunks = chunker(predictions, chunk_size=multiplier)

    # Fix: the loop variable used to shadow the "predictions" parameter;
    # renamed to "sample_predictions". Also removed the fused dataset-separator
    # garbage that corrupted the original return line.
    for gt, sample_predictions in zip(ground_truth, prediction_chunks):
        for i, prediction in enumerate(sample_predictions):
            overlap_for_n[i].append(identical_fraction(gt, prediction))

    return {i + 1: float(np.mean(overlap_for_n[i])) for i in range(multiplier)}
from typing import Dict, List, Sequence, Tuple, TypeVar
import numpy as np
from rxn.utilities.containers import chunker
from .utils import get_sequence_multiplier
T = TypeVar("T")
def top_n_accuracy(
    ground_truth: Sequence[T], predictions: Sequence[T]
) -> Dict[int, float]:
    """
    Compute the top-n accuracy values.

    A sample counts as correct for "n" when the ground truth is among the
    first n predictions for that sample.

    Raises:
        ValueError: if the list sizes are incompatible, forwarded from get_sequence_multiplier().

    Returns:
        Dictionary of top-n accuracy values.
    """
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )

    # we will count, for each "n", how many predictions are correct
    correct_for_topn: List[int] = [0 for _ in range(multiplier)]

    # We will process sample by sample - for that, we need to chunk the predictions
    prediction_chunks = chunker(predictions, chunk_size=multiplier)

    # Fix: the loop variable used to shadow the "predictions" parameter;
    # renamed to "sample_predictions".
    for gt, sample_predictions in zip(ground_truth, prediction_chunks):
        for i in range(multiplier):
            correct = gt in sample_predictions[: i + 1]
            correct_for_topn[i] += int(correct)

    return {i + 1: correct_for_topn[i] / len(ground_truth) for i in range(multiplier)}
def round_trip_accuracy(
    ground_truth: Sequence[T], predictions: Sequence[T]
) -> Tuple[Dict[int, float], Dict[int, float]]:
    """
    Compute the round-trip accuracy values, split by n-th predictions.

    Raises:
        ValueError: if the list sizes are incompatible, forwarded from get_sequence_multiplier().

    Returns:
        Tuple of Dictionaries of round-trip accuracy "n" values and standard deviation (std_dev) "n" values.
        Here the standard deviation is the measure of how much the average round-trip accuracy can change from
        one sample to the other.
    """
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )

    # we will get, for each prediction of each "n", how many predictions among the "n" are correct
    correct_for_n: List[List[int]] = [[] for _ in range(multiplier)]

    # We will process sample by sample - for that, we need to chunk the predictions
    prediction_chunks = chunker(predictions, chunk_size=multiplier)

    # Fix: the loop variable used to shadow the "predictions" parameter;
    # renamed to "sample_predictions".
    for gt, sample_predictions in zip(ground_truth, prediction_chunks):
        correct_values = 0
        for i, prediction in enumerate(sample_predictions):
            # correct_values is the cumulative count of correct predictions
            # among the first i+1 ones.
            correct_values += int(gt == prediction)
            correct_for_n[i].append(correct_values)

    # Note: for the "n"-th value, we must divide by "n=i+1" because the list
    # elements are cumulative counts, not averages.
    accuracy = {
        i + 1: float(np.mean(correct_for_n[i])) / (i + 1) for i in range(multiplier)
    }
    std_dev = {
        i + 1: float(np.std(correct_for_n[i])) / (i + 1) for i in range(multiplier)
    }
    return accuracy, std_dev
def coverage(ground_truth: Sequence[T], predictions: Sequence[T]) -> Dict[int, float]:
    """
    Compute the coverage values, split by n-th predictions.

    Coverage for "n" is the fraction of samples for which at least one of the
    first "n" predictions is correct.

    Raises:
        ValueError: if the list sizes are incompatible, forwarded from get_sequence_multiplier().

    Returns:
        Dictionary of coverage "n" values.
    """
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )

    # we will count, for each "n", if there is at least one correct prediction
    one_correct_for_n: List[int] = [0 for _ in range(multiplier)]

    # We will process sample by sample - for that, we need to chunk the predictions
    prediction_chunks = chunker(predictions, chunk_size=multiplier)

    # Fix: the loop variable used to shadow the "predictions" parameter;
    # renamed to "sample_predictions".
    for gt, sample_predictions in zip(ground_truth, prediction_chunks):
        found_correct = 0
        for i, prediction in enumerate(sample_predictions):
            # Once a correct prediction is found, it also counts for all larger "n".
            if gt == prediction:
                found_correct = 1
            one_correct_for_n[i] += found_correct

    # Note: the total number of predictions to take into account for the "n"-th
    # (= "i+1"th) value is ALWAYS "len(ground_truth)".
    return {i + 1: one_correct_for_n[i] / len(ground_truth) for i in range(multiplier)}
def class_diversity(
    ground_truth: Sequence[T],
    predictions: Sequence[T],
    predicted_classes: Sequence[str],
) -> Tuple[Dict[int, float], Dict[int, float]]:
    """
    Compute the class diversity values, split by n-th predictions.

    Args:
        ground_truth: ground-truth values.
        predictions: predictions ("multiplier" predictions per sample).
        predicted_classes: class string associated with each prediction.

    Raises:
        ValueError: if the list sizes are incompatible, forwarded from get_sequence_multiplier().

    Returns:
        Tuple of Dictionaries of class diversity "n" values and standard deviation (std) "n" values.
        Here the standard deviation is the measure of how much the average class diversity can change from
        one sample to the other.
    """
    # Fix: removed the fused dataset-separator garbage that corrupted the
    # original return line.
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )

    # we will count how many unique superclasses are present; only the part
    # before the first "." of a class string is considered.
    predicted_superclasses = [
        long_class.split(".")[0] for long_class in predicted_classes
    ]

    # For each "n", collect the per-sample number of distinct superclasses
    # among the correct predictions in the first "n" ones.
    classes_for_n: List[List[int]] = [[] for _ in range(multiplier)]

    # We will process sample by sample - for that, we need to chunk the predictions and the classes
    predictions_and_classes = zip(predictions, predicted_superclasses)
    prediction_and_classes_chunks = chunker(
        predictions_and_classes, chunk_size=multiplier
    )

    for gt, preds_and_classes in zip(ground_truth, prediction_and_classes_chunks):
        classes = set()
        for i, (pred, pred_class) in enumerate(preds_and_classes):
            # Only correct predictions with a non-empty class contribute.
            if gt == pred and pred_class != "":
                classes.add(pred_class)
            classes_for_n[i].append(len(classes))

    # Note: the total number of predictions to take into account for the "n"-th (= "i+1"th)
    # value is "len(ground_truth)". A value < 1 is the consequence of having incorrect predictions
    class_diversity_values = {
        i + 1: float(np.mean(classes_for_n[i])) for i in range(multiplier)
    }
    std_dev = {i + 1: float(np.std(classes_for_n[i])) for i in range(multiplier)}
    return class_diversity_values, std_dev
from typing import Any, Dict, Iterable, List, Optional
from rxn.utilities.files import PathLike, iterate_lines_from_file, load_list_from_file
from .metrics import class_diversity, coverage, round_trip_accuracy, top_n_accuracy
from .metrics_calculator import MetricsCalculator
from .metrics_files import MetricsFiles, RetroFiles
from .true_reactant_accuracy import true_reactant_accuracy
class RetroMetrics(MetricsCalculator):
    """
    Class to compute common metrics for retro models, starting from files
    containing the ground truth and predictions.
    Note: all files are expected to be standardized (canonicalized, sorted, etc.).
    """
    def __init__(
        self,
        gt_precursors: Iterable[str],
        gt_products: Iterable[str],
        predicted_precursors: Iterable[str],
        predicted_products: Iterable[str],
        predicted_classes: Optional[List[str]] = None,
        gt_mapped_rxns: Optional[List[str]] = None,
        predicted_mapped_rxns: Optional[List[str]] = None,
    ):
        """
        Args:
            gt_precursors: ground-truth precursor sets, one per sample.
            gt_products: ground-truth product sets, one per sample.
            predicted_precursors: predicted precursor sets (there may be
                several predictions per ground-truth sample).
            predicted_products: products obtained by running a forward model
                on the predicted precursors (round-trip predictions).
            predicted_classes: reaction classes predicted for the round-trip
                reactions; required for the class-diversity metric.
            gt_mapped_rxns: atom-mapped ground-truth reactions; required
                (together with predicted_mapped_rxns) for the true-reactant
                accuracy.
            predicted_mapped_rxns: atom-mapped predicted reactions.
        """
        # Materialize the iterables: the metric functions need full lists
        # and some inputs are used more than once.
        self.gt_products = list(gt_products)
        self.gt_precursors = list(gt_precursors)
        self.predicted_products = list(predicted_products)
        self.predicted_precursors = list(predicted_precursors)
        self.predicted_classes = predicted_classes
        self.gt_mapped_rxns = gt_mapped_rxns
        self.predicted_mapped_rxns = predicted_mapped_rxns
    def get_metrics(self) -> Dict[str, Any]:
        """Compute all the retro metrics available for the provided inputs."""
        topn = top_n_accuracy(
            ground_truth=self.gt_precursors, predictions=self.predicted_precursors
        )
        roundtrip, roundtrip_std = round_trip_accuracy(
            ground_truth=self.gt_products, predictions=self.predicted_products
        )
        cov = coverage(
            ground_truth=self.gt_products, predictions=self.predicted_products
        )
        # The class diversity can only be computed when the predicted
        # reaction classes are available; empty dicts otherwise.
        if self.predicted_classes is not None:
            classdiversity, classdiversity_std = class_diversity(
                ground_truth=self.gt_products,
                predictions=self.predicted_products,
                predicted_classes=self.predicted_classes,
            )
        else:
            classdiversity, classdiversity_std = {}, {}
        # The true-reactant accuracy requires both sets of atom-mapped
        # reactions; empty dict otherwise.
        if self.gt_mapped_rxns is not None and self.predicted_mapped_rxns is not None:
            reactant_accuracy = true_reactant_accuracy(
                self.gt_mapped_rxns, self.predicted_mapped_rxns
            )
        else:
            reactant_accuracy = {}
        return {
            "accuracy": topn,
            "round-trip": roundtrip,
            "round-trip-std": roundtrip_std,
            "coverage": cov,
            "class-diversity": classdiversity,
            "class-diversity-std": classdiversity_std,
            "true-reactant-accuracy": reactant_accuracy,
        }
    @classmethod
    def from_metrics_files(cls, metrics_files: MetricsFiles) -> "RetroMetrics":
        """
        Instantiate from a RetroFiles instance.

        Raises:
            ValueError: if metrics_files is not a RetroFiles instance.
        """
        if not isinstance(metrics_files, RetroFiles):
            raise ValueError("Invalid type provided")
        # Whether to use the reordered files - for class token
        # To determine whether True or False, we check if the reordered files exist
        reordered = RetroFiles.reordered(metrics_files.predicted_canonical).exists()
        # The atom-mapped reactions are optional; use them only if both exist.
        mapped = (
            metrics_files.gt_mapped.exists() and metrics_files.predicted_mapped.exists()
        )
        return cls.from_raw_files(
            gt_precursors_file=metrics_files.gt_tgt,
            gt_products_file=metrics_files.gt_src,
            predicted_precursors_file=(
                metrics_files.predicted_canonical
                if not reordered
                else RetroFiles.reordered(metrics_files.predicted_canonical)
            ),
            predicted_products_file=(
                metrics_files.predicted_products_canonical
                if not reordered
                else RetroFiles.reordered(metrics_files.predicted_products_canonical)
            ),
            # None if no class predictions exist; otherwise the (possibly
            # reordered) class predictions file.
            predicted_classes_file=(
                None
                if not metrics_files.predicted_classes.exists()
                else metrics_files.predicted_classes
                if not reordered
                else RetroFiles.reordered(metrics_files.predicted_classes)
            ),
            gt_mapped_rxns_file=metrics_files.gt_mapped if mapped else None,
            predicted_mapped_rxns_file=(
                metrics_files.predicted_mapped if mapped else None
            ),
        )
    @classmethod
    def from_raw_files(
        cls,
        gt_precursors_file: PathLike,
        gt_products_file: PathLike,
        predicted_precursors_file: PathLike,
        predicted_products_file: PathLike,
        predicted_classes_file: Optional[PathLike] = None,
        gt_mapped_rxns_file: Optional[PathLike] = None,
        predicted_mapped_rxns_file: Optional[PathLike] = None,
    ) -> "RetroMetrics":
        """Instantiate from the individual ground-truth and prediction files."""
        # to simplify because it is called multiple times.
        def maybe_load_lines(filename: Optional[PathLike]) -> Optional[List[str]]:
            if filename is None:
                return None
            return load_list_from_file(filename)
        return cls(
            gt_precursors=iterate_lines_from_file(gt_precursors_file),
            gt_products=iterate_lines_from_file(gt_products_file),
            predicted_precursors=iterate_lines_from_file(predicted_precursors_file),
            predicted_products=iterate_lines_from_file(predicted_products_file),
            predicted_classes=maybe_load_lines(predicted_classes_file),
            gt_mapped_rxns=maybe_load_lines(gt_mapped_rxns_file),
            predicted_mapped_rxns=maybe_load_lines(predicted_mapped_rxns_file),
        )
import logging
from rxn.utilities.files import (
PathLike,
dump_list_to_file,
iterate_lines_from_file,
raise_if_paths_are_identical,
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def detokenize_class(tokenized_class: str) -> str:
    """
    Detokenize a reaction class used in the Transformer classification model.

    E.g. '1 1.2 1.2.3' -> '1.2.3'. The unrecognized-class label '0' and
    already-detokenized inputs (such as '1.2.3') are returned unchanged.

    Args:
        tokenized_class: str to detokenize

    Raises:
        ValueError: if the input string format is not correct

    Returns:
        the detokenized class string.
    """
    # The "unrecognized" label has no tokenized form.
    if tokenized_class == "0":
        return tokenized_class

    tokens = tokenized_class.split(" ")

    # A single token with three dot-separated fields is already detokenized.
    if len(tokens) == 1 and tokens[0].count(".") == 2:
        return tokenized_class

    if len(tokens) != 3:
        raise ValueError(
            f'The class to be detokenized, "{tokenized_class}", is probably not in the correct format.'
        )

    # The last token carries the full class.
    return tokens[-1]
def tokenize_class(detokenized_class: str) -> str:
    """
    Tokenize a reaction class for the Transformer classification model.

    E.g. '1.2.3' -> '1 1.2 1.2.3'. The unrecognized-class label '0' and
    already-tokenized inputs are returned unchanged.

    Args:
        detokenized_class: str to tokenize

    Raises:
        ValueError: if the input string format is not correct

    Returns:
        the tokenized class string.
    """
    # The "unrecognized" label is kept as-is.
    if detokenized_class == "0":
        return detokenized_class

    dot_fields = detokenized_class.split(".")

    # '1 1.2 1.2.3' has four dot-separated fields and three space-separated
    # tokens: such an input is already tokenized.
    if len(dot_fields) == 4 and len(detokenized_class.split(" ")) == 3:
        return detokenized_class

    if len(dot_fields) != 3:
        raise ValueError(
            f'The class to be tokenized, "{detokenized_class}", is probably not in the correct format.'
        )

    superclass, category, _ = dot_fields
    return f"{superclass} {superclass}.{category} {detokenized_class}"
def tokenize_class_line(class_line: str, invalid_placeholder: str) -> str:
    """Tokenize one class line, returning invalid_placeholder if it is malformed."""
    try:
        return tokenize_class(class_line)
    except ValueError:
        logger.debug(f'Error when tokenizing the class "{class_line}"')
        return invalid_placeholder
def detokenize_class_line(class_line: str, invalid_placeholder: str) -> str:
    """Detokenize one class line, returning invalid_placeholder if it is malformed."""
    try:
        return detokenize_class(class_line)
    except ValueError:
        logger.debug(f'Error when detokenizing the class "{class_line}"')
        return invalid_placeholder
def detokenize_classification_file(
    input_file: PathLike, output_file: PathLike, invalid_placeholder: str = ""
) -> None:
    """
    Detokenize all the class lines of a file.

    Lines that cannot be detokenized are replaced by invalid_placeholder.
    """
    raise_if_paths_are_identical(input_file, output_file)
    logger.info(f'Detokenizing "{input_file}" -> "{output_file}".')
    dump_list_to_file(
        (
            detokenize_class_line(line, invalid_placeholder)
            for line in iterate_lines_from_file(input_file)
        ),
        output_file,
    )
def tokenize_classification_file(
    input_file: PathLike, output_file: PathLike, invalid_placeholder: str = ""
) -> None:
    """
    Tokenize all the class lines of a file.

    Lines that cannot be tokenized are replaced by invalid_placeholder.
    """
    raise_if_paths_are_identical(input_file, output_file)
    logger.info(f'Tokenizing "{input_file}" -> "{output_file}".')
    dump_list_to_file(
        (
            tokenize_class_line(line, invalid_placeholder)
            for line in iterate_lines_from_file(input_file)
        ),
        output_file,
    )
def classification_string_is_tokenized(classification_line: str) -> bool:
    """
    Whether a classification line is tokenized or not.

    The line counts as tokenized exactly if detokenizing and then
    re-tokenizing it reproduces the original string.

    Args:
        classification_line: line to inspect

    Raises:
        ValueError: for errors in tokenization or detokenization
    """
    round_trip = tokenize_class(detokenize_class(classification_line))
    return round_trip == classification_line
def classification_file_is_tokenized(filepath: PathLike) -> bool:
    """
    Whether a file contains tokenized classes or not.
    '1.2.3' -> '1 1.2 1.2.3'
    By default, this looks at the first non-empty line of the file only!

    Raises:
        ValueError: for errors in tokenization or detokenization
        RuntimeError: for empty files or files with empty lines only.

    Args:
        filepath: path to the file.
    """
    # Find the first non-empty line; None if the file has none.
    first_line = next(
        (line for line in iterate_lines_from_file(filepath) if line != ""), None
    )
    if first_line is None:
        raise RuntimeError(
            f'Could not determine whether "{filepath}" is class-tokenized: empty lines only.'
        )
    return classification_string_is_tokenized(first_line)
from typing import Iterator, Sequence, TypeVar
from rxn.chemutils.reaction_combiner import ReactionCombiner
from rxn.chemutils.reaction_smiles import ReactionFormat
from rxn.utilities.files import PathLike, count_lines, iterate_lines_from_file
from rxn.utilities.misc import get_multiplier, get_multipliers
T = TypeVar("T")
def combine_precursors_and_products(
    precursors: Iterator[str],
    products: Iterator[str],
    total_precursors: int,
    total_products: int,
) -> Iterator[str]:
    """
    Combine two matching iterables of precursors/products into an iterator of reaction SMILES.

    Args:
        precursors: iterable of sets of precursors.
        products: iterable of sets of products.
        total_precursors: total number of precursors.
        total_products: total number of products.

    Returns:
        iterator over reaction SMILES.
    """
    # Determine how many times each side must be repeated so the two
    # iterables can be matched up one-to-one.
    multiplier_for_precursors, multiplier_for_products = get_multipliers(
        total_precursors, total_products
    )
    combiner = ReactionCombiner(reaction_format=ReactionFormat.STANDARD_WITH_TILDE)
    yield from combiner.combine_iterators(
        precursors, products, multiplier_for_precursors, multiplier_for_products
    )
def combine_precursors_and_products_from_files(
    precursors_file: PathLike, products_file: PathLike
) -> Iterator[str]:
    """
    Combine the precursors file and the products file into an iterator of reaction SMILES.
    Args:
        precursors_file: file containing the sets of precursors.
        products_file: file containing the sets of products.
    Returns:
        iterator over reaction SMILES.
    """
    # The line counts are needed by combine_precursors_and_products to
    # compute the multipliers (the files may differ in length).
    n_precursors = count_lines(precursors_file)
    n_products = count_lines(products_file)
    yield from combine_precursors_and_products(
        precursors=iterate_lines_from_file(precursors_file),
        products=iterate_lines_from_file(products_file),
        total_precursors=n_precursors,
        total_products=n_products,
    )
def get_sequence_multiplier(ground_truth: Sequence[T], predictions: Sequence[T]) -> int:
    """
    Get the multiplier for the number of predictions by ground truth sample.

    Raises:
        ValueError: if the lists have inadequate sizes (possibly forwarded
            from get_multiplier).
    """
    return get_multiplier(len(ground_truth), len(predictions))
import logging
from pathlib import Path
from typing import Optional, Union
from rxn.chemutils.tokenization import file_is_tokenized, tokenize_file
from rxn.onmt_utils import translate
from rxn.utilities.files import dump_list_to_file, is_path_exists_or_creatable
from .metrics_files import RetroFiles
from .tokenize_file import (
classification_file_is_tokenized,
detokenize_classification_file,
tokenize_classification_file,
)
from .utils import combine_precursors_and_products_from_files
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def maybe_classify_predictions(
    classification_model: Optional[Path],
    retro_files: RetroFiles,
    batch_size: int,
    gpu: bool,
) -> None:
    """Classify the reactions for determining the diversity metric.
    Only executed if a classification model is available.

    Args:
        classification_model: path to the classification model; if None,
            this function is a no-op.
        retro_files: file locations used for the retro evaluation.
        batch_size: batch size for the classification translation.
        gpu: whether to run the predictions on a GPU.
    """
    if classification_model is None:
        return
    # The classification model takes full reaction SMILES as input: build
    # them from the predicted precursors and the round-trip products.
    create_rxn_from_files(
        retro_files.predicted_canonical,
        retro_files.predicted_products_canonical,
        retro_files.predicted_rxn_canonical,
    )
    # Predict exactly one class per reaction (n_best=1).
    classification_translation(
        src_file=retro_files.predicted_rxn_canonical,
        tgt_file=None,
        pred_file=retro_files.predicted_classes,
        model=classification_model,
        n_best=1,
        beam_size=5,
        batch_size=batch_size,
        gpu=gpu,
    )
def create_rxn_from_files(
    input_file_precursors: Union[str, Path],
    input_file_products: Union[str, Path],
    output_file: Union[str, Path],
) -> None:
    """
    Combine a precursors file and a products file into a reaction SMILES file.
    """
    logger.info(
        f'Combining files "{input_file_precursors}" and "{input_file_products}" -> "{output_file}".'
    )
    reactions = combine_precursors_and_products_from_files(
        precursors_file=input_file_precursors,
        products_file=input_file_products,
    )
    dump_list_to_file(reactions, output_file)
def classification_translation(
    src_file: Union[str, Path],
    tgt_file: Optional[Union[str, Path]],
    pred_file: Union[str, Path],
    model: Union[str, Path],
    n_best: int,
    beam_size: int,
    batch_size: int,
    gpu: bool,
    max_length: int = 3,
    as_external_command: bool = False,
) -> None:
    """
    Do a classification translation.
    This function takes care of tokenizing/detokenizing the input.
    Note: no check is made that the source is canonical.
    Args:
        src_file: source file (tokenized or detokenized).
        tgt_file: ground truth class file (tokenized), not mandatory.
        pred_file: file where to save the predictions.
        model: model to do the translation
        n_best: number of predictions to make for each input.
        beam_size: beam size.
        batch_size: batch size.
        gpu: whether to use the GPU.
        max_length: maximum sequence length.
        as_external_command: whether to run the translation as an external
            command instead of in-process.
    """
    # Fail early if the predictions file cannot be written.
    if not is_path_exists_or_creatable(pred_file):
        raise RuntimeError(f'The file "{pred_file}" cannot be created.')
    # src: tokenize the source if it is not tokenized already.
    if file_is_tokenized(src_file):
        tokenized_src = src_file
    else:
        tokenized_src = str(src_file) + ".tokenized"
        tokenize_file(src_file, tokenized_src, fallback_value="")
    # tgt: tokenize the (optional) ground-truth classes if needed.
    if tgt_file is None:
        tokenized_tgt = None
    elif classification_file_is_tokenized(tgt_file):
        tokenized_tgt = tgt_file
    else:
        tokenized_tgt = str(tgt_file) + ".tokenized"
        tokenize_classification_file(tgt_file, tokenized_tgt)
    tokenized_pred = str(pred_file) + ".tokenized"
    translate(
        model=model,
        src=tokenized_src,
        tgt=tokenized_tgt,
        output=tokenized_pred,
        n_best=n_best,
        beam_size=beam_size,
        max_length=max_length,
        batch_size=batch_size,
        gpu=gpu,
        as_external_command=as_external_command,
    )
    # The model outputs tokenized classes; store the detokenized form.
    detokenize_classification_file(tokenized_pred, pred_file)
import json
import logging
from pathlib import Path
from typing import Dict, Type
from rxn.chemutils.miscellaneous import canonicalize_file
from rxn.chemutils.tokenization import copy_as_detokenized
from rxn.onmt_models import rxn_translation
from rxn.utilities.files import PathLike, ensure_directory_exists_and_is_empty
from rxn.utilities.logging import setup_console_and_file_logger
from .context_metrics import ContextMetrics
from .forward_metrics import ForwardMetrics
from .metrics_calculator import MetricsCalculator
from .metrics_files import ContextFiles, ForwardFiles, MetricsFiles, RetroFiles
from .retro_metrics import RetroMetrics
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# Maps each supported task name to the MetricsFiles subclass defining the
# file layout for that task.
_FILES_MAPPING: Dict[str, Type[MetricsFiles]] = {
    "forward": ForwardFiles,
    "context": ContextFiles,
    "retro": RetroFiles,
}
# Maps each supported task name to the MetricsCalculator subclass computing
# the metrics for that task. Keys must match _FILES_MAPPING.
_CALCULATOR_MAPPING: Dict[str, Type[MetricsCalculator]] = {
    "forward": ForwardMetrics,
    "context": ContextMetrics,
    "retro": RetroMetrics,
}
def get_metrics_files(task: str, files_path: PathLike) -> MetricsFiles:
    """Instantiate the MetricsFiles subclass registered for the given task."""
    files_class = _FILES_MAPPING[task]
    return files_class(files_path)
def get_metrics_calculator(task: str, files: MetricsFiles) -> MetricsCalculator:
    """Instantiate the MetricsCalculator registered for the given task."""
    calculator_class = _CALCULATOR_MAPPING[task]
    return calculator_class.from_metrics_files(files)
def evaluate_metrics(task: str, files_path: PathLike) -> None:
    """
    Compute the metrics for the given task and save them as JSON.

    Args:
        task: one of "forward", "context", "retro".
        files_path: directory containing the ground-truth and prediction files.
    """
    logger.info(f"Evaluating the {task} metrics...")
    files = get_metrics_files(task, files_path)
    calculator = get_metrics_calculator(task, files)
    metrics_dict = calculator.get_metrics()
    # An existing metrics file is replaced (with a warning).
    if files.metrics_file.exists():
        logger.warning(f'Overwriting "{files.metrics_file}"!')
    with open(files.metrics_file, "wt") as f:
        json.dump(metrics_dict, f, indent=2)
    logger.info(f'Evaluating the {task} metrics... Saved to "{files.metrics_file}".')
def run_model_for_metrics(
    task: str,
    model_path: Path,
    src_file: Path,
    tgt_file: Path,
    output_dir: Path,
    n_best: int,
    beam_size: int,
    batch_size: int,
    gpu: bool,
    initialize_logger: bool = False,
) -> None:
    """
    Run a model on a test set and generate the files needed for the metrics.

    Args:
        task: one of "forward", "context", "retro".
        model_path: path to the translation model.
        src_file: source file of the test set.
        tgt_file: target (ground truth) file of the test set.
        output_dir: where to save the generated files; must be empty or
            not exist yet.
        n_best: number of predictions to make for each input.
        beam_size: beam size for the translation.
        batch_size: batch size for the translation.
        gpu: whether to run the predictions on a GPU.
        initialize_logger: whether to set up console and file logging.
    """
    ensure_directory_exists_and_is_empty(output_dir)
    files = get_metrics_files(task, output_dir)
    if initialize_logger:
        setup_console_and_file_logger(files.log_file)
    # Store detokenized copies of the ground truth next to the predictions.
    copy_as_detokenized(src_file, files.gt_src)
    copy_as_detokenized(tgt_file, files.gt_tgt)
    # model prediction
    rxn_translation(
        src_file=files.gt_src,
        tgt_file=files.gt_tgt,
        pred_file=files.predicted,
        model=model_path,
        n_best=n_best,
        beam_size=beam_size,
        batch_size=batch_size,
        gpu=gpu,
    )
    # Canonicalize the predictions; invalid SMILES are replaced by "".
    canonicalize_file(
        files.predicted,
        files.predicted_canonical,
        fallback_value="",
        sort_molecules=True,
    )
from pathlib import Path
from rxn.utilities.files import PathLike
class MetricsFiles:
    """
    Base class holding the locations of the files to write to or to read
    from for the evaluation of model metrics.
    """
    def __init__(
        self,
        directory: PathLike,
        gt_src: str = "gt_src.txt",
        gt_tgt: str = "gt_tgt.txt",
        predicted: str = "pred.txt",
        predicted_canonical: str = "predicted_canonical.txt",
    ):
        """
        Args:
            directory: directory containing all the files.
            gt_src: file name for the ground-truth source.
            gt_tgt: file name for the ground-truth target.
            predicted: file name for the raw predictions.
            predicted_canonical: file name for the canonicalized predictions.
        """
        self.directory = Path(directory)
        # Log output and computed metrics (JSON) of the evaluation run.
        self.log_file = self.directory / "log.txt"
        self.metrics_file = self.directory / "metrics.json"
        self.gt_src = self.directory / gt_src
        self.gt_tgt = self.directory / gt_tgt
        self.predicted = self.directory / predicted
        self.predicted_canonical = self.directory / predicted_canonical
class RetroFiles(MetricsFiles):
    """
    Class holding the locations of the files to write to or to read from for
    the evaluation of retro metrics.
    """
    # Extension appended to files that were reordered for class-token models.
    _REORDERED_FILE_EXTENSION = ".reordered"
    def __init__(self, directory: PathLike):
        """
        Args:
            directory: directory containing all the files.
        """
        super().__init__(
            directory=directory,
            gt_src="gt_products.txt",
            gt_tgt="gt_precursors.txt",
            predicted="predicted_precursors.txt",
            predicted_canonical="predicted_precursors_canonical.txt",
        )
        # Inputs annotated with class tokens (for class-token models).
        self.class_token_products = self.directory / "class_token_products.txt"
        self.class_token_precursors = self.directory / "class_token_precursors.txt"
        # Log-probabilities associated with the retro predictions.
        self.predicted_precursors_log_probs = (
            self.directory / "predicted_precursors.txt.tokenized_log_probs"
        )
        # Products predicted by the forward model on the predicted precursors
        # (round-trip), and their log-probabilities.
        self.predicted_products = self.directory / "predicted_products.txt"
        self.predicted_products_canonical = (
            self.directory / "predicted_products_canonical.txt"
        )
        self.predicted_products_log_probs = (
            self.directory / "predicted_products.txt.tokenized_log_probs"
        )
        # Full reaction SMILES built from predicted precursors/products, and
        # the reaction classes predicted for them.
        self.predicted_rxn_canonical = self.directory / "predicted_rxn_canonical.txt"
        self.predicted_classes = self.directory / "predicted_classes.txt"
        # Atom-mapped reactions (used for the true-reactant accuracy).
        self.gt_mapped = self.directory / "gt_mapped.txt"
        self.predicted_mapped = self.directory / "predicted_mapped.txt"
    @staticmethod
    def reordered(path: PathLike) -> Path:
        """Add the reordered path extension."""
        return Path(str(path) + RetroFiles._REORDERED_FILE_EXTENSION)
class ForwardFiles(MetricsFiles):
    """
    Class holding the locations of the files to write to or to read from for
    the evaluation of forward metrics.
    """
    def __init__(self, directory: PathLike):
        """
        Args:
            directory: directory containing all the files.
        """
        super().__init__(
            directory=directory,
            gt_src="gt_precursors.txt",
            gt_tgt="gt_products.txt",
            predicted="predicted_products.txt",
            predicted_canonical="predicted_products_canonical.txt",
        )
class ContextFiles(MetricsFiles):
    """
    Class holding the locations of the files to write to or to read from for
    the evaluation of context metrics.
    """
    def __init__(self, directory: PathLike):
        """
        Args:
            directory: directory containing all the files.
        """
        # Note: the default gt_src/gt_tgt file names of the base class are kept.
        super().__init__(
            directory=directory,
            predicted="predicted_context.txt",
            predicted_canonical="predicted_context_canonical.txt",
        )
import logging
from pathlib import Path
from typing import List, Tuple, Union
import click
from rxn.utilities.containers import chunker
from rxn.utilities.files import dump_list_to_file, load_list_from_file
from rxn.metrics.metrics_files import RetroFiles
from rxn.metrics.utils import get_sequence_multiplier
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def reorder_retro_predictions_class_token(
    ground_truth_file: Union[str, Path],
    predictions_file: Union[str, Path],
    confidences_file: Union[str, Path],
    fwd_predictions_file: Union[str, Path],
    classes_predictions_file: Union[str, Path],
    n_class_tokens: int,
) -> None:
    """
    Reorder the retro-preditions generated from a class-token model.
    For each sample x, N samples are created where N is the number of class token used.
    The retro predictions are originally ordered like e.g.:
    '[0] x' -> top1 prediction('[0] x')
            -> top2 prediction('[0] x')
            ...
    '[1] x' -> top1 prediction('[1] x')
            -> top2 prediction('[1] x')
            ...
    ...
    '[N] x' -> top1 prediction('[N] x')
            -> top2 prediction('[N] x')
            ...
    Starting from the log likelihood on each prediction we reorder them token-wise to remove the token dependency.
    So the new predictions for x will be:
    x -> sorted([top1 prediction('[i] x') for i in number_class_tokens])
      -> sorted([top2 prediction('[i] x') for i in number_class_tokens])
      ...
    """
    logger.info(
        f'Reordering file "{predictions_file}", based on {n_class_tokens} class tokens.'
    )
    # We load the files and chunk the confidences
    ground_truth = load_list_from_file(ground_truth_file)
    predictions = load_list_from_file(predictions_file)
    confidences = load_list_from_file(confidences_file)
    fwd_predictions = load_list_from_file(fwd_predictions_file)
    classes_predictions = load_list_from_file(classes_predictions_file)
    # Get the exact multiplier: total number of predictions per ground-truth
    # sample, summed over all class tokens.
    multiplier = get_sequence_multiplier(
        ground_truth=ground_truth, predictions=predictions
    )
    if multiplier % n_class_tokens != 0:
        raise ValueError(
            f"The number of predictions ('{multiplier}') is not an exact "
            f"multiple of the number of class tokens '({n_class_tokens})'"
        )
    # Number of top-N predictions per (sample, class token) pair.
    topx_per_class_token = int(multiplier / n_class_tokens)
    # Keep the four parallel columns together while reordering.
    predictions_and_confidences = zip(
        predictions, confidences, fwd_predictions, classes_predictions
    )
    # One chunk groups all the predictions belonging to one ground-truth sample.
    predictions_and_confidences_chunks = chunker(
        predictions_and_confidences, chunk_size=multiplier
    )
    # we will reorder the predictions class-token wise using the confidence
    predictions_and_confidences_reordered: List[Tuple[str, str, str, str]] = []
    for pred_and_conf in predictions_and_confidences_chunks:
        for topn in range(topx_per_class_token):
            # For each class token take the topn prediction and reorder them based on the
            # (negative) confidence (index x[1])
            topn_per_class_token = [
                chunk[topn]
                for chunk in chunker(pred_and_conf, chunk_size=topx_per_class_token)
            ]
            reordered = sorted(
                topn_per_class_token, key=lambda x: float(x[1]), reverse=True
            )
            predictions_and_confidences_reordered.extend(reordered)
    # Write each reordered column back to a "<filename>.reordered" file.
    dump_list_to_file(
        (pred for pred, _, _, _ in predictions_and_confidences_reordered),
        RetroFiles.reordered(predictions_file),
    )
    dump_list_to_file(
        (conf for _, conf, _, _ in predictions_and_confidences_reordered),
        RetroFiles.reordered(confidences_file),
    )
    dump_list_to_file(
        (fwd_pred for _, _, fwd_pred, _ in predictions_and_confidences_reordered),
        RetroFiles.reordered(fwd_predictions_file),
    )
    dump_list_to_file(
        (
            classes_pred
            for _, _, _, classes_pred in predictions_and_confidences_reordered
        ),
        RetroFiles.reordered(classes_predictions_file),
    )
@click.command()
@click.option(
    "--ground_truth_file", "-g", required=True, help="File with ground truth."
)
@click.option(
    "--predictions_file", "-p", required=True, help="File with the predictions."
)
@click.option(
    "--confidences_file", "-l", required=True, help="File with the confidences."
)
@click.option(
    "--fwd_predictions_file",
    "-f",
    required=True,
    help="File with the forward predictions.",
)
@click.option(
    "--classes_predictions_file",
    "-c",
    required=True,
    help="File with the classes predictions.",
)
@click.option(
    "--n_class_tokens", "-n", required=True, type=int, help="Number of class tokens."
)
def main(
    ground_truth_file: str,
    predictions_file: str,
    confidences_file: str,
    fwd_predictions_file: str,
    classes_predictions_file: str,
    n_class_tokens: int,
) -> None:
    """Reorder the retro predictions generated by a class-token model."""
    logging.basicConfig(format="%(asctime)s [%(levelname)s] %(message)s", level="INFO")
    # Note: we put the actual code in a separate function, so that it can be
    # called also as a Python function.
    reorder_retro_predictions_class_token(
        ground_truth_file=ground_truth_file,
        predictions_file=predictions_file,
        confidences_file=confidences_file,
        fwd_predictions_file=fwd_predictions_file,
        classes_predictions_file=classes_predictions_file,
        n_class_tokens=n_class_tokens,
    )
if __name__ == "__main__":
    main()
import logging
import math
from pathlib import Path
from typing import Iterable, Tuple
import click
from rxn.utilities.files import (
PathLike,
count_lines,
dump_list_to_file,
load_list_from_file,
)
from rxn.utilities.logging import setup_console_logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def ensure_data_dimension(
    txt_files: Iterable[PathLike], output_dir: PathLike, max_dimension: int
) -> None:
    """
    Split a set of parallel text files into chunks of at most max_dimension lines.

    All the files must have the same number of lines and distinct names;
    chunk i of every file is saved (under its original file name) in
    output_dir/chunk_i.

    Args:
        txt_files: files to split.
        output_dir: directory under which the chunk_i subdirectories are created.
        max_dimension: maximum number of lines per chunk.

    Raises:
        ValueError: if no files are given, if the files have different
            numbers of lines, or if two files share the same name.
    """
    # Materialize: the input may be a generator, and it is iterated several
    # times below (the original silently produced nothing in that case).
    txt_files = list(txt_files)
    if not txt_files:
        raise ValueError("No files provided.")
    # Check the lengths of the files and ensure they are all the same
    file_lengths = [count_lines(txt_file) for txt_file in txt_files]
    if len(set(file_lengths)) != 1:
        raise ValueError("The files provided have not the same number of lines.")
    n_lines = file_lengths[0]
    # Check that there are no files with the same name
    filenames = [Path(txt_file).name for txt_file in txt_files]
    if len(set(filenames)) != len(filenames):
        raise ValueError("Found files with the same name. Aborting")
    split_no = math.ceil(n_lines / max_dimension)
    new_output_dir = Path(output_dir)
    new_output_dir.mkdir(parents=True, exist_ok=True)
    logger.info(
        f"Splitting in {split_no} files with the same name of the original ones . "
        f"Saving in {new_output_dir} ."
    )
    for txt_file in txt_files:
        file_content = load_list_from_file(txt_file)
        file_name = Path(txt_file).name
        for chunk_no, chunk_start in enumerate(range(0, n_lines, max_dimension)):
            # create a sub_directory for this chunk
            sub_directory = new_output_dir / f"chunk_{chunk_no}"
            sub_directory.mkdir(parents=True, exist_ok=True)
            logger.info(f"Created directory {sub_directory} . Saving files .")
            # save the slice of this file belonging to the chunk
            dump_list_to_file(
                file_content[chunk_start : chunk_start + max_dimension],
                sub_directory / file_name,
            )
@click.command(context_settings={"show_default": True})
@click.argument("txt_files", nargs=-1)
@click.option("--output_dir", required=True, help="Where to save all the files")
@click.option(
    "--max_dimension",
    default=50000,
    type=int,
    help="Maximum file length allowed without splitting",
)
def main(txt_files: Tuple[str, ...], output_dir: str, max_dimension: int) -> None:
    """
    Script to split files that are too big into subchunks. Useful for class token translations.
    Takes as input an arbitrary number of files. Files are saved under output_dir/chunk_i
    for i ranging from 0 to the number of splits needed.
    """
    setup_console_logger()
    ensure_data_dimension(txt_files, output_dir, max_dimension)
if __name__ == "__main__":
    main()
import logging
from pathlib import Path
from typing import Optional
import click
from rxn.chemutils.miscellaneous import canonicalize_file
from rxn.chemutils.tokenization import copy_as_detokenized
from rxn.onmt_models import rxn_translation
from rxn.utilities.files import ensure_directory_exists_and_is_empty
from rxn.utilities.logging import setup_console_and_file_logger
from rxn.metrics.class_tokens import maybe_prepare_class_token_files
from rxn.metrics.classification_translation import maybe_classify_predictions
from rxn.metrics.metrics_files import RetroFiles
from rxn.metrics.run_metrics import evaluate_metrics
from rxn.metrics.true_reactant_accuracy import (
maybe_determine_true_reactants,
true_reactant_environment_check,
)
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@click.command(context_settings={"show_default": True})
@click.option(
"--precursors_file",
required=True,
type=click.Path(exists=True, path_type=Path),
help="File containing the precursors of a test set",
)
@click.option(
"--products_file",
required=True,
type=click.Path(exists=True, path_type=Path),
help="File containing the products of a test set",
)
@click.option(
"--output_dir",
required=True,
type=click.Path(path_type=Path),
help="Where to save all the files",
)
@click.option(
"--retro_model",
required=True,
type=click.Path(exists=True, path_type=Path),
help="Path to the single-step retrosynthesis model",
)
@click.option(
"--forward_model",
required=True,
type=click.Path(exists=True, path_type=Path),
help="Path to the forward model",
)
@click.option(
"--classification_model",
type=click.Path(exists=True, path_type=Path),
required=False,
default=None,
help="Path to the classification model",
)
@click.option("--batch_size", default=64, type=int, help="Batch size")
@click.option(
"--n_best", default=10, type=int, help="Number of retro predictions to make (top-N)"
)
@click.option(
"--gpu/--no-gpu", default=False, help="Whether to run the predictions on a GPU."
)
@click.option(
"--no_metrics", is_flag=True, help="If given, the metrics will not be computed."
)
@click.option(
"--beam_size", default=15, type=int, help="Beam size for retro (> n_best)."
)
@click.option(
"--class_tokens",
default=None,
type=int,
help="The number of tokens used in the trainings",
)
@click.option(
"--with_true_reactant_accuracy/--no_true_reactant_accuracy",
default=False,
help="Whether to calculate the true reactant accuracy, based on rxnmapper.",
)
@click.option(
"--rxnmapper_batch_size",
default=8,
type=int,
help=(
"Batch size for RXNMapper. Considered "
"only if the true reactant accuracy is activated."
),
)
def main(
precursors_file: Path,
products_file: Path,
output_dir: Path,
retro_model: Path,
forward_model: Path,
classification_model: Optional[Path],
batch_size: int,
n_best: int,
gpu: bool,
no_metrics: bool,
beam_size: int,
class_tokens: Optional[int],
with_true_reactant_accuracy: bool,
rxnmapper_batch_size: int,
) -> None:
"""Starting from the ground truth files and two models (retro, forward),
generate the translation files needed for the metrics, and calculate the default metrics.
"""
true_reactant_environment_check(with_true_reactant_accuracy)
ensure_directory_exists_and_is_empty(output_dir)
retro_files = RetroFiles(output_dir)
setup_console_and_file_logger(retro_files.log_file)
copy_as_detokenized(products_file, retro_files.gt_src)
copy_as_detokenized(precursors_file, retro_files.gt_tgt)
maybe_prepare_class_token_files(class_tokens, retro_files)
# retro
rxn_translation(
src_file=(
retro_files.gt_src
if class_tokens is None
else retro_files.class_token_products
),
tgt_file=(
retro_files.gt_tgt
if class_tokens is None
else retro_files.class_token_precursors
),
pred_file=retro_files.predicted,
model=retro_model,
n_best=n_best,
beam_size=beam_size,
batch_size=batch_size,
gpu=gpu,
)
canonicalize_file(
retro_files.predicted,
retro_files.predicted_canonical,
fallback_value="",
sort_molecules=True,
)
# Forward
rxn_translation(
src_file=retro_files.predicted_canonical,
tgt_file=None,
pred_file=retro_files.predicted_products,
model=forward_model,
n_best=1,
beam_size=10,
batch_size=batch_size,
gpu=gpu,
)
canonicalize_file(
retro_files.predicted_products,
retro_files.predicted_products_canonical,
fallback_value="",
)
maybe_classify_predictions(classification_model, retro_files, batch_size, gpu)
maybe_determine_true_reactants(
with_true_reactant_accuracy, retro_files, rxnmapper_batch_size
)
if not no_metrics:
evaluate_metrics("retro", output_dir)
if __name__ == "__main__":
main() | /rxn_metrics-1.1.0-py3-none-any.whl/rxn/metrics/scripts/prepare_retro_metrics.py | 0.819207 | 0.174991 | prepare_retro_metrics.py | pypi |
import logging
import re
import shutil
from pathlib import Path
from typing import List, Tuple
import click
from rxn.utilities.files import PathLike, raise_if_paths_are_identical
from rxn.utilities.logging import setup_console_logger
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def sorted_chunk_directories(input_path: Path) -> List[Path]:
    """
    Return the child paths of input_path whose names end with a number,
    sorted by that number in ascending order.
    """
    # Collect (number, path) pairs for all entries ending with digits.
    numbered_entries: List[Tuple[int, Path]] = []
    for candidate in input_path.iterdir():
        match = re.match(r".*?(\d+)$", str(candidate))
        if match is None:
            continue
        numbered_entries.append((int(match.group(1)), candidate))
    # Stable sort on the numeric suffix keeps the iterdir order for ties.
    numbered_entries.sort(key=lambda pair: pair[0])
    return [entry for _, entry in numbered_entries]
def join_data_files(input_dir: PathLike, output_dir: PathLike) -> None:
    """
    Join the chunked files from the chunk_i subdirectories into single files.

    Joining files with `shutil`, reference: https://stackoverflow.com/a/27077437

    Args:
        input_dir: directory containing the chunk_i subdirectories.
        output_dir: where to write the joined files; created if necessary.
    """
    raise_if_paths_are_identical(input_dir, output_dir)
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)
    # Assuming that all directories contain the same files
    filenames = [filename.name for filename in (Path(input_dir) / "chunk_0").iterdir()]
    sorted_chunk_dirs = sorted_chunk_directories(Path(input_dir))
    for filename in filenames:
        out_file_path = output_path / filename
        # Fix: the original logged an f-string with no placeholder; log the
        # actual file being assembled instead.
        logger.info(f'Joining chunks of "{filename}" -> "{out_file_path}".')
        with open(out_file_path, "wb") as f:
            # looping over the directories and skipping files or directories in the wrong format
            # directories need to end with a digit
            for path in sorted_chunk_dirs:
                src_path = path / filename
                logger.debug(f"Source file: {src_path}")
                if src_path.exists():
                    # Context manager closes the source handle (the original
                    # leaked it via a bare open()).
                    with open(src_path, "rb") as src:
                        shutil.copyfileobj(src, f)
                else:
                    # Differing files between the 'chunk' directories are skipped
                    logger.warning(f"The file '{src_path}' does not exist. Not joining")
@click.command(context_settings={"show_default": True})
@click.option(
    "--input_dir",
    required=True,
    help="Folder containing different subfolders with the data chunks.",
)
@click.option("--output_dir", required=True, help="Where to save all the files.")
def main(input_dir: str, output_dir: str) -> None:
    """
    Join files that were previously split with the script ensure_data_dimension.py.
    """
    setup_console_logger()
    join_data_files(input_dir, output_dir)
if __name__ == "__main__":
    main()
import logging
from collections import defaultdict
from functools import partial
from typing import Callable, DefaultDict, Iterable, Iterator, Tuple
from rxn.chemutils.conversion import canonicalize_smiles, smiles_to_inchi
from rxn.chemutils.miscellaneous import apply_to_any_smiles, sort_any
from typing_extensions import TypeAlias
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# We handle predictions as tuples of SMILES strings and associated confidences
Prediction: TypeAlias = Tuple[str, float]
class BaseCollapser:
    """
    Helper class for collapsing things.

    Having this base collapser can be useful for extending things in the future, if we
    want to make this class more general than just working with tuples.
    """

    def __init__(self, collapsing_fns: Iterable[Callable[[str], str]]):
        # Materialize the functions so they can be applied repeatedly.
        self.collapsing_fns = list(collapsing_fns)

    def collapse(self, predictions: Iterable[Prediction]) -> Iterator[Prediction]:
        # The predictions are traversed twice below, hence the conversion to a list.
        prediction_list = list(predictions)

        # First pass: compute the collapsed representation of every raw SMILES
        # and accumulate the confidences of predictions that collapse together.
        raw_to_collapsed = {}
        merged_confidence: DefaultDict[str, float] = defaultdict(float)
        for raw_smiles, confidence in prediction_list:
            collapsed = sort_any(raw_smiles)
            for collapsing_fn in self.collapsing_fns:
                try:
                    collapsed = apply_to_any_smiles(collapsed, collapsing_fn)
                except Exception as e:
                    logger.warning(f'Cannot collapse SMILES "{raw_smiles}": {e}')
            raw_to_collapsed[raw_smiles] = collapsed
            merged_confidence[collapsed] += confidence

        # Second pass: yield the first raw SMILES for every distinct collapsed
        # representation, together with the merged confidence.
        already_yielded = set()
        for raw_smiles, _ in prediction_list:
            collapsed = raw_to_collapsed[raw_smiles]
            if collapsed in already_yielded:
                continue
            already_yielded.add(collapsed)
            yield raw_smiles, merged_confidence[collapsed]
class PredictionCollapser:
    """
    Collapse the predictions of an RXN-onmt model based on canonical representations
    of the predictions.

    This is useful to remove predictions that are different in the raw string,
    but correspond to identical compounds.
    """

    def __init__(self, collapse_inchi: bool = True):
        """
        Args:
            collapse_inchi: whether to do the collapsing based on the InChI.
        """
        self.collapser = self._instantiate_base_collapser(collapse_inchi)

    @staticmethod
    def _instantiate_base_collapser(collapse_inchi: bool) -> BaseCollapser:
        # Canonicalization (tolerant to valence problems) is always applied.
        fns = [partial(canonicalize_smiles, check_valence=False)]
        if collapse_inchi:
            # Optionally collapse further through the tautomer-aware InChI.
            fns.append(partial(smiles_to_inchi, extended_tautomer_check=True))
        return BaseCollapser(collapsing_fns=fns)

    def collapse_predictions(
        self, predictions: Iterable[Prediction]
    ) -> Iterator[Prediction]:
        """Delegate the collapsing to the underlying BaseCollapser."""
        yield from self.collapser.collapse(predictions)
from typing import Optional
from rxn.chemutils.tokenization import detokenize_file, file_is_tokenized, tokenize_file
from rxn.onmt_utils import translate
from rxn.utilities.files import PathLike, is_path_exists_or_creatable
def _tokenized_version(file: PathLike) -> PathLike:
    """Return the path to a tokenized version of the given file.

    If the file is already tokenized it is returned unchanged; otherwise a
    tokenized copy (with an additional ".tokenized" suffix) is written and
    its path is returned.
    """
    if file_is_tokenized(file):
        return file
    tokenized = str(file) + ".tokenized"
    tokenize_file(file, tokenized, fallback_value="")
    return tokenized


def rxn_translation(
    src_file: PathLike,
    tgt_file: Optional[PathLike],
    pred_file: PathLike,
    model: PathLike,
    n_best: int,
    beam_size: int,
    batch_size: int,
    gpu: bool,
    max_length: int = 300,
    as_external_command: bool = False,
) -> None:
    """
    Do a forward or retro translation.

    This function takes care of tokenizing/detokenizing the input. In principle, by adapting
    the "invalid" placeholder, this could also work when input/output are full reactions.

    Note: no check is made that the source is canonical.

    Args:
        src_file: source file (tokenized or detokenized).
        tgt_file: ground truth file (tokenized or detokenized), not mandatory.
        pred_file: file where to save the predictions.
        model: model to do the translation.
        n_best: number of predictions to make for each input.
        beam_size: beam size.
        batch_size: batch size.
        gpu: whether to use the GPU.
        max_length: maximum sequence length.
        as_external_command: runs the onmt command instead of Python code.
    """
    if not is_path_exists_or_creatable(pred_file):
        raise RuntimeError(f'The file "{pred_file}" cannot be created.')
    # Tokenize the source and the (optional) target if not already tokenized.
    # Note: the duplicated tokenize-if-needed logic was factored out into
    # _tokenized_version.
    tokenized_src = _tokenized_version(src_file)
    tokenized_tgt = None if tgt_file is None else _tokenized_version(tgt_file)
    tokenized_pred = str(pred_file) + ".tokenized"
    translate(
        model=model,
        src=tokenized_src,
        tgt=tokenized_tgt,
        output=tokenized_pred,
        n_best=n_best,
        beam_size=beam_size,
        max_length=max_length,
        batch_size=batch_size,
        gpu=gpu,
        as_external_command=as_external_command,
    )
    # The model produces tokenized predictions; save the detokenized version
    # to the requested location.
    detokenize_file(tokenized_pred, pred_file)
import datetime
import logging
import subprocess
from typing import IO, List, Optional, cast
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def log_file_name_from_time(prefix: Optional[str] = None) -> str:
    """
    Get the name of a log file (typically to create it) from the current
    date and time.

    Returns:
        String for a file name in the format "20221231-1425.log", or
        "{prefix}-20221231-1425.log" if the prefix is specified.
    """
    timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M")
    components = [timestamp] if prefix is None else [prefix, timestamp]
    return "-".join(components) + ".log"
def run_command(command_and_args: List[str]) -> None:
    """
    Run a command, printing its output (stdout and stderr) to the logs.

    Args:
        command_and_args: executable and its arguments as separate list items.

    Raises:
        RuntimeError: for different errors that may be encountered, and when
            the return code of the executed command is not zero.
    """
    command_str = " ".join(command_and_args)
    command_str_short = f"{command_and_args[0]} [...]"
    logger.info(f"Running command: {command_str}")
    # Fix: pass the argument list directly instead of a joined string with
    # shell=True. This keeps arguments containing spaces intact and avoids
    # shell-injection issues.
    try:
        process = subprocess.Popen(
            command_and_args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
        )
    except OSError as e:
        msg = f'The command "{command_str_short}" could not be started.'
        logger.error(msg)
        raise RuntimeError(msg) from e
    with process:
        out_stream = cast(IO[bytes], process.stdout)
        try:
            for line in iter(out_stream.readline, b""):
                logger.info(line.decode("utf-8").rstrip())
        # Fix: a failing decode raises UnicodeDecodeError; the previously
        # caught subprocess.CalledProcessError can never occur here.
        except UnicodeDecodeError as e:
            msg = f'Error when decoding output of "{command_str_short}"'
            logger.error(msg)
            raise RuntimeError(msg) from e
        # Fix: wait for the process to terminate; reading returncode before
        # the process is reaped may return None and wrongly report failure.
        return_code = process.wait()
    if return_code == 0:
        logger.info(f'The command "{command_str_short}" ran successfully.')
    else:
        msg = (
            f'The command "{command_str_short}" returned '
            f"code {return_code}. Check the logs for more information."
        )
        logger.error(msg)
        raise RuntimeError(msg)
import logging
import re
from itertools import count
from pathlib import Path
from typing import Optional
from rxn.utilities.files import PathLike
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class ModelFiles:
    """
    Class to make it easy to get the names/paths of the trained OpenNMT models.
    """

    ONMT_CONFIG_FILE = "config_{idx}.yml"
    MODEL_PREFIX = "model"
    MODEL_STEP_PATTERN = re.compile(r"^model_step_(\d+)\.pt$")

    def __init__(self, model_dir: PathLike):
        # Convert to an absolute path right away, and make sure the
        # directory exists.
        self.model_dir = Path(model_dir).resolve()
        self.model_dir.mkdir(parents=True, exist_ok=True)

    @property
    def model_prefix(self) -> Path:
        """Absolute path to the model prefix; during training, OpenNMT will
        append "_step_10000.pt" to it (or other step numbers)."""
        return self.model_dir / ModelFiles.MODEL_PREFIX

    def next_config_file(self) -> Path:
        """Get the next available config file name."""
        for candidate_index in count(1):
            candidate = self.model_dir / ModelFiles.ONMT_CONFIG_FILE.format(
                idx=candidate_index
            )
            if not candidate.exists():
                return candidate
        return Path()  # Note: in order to satisfy mypy. This is never reached.

    def get_last_checkpoint(self) -> Path:
        """Get the last checkpoint matching the naming including the step number.

        Raises:
            RuntimeError: no model is found in the expected directory.
        """
        checkpoints = []
        for path in self.model_dir.iterdir():
            step = self._get_checkpoint_step(path)
            if step is not None:
                checkpoints.append((step, path))
        if not checkpoints:
            raise RuntimeError(f'No model found in "{self.model_dir}"')
        # The checkpoint with the highest step number wins.
        return max(checkpoints)[1]

    @staticmethod
    def _get_checkpoint_step(path: Path) -> Optional[int]:
        """Get the step from the path of a given model. None if no match."""
        match = ModelFiles.MODEL_STEP_PATTERN.match(path.name)
        return None if match is None else int(match.group(1))
class OnmtPreprocessedFiles:
    """
    Class to make it easy to get the names/paths of the OpenNMT-preprocessed files.
    """

    PREFIX = "preprocessed"

    def __init__(self, preprocessed_dir: PathLike):
        # Resolve to an absolute path and make sure the directory exists.
        self.preprocessed_dir = Path(preprocessed_dir).resolve()
        self.preprocessed_dir.mkdir(parents=True, exist_ok=True)

    @property
    def preprocess_prefix(self) -> Path:
        """Absolute path to the prefix for the preprocessed files; during preprocessing,
        OpenNMT will append ".train.0.pt", ".valid.0.pt", ".vocab.pt", etc."""
        return self.preprocessed_dir / self.PREFIX

    @property
    def vocab_file(self) -> Path:
        """Path of the vocabulary file produced by onmt_preprocess."""
        return self.preprocess_prefix.with_suffix(".vocab.pt")
class RxnPreprocessingFiles:
    """
    Class to make it easy to get the names/paths of the files generated during data preprocessing.

    This assumes that the default paths were used when calling rxn-data-pipeline.
    """

    FILENAME_ROOT = "data"

    # Accepted split names, mapped to their canonical form.
    _SPLIT_NAMES = {
        "train": "train",
        "valid": "validation",
        "validation": "validation",
        "test": "test",
    }

    def __init__(self, processed_data_dir: PathLike):
        # Directly converting to an absolute path
        self.processed_data_dir = Path(processed_data_dir).resolve()

    def _add_extension(self, extension: str) -> Path:
        """
        Helper function get the path of the file produced with the given extension.

        Args:
            extension: extension to add

        Returns:
            Path to the file with the given extension.
        """
        normalized = extension if extension.startswith(".") else "." + extension
        return self.processed_data_dir / (
            RxnPreprocessingFiles.FILENAME_ROOT + normalized
        )

    @property
    def standardized_csv(self) -> Path:
        return self._add_extension("standardized.csv")

    @property
    def processed_csv(self) -> Path:
        return self._add_extension("processed.csv")

    def get_processed_csv_for_split(self, split: str) -> Path:
        return self._add_extension(f"processed.{self._validate_split(split)}.csv")

    @property
    def processed_train_csv(self) -> Path:
        return self.get_processed_csv_for_split("train")

    @property
    def processed_validation_csv(self) -> Path:
        return self.get_processed_csv_for_split("validation")

    @property
    def processed_test_csv(self) -> Path:
        return self.get_processed_csv_for_split("test")

    def get_precursors_for_split(self, split: str) -> Path:
        return self._add_extension(
            f"processed.{self._validate_split(split)}.precursors_tokens"
        )

    def get_products_for_split(self, split: str) -> Path:
        return self._add_extension(
            f"processed.{self._validate_split(split)}.products_tokens"
        )

    @property
    def train_precursors(self) -> Path:
        return self.get_precursors_for_split("train")

    @property
    def train_products(self) -> Path:
        return self.get_products_for_split("train")

    @property
    def validation_precursors(self) -> Path:
        return self.get_precursors_for_split("validation")

    @property
    def validation_products(self) -> Path:
        return self.get_products_for_split("validation")

    @property
    def test_precursors(self) -> Path:
        return self.get_precursors_for_split("test")

    @property
    def test_products(self) -> Path:
        return self.get_products_for_split("test")

    def get_context_tags_for_split(self, split: str) -> Path:
        return self._add_extension(
            f"processed.{self._validate_split(split)}.context.tagged"
        )

    def get_context_src_for_split(self, split: str) -> Path:
        return self._add_extension(
            f"processed.{self._validate_split(split)}.context.src"
        )

    def get_context_tgt_for_split(self, split: str) -> Path:
        return self._add_extension(
            f"processed.{self._validate_split(split)}.context.tgt"
        )

    @staticmethod
    def augmented(data_path: Path) -> Path:
        """Get the path for the augmented version of a data file."""
        return data_path.with_name(data_path.name + ".augmented")

    def _validate_split(self, split: str) -> str:
        canonical = RxnPreprocessingFiles._SPLIT_NAMES.get(split)
        if canonical is None:
            raise ValueError(f'Unsupported split: "{split}"')
        return canonical

    def get_src_file(self, split: str, model_task: str) -> Path:
        """Get the source file for the given task.

        Note: the file is tokenized for the forward and retro tasks, but not
        for the context task.
        """
        src_getters = {
            "forward": self.get_precursors_for_split,
            "retro": self.get_products_for_split,
            "context": self.get_context_src_for_split,
        }
        getter = src_getters.get(model_task)
        if getter is None:
            raise ValueError(f'Unsupported model task: "{model_task}"')
        return getter(split)

    def get_tgt_file(self, split: str, model_task: str) -> Path:
        """Get the target file for the given task.

        Note: the file is tokenized for the forward and retro tasks, but not
        for the context task.
        """
        tgt_getters = {
            "forward": self.get_products_for_split,
            "retro": self.get_precursors_for_split,
            "context": self.get_context_tgt_for_split,
        }
        getter = tgt_getters.get(model_task)
        if getter is None:
            raise ValueError(f'Unsupported model task: "{model_task}"')
        return getter(split)
import logging
from typing import Tuple
import click
from rxn.onmt_utils import __version__ as onmt_utils_version
from rxn.onmt_utils.train_command import OnmtTrainCommand
from rxn.utilities.logging import setup_console_and_file_logger
from rxn.onmt_models import __version__ as onmt_models_version
from rxn.onmt_models import defaults
from rxn.onmt_models.training_files import ModelFiles, OnmtPreprocessedFiles
from rxn.onmt_models.utils import log_file_name_from_time, run_command
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@click.command(context_settings=dict(show_default=True))
@click.option("--batch_size", default=defaults.BATCH_SIZE)
@click.option(
    "--data_weights",
    type=int,
    multiple=True,
    help="Weights of the different data sets for training. Only needed in a multi-task setting.",
)
@click.option("--dropout", default=defaults.DROPOUT)
@click.option("--heads", default=defaults.HEADS)
@click.option(
    "--keep_checkpoint",
    type=int,
    default=defaults.KEEP_CHECKPOINT,
    help='How many checkpoints to keep ("-1" means "keep all").',
)
@click.option("--layers", default=defaults.LAYERS)
@click.option("--learning_rate", type=float, default=defaults.LEARNING_RATE)
@click.option(
    "--model_output_dir", type=str, required=True, help="Where to save the models"
)
@click.option("--no_gpu", is_flag=True, help="Run the training on CPU (slow!)")
@click.option(
    "--preprocess_dir",
    type=str,
    required=True,
    help="Directory with OpenNMT-preprocessed files",
)
@click.option("--rnn_size", default=defaults.RNN_SIZE)
@click.option("--seed", default=defaults.SEED)
@click.option("--train_num_steps", default=100000)
@click.option("--transformer_ff", default=defaults.TRANSFORMER_FF)
@click.option("--warmup_steps", default=defaults.WARMUP_STEPS)
@click.option("--word_vec_size", default=defaults.WORD_VEC_SIZE)
def main(
    batch_size: int,
    data_weights: Tuple[int, ...],
    dropout: float,
    heads: int,
    keep_checkpoint: int,
    layers: int,
    learning_rate: float,
    model_output_dir: str,
    no_gpu: bool,
    preprocess_dir: str,
    rnn_size: int,
    seed: int,
    train_num_steps: int,
    transformer_ff: int,
    warmup_steps: int,
    word_vec_size: int,
) -> None:
    """Train an OpenNMT model.

    Multitask training is also supported, if at least two
    `data_weights` parameters are given (Note: needs to be consistent with the
    rxn-onmt-preprocess command executed before training).
    """
    # set up paths
    model_files = ModelFiles(model_output_dir)
    onmt_preprocessed_files = OnmtPreprocessedFiles(preprocess_dir)
    # Set up the logs (written both to the console and to a time-stamped file
    # in the model directory).
    log_file = model_files.model_dir / log_file_name_from_time("rxn-onmt-train")
    setup_console_and_file_logger(log_file)
    logger.info("Training RXN model.")
    logger.info(f"rxn-onmt-utils version: {onmt_utils_version}. ")
    logger.info(f"rxn-onmt-models version: {onmt_models_version}. ")
    config_file = model_files.next_config_file()
    # Assemble the full onmt_train command from the CLI arguments.
    train_cmd = OnmtTrainCommand.train(
        batch_size=batch_size,
        data=onmt_preprocessed_files.preprocess_prefix,
        dropout=dropout,
        heads=heads,
        keep_checkpoint=keep_checkpoint,
        layers=layers,
        learning_rate=learning_rate,
        rnn_size=rnn_size,
        save_model=model_files.model_prefix,
        seed=seed,
        train_steps=train_num_steps,
        transformer_ff=transformer_ff,
        warmup_steps=warmup_steps,
        word_vec_size=word_vec_size,
        no_gpu=no_gpu,
        data_weights=data_weights,
    )
    # Write config file
    command_and_args = train_cmd.save_to_config_cmd(config_file)
    run_command(command_and_args)
    # Launch the actual training, reading the settings from the config file
    command_and_args = train_cmd.execute_from_config_cmd(config_file)
    run_command(command_and_args)
    logger.info(
        f'Training successful. Models saved under "{str(model_files.model_dir)}".'
    )
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_onmt_train.py | 0.693784 | 0.169337 | rxn_onmt_train.py | pypi |
import logging
from typing import Optional, Tuple
import click
from rxn.onmt_utils import __version__ as onmt_utils_version
from rxn.onmt_utils.model_introspection import (
get_model_dropout,
get_model_seed,
model_vocab_is_compatible,
)
from rxn.onmt_utils.train_command import OnmtTrainCommand
from rxn.utilities.logging import setup_console_and_file_logger
from rxn.onmt_models import __version__ as onmt_models_version
from rxn.onmt_models import defaults
from rxn.onmt_models.training_files import ModelFiles, OnmtPreprocessedFiles
from rxn.onmt_models.utils import log_file_name_from_time, run_command
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@click.command(context_settings=dict(show_default=True))
@click.option("--batch_size", default=defaults.BATCH_SIZE)
@click.option(
    "--data_weights",
    type=int,
    multiple=True,
    help="Weights of the different data sets for training. Only needed in a multi-task setting.",
)
@click.option(
    "--keep_checkpoint",
    type=int,
    default=defaults.KEEP_CHECKPOINT,
    help='How many checkpoints to keep ("-1" means "keep all").',
)
@click.option(
    "--model_output_dir", type=str, required=True, help="Where to save the models"
)
@click.option("--no_gpu", is_flag=True, help="Run the training on CPU (slow!)")
@click.option(
    "--preprocess_dir",
    type=str,
    required=True,
    help="Directory with OpenNMT-preprocessed files",
)
@click.option(
    "--train_from",
    type=str,
    help=(
        "Model to continue training from. If not specified, "
        "the last checkpoint from model_output_dir will be taken."
    ),
)
@click.option(
    "--train_num_steps",
    default=100000,
    help="Number of steps, including steps from the initial training run.",
)
def main(
    batch_size: int,
    data_weights: Tuple[int, ...],
    keep_checkpoint: int,
    model_output_dir: str,
    no_gpu: bool,
    preprocess_dir: str,
    train_from: Optional[str],
    train_num_steps: int,
) -> None:
    """Continue training for an OpenNMT model.

    Multitask training is also supported, if at least two
    `data_weights` parameters are given (Note: needs to be consistent with the
    rxn-onmt-preprocess command executed before training).
    """
    # Set up the paths for the models and the preprocessed data.
    model_files = ModelFiles(model_output_dir)
    onmt_preprocessed_files = OnmtPreprocessedFiles(preprocess_dir)
    # Set up the logs
    log_file = model_files.model_dir / log_file_name_from_time(
        "rxn-onmt-continue-training"
    )
    setup_console_and_file_logger(log_file)
    logger.info("Continue training of RXN model.")
    logger.info(f"rxn-onmt-utils version: {onmt_utils_version}. ")
    logger.info(f"rxn-onmt-models version: {onmt_models_version}. ")
    if train_from is None:
        # Default to the most recent checkpoint in the output directory.
        train_from = str(model_files.get_last_checkpoint())
    logger.info(f"Training will be continued from {train_from}")
    # Refuse to continue if the model vocabulary does not match the
    # preprocessed data.
    if not model_vocab_is_compatible(train_from, onmt_preprocessed_files.vocab_file):
        raise RuntimeError(
            "The vocabularies are not compatible. It is not advised to continue training."
        )
    config_file = model_files.next_config_file()
    # Reuse the dropout and seed stored in the checkpoint, for consistency
    # with the initial training run.
    dropout = get_model_dropout(train_from)
    seed = get_model_seed(train_from)
    train_cmd = OnmtTrainCommand.continue_training(
        batch_size=batch_size,
        data=onmt_preprocessed_files.preprocess_prefix,
        keep_checkpoint=keep_checkpoint,
        dropout=dropout,
        save_model=model_files.model_prefix,
        seed=seed,
        train_from=train_from,
        train_steps=train_num_steps,
        no_gpu=no_gpu,
        data_weights=data_weights,
    )
    # Write config file
    command_and_args = train_cmd.save_to_config_cmd(config_file)
    run_command(command_and_args)
    # Launch the actual training, reading the settings from the config file
    command_and_args = train_cmd.execute_from_config_cmd(config_file)
    run_command(command_and_args)
    logger.info(
        f'Training successful. Models saved under "{str(model_files.model_dir)}".'
    )
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_onmt_continue_training.py | 0.737158 | 0.204362 | rxn_onmt_continue_training.py | pypi |
import logging
from pathlib import Path
import click
from rxn.onmt_utils import __version__ as onmt_utils_version
from rxn.reaction_preprocessing.config import (
CommonConfig,
Config,
DataConfig,
FragmentBond,
InitialDataFormat,
PreprocessConfig,
RxnImportConfig,
SplitConfig,
StandardizeConfig,
)
from rxn.reaction_preprocessing.main import preprocess_data
from rxn.utilities.logging import setup_console_and_file_logger
from rxn.onmt_models import __version__ as onmt_models_version
from rxn.onmt_models import defaults
from rxn.onmt_models.training_files import RxnPreprocessingFiles
from rxn.onmt_models.utils import log_file_name_from_time
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@click.command(context_settings=dict(show_default=True))
@click.option("--input_data", type=str, required=True, help="Input data TXT or CSV")
@click.option(
    "--import_from",
    type=str,
    default="txt",
    help=(
        'Column to import reaction SMILES from in a CSV. The default, "txt", '
        "means the input file is a simple TXT file."
    ),
)
@click.option(
    "--output_dir",
    type=str,
    required=True,
    help="Directory where to save the generated files",
)
@click.option(
    "--min_reactants",
    type=int,
    default=2,
    help=(
        "Minimum number of precursors / reactants. "
        "Reactions with fewer precursors than that will be discarded."
    ),
)
@click.option(
    "--split_seed", default=defaults.SEED, help="Random seed for splitting step"
)
@click.option(
    "--fragment_bond",
    type=click.Choice(["DOT", "TILDE"], case_sensitive=False),
    default="DOT",
)
def main(
    input_data: str,
    import_from: str,
    output_dir: str,
    min_reactants: int,
    split_seed: int,
    fragment_bond: str,
) -> None:
    """Preprocess the data to generate a dataset for training transformer models.

    The script will automatically generate the following files in output_dir:
        data.imported.csv
        data.standardized.csv
        data.processed.csv
        data.processed.train.csv
        data.processed.validation.csv
        data.processed.test.csv
        data.processed.train.precursors_tokens
        data.processed.train.products_tokens
        data.processed.validation.precursors_tokens
        data.processed.validation.products_tokens
        data.processed.test.precursors_tokens
        data.processed.test.products_tokens
    """
    # Running the command below fails if the paths are relative -> make them absolute
    input_data_path = Path(input_data).resolve()
    output_dir_path = Path(output_dir).resolve()
    # make sure that the required output directory exists
    output_dir_path.mkdir(parents=True, exist_ok=True)
    # Set up the logs
    log_file = output_dir_path / log_file_name_from_time("rxn-prepare-data")
    setup_console_and_file_logger(log_file)
    logger.info("Prepare reaction data for training with rxn-onmt-models.")
    logger.info(f"rxn-onmt-utils version: {onmt_utils_version}. ")
    logger.info(f"rxn-onmt-models version: {onmt_models_version}. ")
    # "txt" is a special value meaning plain-TXT input; any other value is
    # interpreted as the name of the CSV column to import from.
    if import_from == "txt":
        import_config = RxnImportConfig(data_format=InitialDataFormat.TXT)
    else:
        import_config = RxnImportConfig(
            data_format=InitialDataFormat.CSV, input_csv_column_name=import_from
        )
    # Assemble the full configuration for the preprocessing pipeline.
    cfg = Config(
        data=DataConfig(
            path=str(input_data_path),
            proc_dir=str(output_dir_path),
            name=RxnPreprocessingFiles.FILENAME_ROOT,
        ),
        common=CommonConfig(fragment_bond=FragmentBond[fragment_bond]),
        rxn_import=import_config,
        standardize=StandardizeConfig(
            annotation_file_paths=[], discard_unannotated_metals=False
        ),
        preprocess=PreprocessConfig(min_reactants=min_reactants),
        split=SplitConfig(hash_seed=split_seed),
    )
    try:
        logger.info("Running the data preprocessing")
        preprocess_data(cfg)
    except Exception as e:
        # Log the full traceback before exiting with an error.
        logger.exception("Error during data preprocessing:")
        raise SystemExit("Error during data preprocessing") from e
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_prepare_data.py | 0.470007 | 0.153962 | rxn_prepare_data.py | pypi |
import logging
import random
from pathlib import Path
from typing import List, Optional, Tuple
import click
from rxn.chemutils.tokenization import ensure_tokenized_file
from rxn.onmt_utils import __version__ as onmt_utils_version
from rxn.onmt_utils.train_command import preprocessed_id_names
from rxn.utilities.files import (
PathLike,
count_lines,
dump_list_to_file,
load_list_from_file,
)
from rxn.utilities.logging import setup_console_and_file_logger
from rxn.onmt_models import __version__ as onmt_models_version
from rxn.onmt_models import defaults
from rxn.onmt_models.training_files import OnmtPreprocessedFiles, RxnPreprocessingFiles
from rxn.onmt_models.utils import log_file_name_from_time, run_command
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def determine_train_dataset(
    data: RxnPreprocessingFiles, model_task: str
) -> Tuple[Path, Path]:
    """
    Get the paths to the src and tgt dataset, trying to get the augmented
    one if it exists.

    Args:
        data: info about training files.
        model_task: model task.

    Returns:
        Tuple for the src and tgt files (the augmented ones if possible).
    """
    src_candidate = data.get_src_file("train", model_task)
    tgt_candidate = data.get_tgt_file("train", model_task)
    augmented_src = data.augmented(src_candidate)
    augmented_tgt = data.augmented(tgt_candidate)
    # Prefer the augmented data set, but only when both files are present.
    if augmented_src.exists() and augmented_tgt.exists():
        logger.info(f'Found augmented train split in "{data.processed_data_dir}"')
        return augmented_src, augmented_tgt
    return src_candidate, tgt_candidate
@click.command()
@click.option(
    "--input_dir",
    type=str,
    required=True,
    help="Directory containing the output of prepare-data for the main data set.",
)
@click.option(
    "--additional_data",
    type=str,
    multiple=True,
    help="Directory containing the output of prepare-data for the additional data sets.",
)
@click.option(
    "--output_dir",
    type=str,
    required=True,
    help="Where to save the preprocessed OpenNMT files.",
)
@click.option(
    "--model_task", type=click.Choice(["forward", "retro", "context"]), required=True
)
@click.option(
    "--truncated_valid_size",
    default=defaults.VALIDATION_TRUNCATE_SIZE,
    help=(
        "Number of samples from the validation set to consider for reporting the accuracy "
        "on the validation set. From experiences, taking values larger than 10k just "
        "leads to longer training times without much gain. Use -1 for no truncation."
    ),
)
@click.option(
    "--truncation_shuffling_seed",
    default=defaults.SEED,
    help="Random seed to use for shuffling the validation reactions before truncation.",
)
@click.option(
    "--vocab",
    type=str,
    help=(
        "Token vocabulary file (one token per line). Required only in order "
        "to add tokens not in the dataset when training the base model."
    ),
)
def main(
    input_dir: str,
    additional_data: Tuple[str, ...],
    output_dir: str,
    model_task: str,
    truncated_valid_size: int,
    truncation_shuffling_seed: int,
    vocab: Optional[str],
) -> None:
    """Preprocess the training files for OpenNMT models (wraps onmt_preprocess).

    The input_dir must contain the following files:
        data.processed.train.precursors_tokens
        data.processed.train.products_tokens
        data.processed.validation.precursors_tokens
        data.processed.validation.products_tokens

    The script will generate the following files in output_dir:
        preprocessed.train.0.pt
        preprocessed.valid.0.pt
        preprocessed.vocab.pt
        ... (and additional indices for train and valid if the dataset is large)

    Preprocessing data for multitask training is also supported, if at least one
    `additional_data` parameter is given.
    """
    # Set up the paths
    main_data_files = RxnPreprocessingFiles(input_dir)
    onmt_preprocessed_files = OnmtPreprocessedFiles(output_dir)
    # Set up the logs
    log_file = onmt_preprocessed_files.preprocessed_dir / log_file_name_from_time(
        "rxn-onmt-preprocess"
    )
    setup_console_and_file_logger(log_file)
    logger.info("Preprocess data for RXN-OpenNMT models.")
    logger.info(f"rxn-onmt-utils version: {onmt_utils_version}. ")
    logger.info(f"rxn-onmt-models version: {onmt_models_version}. ")
    train_src, train_tgt = determine_train_dataset(main_data_files, model_task)
    valid_src: PathLike = main_data_files.get_src_file("valid", model_task)
    valid_tgt: PathLike = main_data_files.get_tgt_file("valid", model_task)
    train_srcs: List[PathLike] = [train_src]
    train_tgts: List[PathLike] = [train_tgt]
    # Collect the train files of the additional data sets. (The loop index
    # previously produced by enumerate was unused and has been dropped.)
    for additional_data_path in additional_data:
        data_files = RxnPreprocessingFiles(additional_data_path)
        src, tgt = determine_train_dataset(data_files, model_task)
        train_srcs.append(src)
        train_tgts.append(tgt)
    if truncated_valid_size != -1 and count_lines(valid_src) > truncated_valid_size:
        logger.info(
            f"The validation set will be truncated to {truncated_valid_size} lines."
        )
        # Load all samples and put in list of src-tgt tuples
        valid_src_tgt = list(
            zip(load_list_from_file(valid_src), load_list_from_file(valid_tgt))
        )
        # Shuffle the samples and truncate
        random.seed(truncation_shuffling_seed)
        random.shuffle(valid_src_tgt)
        valid_src_tgt = valid_src_tgt[:truncated_valid_size]
        # Write to new files
        valid_src = onmt_preprocessed_files.preprocessed_dir / "truncated_valid_src.txt"
        valid_tgt = onmt_preprocessed_files.preprocessed_dir / "truncated_valid_tgt.txt"
        dump_list_to_file((src for src, _ in valid_src_tgt), valid_src)
        dump_list_to_file((tgt for _, tgt in valid_src_tgt), valid_tgt)
        logger.info(
            f'The truncated validation set was saved to "{valid_src}" and "{valid_tgt}".'
        )
    # Tokenize all the files if necessary
    train_srcs = [ensure_tokenized_file(f) for f in train_srcs]
    train_tgts = [ensure_tokenized_file(f) for f in train_tgts]
    valid_src = ensure_tokenized_file(valid_src)
    valid_tgt = ensure_tokenized_file(valid_tgt)
    # yapf: disable
    command_and_args = [
        str(e) for e in [
            'onmt_preprocess',
            '-train_src', *train_srcs,
            '-train_tgt', *train_tgts,
            '-valid_src', valid_src,
            '-valid_tgt', valid_tgt,
            '-save_data', onmt_preprocessed_files.preprocess_prefix,
            '-src_seq_length', 3000,
            '-tgt_seq_length', 3000,
            '-src_vocab_size', 3000,
            '-tgt_vocab_size', 3000,
            '-share_vocab',
            '-overwrite',
        ]
    ]
    # yapf: enable
    if vocab is not None:
        # Use the user-provided vocabulary for both source and target.
        command_and_args.extend(["-src_vocab", vocab, "-tgt_vocab", vocab])
    if additional_data:
        # Assign ids to the training corpora (needed in the multi-task setting).
        train_ids = preprocessed_id_names(len(additional_data))
        command_and_args.extend(["-train_ids", *train_ids])
    run_command(command_and_args)
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_onmt_preprocess.py | 0.829043 | 0.37711 | rxn_onmt_preprocess.py | pypi |
from typing import Iterator, List, Optional, Union
import click
from attr import define
from rxn.onmt_utils.train_command import RxnCommand
import rxn.onmt_models.defaults as defaults
_CONTEXT_DATA_BATCH_SIZE = 8
class Parameter:
    """
    Parameter to be queried to the user, if the command(s) are necessary.
    """

    def __init__(
        self,
        key: str,
        query: str,
        default: Union[int, float],
        commands: RxnCommand,
        optional: bool = True,
    ):
        """
        Args:
            key: parameter name as it is forwarded to the scripts.
            query: string displayed to the user when querying.
            default: default value for this parameter.
            commands: command(s) that this parameter is needed for.
            optional: if a parameter is optional and its queried value is
                equal to the default, it will not be displayed to the user in
                the command(s) to execute.
        """
        self.key = key
        self.query = query
        self.commands = commands
        self.optional = optional
        self.default = default
        # The concrete Python type of the default value (int or float).
        self.type = type(default)
@define
class ContextOptions:
    """Options for generating the context-prediction dataset."""
    # Batch size for the tagging model when creating context prediction data.
    tagging_batch_size: int
@define
class AugmentOptions:
    """Options for the augmentation of one dataset."""
    # How many augmented samples to produce for each original sample.
    number_augmentations: int
@define
class DatasetOptions:
    """User-provided information about one training dataset."""
    # Path to the raw data set (TXT).
    txt_path: str
    # Directory where the processed data set is (or will be) saved.
    processed_path: str
    # Training weight for this data set (relevant for multi-task training).
    weight: int
    # Augmentation options; None if no augmentation is required.
    augment: Optional[AugmentOptions]
class TrainingPlanner:
    """
    Class that will take the user through the values needed for training models,
    in an interactive manner.
    """
    def __init__(self) -> None:
        # All the logic runs directly in the constructor, to avoid the
        # necessity of initially setting all the values to None.
        self.model_task = click.prompt(
            "Please enter the model task",
            type=click.Choice(["forward", "retro", "context"]),
        )
        # Sets self.finetuning, self.needed_commands and self.train_from.
        self._query_about_finetuning()
        self.on_gpu = click.confirm("GPU available?", default=True)
        self.datasets = self._get_datasets()
        self.preprocess_seed = click.prompt(
            "Seed for data preprocessing", type=int, default=defaults.SEED
        )
        # Only set for the "context" model task; None otherwise.
        self.context_options = self._maybe_get_context_options()
        self.onmt_preprocessed = click.prompt(
            "Where to save the OpenNMT-preprocessed data", type=str
        )
        self.onmt_models = click.prompt("Where to save the OpenNMT models", type=str)
        # Defines self.parameters, then queries the user for self.param_values.
        self._initialize_parameters()
        self._query_parameters()
    def prepare_data_cmd(self) -> Iterator[str]:
        """Yield one rxn-prepare-data command per dataset."""
        for dataset in self.datasets:
            yield self._prepare_data_cmd(dataset, self.preprocess_seed)
    def prepare_context_data_cmd(self) -> Iterator[str]:
        """Yield one rxn-create-context-dataset command per dataset."""
        for dataset in self.datasets:
            yield self._prepare_context_data_cmd(dataset.processed_path)
    def augment_data_cmd(self) -> Iterator[str]:
        """Yield the augmentation commands (datasets with augmentation only)."""
        for dataset in self.datasets:
            cmd = self._augment_cmd(dataset)
            if cmd is not None:
                yield cmd
    def preprocess_cmd(self) -> str:
        """Build the rxn-onmt-preprocess command."""
        cmd = (
            "rxn-onmt-preprocess "
            f"--input_dir {self.datasets[0].processed_path} "
            f"--output_dir {self.onmt_preprocessed} "
            f"--model_task {self.model_task} "
        )
        # Datasets beyond the first one are passed as additional (multitask) data.
        for dataset in self.datasets[1:]:
            cmd += f"--additional_data {dataset.processed_path} "
        return cmd
    def train_or_finetune_cmd(self) -> str:
        """Build the training command - training from scratch or fine-tuning."""
        if self.finetuning:
            return self.finetune_cmd()
        else:
            return self.train_cmd()
    def train_cmd(self) -> str:
        """Build the rxn-onmt-train command."""
        cmd = (
            "rxn-onmt-train "
            f"--model_output_dir {self.onmt_models} "
            f"--preprocess_dir {self.onmt_preprocessed} "
        )
        cmd += self._parameters_for_cmd(RxnCommand.T)
        cmd += self._data_weights()
        cmd += self._gpu()
        return cmd
    def finetune_cmd(self) -> str:
        """Build the rxn-onmt-finetune command."""
        cmd = (
            "rxn-onmt-finetune "
            f"--train_from {self.train_from} "
            f"--model_output_dir {self.onmt_models} "
            f"--preprocess_dir {self.onmt_preprocessed} "
        )
        cmd += self._parameters_for_cmd(RxnCommand.F)
        cmd += self._data_weights()
        cmd += self._gpu()
        return cmd
    def continue_training_cmd(self) -> str:
        """Build the rxn-onmt-continue-training command."""
        cmd = (
            "rxn-onmt-continue-training "
            f"--model_output_dir {self.onmt_models} "
            f"--preprocess_dir {self.onmt_preprocessed} "
        )
        cmd += self._parameters_for_cmd(RxnCommand.C)
        cmd += self._data_weights()
        cmd += self._gpu()
        return cmd
    def _query_about_finetuning(self) -> None:
        """Ask whether this is fine-tuning; this determines the needed commands."""
        self.finetuning = click.confirm(
            "Are you fine-tuning an existing model?", default=False
        )
        if self.finetuning:
            self.needed_commands = [RxnCommand.F, RxnCommand.C]
            self.train_from = click.prompt("Path to the base model", type=str)
        else:
            self.needed_commands = [RxnCommand.T, RxnCommand.C]
            self.train_from = None
    def _initialize_parameters(self) -> None:
        """Define all the parameters that may be queried to the user."""
        self.parameters = [
            Parameter("batch_size", "Batch size", defaults.BATCH_SIZE, RxnCommand.TCF),
            Parameter(
                "train_num_steps",
                "Number of training steps",
                100000,
                RxnCommand.TCF,
                optional=False,
            ),
            Parameter(
                "learning_rate", "Learning rate", defaults.LEARNING_RATE, RxnCommand.TF
            ),
            Parameter("dropout", "Dropout", defaults.DROPOUT, RxnCommand.TF),
            Parameter(
                "heads", "Number of transformer heads", defaults.HEADS, RxnCommand.T
            ),
            Parameter("layers", "Number of layers", defaults.LAYERS, RxnCommand.T),
            Parameter("rnn_size", "RNN size", defaults.RNN_SIZE, RxnCommand.T),
            Parameter(
                "transformer_ff",
                "Size of hidden transformer feed-forward",
                defaults.TRANSFORMER_FF,
                RxnCommand.T,
            ),
            Parameter(
                "word_vec_size",
                "Word embedding size",
                defaults.WORD_VEC_SIZE,
                RxnCommand.T,
            ),
            Parameter(
                "warmup_steps",
                "Number of warmup steps",
                defaults.WARMUP_STEPS,
                RxnCommand.TF,
            ),
            Parameter("seed", "Random seed for training", defaults.SEED, RxnCommand.TF),
        ]
    def _query_parameters(self) -> None:
        """
        Query the user about the values of all necessary parameters.
        """
        self.param_values = {}
        for p in self.parameters:
            # Only query parameters relevant for the command(s) to be executed.
            is_needed = any(cmd in p.commands for cmd in self.needed_commands)
            if not is_needed:
                continue
            value = click.prompt(p.query, type=p.type, default=p.default)
            self.param_values[p.key] = value
    def _get_datasets(self) -> List[DatasetOptions]:
        """
        Get the information on datasets from the user.
        """
        datasets = []
        number_datasets = click.prompt(
            "Number of datasets (more than one means multitask learning)",
            type=click.IntRange(min=1),
            default=1,
        )
        for i in range(number_datasets):
            data_txt = click.prompt(f"Path to the data set (TXT) no {i + 1}", type=str)
            data_dir = click.prompt(
                f"Where to save the processed data set no {i + 1}", type=str
            )
            # weight does not need to be queried if there's only one dataset
            if number_datasets == 1:
                weight = 1
            else:
                weight = click.prompt(
                    f"Training weight for data set no {i + 1}",
                    type=click.IntRange(min=1),
                )
            datasets.append(
                DatasetOptions(
                    txt_path=data_txt,
                    processed_path=data_dir,
                    weight=weight,
                    augment=self._maybe_get_augment_options(i + 1),
                )
            )
        return datasets
    def _maybe_get_context_options(self) -> Optional[ContextOptions]:
        """Query context-specific options; None unless the task is "context"."""
        if self.model_task != "context":
            return None
        tagging_batch_size = click.prompt(
            "Batch size for generating context prediction data",
            type=int,
            default=_CONTEXT_DATA_BATCH_SIZE,
        )
        return ContextOptions(tagging_batch_size=tagging_batch_size)
    def _maybe_get_augment_options(self, dataset_no: int) -> Optional[AugmentOptions]:
        """Query augmentation options for one dataset; None if not wanted."""
        augment = click.confirm(
            f"Would you like to augment the data set {dataset_no}?", default=False
        )
        if not augment:
            return None
        n_augmentations = click.prompt(
            "Number of augmentations per sample", type=click.IntRange(min=1)
        )
        return AugmentOptions(number_augmentations=n_augmentations)
    def _parameters_for_cmd(self, command: RxnCommand) -> str:
        """
        Get the string to append to the command for all the parameters associated
        with a command type.
        """
        to_add = ""
        for p in self.parameters:
            if command not in p.commands:
                continue
            param_value = self.param_values[p.key]
            # Optional parameters equal to their default are not displayed.
            equal_to_default = param_value == p.default
            if p.optional and equal_to_default:
                continue
            to_add += f"--{p.key} {param_value} "
        return to_add
    @staticmethod
    def _prepare_data_cmd(dataset: DatasetOptions, prepare_seed: int) -> str:
        """Build the rxn-prepare-data command for one dataset."""
        command = (
            f"rxn-prepare-data --input_data {dataset.txt_path} "
            f"--output_dir {dataset.processed_path} "
        )
        # Only add the seed flag when it deviates from the default.
        if prepare_seed != defaults.SEED:
            command += f"--split_seed {prepare_seed} "
        return command
    def _augment_cmd(self, dataset: DatasetOptions) -> Optional[str]:
        """Build the rxn-onmt-augment command; None if no augmentation is wanted."""
        if dataset.augment is None:
            return None
        return (
            f"rxn-onmt-augment --data_dir {dataset.processed_path} --model_task "
            f"{self.model_task} -n {dataset.augment.number_augmentations}"
        )
    def _prepare_context_data_cmd(self, data_dir: str) -> str:
        """Build the rxn-create-context-dataset command for one dataset."""
        if self.context_options is None:
            raise RuntimeError("Context options not defined.")
        command = f"rxn-create-context-dataset --data_dir {data_dir} "
        # Only add the batch size flag when it deviates from the default.
        if self.context_options.tagging_batch_size != _CONTEXT_DATA_BATCH_SIZE:
            command += f"--batch_size {self.context_options.tagging_batch_size} "
        return command
    def _data_weights(self) -> str:
        """Build the --data_weights flags (needed for multi-task training only)."""
        data_weights = ""
        if len(self.datasets) > 1:
            for dataset in self.datasets:
                data_weights += f"--data_weights {dataset.weight} "
        return data_weights
    def _gpu(self) -> str:
        """Build the flag disabling the GPU, if no GPU is available."""
        if self.on_gpu:
            return ""
        return "--no_gpu "
@click.command()
def main() -> None:
    """Interactive program to plan the training of RXN OpenNMT models.
    It will ask a user for the values needed for training, and then print all
    the commands to be executed.
    """
    # Note: the docstring above doubles as the --help text and is kept as-is.
    print("Interactive program to plan the training of RXN OpenNMT models.")
    print("NOTE: Please avoid using paths with whitespaces.")
    planner = TrainingPlanner()
    print("Here are the commands to launch a training with RXN:\n")
    print("# 1) Prepare the data (standardization, filtering, etc.)")
    for cmd in planner.prepare_data_cmd():
        print(cmd)
    print()
    if planner.model_task == "context":
        print(
            "# 1b) Prepare context prediction data (requires rxn-context-prediction package)"
        )
        for cmd in planner.prepare_context_data_cmd():
            print(cmd)
        print()
    requires_augmentation = any(ds.augment is not None for ds in planner.datasets)
    if requires_augmentation:
        print("# 1c) Augment the data")
        for cmd in planner.augment_data_cmd():
            print(cmd)
        print()
    print(f"# 2) Preprocess the data with OpenNMT\n{planner.preprocess_cmd()}\n")
    print(f"# 3) Train the model\n{planner.train_or_finetune_cmd()}\n")
    print(f"# 4) If necessary: continue training\n{planner.continue_training_cmd()}")
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_plan_training.py | 0.885761 | 0.262286 | rxn_plan_training.py | pypi |
import logging
import random
from pathlib import Path
from typing import Tuple
import click
from rxn.utilities.files import stable_shuffle
from rxn.utilities.logging import setup_console_logger
from rxn.onmt_models.augmentation import augment_translation_dataset
from rxn.onmt_models.training_files import RxnPreprocessingFiles
# Module-level logger; the NullHandler avoids "no handler found" warnings when
# the library is used without logging being configured.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
@click.command(context_settings=dict(show_default=True))
@click.option(
    "--data_dir",
    type=click.Path(exists=True, file_okay=False, path_type=Path),
    required=True,
    help="Path to the output directory of rxn-prepare-data.",
)
@click.option(
    "--model_task", type=click.Choice(["forward", "retro", "context"]), required=True
)
@click.option(
    "--splits",
    "-s",
    type=click.Choice(["train", "validation", "test"]),
    default=("train",),
    multiple=True,
    help="Which split(s) to augment.",
)
@click.option(
    "--n_augmentations",
    "-n",
    type=int,
    required=True,
    help="How many augmented samples to produce for each input.",
)
@click.option(
    "--keep_original/--discard_original",
    default=True,
    help="Whether to keep the original sample along the augmented ones.",
)
@click.option(
    "--shuffle/--no_shuffle",
    default=True,
    help="Whether to shuffle the augmented files.",
)
@click.option(
    "--seed",
    default=42,
    help="Random seed.",
)
def main(
    data_dir: Path,
    model_task: str,
    splits: Tuple[str, ...],
    n_augmentations: int,
    keep_original: bool,
    shuffle: bool,
    seed: int,
) -> None:
    """
    Augment the training data.
    Notes:
        1) the input is augmented, while the output is not. But it may need
        to be duplicated if there are several augmented samples for each input.
        2) Preferred to the augmentation in rxn-reaction-preprocessing, which
        is limited to forward and retro predictions.
    """
    setup_console_logger()
    random.seed(seed)
    data_files = RxnPreprocessingFiles(data_dir)
    for split in splits:
        logger.info(f"Augmenting split: {split}")
        src = data_files.get_src_file(split, model_task)
        tgt = data_files.get_tgt_file(split, model_task)
        src_augmented = data_files.augmented(src)
        tgt_augmented = data_files.augmented(tgt)
        # Fixed: the tgt_augmented path was missing its opening quote in the log.
        logger.info(
            f'Augmenting the dataset: "{src}" -> "{src_augmented}" and '
            f'"{tgt}" -> "{tgt_augmented}"'
        )
        augment_translation_dataset(
            src_in=src,
            src_out=src_augmented,
            tgt_in=tgt,
            tgt_out=tgt_augmented,
            n_augmentations=n_augmentations,
            keep_original=keep_original,
        )
        if shuffle:
            logger.info(
                f'Shuffling the src "{src_augmented}" and tgt "{tgt_augmented}"'
            )
            # Note: the seed must be identical for both shuffles!
            stable_shuffle(src_augmented, src_augmented, seed)
            stable_shuffle(tgt_augmented, tgt_augmented, seed)
if __name__ == "__main__":
main() | /rxn-onmt-models-1.0.1.tar.gz/rxn-onmt-models-1.0.1/src/rxn/onmt_models/scripts/rxn_onmt_augment.py | 0.761361 | 0.217639 | rxn_onmt_augment.py | pypi |
from argparse import Namespace
from typing import Any, Dict, List
import torch
from onmt.inputters.text_dataset import TextMultiField
from rxn.utilities.files import PathLike
def get_model_vocab(model_path: PathLike) -> List[str]:
    """
    Get the vocabulary from a model checkpoint.
    Args:
        model_path: model checkpoint, such as `model_step_100000.pt`.
    """
    # Always load on CPU, wherever the checkpoint was saved from.
    ckpt = torch.load(model_path, map_location=lambda storage, loc: storage)
    return _torch_vocab_to_list(ckpt["vocab"])
def get_preprocessed_vocab(vocab_path: PathLike) -> List[str]:
    """
    Get the vocabulary from the file saved by OpenNMT during preprocessing.
    Args:
        vocab_path: vocab file, such as `preprocessed.vocab.pt`.
    """
    return _torch_vocab_to_list(torch.load(vocab_path))
def model_vocab_is_compatible(model_pt: PathLike, vocab_pt: PathLike) -> bool:
    """
    Determine whether the vocabulary contained in a model checkpoint contains
    all the necessary tokens from a vocab file.
    Args:
        model_pt: model checkpoint, such as `model_step_100000.pt`.
        vocab_pt: vocab file, such as `preprocessed.vocab.pt`.
    """
    required_tokens = set(get_preprocessed_vocab(vocab_pt))
    available_tokens = set(get_model_vocab(model_pt))
    return required_tokens <= available_tokens
def _torch_vocab_to_list(vocab: Dict[str, Any]) -> List[str]:
    """Extract the token list from an OpenNMT vocab dict (src must equal tgt)."""
    src_tokens = _multifield_vocab_to_list(vocab["src"])
    tgt_tokens = _multifield_vocab_to_list(vocab["tgt"])
    if src_tokens != tgt_tokens:
        raise RuntimeError("Handling of different src/tgt vocab not implemented")
    return src_tokens
def _multifield_vocab_to_list(multifield: TextMultiField) -> List[str]:
    """Return a copy of the index-to-token list of the multifield's base field."""
    return list(multifield.base_field.vocab.itos)
def get_model_opt(model_path: PathLike) -> Namespace:
    """
    Get the args ("opt") for the given model checkpoint.
    Args:
        model_path: model checkpoint, such as `model_step_100000.pt`.
    """
    # Always load on CPU, wherever the checkpoint was saved from.
    ckpt = torch.load(model_path, map_location=lambda storage, loc: storage)
    return ckpt["opt"]
def get_model_rnn_size(model_path: PathLike) -> int:
    """
    Get the value of rnn_size for the given model checkpoint.
    Args:
        model_path: model checkpoint, such as `model_step_100000.pt`.
    """
    opt = get_model_opt(model_path)
    return opt.rnn_size
def get_model_dropout(model_path: PathLike) -> float:
    """
    Get the value of the dropout for the given model checkpoint.
    Args:
        model_path: model checkpoint, such as `model_step_100000.pt`.
    """
    # OpenNMT allows a schedule of several dropout values during training;
    # only a single constant value is supported here.
    dropouts = get_model_opt(model_path).dropout
    if len(dropouts) == 1:
        return dropouts[0]
    raise ValueError(f"Expected one dropout value. Actual: {dropouts}")
def get_model_seed(model_path: PathLike) -> int:
"""
Get the value of the seed for the given model checkpoint.
Args:
model_path: model checkpoint, such as `model_step_100000.pt`.
"""
return get_model_opt(model_path).seed | /rxn-onmt-utils-1.0.3.tar.gz/rxn-onmt-utils-1.0.3/src/rxn/onmt_utils/model_introspection.py | 0.958421 | 0.527256 | model_introspection.py | pypi |
import logging
from typing import List
import torch
import torch.nn as nn
from onmt.model_builder import build_model # type: ignore
from onmt.utils.parse import ArgumentParser # type: ignore
from rxn.utilities.files import PathLike
from torch.nn.init import xavier_uniform_
# Module-level logger; the NullHandler avoids "no handler found" warnings when
# the library is used without logging being configured.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def init_parameters(model_opt, parameters):
    """Initialise newly created (extended) weights.
    Args:
        model_opt: original model opts (uses param_init and param_init_glorot)
        parameters: newly created weights
    """
    init_range = model_opt.param_init
    if init_range != 0.0:
        parameters.data.uniform_(-init_range, init_range)
    if model_opt.param_init_glorot:
        if parameters.dim() > 1:
            # Matrix-shaped parameters get Xavier initialisation.
            xavier_uniform_(parameters)
        elif parameters.dim() == 1:
            # One-dimensional parameters are likely bias vectors -> set to zero.
            parameters.data.zero_()
class ModelResizer:
    def __init__(self, model_path: PathLike):
        """Resizing pretrained onmt models for training on extended vocab.
        Args:
            model_path: Path to model checkpoint
        """
        # Always load on CPU, wherever the checkpoint was saved from.
        self.checkpoint = torch.load(
            model_path, map_location=lambda storage, loc: storage
        )
        self.vocab = self.checkpoint["vocab"]
        # Recover and re-validate the options the model was trained with.
        self.model_opt = ArgumentParser.ckpt_model_opts(self.checkpoint["opt"])
        ArgumentParser.update_model_opts(self.model_opt)
        ArgumentParser.validate_model_opts(self.model_opt)
        # make it CUDA independent
        self.model_opt.gpu = -1
        self.model_opt.gpu_ranks = []
        # Rebuild the model from the checkpoint with the patched options.
        self.model = build_model(
            self.model_opt, self.model_opt, self.vocab, self.checkpoint
        )
    def extend_vocab(self, new_vocab_path: PathLike):
        """Extend vocab and size of a model using a new vocab file.
        Args:
            new_vocab_path: Path to new vocab file (vocab.pt generated by ONMT)
        """
        new_vocab = torch.load(
            new_vocab_path, map_location=lambda storage, loc: storage
        )
        # First extend the vocab on both sides...
        self._extend_field_vocab(new_vocab, "src")
        self._extend_field_vocab(new_vocab, "tgt")
        # ... then resize the corresponding model weights accordingly.
        self._resize_encoder()
        self._resize_decoder()
        self._resize_generator()
    def _extend_field_vocab(self, new_vocab, field: str) -> List[str]:
        """Extends model vocab with new vocab and returns the added tokens as a list."""
        # Append every token of the new vocab that the model does not know yet,
        # keeping the index-to-string and string-to-index mappings consistent.
        # NOTE(review): token frequencies are not updated - confirm acceptable.
        added_tokens = []
        for t in new_vocab[field].base_field.vocab.itos:
            if t not in self.vocab[field].base_field.vocab.stoi:
                added_tokens.append(t)
                self.vocab[field].base_field.vocab.itos.append(t)
                self.vocab[field].base_field.vocab.stoi[t] = (
                    len(self.vocab[field].base_field.vocab.itos) - 1
                )
        logger.debug(f"Added {len(added_tokens)} {field} tokens:\n{added_tokens}")
        return added_tokens
    def _resize_embedding(self, old_embeddings, num_added_tokens: int):
        """Return a new embedding layer with `num_added_tokens` extra rows."""
        sparse = old_embeddings.sparse
        padding_idx = old_embeddings.padding_idx
        embedding_dim = old_embeddings.embedding_dim
        # Fresh rows for the new tokens, initialised like new parameters.
        weight_extension = nn.Parameter(  # type: ignore
            torch.Tensor(num_added_tokens, embedding_dim)  # type: ignore
        )  # type: ignore
        init_parameters(self.model_opt, weight_extension)
        # Concatenate the pretrained weights with the extension rows.
        new_weights = nn.Parameter(  # type: ignore
            torch.cat([old_embeddings.weight, weight_extension.data])
        )  # type: ignore
        new_embeddings = nn.Embedding(
            new_weights.shape[0], embedding_dim, sparse=sparse, padding_idx=padding_idx
        )
        new_embeddings.load_state_dict({"weight": new_weights})
        return new_embeddings
def _resize_encoder(self):
old_embeddings = self.model.encoder.embeddings.make_embedding.emb_luts[0]
num_added_tokens = (
len(self.vocab["src"].base_field.vocab) - old_embeddings.num_embeddings
)
new_embeddings = self._resize_embedding(old_embeddings, num_added_tokens)
self.model.encoder.embeddings.make_embedding.emb_luts[0] = new_embeddings
def _resize_decoder(self):
old_embeddings = self.model.decoder.embeddings.make_embedding.emb_luts[0]
num_added_tokens = (
len(self.vocab["tgt"].base_field.vocab) - old_embeddings.num_embeddings
)
new_embeddings = self._resize_embedding(old_embeddings, num_added_tokens)
self.model.decoder.embeddings.make_embedding.emb_luts[0] = new_embeddings
    def _resize_generator(self):
        """Grow the output projection (generator) to cover the extended tgt vocab."""
        old_linear = self.model.generator[0]
        num_added_tokens = (
            len(self.vocab["tgt"].base_field.vocab) - old_linear.out_features
        )
        # New weight rows for the added tokens, initialised like new parameters.
        weight_extension = nn.Parameter(  # type: ignore
            torch.Tensor(num_added_tokens, old_linear.in_features)  # type: ignore
        )
        init_parameters(self.model_opt, weight_extension)
        new_weights = nn.Parameter(  # type: ignore
            torch.cat([old_linear.weight, weight_extension.data])
        )
        # Same for the bias vector.
        bias_extension = nn.Parameter(torch.Tensor(num_added_tokens))  # type: ignore
        init_parameters(self.model_opt, bias_extension)
        new_bias = nn.Parameter(torch.cat([old_linear.bias, bias_extension.data]))  # type: ignore
        # Load the concatenated parameters into a freshly sized Linear layer.
        new_linear = nn.Linear(
            old_linear.in_features, len(self.vocab["tgt"].base_field.vocab)
        )
        new_linear.load_state_dict({"weight": new_weights, "bias": new_bias})
        self.model.generator[0] = new_linear
def save_checkpoint(self, save_path: PathLike):
"""Save checkpoint of resized model
Args:
save_path: output path
"""
model_state_dict = self.model.state_dict()
model_state_dict = {
k: v for k, v in model_state_dict.items() if "generator" not in k
}
generator_state_dict = self.model.generator.state_dict()
checkpoint = {
"model": model_state_dict,
"generator": generator_state_dict,
"vocab": self.vocab,
"opt": self.model_opt,
"optim": self.checkpoint["optim"],
}
logger.debug(f"Saving checkpoint to {save_path}.")
torch.save(checkpoint, save_path) | /rxn-onmt-utils-1.0.3.tar.gz/rxn-onmt-utils-1.0.3/src/rxn/onmt_utils/model_resize.py | 0.926628 | 0.307631 | model_resize.py | pypi |
import logging
import subprocess
from typing import List, Optional
from rxn.utilities.files import PathLike, iterate_lines_from_file
from .translator import Translator
# Module-level logger; the NullHandler avoids "no handler found" warnings when
# the library is used without logging being configured.
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def translate(
    model: PathLike,
    src: PathLike,
    tgt: Optional[PathLike],
    output: PathLike,
    n_best: int,
    beam_size: int,
    max_length: int,
    batch_size: int,
    gpu: bool,
    as_external_command: bool,
) -> None:
    """
    Run the translation, either via the OpenNMT binaries or from Python code.
    This is independent of any chemistry! As such, this does not take care of
    any tokenization either.
    Args:
        model: model checkpoint(s) to use.
        src: pointer to the file containing the source.
        tgt: pointer to the file containing the target, for calculation of the gold score.
        output: pointer to the file where to save the predictions.
        n_best: how many predictions to make per input.
        beam_size: beam size.
        max_length: max sequence length.
        batch_size: batch size for the prediction.
        gpu: whether to run the prediction on GPU.
        as_external_command: runs the onmt command instead of Python code.
    """
    if not gpu:
        logger.warning(
            "GPU option not set. Only CPUs will be used. The translation may be slow!"
        )
    if beam_size < n_best:
        logger.warning(
            f"The beam size ({beam_size}) is lower than the number of required "
            f"predictions ({n_best}). While this works, consider increasing the "
            f"beam size for better results."
        )
    # Select the implementation, then delegate with identical arguments.
    translation_fn = (
        translate_as_external_command
        if as_external_command
        else translate_as_python_code
    )
    translation_fn(
        model=model,
        src=src,
        tgt=tgt,
        output=output,
        n_best=n_best,
        beam_size=beam_size,
        max_length=max_length,
        batch_size=batch_size,
        gpu=gpu,
    )
    logger.info("Translation successful.")
def translate_as_python_code(
    model: PathLike,
    src: PathLike,
    tgt: Optional[PathLike],
    output: PathLike,
    n_best: int,
    beam_size: int,
    max_length: int,
    batch_size: int,
    gpu: bool,
) -> None:
    """
    Translate directly from Python - not by executing the OpenNMT command as a subprocess.
    See the function translate() for the documentation of the arguments.
    """
    logger.info(
        f'Running translation "{src}" -> "{output}", directly from Python code.'
    )
    if tgt is not None:
        # Note: the gold score is determined by comparing with the provided tgt.
        # This is not supported at the moment, when running from the Python code.
        logger.warning(
            "No gold scores can be calculated at the moment "
            "when translating directly in Python."
        )
    translator = Translator.from_model_path(
        model_path=str(model),
        beam_size=beam_size,
        max_length=max_length,
        batch_size=batch_size,
        gpu=0 if gpu else -1,
    )
    predictions = translator.translate_multiple_with_scores(
        iterate_lines_from_file(src), n_best=n_best
    )
    # Note: the "_log_probs" suffix corresponds to the name used by our OpenNMT fork.
    log_probs_file = str(output) + "_log_probs"
    with open(output, "wt") as f_predictions, open(log_probs_file, "wt") as f_log_probs:
        for candidates in predictions:
            for candidate in candidates:
                f_predictions.write(f"{candidate.text}\n")
                f_log_probs.write(f"{candidate.score}\n")
def translate_as_external_command(
    model: PathLike,
    src: PathLike,
    tgt: Optional[PathLike],
    output: PathLike,
    n_best: int,
    beam_size: int,
    max_length: int,
    batch_size: int,
    gpu: bool,
) -> None:
    """
    Translate by executing the OpenNMT command as a subprocess.
    See the function translate() for the documentation of the arguments.
    """
    if not gpu:
        logger.warning(
            "Running translation on CPU as a subprocess. Be careful "
            "when executing on a cluster: the subprocess may try to access "
            "all available cores."
        )
    cmd: List[str] = [
        "onmt_translate",
        "-model", str(model),
        "-src", str(src),
        "-output", str(output),
        "-log_probs",
        "-n_best", str(n_best),
        "-beam_size", str(beam_size),
        "-max_length", str(max_length),
        "-batch_size", str(batch_size),
    ]
    if tgt is not None:
        cmd += ["-tgt", str(tgt)]
    if gpu:
        cmd += ["-gpu", "0"]
    cmd_str = " ".join(cmd)
    logger.info(f"Running translation with command: {cmd_str}")
    try:
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError as e:
        error_message = f'The command "{cmd_str}" failed.'
        logger.error(error_message)
        raise RuntimeError(error_message) from e
from argparse import Namespace
from typing import Any, Iterable, Iterator, List, Optional, Union
from .internal_translation_utils import RawTranslator, TranslationResult, get_onmt_opt
class Translator:
"""
Wraps the OpenNMT translation functionality into a class.
"""
    def __init__(self, opt: Namespace):
        """
        Should not be called directly as implementation may change; call the
        classmethods from_model_path or from_opt instead.
        Args:
            opt: model options.
        """
        # The wrapped OpenNMT translation engine doing the actual work.
        self.onmt_translator = RawTranslator(opt=opt)
def translate_single(self, sentence: str) -> str:
"""
Translate one single sentence.
"""
translations = self.translate_sentences([sentence])
assert len(translations) == 1
return translations[0]
def translate_sentences(self, sentences: Iterable[str]) -> List[str]:
"""
Translate multiple sentences.
"""
translations = self.translate_multiple_with_scores(sentences)
return [t[0].text for t in translations]
    def translate_multiple_with_scores(
        self, sentences: Iterable[str], n_best: Optional[int] = None
    ) -> Iterator[List[TranslationResult]]:
        """
        Translate multiple sentences.
        Args:
            sentences: Sentences to translate.
            n_best: if provided, will overwrite the number of predictions to make.
        """
        # Only forward n_best when explicitly requested, so that the
        # translator's configured default is used otherwise.
        additional_opt_kwargs = {}
        if n_best is not None:
            additional_opt_kwargs["n_best"] = n_best
        translations = self.onmt_translator.translate_sentences_with_onmt(
            sentences, **additional_opt_kwargs
        )
        # This is a generator: translation happens lazily as results are consumed.
        yield from translations
@classmethod
def from_model_path(
cls, model_path: Union[str, Iterable[str]], **kwargs: Any
) -> "Translator":
"""
Create a Translator instance from the model path(s).
Args:
model_path: path to the translation model file(s).
If multiple are given, will be an ensemble model.
kwargs: Additional values to be parsed for instantiating the translator,
such as n_best, beam_size, max_length, etc.
"""
if isinstance(model_path, str):
model_path = [model_path]
opt = get_onmt_opt(translation_model=list(model_path), **kwargs)
return cls(opt=opt)
@classmethod
def from_opt(cls, opt: Namespace) -> "Translator":
"""
Create a Translator instance from the opt arguments.
Args:
opt: model options.
"""
return cls(opt=opt) | /rxn-onmt-utils-1.0.3.tar.gz/rxn-onmt-utils-1.0.3/src/rxn/onmt_utils/translator.py | 0.942692 | 0.381824 | translator.py | pypi |
"""Training on a single process."""
import os
import torch
from onmt.inputters.inputter import build_dataset_iter, \
load_old_vocab, old_style_vocab, build_dataset_iter_multiple
from onmt.model_builder import build_model
from onmt.utils.optimizers import Optimizer
from onmt.utils.misc import set_random_seed
from onmt.trainer import build_trainer
from onmt.models import build_model_saver
from onmt.utils.logging import init_logger, logger
from onmt.utils.parse import ArgumentParser
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return enc + dec, enc, dec
def configure_process(opt, device_id):
    """Bind this process to its GPU (if any) and seed the RNGs."""
    # device_id < 0 means CPU-only.
    if device_id >= 0:
        torch.cuda.set_device(device_id)
    # The second argument tells set_random_seed whether CUDA must be seeded too.
    set_random_seed(opt.seed, device_id >= 0)
def main(opt, device_id, batch_queue=None, semaphore=None):
# NOTE: It's important that ``opt`` has been validated and updated
# at this point.
configure_process(opt, device_id)
init_logger(opt.log_file)
assert len(opt.accum_count) == len(opt.accum_steps), \
'Number of accum_count values must match number of accum_steps'
# Load checkpoint if we resume from a previous training.
if opt.train_from:
logger.info('Loading checkpoint from %s' % opt.train_from)
checkpoint = torch.load(opt.train_from,
map_location=lambda storage, loc: storage)
model_opt = ArgumentParser.ckpt_model_opts(checkpoint["opt"])
ArgumentParser.update_model_opts(model_opt)
ArgumentParser.validate_model_opts(model_opt)
logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
vocab = checkpoint['vocab']
else:
checkpoint = None
model_opt = opt
vocab = torch.load(opt.data + '.vocab.pt')
# check for code where vocab is saved instead of fields
# (in the future this will be done in a smarter way)
if old_style_vocab(vocab):
fields = load_old_vocab(
vocab, opt.model_type, dynamic_dict=opt.copy_attn)
else:
fields = vocab
# Report src and tgt vocab sizes, including for features
for side in ['src', 'tgt']:
f = fields[side]
try:
f_iter = iter(f)
except TypeError:
f_iter = [(side, f)]
for sn, sf in f_iter:
if sf.use_vocab:
logger.info(' * %s vocab size = %d' % (sn, len(sf.vocab)))
# Build model.
model = build_model(model_opt, opt, fields, checkpoint)
n_params, enc, dec = _tally_parameters(model)
logger.info('encoder: %d' % enc)
logger.info('decoder: %d' % dec)
logger.info('* number of parameters: %d' % n_params)
_check_save_model_path(opt)
# Build optimizer.
optim = Optimizer.from_opt(model, opt, checkpoint=checkpoint)
# Build model saver
model_saver = build_model_saver(model_opt, opt, model, fields, optim)
trainer = build_trainer(
opt, device_id, model, fields, optim, model_saver=model_saver)
if batch_queue is None:
if len(opt.data_ids) > 1:
train_shards = []
for train_id in opt.data_ids:
shard_base = "train_" + train_id
train_shards.append(shard_base)
train_iter = build_dataset_iter_multiple(train_shards, fields, opt)
else:
if opt.data_ids[0] is not None and opt.data_ids[0] != 'None':
shard_base = "train_" + opt.data_ids[0]
else:
shard_base = "train"
train_iter = build_dataset_iter(shard_base, fields, opt)
else:
assert semaphore is not None, \
"Using batch_queue requires semaphore as well"
def _train_iter():
while True:
batch = batch_queue.get()
semaphore.release()
yield batch
train_iter = _train_iter()
valid_iter = build_dataset_iter(
"valid", fields, opt, is_train=False)
if len(opt.gpu_ranks):
logger.info('Starting training on GPU: %s' % opt.gpu_ranks)
else:
logger.info('Starting training on CPU, could be very slow')
train_steps = opt.train_steps
if opt.single_pass and train_steps > 0:
logger.warning("Option single_pass is enabled, ignoring train_steps.")
train_steps = 0
# added for mlflow integration
if opt.mlflow:
import mlflow
if opt.mlflow_experiment_name is not None:
mlflow.set_experiment(opt.mlflow_experiment_name)
if opt.mlflow_run_name is not None:
mlflow.start_run(run_name=opt.mlflow_run_name)
else:
mlflow.start_run()
for k, v in vars(opt).items():
mlflow.log_param(k, v)
mlflow.log_param('n_enc_parameters', enc)
mlflow.log_param('n_dec_parameters', dec)
mlflow.log_param('n_total_parameters', n_params)
import onmt
mlflow.log_param('onmt_version', onmt.__version__)
elif opt.wandb:
import wandb
init_dict = {}
if opt.wandb_project_name is not None:
init_dict['project'] = opt.wandb_project_name
if opt.wandb_run_name is not None:
init_dict['name'] = opt.wandb_run_name
wandb.init(**init_dict)
wandb.config.update({k:v for k, v in vars(opt).items()})
import onmt
wandb.config.update({'n_enc_parameters': enc,
'n_dec_parameters': dec,
'n_total_parameters': n_params,
'onmt_version': onmt.__version__
})
trainer.train(
train_iter,
train_steps,
save_checkpoint_steps=opt.save_checkpoint_steps,
valid_iter=valid_iter,
valid_steps=opt.valid_steps)
if trainer.report_manager.tensorboard_writer is not None:
if opt.mlflow:
mlflow.end_run()
elif opt.wandb:
wandb.finish()
else:
trainer.report_manager.tensorboard_writer.close() | /rxn_opennmt_py-1.1.4-py3-none-any.whl/onmt/train_single.py | 0.646906 | 0.3229 | train_single.py | pypi |
import torch
import torch.nn as nn
def context_gate_factory(gate_type, embeddings_size, decoder_size,
                         attention_size, output_size):
    """Instantiate the ContextGate variant selected by ``gate_type``.

    ``gate_type`` must be one of ``'source'``, ``'target'`` or ``'both'``.
    """
    constructors = {
        'source': SourceContextGate,
        'target': TargetContextGate,
        'both': BothContextGate,
    }
    assert gate_type in constructors, \
        "Not valid ContextGate type: {0}".format(gate_type)
    gate_cls = constructors[gate_type]
    return gate_cls(embeddings_size, decoder_size, attention_size, output_size)
class ContextGate(nn.Module):
    """
    Context gate is a decoder module that takes as input the previous word
    embedding, the current decoder state and the attention state, and
    produces a gate.

    The gate can be used to select the input from the target side context
    (decoder state), from the source context (attention state) or both.
    """

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(ContextGate, self).__init__()
        # The gate sees the concatenation of all three inputs.
        gate_input_size = embeddings_size + decoder_size + attention_size
        self.gate = nn.Linear(gate_input_size, output_size, bias=True)
        self.sig = nn.Sigmoid()
        # Separate projections of the source (attention) context and of the
        # target context (embedding + decoder state).
        self.source_proj = nn.Linear(attention_size, output_size)
        self.target_proj = nn.Linear(embeddings_size + decoder_size,
                                     output_size)

    def forward(self, prev_emb, dec_state, attn_state):
        """Return the gate ``z`` and the projected source/target contexts."""
        gate_input = torch.cat((prev_emb, dec_state, attn_state), dim=1)
        z = self.sig(self.gate(gate_input))
        proj_source = self.source_proj(attn_state)
        target_context = torch.cat((prev_emb, dec_state), dim=1)
        proj_target = self.target_proj(target_context)
        return z, proj_source, proj_target
class SourceContextGate(nn.Module):
    """Apply the context gate only to the source context"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(SourceContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        # The gate scales only the source projection; the target projection
        # passes through unscaled.
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        gated_source = z * source
        return self.tanh(target + gated_source)
class TargetContextGate(nn.Module):
    """Apply the context gate only to the target context"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(TargetContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        # The gate scales only the target projection; the source projection
        # passes through unscaled.
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        gated_target = z * target
        return self.tanh(gated_target + source)
class BothContextGate(nn.Module):
    """Apply the context gate to both contexts"""

    def __init__(self, embeddings_size, decoder_size,
                 attention_size, output_size):
        super(BothContextGate, self).__init__()
        self.context_gate = ContextGate(embeddings_size, decoder_size,
                                        attention_size, output_size)
        self.tanh = nn.Tanh()

    def forward(self, prev_emb, dec_state, attn_state):
        """Convexly mix the two contexts: ``z`` weights the source
        projection and ``1 - z`` the target projection."""
        z, source, target = self.context_gate(prev_emb, dec_state, attn_state)
        return self.tanh((1. - z) * target + z * source)
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.modules.sparse_activations import sparsemax
from onmt.utils.misc import aeq, sequence_mask
# This class is mainly used by decoder.py for RNNs but also
# by the CNN / transformer decoder when copy attention is used
# CNN has its own attention mechanism ConvMultiStepAttention
# Transformer has its own MultiHeadedAttention
class GlobalAttention(nn.Module):
    r"""
    Global attention takes a matrix and a query vector. It
    then computes a parameterized convex combination of the matrix
    based on the input query.

    Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`, to an output
    of size `dim`.

    .. mermaid::

       graph BT
          A[Query]
          subgraph RNN
            C[H 1]
            D[H 2]
            E[H N]
          end
          F[Attn]
          G[Output]
          A --> F
          C --> F
          D --> F
          E --> F
          C -.-> G
          D -.-> G
          E -.-> G
          F --> G

    All models compute the output as
    :math:`c = \sum_{j=1}^{\text{SeqLength}} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    They then apply a projection layer to [q, c].

    However they
    differ on how they compute the attention score.

    * Luong Attention (dot, general):
       * dot: :math:`\text{score}(H_j,q) = H_j^T q`
       * general: :math:`\text{score}(H_j, q) = H_j^T W_a q`

    * Bahdanau Attention (mlp):
       * :math:`\text{score}(H_j, q) = v_a^T \text{tanh}(W_a q + U_a h_j)`

    Args:
       dim (int): dimensionality of query and key
       coverage (bool): use coverage term
       attn_type (str): type of attention to use, options [dot,general,mlp]
       attn_func (str): attention function to use, options [softmax,sparsemax]
    """

    def __init__(self, dim, coverage=False, attn_type="dot",
                 attn_func="softmax"):
        super(GlobalAttention, self).__init__()

        self.dim = dim
        assert attn_type in ["dot", "general", "mlp"], (
            "Please select a valid attention type (got {:s}).".format(
                attn_type))
        self.attn_type = attn_type
        assert attn_func in ["softmax", "sparsemax"], (
            "Please select a valid attention function.")
        self.attn_func = attn_func

        if self.attn_type == "general":
            self.linear_in = nn.Linear(dim, dim, bias=False)
        elif self.attn_type == "mlp":
            self.linear_context = nn.Linear(dim, dim, bias=False)
            self.linear_query = nn.Linear(dim, dim, bias=True)
            self.v = nn.Linear(dim, 1, bias=False)
        # mlp wants it with bias
        out_bias = self.attn_type == "mlp"
        self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)

        if coverage:
            self.linear_cover = nn.Linear(1, dim, bias=False)

    def score(self, h_t, h_s):
        """
        Args:
          h_t (FloatTensor): sequence of queries ``(batch, tgt_len, dim)``
          h_s (FloatTensor): sequence of sources ``(batch, src_len, dim``

        Returns:
          FloatTensor: raw attention scores (unnormalized) for each src index
            ``(batch, tgt_len, src_len)``
        """
        # Check input sizes
        src_batch, src_len, src_dim = h_s.size()
        tgt_batch, tgt_len, tgt_dim = h_t.size()
        aeq(src_batch, tgt_batch)
        aeq(src_dim, tgt_dim)
        aeq(self.dim, src_dim)

        if self.attn_type in ["general", "dot"]:
            if self.attn_type == "general":
                h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
                h_t_ = self.linear_in(h_t_)
                h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
            h_s_ = h_s.transpose(1, 2)
            # (batch, t_len, d) x (batch, d, s_len) --> (batch, t_len, s_len)
            return torch.bmm(h_t, h_s_)
        else:
            # mlp (Bahdanau) score: v^T tanh(W q + U h)
            dim = self.dim
            wq = self.linear_query(h_t.view(-1, dim))
            wq = wq.view(tgt_batch, tgt_len, 1, dim)
            wq = wq.expand(tgt_batch, tgt_len, src_len, dim)

            uh = self.linear_context(h_s.contiguous().view(-1, dim))
            uh = uh.view(src_batch, 1, src_len, dim)
            uh = uh.expand(src_batch, tgt_len, src_len, dim)

            # (batch, t_len, s_len, d)
            wquh = torch.tanh(wq + uh)

            return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)

    def forward(self, source, memory_bank, memory_lengths=None, coverage=None):
        """

        Args:
          source (FloatTensor): query vectors ``(batch, tgt_len, dim)``
          memory_bank (FloatTensor): source vectors ``(batch, src_len, dim)``
          memory_lengths (LongTensor): the source context lengths ``(batch,)``
          coverage (FloatTensor): None (not supported yet)

        Returns:
          (FloatTensor, FloatTensor):

          * Computed vector ``(tgt_len, batch, dim)``
          * Attention distribtutions for each query
            ``(tgt_len, batch, src_len)``
        """

        # one step input
        if source.dim() == 2:
            one_step = True
            source = source.unsqueeze(1)
        else:
            one_step = False

        batch, source_l, dim = memory_bank.size()
        batch_, target_l, dim_ = source.size()
        aeq(batch, batch_)
        aeq(dim, dim_)
        aeq(self.dim, dim)
        if coverage is not None:
            batch_, source_l_ = coverage.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)

        if coverage is not None:
            cover = coverage.view(-1).unsqueeze(1)
            # NOTE(review): this modifies memory_bank in place (+=), so the
            # caller's tensor is clobbered when coverage is used — confirm
            # callers do not reuse memory_bank afterwards.
            memory_bank += self.linear_cover(cover).view_as(memory_bank)
            memory_bank = torch.tanh(memory_bank)

        # compute attention scores, as in Luong et al.
        align = self.score(source, memory_bank)

        if memory_lengths is not None:
            mask = sequence_mask(memory_lengths, max_len=align.size(-1))
            mask = mask.unsqueeze(1)  # Make it broadcastable.
            align.masked_fill_(~mask, -float('inf'))

        # Softmax or sparsemax to normalize attention weights
        if self.attn_func == "softmax":
            align_vectors = F.softmax(align.view(batch*target_l, source_l), -1)
        else:
            align_vectors = sparsemax(align.view(batch*target_l, source_l), -1)
        align_vectors = align_vectors.view(batch, target_l, source_l)

        # each context vector c_t is the weighted average
        # over all the source hidden states
        c = torch.bmm(align_vectors, memory_bank)

        # concatenate
        concat_c = torch.cat([c, source], 2).view(batch*target_l, dim*2)
        attn_h = self.linear_out(concat_c).view(batch, target_l, dim)
        if self.attn_type in ["general", "dot"]:
            attn_h = torch.tanh(attn_h)

        if one_step:
            attn_h = attn_h.squeeze(1)
            align_vectors = align_vectors.squeeze(1)

            # Check output sizes
            batch_, dim_ = attn_h.size()
            aeq(batch, batch_)
            aeq(dim, dim_)
            batch_, source_l_ = align_vectors.size()
            aeq(batch, batch_)
            aeq(source_l, source_l_)

        else:
            attn_h = attn_h.transpose(0, 1).contiguous()
            align_vectors = align_vectors.transpose(0, 1).contiguous()
            # Check output sizes
            target_l_, batch_, dim_ = attn_h.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(dim, dim_)
            target_l_, batch_, source_l_ = align_vectors.size()
            aeq(target_l, target_l_)
            aeq(batch, batch_)
            aeq(source_l, source_l_)

        return attn_h, align_vectors
import torch
import torch.nn as nn
from torch.autograd import Function
from onmt.modules.sparse_activations import _threshold_and_support
from onmt.utils.misc import aeq
class SparsemaxLossFunction(Function):
    """Autograd function for the sparsemax loss of
    :cite:`DBLP:journals/corr/MartinsA16`; the gradient is supplied
    analytically in :meth:`backward`."""
    @staticmethod
    def forward(ctx, input, target):
        """
        input (FloatTensor): ``(n, num_classes)``.
        target (LongTensor): ``(n,)``, the indices of the target classes
        """
        input_batch, classes = input.size()
        target_batch = target.size(0)
        aeq(input_batch, target_batch)
        # Score of the gold class for each row.
        z_k = input.gather(1, target.unsqueeze(1)).squeeze()
        # Per-row sparsemax threshold and support size (number of nonzeros).
        tau_z, support_size = _threshold_and_support(input, dim=1)
        support = input > tau_z
        # Sum z_j^2 - tau^2 over the support only; off-support terms are 0.
        x = torch.where(
            support, input**2 - tau_z**2,
            torch.tensor(0.0, device=input.device)
        ).sum(dim=1)
        ctx.save_for_backward(input, target, tau_z)
        # clamping necessary because of numerical errors: loss should be lower
        # bounded by zero, but negative values near zero are possible without
        # the clamp
        return torch.clamp(x / 2 - z_k + 0.5, min=0.0)
    @staticmethod
    def backward(ctx, grad_output):
        # Gradient w.r.t. input is sparsemax(input) minus the one-hot target.
        # NOTE(review): grad_output is not folded into the result — this
        # assumes a unit upstream gradient; confirm against SparsemaxLoss.
        input, target, tau_z = ctx.saved_tensors
        sparsemax_out = torch.clamp(input - tau_z, min=0)
        delta = torch.zeros_like(sparsemax_out)
        delta.scatter_(1, target.unsqueeze(1), 1)
        return sparsemax_out - delta, None
# Convenience callable, mirroring torch.autograd.Function usage.
sparsemax_loss = SparsemaxLossFunction.apply
class SparsemaxLoss(nn.Module):
    """
    An implementation of sparsemax loss, first proposed in
    :cite:`DBLP:journals/corr/MartinsA16`. If using
    a sparse output layer, it is not possible to use negative log likelihood
    because the loss is infinite in the case the target is assigned zero
    probability. Inputs to SparsemaxLoss are arbitrary dense real-valued
    vectors (like in nn.CrossEntropyLoss), not probability vectors (like in
    nn.NLLLoss).
    """

    def __init__(self, weight=None, ignore_index=-100,
                 reduction='elementwise_mean'):
        assert reduction in ['elementwise_mean', 'sum', 'none']
        self.reduction = reduction
        # NOTE(review): ``weight`` is stored but never applied below, so
        # class weighting is currently a no-op — confirm this is intended.
        self.weight = weight
        self.ignore_index = ignore_index
        super(SparsemaxLoss, self).__init__()

    def forward(self, input, target):
        """Compute the (optionally reduced) sparsemax loss.

        Args:
            input (FloatTensor): ``(n, num_classes)`` unnormalized scores.
            target (LongTensor): ``(n,)`` gold class indices.
        """
        loss = sparsemax_loss(input, target)
        if self.ignore_index >= 0:
            # Zero out padded positions and exclude them from the mean.
            ignored_positions = target == self.ignore_index
            size = float((target.size(0) - ignored_positions.sum()).item())
            loss.masked_fill_(ignored_positions, 0.0)
        else:
            size = float(target.size(0))
        if self.reduction == 'sum':
            loss = loss.sum()
        elif self.reduction == 'elementwise_mean':
            loss = loss.sum() / size
        return loss
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Parameter
def get_var_maybe_avg(namespace, var_name, training, polyak_decay):
    """Fetch a parameter, updating and possibly returning its Polyak average.

    The running average attribute ``<var_name>_avg`` is updated in place on
    every call (including eval mode). The raw parameter is returned while
    training; the averaged copy is returned otherwise.
    """
    current = getattr(namespace, var_name)
    running_avg = getattr(namespace, var_name + '_avg')
    # In-place exponential moving average: avg += (1 - decay) * (v - avg).
    running_avg.sub_((1 - polyak_decay) * (running_avg - current.data))
    return current if training else running_avg
def get_vars_maybe_avg(namespace, var_names, training, polyak_decay):
    """Apply :func:`get_var_maybe_avg` to each name in ``var_names``."""
    return [get_var_maybe_avg(namespace, name, training, polyak_decay)
            for name in var_names]
class WeightNormLinear(nn.Linear):
    """
    Implementation of "Weight Normalization: A Simple Reparameterization
    to Accelerate Training of Deep Neural Networks"
    :cite:`DBLP:journals/corr/SalimansK16`
    As a reparameterization method, weight normalization is same
    as BatchNormalization, but it doesn't depend on minibatch.
    NOTE: This is used nowhere in the code at this stage
    Vincent Nguyen 05/18/2018
    """
    def __init__(self, in_features, out_features,
                 init_scale=1., polyak_decay=0.9995):
        super(WeightNormLinear, self).__init__(
            in_features, out_features, bias=True)
        # V: direction parameter (aliases nn.Linear's weight);
        # g: per-output-feature scale; b: aliases the bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_features))
        self.b = self.bias
        # Polyak (exponential moving) averages, registered as buffers so
        # they are serialized but not trained.
        self.register_buffer(
            'V_avg', torch.zeros(out_features, in_features))
        self.register_buffer('g_avg', torch.zeros(out_features))
        self.register_buffer('b_avg', torch.zeros(out_features))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()
    def reset_parameters(self):
        # Intentional no-op: parameters are data-dependently initialized on
        # the first forward pass with ``init=True``.
        return
    def forward(self, x, init=False):
        if init is True:
            # Data-dependent init: sample V randomly, normalize its rows,
            # then choose g and b so the first batch's pre-activations have
            # zero mean and std init_scale.
            # out_features * in_features
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            # norm is out_features * 1
            # NOTE(review): relies on legacy Tensor.norm(2, 1) keep-dims
            # behavior from old PyTorch; on modern versions this expand_as
            # would fail — confirm the targeted torch version.
            v_norm = self.V.data / \
                self.V.data.norm(2, 1).expand_as(self.V.data)
            # batch_size * out_features
            x_init = F.linear(x, v_norm).data
            # out_features
            m_init, v_init = x_init.mean(0).squeeze(
                0), x_init.var(0).squeeze(0)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            x_init = scale_init.view(1, -1).expand_as(x_init) \
                * (x_init - m_init.view(1, -1).expand_as(x_init))
            # Seed the Polyak averages with the initialized values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Regular path: y = (g / ||V||) (V x) + b, using the Polyak
            # averages instead of the raw parameters in eval mode.
            v, g, b = get_vars_maybe_avg(self, ['V', 'g', 'b'],
                                         self.training,
                                         polyak_decay=self.polyak_decay)
            # batch_size * out_features
            x = F.linear(x, v)
            scalar = g / torch.norm(v, 2, 1).squeeze(1)
            x = scalar.view(1, -1).expand_as(x) * x + \
                b.view(1, -1).expand_as(x)
            return x
class WeightNormConv2d(nn.Conv2d):
    """Weight-normalized Conv2d with data-dependent initialization and
    Polyak-averaged parameters (same scheme as :class:`WeightNormLinear`)."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConv2d, self).__init__(in_channels, out_channels,
                                               kernel_size, stride, padding,
                                               dilation, groups)
        # V: direction parameter (aliases the conv weight); g: per-channel
        # scale; b: aliases the bias.
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias
        # Polyak averages, registered as buffers (saved, not trained).
        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))
        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()
    def reset_parameters(self):
        # Intentional no-op: see data-dependent init in forward(init=True).
        return
    def forward(self, x, init=False):
        if init is True:
            # Data-dependent init: random V, normalized per output channel,
            # then g and b chosen to whiten the first batch's outputs.
            # out_channels, in_channels // groups, * kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()
                                          ).type_as(self.V.data) * 0.05)
            # NOTE(review): .norm(2, 1) here relies on legacy keep-dims
            # behavior of old PyTorch — confirm the targeted torch version.
            v_norm = self.V.data / self.V.data.view(self.out_channels, -1)\
                .norm(2, 1).view(self.out_channels, *(
                    [1] * (len(self.kernel_size) + 1))).expand_as(self.V.data)
            x_init = F.conv2d(x, v_norm, None, self.stride,
                              self.padding, self.dilation, self.groups).data
            # Collapse all non-channel dims to compute per-channel stats.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            x_init = scale_init_shape.expand_as(
                x_init) * (x_init - m_init_shape.expand_as(x_init))
            # Seed the Polyak averages with the initialized values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Regular path: convolve with w = (g / ||V||) V, using Polyak
            # averages instead of raw parameters in eval mode.
            v, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = torch.norm(v.view(self.out_channels, -1), 2, 1)
            if len(scalar.size()) == 2:
                scalar = g / scalar.squeeze(1)
            else:
                scalar = g / scalar
            w = scalar.view(self.out_channels, *
                            ([1] * (len(v.size()) - 1))).expand_as(v) * v
            x = F.conv2d(x, w, b, self.stride,
                         self.padding, self.dilation, self.groups)
            return x
# This is used nowhere in the code at the moment (Vincent Nguyen 05/18/2018)
class WeightNormConvTranspose2d(nn.ConvTranspose2d):
    """Weight-normalized ConvTranspose2d with data-dependent initialization
    and Polyak-averaged parameters (same scheme as WeightNormLinear)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, output_padding=0, groups=1, init_scale=1.,
                 polyak_decay=0.9995):
        super(WeightNormConvTranspose2d, self).__init__(
            in_channels, out_channels,
            kernel_size, stride,
            padding, output_padding,
            groups)
        # in_channels, out_channels, *kernel_size
        self.V = self.weight
        self.g = Parameter(torch.Tensor(out_channels))
        self.b = self.bias

        self.register_buffer('V_avg', torch.zeros(self.V.size()))
        self.register_buffer('g_avg', torch.zeros(out_channels))
        self.register_buffer('b_avg', torch.zeros(out_channels))

        self.init_scale = init_scale
        self.polyak_decay = polyak_decay
        self.reset_parameters()

    def reset_parameters(self):
        # Intentional no-op: see data-dependent init in forward(init=True).
        return

    def forward(self, x, init=False):
        if init is True:
            # Data-dependent init: random V, normalized per output channel,
            # then g and b chosen to whiten the first batch's outputs.
            # in_channels, out_channels, *kernel_size
            self.V.data.copy_(torch.randn(self.V.data.size()).type_as(
                self.V.data) * 0.05)
            v_norm = self.V.data / self.V.data.transpose(0, 1).contiguous() \
                .view(self.out_channels, -1).norm(2, 1).view(
                    self.in_channels, self.out_channels,
                    *([1] * len(self.kernel_size))).expand_as(self.V.data)
            x_init = F.conv_transpose2d(
                x, v_norm, None, self.stride,
                self.padding, self.output_padding, self.groups).data
            # Bug fix: the original called the misspelled ``tranpose``,
            # which raised AttributeError at runtime.
            t_x_init = x_init.transpose(0, 1).contiguous().view(
                self.out_channels, -1)
            # out_features
            m_init, v_init = t_x_init.mean(1).squeeze(
                1), t_x_init.var(1).squeeze(1)
            # out_features
            scale_init = self.init_scale / \
                torch.sqrt(v_init + 1e-10)
            self.g.data.copy_(scale_init)
            self.b.data.copy_(-m_init * scale_init)
            scale_init_shape = scale_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))
            m_init_shape = m_init.view(
                1, self.out_channels, *([1] * (len(x_init.size()) - 2)))

            x_init = scale_init_shape.expand_as(x_init)\
                * (x_init - m_init_shape.expand_as(x_init))
            # Seed the Polyak averages with the initialized values.
            self.V_avg.copy_(self.V.data)
            self.g_avg.copy_(self.g.data)
            self.b_avg.copy_(self.b.data)
            return x_init
        else:
            # Regular path: transposed-convolve with w = (g / ||V||) V,
            # using Polyak averages instead of raw parameters in eval mode.
            v, g, b = get_vars_maybe_avg(
                self, ['V', 'g', 'b'], self.training,
                polyak_decay=self.polyak_decay)
            scalar = g / \
                torch.norm(v.transpose(0, 1).contiguous().view(
                    self.out_channels, -1), 2, 1).squeeze(1)
            w = scalar.view(self.in_channels, self.out_channels,
                            *([1] * (len(v.size()) - 2))).expand_as(v) * v
            x = F.conv_transpose2d(x, w, b, self.stride,
                                   self.padding, self.output_padding,
                                   self.groups)
            return x
import torch
import torch.nn as nn
import torch.nn.functional as F
from onmt.utils.misc import aeq
# 1 / sqrt(2)
SCALE_WEIGHT = 0.5 ** 0.5


def seq_linear(linear, x):
    """Apply ``linear`` position-wise to a ``(batch, hidden, length, 1)``
    tensor, returning a tensor of the same shape."""
    batch, hidden_size, length, _ = x.size()
    # Flatten (batch, length) so the linear layer sees a 2-D input.
    flat = x.transpose(1, 2).contiguous().view(batch * length, hidden_size)
    h = linear(flat)
    return h.view(batch, length, hidden_size, 1).transpose(1, 2)
class ConvMultiStepAttention(nn.Module):
    """
    Conv attention takes a key matrix, a value matrix and a query vector.
    Attention weight is calculated by key matrix with the query vector
    and sum on the value matrix. And the same operation is applied
    in each decode conv layer.
    """

    def __init__(self, input_size):
        super(ConvMultiStepAttention, self).__init__()
        self.linear_in = nn.Linear(input_size, input_size)
        self.mask = None

    def apply_mask(self, mask):
        """Store a mask applied to the raw scores on subsequent forwards."""
        self.mask = mask

    def forward(self, base_target_emb, input_from_dec, encoder_out_top,
                encoder_out_combine):
        """
        Args:
            base_target_emb: target emb tensor
            input_from_dec: output of decode conv
            encoder_out_top: the key matrix for calculation of attention
                weight, which is the top output of encode conv
            encoder_out_combine:
                the value matrix for the attention-weighted sum,
                which is the combination of base emb and top output of encode

        Returns:
            (FloatTensor, FloatTensor):

            * context_output: attention-weighted sum over the values
            * attn: attention distribution over source positions
        """

        # checks
        # batch, channel, height, width = base_target_emb.size()
        batch, _, height, _ = base_target_emb.size()
        # batch_, channel_, height_, width_ = input_from_dec.size()
        batch_, _, height_, _ = input_from_dec.size()
        aeq(batch, batch_)
        aeq(height, height_)
        # enc_batch, enc_channel, enc_height = encoder_out_top.size()
        enc_batch, _, enc_height = encoder_out_top.size()
        # enc_batch_, enc_channel_, enc_height_ = encoder_out_combine.size()
        enc_batch_, _, enc_height_ = encoder_out_combine.size()
        aeq(enc_batch, enc_batch_)
        aeq(enc_height, enc_height_)

        # Query: decoder conv output projected and mixed with the embedding,
        # scaled to keep magnitudes stable (as in ConvS2S).
        preatt = seq_linear(self.linear_in, input_from_dec)
        target = (base_target_emb + preatt) * SCALE_WEIGHT
        target = torch.squeeze(target, 3)
        target = torch.transpose(target, 1, 2)
        pre_attn = torch.bmm(target, encoder_out_top)

        if self.mask is not None:
            pre_attn.data.masked_fill_(self.mask, -float('inf'))

        attn = F.softmax(pre_attn, dim=2)

        context_output = torch.bmm(
            attn, torch.transpose(encoder_out_combine, 1, 2))
        context_output = torch.transpose(
            torch.unsqueeze(context_output, 3), 1, 2)
        return context_output, attn
import torch
from torch.autograd import Function
import torch.nn as nn
def _make_ix_like(input, dim=0):
d = input.size(dim)
rho = torch.arange(1, d + 1, device=input.device, dtype=input.dtype)
view = [1] * input.dim()
view[0] = -1
return rho.view(view).transpose(0, dim)
def _threshold_and_support(input, dim=0):
    """Sparsemax building block: compute the threshold

    Args:
        input: any dimension
        dim: dimension along which to apply the sparsemax

    Returns:
        the threshold value and the support size along ``dim``
    """
    sorted_vals, _ = torch.sort(input, dim=dim, descending=True)
    cumsum_minus_one = sorted_vals.cumsum(dim) - 1
    positions = _make_ix_like(input, dim)
    # Position k is in the support iff k * z_(k) > (cumsum_k - 1).
    in_support = positions * sorted_vals > cumsum_minus_one
    support_size = in_support.sum(dim=dim).unsqueeze(dim)
    tau = cumsum_minus_one.gather(dim, support_size - 1)
    tau = tau / support_size.to(input.dtype)
    return tau, support_size
class SparsemaxFunction(Function):
    """Autograd implementation of sparsemax: the Euclidean projection of a
    score vector onto the probability simplex, yielding sparse outputs."""

    @staticmethod
    def forward(ctx, input, dim=0):
        """sparsemax: normalizing sparse transform (a la softmax)

        Parameters:
            input (Tensor): any shape
            dim: dimension along which to apply sparsemax

        Returns:
            output (Tensor): same shape as input
        """
        ctx.dim = dim
        max_val, _ = input.max(dim=dim, keepdim=True)
        # Subtract the max for numerical stability (same trick as softmax).
        # Bug fix: the original used in-place ``input -= max_val``, which
        # silently mutated the caller's tensor inside autograd's forward;
        # use an out-of-place subtraction instead.
        input = input - max_val
        tau, supp_size = _threshold_and_support(input, dim=dim)
        output = torch.clamp(input - tau, min=0)
        ctx.save_for_backward(supp_size, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        supp_size, output = ctx.saved_tensors
        dim = ctx.dim
        grad_input = grad_output.clone()
        # Gradient is zero outside the support...
        grad_input[output == 0] = 0
        # ...and within the support the mean gradient over the support is
        # subtracted (projection onto the simplex's tangent space).
        v_hat = grad_input.sum(dim=dim) / supp_size.to(output.dtype).squeeze()
        v_hat = v_hat.unsqueeze(dim)
        grad_input = torch.where(output != 0, grad_input - v_hat, grad_input)
        return grad_input, None


# Convenience callable, mirroring torch.autograd.Function usage.
sparsemax = SparsemaxFunction.apply
class Sparsemax(nn.Module):
    """Module form of the :func:`sparsemax` activation."""

    def __init__(self, dim=0):
        super(Sparsemax, self).__init__()
        self.dim = dim

    def forward(self, input):
        return sparsemax(input, self.dim)
class LogSparsemax(nn.Module):
    """Module computing the elementwise log of :func:`sparsemax`.

    Entries outside the sparsemax support are exactly zero, so their log
    is ``-inf``.
    """

    def __init__(self, dim=0):
        super(LogSparsemax, self).__init__()
        self.dim = dim

    def forward(self, input):
        return torch.log(sparsemax(input, self.dim))
"""Average Attention module."""
import torch
import torch.nn as nn
from onmt.modules.position_ffn import PositionwiseFeedForward
class AverageAttention(nn.Module):
    """
    Average Attention module from
    "Accelerating Neural Transformer via an Average Attention Network"
    :cite:`DBLP:journals/corr/abs-1805-00631`.

    Args:
       model_dim (int): the dimension of keys/values/queries,
           must be divisible by head_count
       dropout (float): dropout parameter
    """

    def __init__(self, model_dim, dropout=0.1, aan_useffn=False):
        self.model_dim = model_dim
        self.aan_useffn = aan_useffn
        super(AverageAttention, self).__init__()
        if aan_useffn:
            self.average_layer = PositionwiseFeedForward(model_dim, model_dim,
                                                         dropout)
        self.gating_layer = nn.Linear(model_dim * 2, model_dim * 2)

    def cumulative_average_mask(self, batch_size, inputs_len, device):
        """
        Builds the mask to compute the cumulative average as described in
        :cite:`DBLP:journals/corr/abs-1805-00631` -- Figure 3

        Args:
            batch_size (int): batch size
            inputs_len (int): length of the inputs

        Returns:
            (FloatTensor):

            * A Tensor of shape ``(batch_size, input_len, input_len)``
        """

        # Row i of the mask averages positions 0..i: lower-triangular ones
        # scaled by 1 / (i + 1).
        triangle = torch.tril(torch.ones(inputs_len, inputs_len,
                              dtype=torch.float, device=device))
        weights = torch.ones(1, inputs_len, dtype=torch.float, device=device) \
            / torch.arange(1, inputs_len + 1, dtype=torch.float, device=device)
        mask = triangle * weights.transpose(0, 1)

        return mask.unsqueeze(0).expand(batch_size, inputs_len, inputs_len)

    def cumulative_average(self, inputs, mask_or_step,
                           layer_cache=None, step=None):
        """
        Computes the cumulative average as described in
        :cite:`DBLP:journals/corr/abs-1805-00631` -- Equations (1) (5) (6)

        Args:
            inputs (FloatTensor): sequence to average
                ``(batch_size, input_len, dimension)``
            mask_or_step: if cache is set, this is assumed
                to be the current step of the
                dynamic decoding. Otherwise, it is the mask matrix
                used to compute the cumulative average.
            layer_cache: a dictionary containing the cumulative average
                of the previous step.

        Returns:
            a tensor of the same shape and type as ``inputs``.
        """

        if layer_cache is not None:
            # Incremental decoding: running average updated one step at a
            # time, cached in ``layer_cache["prev_g"]``.
            step = mask_or_step
            average_attention = (inputs + step *
                                 layer_cache["prev_g"]) / (step + 1)
            layer_cache["prev_g"] = average_attention
            return average_attention
        else:
            mask = mask_or_step
            return torch.matmul(mask.to(inputs.dtype), inputs)

    def forward(self, inputs, mask=None, layer_cache=None, step=None):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, input_len, model_dim)``

        Returns:
            (FloatTensor, FloatTensor):

            * gating_outputs ``(batch_size, input_len, model_dim)``
            * average_outputs average attention
                ``(batch_size, input_len, model_dim)``
        """

        batch_size = inputs.size(0)
        inputs_len = inputs.size(1)
        average_outputs = self.cumulative_average(
            inputs, self.cumulative_average_mask(batch_size,
                                                 inputs_len, inputs.device)
            if layer_cache is None else step, layer_cache=layer_cache)
        if self.aan_useffn:
            average_outputs = self.average_layer(average_outputs)
        # Gated residual combination of the raw inputs and their averages.
        gating_outputs = self.gating_layer(torch.cat((inputs,
                                                      average_outputs), -1))
        input_gate, forget_gate = torch.chunk(gating_outputs, 2, dim=2)
        gating_outputs = torch.sigmoid(input_gate) * inputs + \
            torch.sigmoid(forget_gate) * average_outputs
        return gating_outputs, average_outputs
from itertools import chain, starmap
from collections import Counter
import torch
from torchtext.data import Dataset as TorchtextDataset
from torchtext.data import Example
from torchtext.vocab import Vocab
def _join_dicts(*args):
"""
Args:
dictionaries with disjoint keys.
Returns:
a single dictionary that has the union of these keys.
"""
return dict(chain(*[d.items() for d in args]))
def _dynamic_dict(example, src_field, tgt_field):
    """Create copy-vocab and numericalize with it.

    In-place adds ``"src_map"`` to ``example``. That is the copy-vocab
    numericalization of the tokenized ``example["src"]``. If ``example``
    has a ``"tgt"`` key, adds ``"alignment"`` to example. That is the
    copy-vocab numericalization of the tokenized ``example["tgt"]``. The
    alignment has an initial and final UNK token to match the BOS and EOS
    tokens.

    Args:
        example (dict): An example dictionary with a ``"src"`` key and
            maybe a ``"tgt"`` key. (This argument changes in place!)
        src_field (torchtext.data.Field): Field object.
        tgt_field (torchtext.data.Field): Field object.

    Returns:
        torchtext.data.Vocab and ``example``, changed as described.
    """

    src = src_field.tokenize(example["src"])
    # make a small vocab containing just the tokens in the source sequence
    unk = src_field.unk_token
    pad = src_field.pad_token
    src_ex_vocab = Vocab(Counter(src), specials=[unk, pad])
    unk_idx = src_ex_vocab.stoi[unk]
    # Map source tokens to indices in the dynamic dict.
    src_map = torch.LongTensor([src_ex_vocab.stoi[w] for w in src])
    example["src_map"] = src_map
    example["src_ex_vocab"] = src_ex_vocab
    if "tgt" in example:
        tgt = tgt_field.tokenize(example["tgt"])
        # Target tokens not in the source copy-vocab map to unk_idx; the
        # surrounding unk_idx entries align with the BOS/EOS tokens that
        # the target Field adds during numericalization.
        mask = torch.LongTensor(
            [unk_idx] + [src_ex_vocab.stoi[w] for w in tgt] + [unk_idx])
        example["alignment"] = mask
    return src_ex_vocab, example
class Dataset(TorchtextDataset):
"""Contain data and process it.
A dataset is an object that accepts sequences of raw data (sentence pairs
in the case of machine translation) and fields which describe how this
raw data should be processed to produce tensors. When a dataset is
instantiated, it applies the fields' preprocessing pipeline (but not
the bit that numericalizes it or turns it into batch tensors) to the raw
data, producing a list of :class:`torchtext.data.Example` objects.
torchtext's iterators then know how to use these examples to make batches.
Args:
fields (dict[str, Field]): a dict with the structure
returned by :func:`onmt.inputters.get_fields()`. Usually
that means the dataset side, ``"src"`` or ``"tgt"``. Keys match
the keys of items yielded by the ``readers``, while values
are lists of (name, Field) pairs. An attribute with this
name will be created for each :class:`torchtext.data.Example`
object and its value will be the result of applying the Field
to the data that matches the key. The advantage of having
sequences of fields for each piece of raw input is that it allows
the dataset to store multiple "views" of each input, which allows
for easy implementation of token-level features, mixed word-
and character-level models, and so on. (See also
:class:`onmt.inputters.TextMultiField`.)
readers (Iterable[onmt.inputters.DataReaderBase]): Reader objects
for disk-to-dict. The yielded dicts are then processed
according to ``fields``.
data (Iterable[Tuple[str, Any]]): (name, ``data_arg``) pairs
where ``data_arg`` is passed to the ``read()`` method of the
reader in ``readers`` at that position. (See the reader object for
details on the ``Any`` type.)
dirs (Iterable[str or NoneType]): A list of directories where
data is contained. See the reader object for more details.
sort_key (Callable[[torchtext.data.Example], Any]): A function
for determining the value on which data is sorted (i.e. length).
filter_pred (Callable[[torchtext.data.Example], bool]): A function
that accepts Example objects and returns a boolean value
indicating whether to include that example in the dataset.
Attributes:
src_vocabs (List[torchtext.data.Vocab]): Used with dynamic dict/copy
attention. There is a very short vocab for each src example.
It contains just the source words, e.g. so that the generator can
predict to copy them.
"""
def __init__(self, fields, readers, data, dirs, sort_key,
             filter_pred=None):
    """Build examples by joining reader outputs and applying fields.

    Args:
        fields (dict[str, Field]): field name -> Field (or TextMultiField).
        readers (Iterable[DataReaderBase]): one reader per data source.
        data (Iterable[Tuple[str, Any]]): (name, data_arg) pairs fed to
            the ``read()`` method of the reader at the same position.
        dirs (Iterable[str or NoneType]): directory argument per reader.
        sort_key (Callable[[Example], Any]): maps an example to the value
            it is sorted on (e.g. length).
        filter_pred (Callable[[Example], bool] or NoneType): keeps an
            example iff it returns True; ``None`` keeps everything.
    """
    self.sort_key = sort_key
    # Copy attention requires both of these extra fields to be present.
    can_copy = 'src_map' in fields and 'alignment' in fields
    read_iters = [r.read(dat[1], dat[0], dir_) for r, dat, dir_
                  in zip(readers, data, dirs)]
    # self.src_vocabs is used in collapse_copy_scores and Translator.py
    self.src_vocabs = []
    examples = []
    # zip(*read_iters) yields, per example, one dict from each source;
    # _join_dicts merges them into a single example dict.
    for ex_dict in starmap(_join_dicts, zip(*read_iters)):
        if can_copy:
            src_field = fields['src']
            tgt_field = fields['tgt']
            # this assumes src_field and tgt_field are both text
            src_ex_vocab, ex_dict = _dynamic_dict(
                ex_dict, src_field.base_field, tgt_field.base_field)
            self.src_vocabs.append(src_ex_vocab)
        # Keep only the fields this example actually has data for.
        ex_fields = {k: [(k, v)] for k, v in fields.items() if
                     k in ex_dict}
        ex = Example.fromdict(ex_dict, ex_fields)
        examples.append(ex)
    # fields needs to have only keys that examples have as attrs
    fields = []
    # NOTE(review): ex_fields comes from the last loop iteration; this
    # assumes all examples share the same field set — confirm upstream.
    for _, nf_list in ex_fields.items():
        assert len(nf_list) == 1
        fields.append(nf_list[0])
    super(Dataset, self).__init__(examples, fields, filter_pred)
def __getattr__(self, attr):
    """Expose a known field name as a generator over example attributes."""
    # Guard against infinite recursion while 'fields' is not yet set
    # (e.g. during unpickling, before attributes exist).
    if 'fields' not in vars(self) or attr not in self.fields:
        raise AttributeError
    return (getattr(example, attr) for example in self.examples)
def save(self, path, remove_fields=True):
    """Serialize the dataset to ``path`` with ``torch.save``.

    Args:
        path (str): destination file path.
        remove_fields (bool): if True, drop ``self.fields`` before saving
            so hard-to-pickle Field objects are not serialized with the
            examples.
    """
    # Fix: removed dataset-extraction metadata that was fused onto the
    # final line, which made the statement syntactically invalid.
    if remove_fields:
        self.fields = []
    torch.save(self, path)
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# domain specific dependencies
try:
from PIL import Image
from torchvision import transforms
import cv2
except ImportError:
Image, transforms, cv2 = None, None, None
class ImageDataReader(DataReaderBase):
    """Read image data from disk.

    Args:
        truncate (tuple[int] or NoneType): maximum img size. Use
            ``(0,0)`` or ``None`` for unlimited.
        channel_size (int): Number of channels per image.

    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing any of ``PIL``, ``torchvision``, or ``cv2`` fail.
    """

    def __init__(self, truncate=None, channel_size=3):
        self._check_deps()
        self.truncate = truncate
        self.channel_size = channel_size

    @classmethod
    def from_opt(cls, opt):
        # Alternate constructor taking a parsed options namespace.
        return cls(channel_size=opt.image_channel_size)

    @classmethod
    def _check_deps(cls):
        # The guarded imports at module top set these to None on failure.
        if any([Image is None, transforms is None, cv2 is None]):
            cls._raise_missing_dep(
                "PIL", "torchvision", "cv2")

    def read(self, images, side, img_dir=None):
        """Read data into dicts.

        Args:
            images (str or Iterable[str]): Sequence of image paths or
                path to a file containing image paths.
                In either case, the filenames may be relative to
                ``img_dir`` (default behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            img_dir (str): Location of source image files. See ``images``.

        Yields:
            a dictionary containing image data, path and index for each
            line.
        """
        if isinstance(images, str):
            # A string is treated as a path to a file listing image paths.
            images = DataReaderBase._read_file(images)
        for i, filename in enumerate(images):
            filename = filename.decode("utf-8").strip()
            img_path = os.path.join(img_dir, filename)
            if not os.path.exists(img_path):
                # Fall back to interpreting the filename as absolute.
                img_path = filename
            assert os.path.exists(img_path), \
                'img path %s not found' % filename
            if self.channel_size == 1:
                # Grayscale: cv2.imread(..., 0) loads a single channel.
                img = transforms.ToTensor()(
                    Image.fromarray(cv2.imread(img_path, 0)))
            else:
                img = transforms.ToTensor()(Image.open(img_path))
            if self.truncate and self.truncate != (0, 0):
                # Skip (not crop) images exceeding the size limit.
                if not (img.size(1) <= self.truncate[0]
                        and img.size(2) <= self.truncate[1]):
                    continue
            yield {side: img, side + '_path': filename, 'indices': i}
def img_sort_key(ex):
    """Sort using the size of the image: (width, height)."""
    image = ex.src
    width, height = image.size(2), image.size(1)
    return width, height
def batch_img(data, vocab):
    """Pad a list of image tensors to a common size and stack them.

    Padding positions are filled with 1 (white). ``vocab`` is unused; it
    exists to match the postprocessing callback signature.
    """
    channels = data[0].size(0)
    max_h = max(img.size(1) for img in data)
    max_w = max(img.size(2) for img in data)
    padded = torch.full((len(data), channels, max_h, max_w), 1.0)
    for idx, img in enumerate(data):
        padded[idx, :, :img.size(1), :img.size(2)] = img
    return padded
def image_fields(**kwargs):
    """Create the image Field.

    Images carry no vocabulary; padding/batching is delegated to
    :func:`batch_img` via the ``postprocessing`` hook.

    Returns:
        torchtext.data.Field
    """
    # Fix: removed dataset-extraction metadata fused onto the return line.
    img = Field(
        use_vocab=False, dtype=torch.float,
        postprocessing=batch_img, sequential=False)
    return img
import os
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
try:
import numpy as np
except ImportError:
np = None
class VecDataReader(DataReaderBase):
    """Read feature vector data from disk.

    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing ``np`` fails.
    """

    def __init__(self):
        self._check_deps()

    @classmethod
    def _check_deps(cls):
        # The guarded import at module top sets np to None on failure.
        if np is None:
            cls._raise_missing_dep("np")

    def read(self, vecs, side, vec_dir=None):
        """Read data into dicts.

        Args:
            vecs (str or Iterable[str]): Sequence of feature vector paths
                or path to a file containing feature vector paths.
                In either case, the filenames may be relative to
                ``vec_dir`` (default behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            vec_dir (str): Location of source vectors. See ``vecs``.

        Yields:
            A dictionary containing feature vector data.
        """
        if isinstance(vecs, str):
            # A string is treated as a path to a file listing vector paths.
            vecs = DataReaderBase._read_file(vecs)
        for i, filename in enumerate(vecs):
            filename = filename.decode("utf-8").strip()
            vec_path = os.path.join(vec_dir, filename)
            if not os.path.exists(vec_path):
                # Fall back to interpreting the filename as absolute.
                vec_path = filename
            assert os.path.exists(vec_path), \
                'vec path %s not found' % filename
            vec = np.load(vec_path)
            yield {side: torch.from_numpy(vec),
                   side + "_path": filename, "indices": i}
def vec_sort_key(ex):
    """Sort by the number of timesteps in the feature-vector sequence."""
    sequence = ex.src
    return sequence.shape[0]
class VecSeqField(Field):
    """Defines an vector datatype and instructions for converting to Tensor.

    See :class:`Fields` for attribute descriptions.
    """

    def __init__(self, preprocessing=None, postprocessing=None,
                 include_lengths=False, batch_first=False, pad_index=0,
                 is_target=False):
        # Vectors are already numeric: no vocab, no BOS/EOS tokens, and
        # padding uses a numeric value instead of a pad token string.
        super(VecSeqField, self).__init__(
            sequential=True, use_vocab=False, init_token=None,
            eos_token=None, fix_length=False, dtype=torch.float,
            preprocessing=preprocessing, postprocessing=postprocessing,
            lower=False, tokenize=None, include_lengths=include_lengths,
            batch_first=batch_first, pad_token=pad_index, unk_token=None,
            pad_first=False, truncate_first=False, stop_words=None,
            is_target=is_target
        )

    def pad(self, minibatch):
        """Pad a batch of examples to the length of the longest example.

        Args:
            minibatch (List[torch.FloatTensor]): A list of audio data,
                each having shape ``(len, n_feats, feat_dim)``
                where len is variable.

        Returns:
            torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
            padded tensor of shape
            ``(batch_size, max_len, n_feats, feat_dim)``.
            and a list of the lengths if `self.include_lengths` is `True`
            else just returns the padded tensor.
        """
        # These Field options are fixed by __init__; padding below relies
        # on right-padding with no truncation.
        assert not self.pad_first and not self.truncate_first \
            and not self.fix_length and self.sequential
        minibatch = list(minibatch)
        lengths = [x.size(0) for x in minibatch]
        max_len = max(lengths)
        nfeats = minibatch[0].size(1)
        feat_dim = minibatch[0].size(2)
        # pad_token is the numeric pad value (pad_index from __init__).
        feats = torch.full((len(minibatch), max_len, nfeats, feat_dim),
                           self.pad_token)
        for i, (feat, len_) in enumerate(zip(minibatch, lengths)):
            feats[i, 0:len_, :, :] = feat
        if self.include_lengths:
            return (feats, lengths)
        return feats

    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.

        If the field has ``include_lengths=True``, a tensor of lengths will
        be included in the return value.

        Args:
            arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True.
            device (str or torch.device): See `Field.numericalize`.
        """
        assert self.use_vocab is False
        if self.include_lengths and not isinstance(arr, tuple):
            raise ValueError("Field has include_lengths set to True, but "
                             "input data is not a tuple of "
                             "(data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=torch.int, device=device)
        arr = arr.to(device)
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, None)
        if self.sequential and not self.batch_first:
            # Convert (batch, len, n_feats, feat_dim) to time-major layout.
            arr = arr.permute(1, 0, 2, 3)
        if self.sequential:
            arr = arr.contiguous()
        if self.include_lengths:
            return arr, lengths
        return arr
def vec_fields(**kwargs):
    """Create the feature-vector field (pad with 0, lengths included).

    Returns:
        VecSeqField
    """
    # Fix: removed dataset-extraction metadata fused onto the return line.
    vec = VecSeqField(pad_index=0, include_lengths=True)
    return vec
import os
from tqdm import tqdm
import torch
from torchtext.data import Field
from onmt.inputters.datareader_base import DataReaderBase
# imports of datatype-specific dependencies
try:
import torchaudio
import librosa
import numpy as np
except ImportError:
torchaudio, librosa, np = None, None, None
class AudioDataReader(DataReaderBase):
    """Read audio data from disk.

    Args:
        sample_rate (int): sample_rate.
        window_size (float) : window size for spectrogram in seconds.
        window_stride (float): window stride for spectrogram in seconds.
        window (str): window type for spectrogram generation. See
            :func:`librosa.stft()` ``window`` for more details.
        normalize_audio (bool): subtract spectrogram by mean and divide
            by std or not.
        truncate (int or NoneType): maximum audio length
            (0 or None for unlimited).

    Raises:
        onmt.inputters.datareader_base.MissingDependencyException: If
            importing any of ``torchaudio``, ``librosa``, or ``numpy`` fail.
    """

    def __init__(self, sample_rate=0, window_size=0, window_stride=0,
                 window=None, normalize_audio=True, truncate=None):
        self._check_deps()
        self.sample_rate = sample_rate
        self.window_size = window_size
        self.window_stride = window_stride
        self.window = window
        self.normalize_audio = normalize_audio
        self.truncate = truncate

    @classmethod
    def from_opt(cls, opt):
        # Alternate constructor taking a parsed options namespace.
        return cls(sample_rate=opt.sample_rate, window_size=opt.window_size,
                   window_stride=opt.window_stride, window=opt.window)

    @classmethod
    def _check_deps(cls):
        # The guarded imports at module top set these to None on failure.
        if any([torchaudio is None, librosa is None, np is None]):
            cls._raise_missing_dep(
                "torchaudio", "librosa", "numpy")

    def extract_features(self, audio_path):
        """Load audio and return a (normalized) log-magnitude spectrogram."""
        # torchaudio loading options recently changed. It's probably
        # straightforward to rewrite the audio handling to make use of
        # up-to-date torchaudio, but in the meantime there is a legacy
        # method which uses the old defaults
        sound, sample_rate_ = torchaudio.legacy.load(audio_path)
        if self.truncate and self.truncate > 0:
            # Cut the waveform to at most self.truncate samples.
            if sound.size(0) > self.truncate:
                sound = sound[:self.truncate]

        assert sample_rate_ == self.sample_rate, \
            'Sample rate of %s != -sample_rate (%d vs %d)' \
            % (audio_path, sample_rate_, self.sample_rate)

        sound = sound.numpy()
        if len(sound.shape) > 1:
            if sound.shape[1] == 1:
                sound = sound.squeeze()
            else:
                sound = sound.mean(axis=1)  # average multiple channels

        # Window parameters are given in seconds; convert to samples.
        n_fft = int(self.sample_rate * self.window_size)
        win_length = n_fft
        hop_length = int(self.sample_rate * self.window_stride)
        # STFT
        d = librosa.stft(sound, n_fft=n_fft, hop_length=hop_length,
                         win_length=win_length, window=self.window)
        spect, _ = librosa.magphase(d)
        spect = np.log1p(spect)
        spect = torch.FloatTensor(spect)
        if self.normalize_audio:
            # Per-spectrogram standardization (zero mean, unit variance).
            mean = spect.mean()
            std = spect.std()
            spect.add_(-mean)
            spect.div_(std)
        return spect

    def read(self, data, side, src_dir=None):
        """Read data into dicts.

        Args:
            data (str or Iterable[str]): Sequence of audio paths or
                path to file containing audio paths.
                In either case, the filenames may be relative to ``src_dir``
                (default behavior) or absolute.
            side (str): Prefix used in return dict. Usually
                ``"src"`` or ``"tgt"``.
            src_dir (str): Location of source audio files. See ``data``.

        Yields:
            A dictionary containing audio data for each line.
        """
        assert src_dir is not None and os.path.exists(src_dir),\
            "src_dir must be a valid directory if data_type is audio"
        if isinstance(data, str):
            # A string is treated as a path to a file listing audio paths.
            data = DataReaderBase._read_file(data)
        for i, line in enumerate(tqdm(data)):
            line = line.decode("utf-8").strip()
            audio_path = os.path.join(src_dir, line)
            if not os.path.exists(audio_path):
                # Fall back to interpreting the path as absolute.
                audio_path = line
            assert os.path.exists(audio_path), \
                'audio path %s not found' % line
            spect = self.extract_features(audio_path)
            yield {side: spect, side + '_path': line, 'indices': i}
def audio_sort_key(ex):
    """Sort using duration time of the sound spectrogram."""
    spectrogram = ex.src
    return spectrogram.size(1)
class AudioSeqField(Field):
    """Defines an audio datatype and instructions for converting to Tensor.

    See :class:`Fields` for attribute descriptions.
    """

    def __init__(self, preprocessing=None, postprocessing=None,
                 include_lengths=False, batch_first=False, pad_index=0,
                 is_target=False):
        # Spectrograms are numeric: no vocab, no BOS/EOS tokens, and
        # padding uses a numeric value instead of a pad token string.
        super(AudioSeqField, self).__init__(
            sequential=True, use_vocab=False, init_token=None,
            eos_token=None, fix_length=False, dtype=torch.float,
            preprocessing=preprocessing, postprocessing=postprocessing,
            lower=False, tokenize=None, include_lengths=include_lengths,
            batch_first=batch_first, pad_token=pad_index, unk_token=None,
            pad_first=False, truncate_first=False, stop_words=None,
            is_target=is_target
        )

    def pad(self, minibatch):
        """Pad a batch of examples to the length of the longest example.

        Args:
            minibatch (List[torch.FloatTensor]): A list of audio data,
                each having shape 1 x n_feats x len where len is variable.

        Returns:
            torch.FloatTensor or Tuple[torch.FloatTensor, List[int]]: The
            padded tensor of shape ``(batch_size, 1, n_feats, max_len)``.
            and a list of the lengths if `self.include_lengths` is `True`
            else just returns the padded tensor.
        """
        # These Field options are fixed by __init__; padding below relies
        # on right-padding with no truncation.
        assert not self.pad_first and not self.truncate_first \
            and not self.fix_length and self.sequential
        minibatch = list(minibatch)
        lengths = [x.size(1) for x in minibatch]
        max_len = max(lengths)
        nfft = minibatch[0].size(0)
        # pad_token is the numeric pad value (pad_index from __init__).
        sounds = torch.full((len(minibatch), 1, nfft, max_len), self.pad_token)
        for i, (spect, len_) in enumerate(zip(minibatch, lengths)):
            sounds[i, :, :, 0:len_] = spect
        if self.include_lengths:
            return (sounds, lengths)
        return sounds

    def numericalize(self, arr, device=None):
        """Turn a batch of examples that use this field into a Variable.

        If the field has ``include_lengths=True``, a tensor of lengths will
        be included in the return value.

        Args:
            arr (torch.FloatTensor or Tuple(torch.FloatTensor, List[int])):
                List of tokenized and padded examples, or tuple of List of
                tokenized and padded examples and List of lengths of each
                example if self.include_lengths is True. Examples have shape
                ``(batch_size, 1, n_feats, max_len)`` if `self.batch_first`
                else ``(max_len, batch_size, 1, n_feats)``.
            device (str or torch.device): See `Field.numericalize`.
        """
        assert self.use_vocab is False
        if self.include_lengths and not isinstance(arr, tuple):
            raise ValueError("Field has include_lengths set to True, but "
                             "input data is not a tuple of "
                             "(data batch, batch lengths).")
        if isinstance(arr, tuple):
            arr, lengths = arr
            lengths = torch.tensor(lengths, dtype=torch.int, device=device)
        if self.postprocessing is not None:
            arr = self.postprocessing(arr, None)
        if self.sequential and not self.batch_first:
            # Convert (batch, 1, n_feats, max_len) to time-major layout.
            arr = arr.permute(3, 0, 1, 2)
        if self.sequential:
            arr = arr.contiguous()
        arr = arr.to(device)
        if self.include_lengths:
            return arr, lengths
        return arr
def audio_fields(**kwargs):
    """Create the audio field (batch-first, pad with 0, lengths included).

    Returns:
        AudioSeqField
    """
    # Fix: removed dataset-extraction metadata fused onto the return line.
    audio = AudioSeqField(pad_index=0, batch_first=True, include_lengths=True)
    return audio
from functools import partial
import six
import torch
from torchtext.data import Field, RawField
from onmt.inputters.datareader_base import DataReaderBase
class TextDataReader(DataReaderBase):
    """Read plain text data, either from disk or from an iterable."""

    def read(self, sequences, side, _dir=None):
        """Read text data from disk.

        Args:
            sequences (str or Iterable[str]): path to a text file, or an
                iterable of the actual text data.
            side (str): Prefix used in the returned dicts. Usually
                ``"src"`` or ``"tgt"``.
            _dir (NoneType): Leave as ``None``. This parameter exists to
                conform with the :func:`DataReaderBase.read()` signature.

        Yields:
            dict mapping ``side`` to the raw sequence and ``"indices"``
            to its position in the input.
        """
        assert _dir is None or _dir == "", \
            "Cannot use _dir with TextDataReader."
        if isinstance(sequences, str):
            # A string is treated as a path to a file of sequences.
            sequences = DataReaderBase._read_file(sequences)
        for index, sequence in enumerate(sequences):
            if isinstance(sequence, six.binary_type):
                sequence = sequence.decode("utf-8")
            yield {side: sequence, "indices": index}
def text_sort_key(ex):
    """Sort using the number of tokens in the sequence.

    When a target side is present, sorts on (src length, tgt length).
    """
    src_len = len(ex.src[0])
    if not hasattr(ex, "tgt"):
        return src_len
    return src_len, len(ex.tgt[0])
# mix this with partial
def _feature_tokenize(
string, layer=0, tok_delim=None, feat_delim=None, truncate=None):
"""Split apart word features (like POS/NER tags) from the tokens.
Args:
string (str): A string with ``tok_delim`` joining tokens and
features joined by ``feat_delim``. For example,
``"hello|NOUN|'' Earth|NOUN|PLANET"``.
layer (int): Which feature to extract. (Not used if there are no
features, indicated by ``feat_delim is None``). In the
example above, layer 2 is ``'' PLANET``.
truncate (int or NoneType): Restrict sequences to this length of
tokens.
Returns:
List[str] of tokens.
"""
tokens = string.split(tok_delim)
if truncate is not None:
tokens = tokens[:truncate]
if feat_delim is not None:
tokens = [t.split(feat_delim)[layer] for t in tokens]
return tokens
class TextMultiField(RawField):
    """Container for subfields.

    Text data might use POS/NER/etc labels in addition to tokens.
    This class associates the "base" :class:`Field` with any subfields.
    It also handles padding the data and stacking it.

    Args:
        base_name (str): Name for the base field.
        base_field (Field): The token field.
        feats_fields (Iterable[Tuple[str, Field]]): A list of name-field
            pairs.

    Attributes:
        fields (Iterable[Tuple[str, Field]]): A list of name-field pairs.
            The order is defined as the base field first, then
            ``feats_fields`` in alphabetical order.
    """

    def __init__(self, base_name, base_field, feats_fields):
        super(TextMultiField, self).__init__()
        self.fields = [(base_name, base_field)]
        # Deterministic ordering: feature fields sorted by name.
        for name, ff in sorted(feats_fields, key=lambda kv: kv[0]):
            self.fields.append((name, ff))

    @property
    def base_field(self):
        # The token field is always first in self.fields.
        return self.fields[0][1]

    def process(self, batch, device=None):
        """Convert outputs of preprocess into Tensors.

        Args:
            batch (List[List[List[str]]]): A list of length batch size.
                Each element is a list of the preprocess results for each
                field (which are lists of str "words" or feature tags.
            device (torch.device or str): The device on which the tensor(s)
                are built.

        Returns:
            torch.LongTensor or Tuple[LongTensor, LongTensor]:
            A tensor of shape ``(seq_len, batch_size, len(self.fields))``
            where the field features are ordered like ``self.fields``.
            If the base field returns lengths, these are also returned
            and have shape ``(batch_size,)``.
        """
        # batch (list(list(list))): batch_size x len(self.fields) x seq_len
        batch_by_feat = list(zip(*batch))
        base_data = self.base_field.process(batch_by_feat[0], device=device)
        if self.base_field.include_lengths:
            # lengths: batch_size
            base_data, lengths = base_data

        feats = [ff.process(batch_by_feat[i], device=device)
                 for i, (_, ff) in enumerate(self.fields[1:], 1)]
        levels = [base_data] + feats
        # data: seq_len x batch_size x len(self.fields)
        data = torch.stack(levels, 2)
        if self.base_field.include_lengths:
            return data, lengths
        else:
            return data

    def preprocess(self, x):
        """Preprocess data.

        Args:
            x (str): A sentence string (words joined by whitespace).

        Returns:
            List[List[str]]: A list of length ``len(self.fields)``
            containing lists of tokens/feature tags for the sentence.
            The output is ordered like ``self.fields``.
        """
        return [f.preprocess(x) for _, f in self.fields]

    def __getitem__(self, item):
        # Index into the ordered (name, field) pairs.
        return self.fields[item]
def text_fields(**kwargs):
    """Create text fields.

    Args:
        base_name (str): Name associated with the field.
        n_feats (int): Number of word level feats (not counting the tokens)
        include_lengths (bool): Optionally return the sequence lengths.
        pad (str, optional): Defaults to ``"<blank>"``.
        bos (str or NoneType, optional): Defaults to ``"<s>"``.
        eos (str or NoneType, optional): Defaults to ``"</s>"``.
        truncate (int or NoneType, optional): Defaults to ``None``.

    Returns:
        TextMultiField
    """
    # Fix: removed dataset-extraction metadata fused onto the return line.
    n_feats = kwargs["n_feats"]
    include_lengths = kwargs["include_lengths"]
    base_name = kwargs["base_name"]
    pad = kwargs.get("pad", "<blank>")
    bos = kwargs.get("bos", "<s>")
    eos = kwargs.get("eos", "</s>")
    truncate = kwargs.get("truncate", None)
    fields_ = []
    # Feature columns are separated by U+2502; absent when no feats.
    feat_delim = u"│" if n_feats > 0 else None
    for i in range(n_feats + 1):
        name = base_name + "_feat_" + str(i - 1) if i > 0 else base_name
        tokenize = partial(
            _feature_tokenize,
            layer=i,
            truncate=truncate,
            feat_delim=feat_delim)
        # Only the base (token) field reports sequence lengths.
        use_len = i == 0 and include_lengths
        feat = Field(
            init_token=bos, eos_token=eos,
            pad_token=pad, tokenize=tokenize,
            include_lengths=use_len)
        fields_.append((name, feat))
    assert fields_[0][0] == base_name  # sanity check
    field = TextMultiField(fields_[0][0], fields_[0][1], fields_[1:])
    return field
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from onmt.encoders.encoder import EncoderBase
from onmt.utils.rnn_factory import rnn_factory
class RNNEncoder(EncoderBase):
    """ A generic recurrent neural network encoder.

    Args:
        rnn_type (str):
            style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]
        bidirectional (bool) : use a bidirectional RNN
        num_layers (int) : number of stacked layers
        hidden_size (int) : hidden size of each layer
        dropout (float) : dropout value for :class:`torch.nn.Dropout`
        embeddings (onmt.modules.Embeddings): embedding module to use
        use_bridge (bool) : pass the final encoder state through a
            per-state linear + ReLU "bridge" before returning it
    """

    def __init__(self, rnn_type, bidirectional, num_layers,
                 hidden_size, dropout=0.0, embeddings=None,
                 use_bridge=False):
        super(RNNEncoder, self).__init__()
        assert embeddings is not None
        num_directions = 2 if bidirectional else 1
        # Each direction gets an equal share of the requested hidden size.
        assert hidden_size % num_directions == 0
        hidden_size = hidden_size // num_directions
        self.embeddings = embeddings
        self.rnn, self.no_pack_padded_seq = \
            rnn_factory(rnn_type,
                        input_size=embeddings.embedding_size,
                        hidden_size=hidden_size,
                        num_layers=num_layers,
                        dropout=dropout,
                        bidirectional=bidirectional)
        # Initialize the bridge layer
        self.use_bridge = use_bridge
        if self.use_bridge:
            self._initialize_bridge(rnn_type,
                                    hidden_size,
                                    num_layers)

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.rnn_type,
            opt.brnn,
            opt.enc_layers,
            opt.enc_rnn_size,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            embeddings,
            opt.bridge)

    def forward(self, src, lengths=None):
        """See :func:`EncoderBase.forward()`"""
        self._check_args(src, lengths)
        emb = self.embeddings(src)
        # s_len, batch, emb_dim = emb.size()
        packed_emb = emb
        if lengths is not None and not self.no_pack_padded_seq:
            # Lengths data is wrapped inside a Tensor.
            lengths_list = lengths.view(-1).tolist()
            packed_emb = pack(emb, lengths_list)
        memory_bank, encoder_final = self.rnn(packed_emb)
        if lengths is not None and not self.no_pack_padded_seq:
            # Restore the padded representation after the RNN.
            memory_bank = unpack(memory_bank)[0]
        if self.use_bridge:
            encoder_final = self._bridge(encoder_final)
        return encoder_final, memory_bank, lengths

    def _initialize_bridge(self, rnn_type,
                           hidden_size,
                           num_layers):
        # LSTM has hidden and cell state, other only one
        number_of_states = 2 if rnn_type == "LSTM" else 1
        # Total number of states
        self.total_hidden_dim = hidden_size * num_layers
        # Build a linear layer for each
        self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,
                                               self.total_hidden_dim,
                                               bias=True)
                                     for _ in range(number_of_states)])

    def _bridge(self, hidden):
        """Forward hidden state through bridge."""
        def bottle_hidden(linear, states):
            """
            Transform from 3D to 2D, apply linear and return initial size
            """
            size = states.size()
            result = linear(states.view(-1, self.total_hidden_dim))
            return F.relu(result).view(size)
        if isinstance(hidden, tuple):  # LSTM
            outs = tuple([bottle_hidden(layer, hidden[ix])
                          for ix, layer in enumerate(self.bridge)])
        else:
            outs = bottle_hidden(self.bridge[0], hidden)
        return outs
def update_dropout(self, dropout):
    """Propagate a new dropout probability to the wrapped RNN."""
    # Fix: removed dataset-extraction metadata fused onto this line.
    self.rnn.dropout = dropout
import torch.nn as nn
from onmt.encoders.encoder import EncoderBase
from onmt.modules import MultiHeadedAttention
from onmt.modules.position_ffn import PositionwiseFeedForward
from onmt.utils.misc import sequence_mask
class TransformerEncoderLayer(nn.Module):
    """
    A single layer of the transformer encoder.

    Args:
        d_model (int): the dimension of keys/values/queries in
            MultiHeadedAttention, also the input size of
            the first-layer of the PositionwiseFeedForward.
        heads (int): the number of head for MultiHeadedAttention.
        d_ff (int): the second-layer of the PositionwiseFeedForward.
        dropout (float): dropout probability(0-1.0).
        attention_dropout (float): dropout probability applied inside
            the attention module.
        max_relative_positions (int): maximum distance for relative
            position representations (0 disables them).
    """

    def __init__(self, d_model, heads, d_ff, dropout, attention_dropout,
                 max_relative_positions=0):
        super(TransformerEncoderLayer, self).__init__()
        self.self_attn = MultiHeadedAttention(
            heads, d_model, dropout=attention_dropout,
            max_relative_positions=max_relative_positions)
        self.feed_forward = PositionwiseFeedForward(d_model, d_ff, dropout)
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)
        self.dropout = nn.Dropout(dropout)

    def forward(self, inputs, mask):
        """
        Args:
            inputs (FloatTensor): ``(batch_size, src_len, model_dim)``
            mask (LongTensor): ``(batch_size, 1, src_len)``

        Returns:
            (FloatTensor):
            * outputs ``(batch_size, src_len, model_dim)``
        """
        # Pre-norm residual block: normalize, self-attend, add input back,
        # then apply the position-wise feed-forward sublayer.
        input_norm = self.layer_norm(inputs)
        context, _ = self.self_attn(input_norm, input_norm, input_norm,
                                    mask=mask, attn_type="self")
        out = self.dropout(context) + inputs
        return self.feed_forward(out)

    def update_dropout(self, dropout, attention_dropout):
        # Propagate new dropout rates into the sublayers.
        self.self_attn.update_dropout(attention_dropout)
        self.feed_forward.update_dropout(dropout)
        self.dropout.p = dropout
class TransformerEncoder(EncoderBase):
    """The Transformer encoder from "Attention is All You Need"
    :cite:`DBLP:journals/corr/VaswaniSPUJGKP17`

    .. mermaid::

       graph BT
          A[input]
          B[multi-head self-attn]
          C[feed forward]
          O[output]
          A --> B
          B --> C
          C --> O

    Args:
        num_layers (int): number of encoder layers
        d_model (int): size of the model
        heads (int): number of heads
        d_ff (int): size of the inner FF layer
        dropout (float): dropout parameters
        attention_dropout (float): dropout applied inside attention
        embeddings (onmt.modules.Embeddings):
            embeddings to use, should have positional encodings
        max_relative_positions (int): maximum distance for relative
            position representations (0 disables them)

    Returns:
        (torch.FloatTensor, torch.FloatTensor):

        * embeddings ``(src_len, batch_size, model_dim)``
        * memory_bank ``(src_len, batch_size, model_dim)``
    """

    def __init__(self, num_layers, d_model, heads, d_ff, dropout,
                 attention_dropout, embeddings, max_relative_positions):
        super(TransformerEncoder, self).__init__()
        self.embeddings = embeddings
        self.transformer = nn.ModuleList(
            [TransformerEncoderLayer(
                d_model, heads, d_ff, dropout, attention_dropout,
                max_relative_positions=max_relative_positions)
             for i in range(num_layers)])
        # Final normalization after the last (pre-norm) layer.
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-6)

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor."""
        return cls(
            opt.enc_layers,
            opt.enc_rnn_size,
            opt.heads,
            opt.transformer_ff,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            opt.attention_dropout[0] if type(opt.attention_dropout)
            is list else opt.attention_dropout,
            embeddings,
            opt.max_relative_positions)

    def forward(self, src, lengths=None):
        """See :func:`EncoderBase.forward()`"""
        self._check_args(src, lengths)
        emb = self.embeddings(src)
        # Layers expect batch-first input: (batch, src_len, model_dim).
        out = emb.transpose(0, 1).contiguous()
        # True where positions are padding (inverted sequence mask).
        mask = ~sequence_mask(lengths).unsqueeze(1)
        # Run the forward pass of every layer of the tranformer.
        for layer in self.transformer:
            out = layer(out, mask)
        out = self.layer_norm(out)
        return emb, out.transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout, attention_dropout):
    """Propagate new dropout rates to the embeddings and every layer."""
    # Fix: removed dataset-extraction metadata fused onto the last line.
    self.embeddings.update_dropout(dropout)
    for layer in self.transformer:
        layer.update_dropout(dropout, attention_dropout)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.