code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import dash_bio as dashbio
from .base import *
# NGL molecule representation styles offered in the settings dropdown;
# each entry uses the style name as both its label and its value.
representation_options = [
    {"label": style, "value": style}
    for style in (
        "backbone",
        "ball+stick",
        "cartoon",
        "hyperball",
        "licorice",
        "axes+box",
        "helixorient",
    )
]
def get_no_pdb_toast():
    """
    Build the dismissable toast (infobox) shown when the pdb file is missing
    for the selected ID.

    :return: no pdb file toast layout
    """
    # Pin the toast to the top-right corner of the viewport.
    toast_style = {
        "position": "fixed",
        "top": 66,
        "right": 10,
        "width": 350,
    }
    return dbc.Toast(
        "No pdb file found for this ID!",
        header="File error",
        id="no_pdb_toast",
        is_open=False,
        dismissable=True,
        duration=4000,
        style=toast_style,
    )
def get_pdb_offcanvas():
    """
    Layout of the offcanvas for the settings of the molecule displaying (pdb).

    Contains controls for the NGL viewer: representation styles, residue
    range and atom highlighting, spacing/size sliders and image download.

    :return: offcanvas layout
    """
    offcanvas = dbc.Offcanvas(
        id="molecules_offcanvas",
        title="Settings",
        is_open=False,
        style={"width": "50%", "max-width": "600px"},
        children=[
            # molecule representation styles (multi-select)
            dcc.Markdown("Representations:"),
            dcc.Dropdown(
                id="representation_dropdown",
                options=representation_options,
                multi=True,
                value=["cartoon"],
            ),
            html.Br(),
            # residue range and highlighted atoms; these dropdowns start
            # disabled (presumably enabled by a callback elsewhere once a
            # molecule is selected -- not visible in this file section)
            dbc.Row(
                [
                    dbc.Col(
                        [
                            dcc.Markdown("Start:"),
                            dcc.Dropdown(
                                id="range_start",
                                disabled=True,
                            ),
                        ]
                    ),
                    dbc.Col(
                        [
                            dcc.Markdown("End:"),
                            dcc.Dropdown(
                                id="range_end",
                                disabled=True,
                            ),
                        ]
                    ),
                    dbc.Col(
                        [
                            dcc.Markdown(
                                "Highlighted atoms:",
                            ),
                            dcc.Dropdown(
                                id="selected_atoms",
                                multi=True,
                                disabled=True,
                            ),
                        ]
                    ),
                ]
            ),
            html.Br(),
            # sliders controlling the layout of displayed molecules
            dcc.Markdown("Spacing:"),
            dcc.Slider(
                id="spacing_slider",
                min=10,
                max=200,
                value=50,
                marks=None,
                tooltip={
                    "placement": "bottom",
                    "always_visible": False,
                },
            ),
            dcc.Markdown("Space distribution:"),
            dcc.Slider(
                id="distribution_slider",
                min=3,
                max=9,
                value=6,
                step=1,
                marks=None,
            ),
            dbc.Button(
                "Recalculate molecule viewing size",
                id="recal_size_button",
                class_name="d-grid mx-auto",
                color="dark",
                outline=True,
                style={"margin-bottom": "10px"},
            ),
            # viewer dimensions in pixels
            dcc.Markdown("Height:"),
            dcc.Slider(
                id="height_slider",
                min=200,
                max=5000,
                value=300,
                marks=None,
                tooltip={
                    "placement": "bottom",
                    "always_visible": False,
                },
            ),
            dcc.Markdown("Width:"),
            dcc.Slider(
                id="width_slider",
                min=200,
                max=5000,
                value=500,
                marks=None,
                tooltip={
                    "placement": "bottom",
                    "always_visible": False,
                },
            ),
            # filename input plus download button; the button starts disabled
            # (presumably enabled elsewhere once a molecule is shown)
            dbc.Row(
                [
                    dbc.Col(
                        dcc.Input(
                            id="filename_input",
                            type="text",
                            placeholder="filename",
                            style={
                                "height": "38px",
                                "margin-right": "20px",
                            },
                        ),
                        width=6,
                    ),
                    dbc.Col(
                        dbc.Button(
                            "Download image",
                            id="download_molecule_button",
                            color="dark",
                            outline=True,
                            disabled=True,
                        ),
                        width=6,
                    ),
                ]
            ),
        ],
    )
    return offcanvas
def get_reset_view_button_tooltip(button_id: str):
    """
    Build the tooltip shown below the reset-view button.

    :param button_id: id of the button the tooltip is attached to
    :return: tooltip layout
    """
    return dbc.Tooltip(
        "Reset view",
        target=button_id,
        placement="bottom",
    )
def init_app_pdb(
    original_id_col: list,
    umap_paras: dict,
    csv_header: list[str],
    fig: go.Figure,
    dim_red: str,
    tsne_paras: dict,
):
    """
    Layout for the molecule displaying, in general the right column in pdb mode.

    Builds the full application layout: the embedding graph container on the
    left and the NGL molecule viewer with its controls on the right.

    :param original_id_col: IDs offered in the molecules dropdown and passed
        to the graph container.
    :param umap_paras: UMAP parameters passed to the graph container.
    :param csv_header: csv column names passed to the graph container.
    :param fig: figure shown inside the graph container.
    :param dim_red: dimensionality reduction identifier passed to the
        graph container.
    :param tsne_paras: t-SNE parameters passed to the graph container.
    :return: application layout
    """
    app = get_app()
    app.layout = dbc.Container(
        [
            # side components like header and the toasts
            get_side_components(app),
            # sizing of the molecule viewer
            dcc.Location(id="url"),
            html.Div(id="molviewer_sizing_div", hidden=True),
            # storage to save the selected molecules
            # Needed for image download name
            dcc.Store(id="mol_name_storage"),
            # Storage to save the values of the molecules dropdown
            dcc.Store(id="molecules_dropdown_save", data=[]),
            # Toast to display that pdb file is missing
            get_no_pdb_toast(),
            # offcanvas for the settings
            get_pdb_offcanvas(),
            get_settings_button_tooltip(button_id="molecules_settings_button"),
            get_reset_view_button_tooltip(button_id="reset_view_button"),
            # graph and controls
            dbc.Row(
                [
                    # left column: embedding graph
                    dbc.Col(
                        get_graph_container(
                            umap_paras,
                            True,
                            csv_header,
                            fig,
                            dim_red,
                            tsne_paras,
                            original_id_col,
                        ),
                        id="left_col",
                        width=6,
                        style={"border-right": "solid black 1px"},
                    ),
                    # right column: molecule selection and NGL viewer
                    dbc.Col(
                        [
                            dbc.Row(
                                [
                                    dbc.Col(
                                        [
                                            dcc.Markdown(
                                                "Molecules:",
                                                style={
                                                    "margin-top": "20px",
                                                    "margin-bottom": "0px",
                                                    "padding-top": "0px",
                                                    "padding-bottom": "0px",
                                                    "height": "30px",
                                                },
                                            ),
                                        ],
                                        xxl=9,
                                        xl=8,
                                        lg=7,
                                        md=6,
                                        sm=5,
                                        xs=4,
                                    ),
                                    # reset-view and settings buttons
                                    dbc.Col(
                                        [
                                            dbc.Stack(
                                                direction="horizontal",
                                                children=[
                                                    dbc.Button(
                                                        "",
                                                        id="reset_view_button",
                                                        class_name=(
                                                            "bi bi-arrow-counterclockwise"
                                                        ),
                                                        color="dark",
                                                        outline=True,
                                                        style=main_button_style,
                                                    ),
                                                    dbc.Button(
                                                        "",
                                                        id="molecules_settings_button",
                                                        class_name=(
                                                            "bi bi-gear-wide-connected"
                                                        ),
                                                        outline=True,
                                                        color="dark",
                                                        style=main_button_style,
                                                    ),
                                                ],
                                            ),
                                        ],
                                        xxl=3,
                                        xl=4,
                                        lg=5,
                                        md=6,
                                        sm=7,
                                        xs=8,
                                    ),
                                ]
                            ),
                            dcc.Dropdown(
                                id="molecules_dropdown",
                                options=original_id_col,
                                multi=True,
                                style={"margin-bottom": "5px"},
                            ),
                            html.Div(
                                [
                                    dashbio.NglMoleculeViewer(
                                        id="ngl_molecule_viewer",
                                    ),
                                ],
                                id="moleculeviewer_div",
                                style={
                                    "border-bottom": "1px solid grey",
                                    "border-right": "1px solid grey",
                                    "margin-left": "0px",
                                },
                            ),
                        ],
                        id="right_col",
                        width=6,
                    ),
                ]
            ),
            # modal with disclaimer that opens on startup
            # has to be at the end, otherwise automatic sizing doesn't work...
            get_disclaimer_modal(),
        ],
        fluid=True,
    )
    return app
# ROT2Prog
This is a python interface to the [Alfa ROT2Prog Controller](http://alfaradio.ca/docs/Manuals/RAS/Alfa_ROT2Prog_Controller-28March2019-Master.pdf). The ROT2Prog is an electronic controller used for turning rotators. The Controller may be connected to one Azimuth and Elevation rotator and operates with direct current motors. The ROT2Prog is designed to work with either an Alfa RAS or BIGRAS or a combination of one azimuth rotator RAU, RAK and a REAL rotator.
This package is responsible for implementing the serial [protocol](#protocol) to interact with the ROT2Prog controller. There is also a [simulation model](#simulation) of the ROT2Prog controller included in the package, which can be used for testing when hardware is not available.
### Contents
- [Getting Started](#getting-started)
+ [Installation](#installation)
+ [Usage](#usage)
* [API](https://htmlpreview.github.io/?https://github.com/tj-scherer/rot2prog/blob/master/docs/rot2prog/index.html)
+ [Simulation](#simulation)
- [Protocol](#protocol)
+ [Command Packet](#command-packet)
+ [Response Packet](#response-packet)
+ [Degrees Per Pulse](#degrees-per-pulse)
+ [Stop Command](#stop-command)
* [Example](#stop-command-example)
+ [Status Command](#status-command)
* [Example](#status-command-example)
+ [Set Command](#set-command)
* [Example](#set-command-example)
# Getting Started
If you intend to use this package with hardware:
1. Press setup key `S` until `PS` is displayed on the far left screen of the controller.
2. Use the `<` `>` keys to set the value (to the right of `PS`) to `SP`.
3. Press the function key `F` until `A` is displayed on the far left screen of the controller.
4. Congratulations! Your ROT2Prog will now respond to SPID commands.
> NOTE: The hardware is not required for testing, see [Simulation](#simulation).
### Installation
The `rot2prog` package is published on PyPi and can be installed in the terminal.
```
pip install rot2prog
```
This package was developed using Python `3.10.2`, and has not yet been tested with earlier releases of Python. If using an earlier version of Python, it is recommended to proceed with caution, running the [simulation](#simulation) and [standalone script](#usage) together to exercise all commands.
### Usage
1. Importing
```python
import rot2prog
rot = rot2prog.ROT2Prog('COM1')
```
> NOTE: For more information, reference the [API](https://htmlpreview.github.io/?https://github.com/tj-scherer/rot2prog/blob/master/docs/rot2prog/index.html) in `/docs/rot2prog`.
2. Standalone
```
python -m rot2prog.utils.run
```
> NOTE: The standalone mode offers direct access to the `stop`, `status`, and `set` commands, allowing the hardware to be controlled directly from the terminal.
### Simulation
Begin by establishing a connection between the two desired ports:
1. Use a tool such as [Free Virtual Serial Ports](https://freevirtualserialports.com/) to connect two virtual ports of the same host.
2. Use a male-male USB cable connected to two physical ports of the same host.
3. Use a male-male USB cable connected to two physical ports on different hosts. In this case, each host must run its own software to communicate.
```
python -m rot2prog.utils.sim
```
> NOTE: The simulator's serial connection should be established first.
> NOTE: The simulator does not perfectly match real-world behavior in regard to executing commands. The real system cannot move to a new position instantaneously, whereas the simulator currently does.
# Protocol
- The SPID protocol supports 3 commands:
+ **STOP**: Stops the rotator in its current position.
+ **STATUS**: Returns the current position of the rotator.
+ **SET**: Tells the rotator to rotate to a given position.
- The rotator controller communicates with the host using a serial port. The serial communication parameters are:
+ `600 bps`
+ `8 bits`
+ `no parity`
+ `1 stop bit`
- All commands are issued as 13 byte packets.
- All responses are received as 12 byte packets.
### Command Packet
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|:----------|:------|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | K | END |
| **Value** | 0x57 | 0x3? | 0x3? | 0x3? | 0x3? | 0x0? | 0x3? | 0x3? | 0x3? | 0x3? | 0x0? | 0x?F | 0x20 |
- **START** - Start byte (always 0x57)
- **H1 - H4** - Azimuth as ASCII characters 0-9
- **PH** - Azimuth resolution in pulses per degree (ignored in command packet)
- **V1 - V4** - Elevation as ASCII characters 0-9
- **PV** - Elevation resolution in pulses per degree (ignored in command packet)
- **K** - Command
+ 0x0F = STOP
+ 0x1F = STATUS
+ 0x2F = SET
- **END** - End byte (always 0x20)
### Response Packet
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
|:----------|:------|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | END |
| **Value** | 0x57 | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x0? | 0x20 |
- **START** - Start byte (always 0x57)
- **H1 - H4** - Azimuth as byte values
- **PH** - Azimuth resolution in pulses per degree
- **V1 - V4** - Elevation as byte values
- **PV** - Elevation resolution in pulses per degree
- **END** - End byte (always 0x20)
Positions from the response packet are decoded using the following formulas:
```python
az = (H1 * 100) + (H2 * 10) + H3 + (H4 / 10) - 360
el = (V1 * 100) + (V2 * 10) + V3 + (V4 / 10) - 360
```
### Degrees Per Pulse
The PH and PV values in the response packet reflect the settings of the rotator controller. The ROT2Prog supports the following resolutions (the value is always the same for azimuth and elevation):
| Degrees per pulse | PH | PV |
|:------------------|:-----|:-----|
| 1 | 0x01 | 0x01 |
| 0.5 | 0x02 | 0x02 |
| 0.25 | 0x04 | 0x04 |
### Stop Command
The stop command stops the rotator immediately in the current position and returns the current position.
*Command Packet*
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|:----------|:------|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | K | END |
| **Value** | 0x57 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x0F | 0x20 |
> NOTE: The H1-H4, PH, V1-V4 and PV fields are ignored, so only the START, K and END fields are used.
##### Stop Command Example
*Example Response Packet*
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
|:----------|:------|:---------|:---------|:---------|:---------|:-----|:---------|:---------|:---------|:---------|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | END |
| **Value** | 0x57 | 0x0**3** | 0x0**7** | 0x0**2** | 0x0**5** | 0x02 | 0x0**3** | 0x0**9** | 0x0**4** | 0x0**0** | 0x02 | 0x20 |
*Decoding Example Response Packet*
```python
az = (3 * 100) + (7 * 10) + 2 + (5 / 10) - 360 = 12.5
el = (3 * 100) + (9 * 10) + 4 + (0 / 10) - 360 = 34.0
PH = PV = 0x02
```
### Status Command
The status command returns the current position of the antenna.
> NOTE: Status commands can be issued while the rotator is moving and will always return the current position.
*Command Packet*
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|:----------|:------|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | K | END |
| **Value** | 0x57 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x00 | 0x1F | 0x20 |
> NOTE: The H1-H4, PH, V1-V4 and PV fields are ignored, so only the START, K and END fields are used.
##### Status Command Example
*Example Response Packet*
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 |
|:----------|:------|:---------|:---------|:---------|:---------|:-----|:---------|:---------|:---------|:---------|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | END |
| **Value** | 0x57 | 0x0**3** | 0x0**7** | 0x0**2** | 0x0**5** | 0x02 | 0x0**3** | 0x0**9** | 0x0**4** | 0x0**0** | 0x02 | 0x20 |
*Decoding Example Response Packet*
```python
az = (3 * 100) + (7 * 10) + 2 + (5 / 10) - 360 = 12.5
el = (3 * 100) + (9 * 10) + 4 + (0 / 10) - 360 = 34.0
PH = PV = 0x02
```
### Set Command
The set command tells the rotator to turn to a specific position. The controller does not send a response to this command.
*Encoding Command Packet*
```python
H = PH * (az + 360)
V = PV * (el + 360)
```
> NOTE: H1-H4 and V1-V4 are H and V converted to ASCII (0x30-0x39, i.e., '0'-'9').
##### Set Command Example
*Encoding Example Command Packet*
```
az = 123.5
el = 77.0
PH = PV = 0x2
```
```python
H = 2 * (123.5 + 360) = 967
V = 2 * (77.0 + 360) = 874
```
*Example Command Packet*
```
H = 0967
V = 0874
PH = PV = 0x2
```
| Byte | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |
|:----------|:------|:---------|:---------|:---------|:---------|:-----|:---------|:---------|:---------|:---------|:-----|:-----|:-----|
| **Field** | START | H1 | H2 | H3 | H4 | PH | V1 | V2 | V3 | V4 | PV | K | END |
| **Value** | 0x57 | 0x3**0** | 0x3**9** | 0x3**6** | 0x3**7** | 0x02 | 0x3**0** | 0x3**8** | 0x3**7** | 0x3**4** | 0x02 | 0x2F | 0x20 |
> NOTE: The PH and PV values are ignored. The values used by the rotator control unit are set by choosing resolution in the setup menu directly on the controller. These values can be read using the status command if they are unknown.
import struct
from typing import Callable, Dict
import minimalmodbus
import cachetools.func
from loguru import logger as log
def bits_to_dict(value, structure_class: Callable):
    """
    Unpack an integer register value into a dict of named bitfield values.

    :param value: the raw integer read from a register.
    :param structure_class: a ctypes bitfield structure class exposing a
        ``pack_format`` struct format string and ``_fields_``.
    :return: mapping of field name to its decoded value.
    """
    bits = structure_class()
    # Write the raw integer into the structure's backing buffer so the
    # individual bitfields can be read back as attributes.
    struct.pack_into(
        bits.pack_format,
        bits,
        0,
        value
    )
    return {name: getattr(bits, name) for name, *_ in bits._fields_}
# Inverse operation of bits_to_dict: dict of bitfield values -> packed integer.
def dict_to_bits(values: Dict, structure_class: Callable) -> int:
    """
    Pack a dict of named bitfield values into a single integer register value.

    :param values: mapping of field name to the value to store.
    :param structure_class: a ctypes bitfield structure class exposing a
        ``pack_format`` struct format string.
    :return: the packed integer ready to be written to a register.
    """
    cs = structure_class()
    # Plain loop instead of a list comprehension: setattr is called purely
    # for its side effect of writing into the structure's bitfields.
    for item, value in values.items():
        setattr(cs, item, value)
    # Read the structure's backing buffer back out as one integer.
    value = struct.unpack_from(
        cs.pack_format,
        cs,
        0
    )[0]
    return value
class BaseStructureManager:
    """
    Reads and writes a single Modbus register interpreted as a bitfield
    structure, exposing individual bits by name.
    """

    def __init__(self, device: minimalmodbus.Instrument, control_structure: Callable, register_address: int):
        # Modbus instrument used for all register I/O.
        self.device = device
        # ctypes bitfield structure class describing the register layout.
        self.control_structure = control_structure
        # Address of the register this manager controls.
        self.register_address = register_address

    # NOTE(review): ttl_cache on a method includes `self` in the cache key, so
    # the cache is effectively per-instance but keeps instances alive for the
    # cache lifetime -- acceptable here given the 0.1 s TTL.
    @cachetools.func.ttl_cache(ttl=0.1)
    def get_register(self) -> int:
        """
        Gets the register value and keeps the value in the cache for reuse for .1 seconds
        """
        try:
            value = self.device.read_register(self.register_address)
            return value
        except Exception as e:
            # On any communication failure, log and fall back to 0; callers
            # will then see all bits cleared rather than an exception.
            log.exception(e.__str__())
            return 0

    def set_register(self, value: int):
        """
        Sets the register value to the given value
        """
        try:
            self.device.write_register(self.register_address, value=value)
        except Exception as e:
            # Write failures are logged but not propagated.
            log.exception(e.__str__())

    def _set_bit_value(self, bit_name: str, value: bool):
        """
        Updates the value of the given bit name with the specified value
        """
        # Read-modify-write: decode the whole register, flip one field,
        # re-encode and write it back.
        register_value = self.get_register()
        status = bits_to_dict(register_value, self.control_structure)
        status[bit_name] = value
        register_value = dict_to_bits(status, self.control_structure)
        self.set_register(register_value)

    def _get_bit_value(self, bit_name: str) -> bool:
        """
        Returns the bit value for the specified bit
        """
        register_value = self.get_register()
        status = bits_to_dict(register_value, self.control_structure)
        return status[bit_name]
from inspect import isfunction
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
# helper functions
#The three functions of rearrange, irearrange and repeat have been written
# due to the incompatibility of the einops library with tensorflow 2.x.
def rearrange(x, r=2):
    """Reshape ``x`` so its last axis is split into (last/r, r)."""
    shape = tf.shape(x)
    leading = shape[:-1]
    last = shape[-1, None]
    group = tf.constant([r], dtype=tf.int32)
    split = tf.cast(last / group, dtype=tf.int32)
    new_shape = tf.concat([leading, split, group], axis=0)
    return tf.reshape(x, new_shape)
def irearrange(x):
    """Collapse the final two axes of ``x`` into one (inverse of rearrange)."""
    shape = tf.shape(x)
    merged = tf.reduce_prod(shape[-2:])[None]
    new_shape = tf.concat([shape[:-2], merged], axis=0)
    return tf.reshape(x, new_shape)
def repeat(x, r):
    """Tile ``x`` ``r`` times along its last axis only."""
    multiples = tf.ones_like(tf.shape(x), dtype=tf.int32)
    scaled_last = multiples[-1][None] * r
    multiples = tf.concat([multiples[:-1], scaled_last], axis=0)
    return tf.tile(x, multiples)
def exists(val):
    """Return True when ``val`` is anything other than None."""
    return val is not None
def broadcat(tensors, dim = -1):
    """
    Concatenate ``tensors`` along ``dim``, broadcasting every other axis to
    the common (maximum) size first.
    """
    num_tensors = len(tensors)
    # All tensors must share the same rank.
    shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
    assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
    shape_len = list(shape_lens)[0]
    # Normalize a negative concat axis to a positive index.
    dim = (dim + shape_len) if dim < 0 else dim
    # dims[i] holds the size of axis i across every tensor.
    dims = list(zip(*map(lambda t: list(t.shape), tensors)))
    expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    # Each non-concat axis may only have two distinct sizes (1 and the target),
    # otherwise broadcasting is impossible.
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'
    # Target size per axis is the maximum seen across tensors.
    max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
    expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
    # Re-insert the concat axis with each tensor's own (unchanged) size.
    expanded_dims.insert(dim, (dim, dims[dim]))
    expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
    tensors = list(map(lambda t: tf.broadcast_to(t[0], t[1]), zip(tensors, expandable_shapes)))
    return tf.concat(tensors, axis=dim)
# rotary embedding helper functions
def rotate_half(x):
    """Rotate adjacent feature pairs: (x1, x2) -> (-x2, x1) on the last axis."""
    paired = rearrange(x, r = 2)
    first, second = tf.unstack(paired, axis=-1)
    rotated = tf.stack((-second, first), axis=-1)
    return irearrange(rotated)
def apply_rotary_emb(freqs, t, start_index = 0):
    """
    Apply rotary embedding ``freqs`` to the feature slice of ``t`` starting at
    ``start_index``; features outside the slice pass through unchanged.
    """
    rot_dim = freqs.shape[-1]
    end_index = start_index + rot_dim
    assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
    left = t[..., :start_index]
    middle = t[..., start_index:end_index]
    right = t[..., end_index:]
    rotated = (middle * tf.cos(freqs)) + (rotate_half(middle) * tf.sin(freqs))
    return tf.concat((left, rotated, right), axis=-1)
# learned rotation helpers
def apply_learned_rotations(rotations, t, start_index = 0, freq_ranges = None):
    """Expand learned per-position rotations and apply them to ``t``."""
    if freq_ranges is not None:
        # Scale each rotation by every frequency in the range, then flatten.
        rotations = tf.einsum('..., f -> ... f', rotations, freq_ranges)
        rotations = irearrange(rotations)
    # Duplicate each rotation so it covers an (x1, x2) feature pair.
    rotations = repeat(rotations, r = 2)
    return apply_rotary_emb(rotations, t, start_index = start_index)
# classes
class RotaryEmbedding(layers.Layer):
    """
    Keras layer that maps positions ``t`` to rotary embedding frequencies,
    suitable for :func:`apply_rotary_emb`.
    """

    def __init__(
        self,
        dim,
        custom_freqs = None,
        freqs_for = 'lang',
        theta = 10000,
        max_freq = 10,
        num_freqs = 1,
        learned_freq = False
    ):
        super(RotaryEmbedding, self).__init__()
        # Base frequencies: user-supplied, language-model style (inverse
        # powers of theta), pixel style (log-spaced up to max_freq * pi),
        # or a constant vector of ones.
        if exists(custom_freqs):
            freqs = custom_freqs
        elif freqs_for == 'lang':
            freqs = tf.convert_to_tensor(1. / (theta ** (np.arange(0, dim, 2)[:(dim // 2)] / dim)), dtype=tf.float32)
        elif freqs_for == 'pixel':
            freqs = tf.convert_to_tensor(np.logspace(0., np.log(max_freq / 2) / np.log(2), dim // 2, base = 2) * np.pi, dtype=tf.float32)
        elif freqs_for == 'constant':
            freqs = tf.ones(num_freqs, dtype=tf.float32)
        else:
            raise ValueError(f'unknown modality {freqs_for}')
        # Memoized frequency tensors, keyed by caller-supplied cache keys.
        self.cache = dict()
        if learned_freq:
            self.freqs = tf.Variable(freqs, trainable=True)
        else:
            # Fixed frequencies; the torch original uses register_buffer here,
            # which has no direct TF equivalent.
            self.freqs = freqs

    def call(self, t, cache_key = None):
        """
        Compute rotary frequencies for positions ``t``.

        ``t`` may be a tensor of positions or a zero-argument callable that
        returns one (evaluated lazily so cache hits skip the work).
        """
        if exists(cache_key) and cache_key in self.cache:
            return self.cache[cache_key]
        if isfunction(t):
            t = t()
        freqs = self.freqs
        # Outer product of positions and base frequencies, then duplicate each
        # frequency so it covers an (x1, x2) feature pair.
        freqs = tf.einsum('..., f -> ... f', tf.cast(t, dtype=freqs.dtype), freqs)
        freqs = repeat(freqs, r = 2)
        if exists(cache_key):
            self.cache[cache_key] = freqs
        return freqs
from math import pi, log
import torch
from torch import nn, einsum
from einops import rearrange, repeat
# helper functions
def exists(val):
    """Return True unless ``val`` is None."""
    return not (val is None)
def broadcat(tensors, dim = -1):
    """
    Concatenate ``tensors`` along ``dim``, expanding every other axis to the
    common (maximum) size first.
    """
    num_tensors = len(tensors)
    # All tensors must share the same rank.
    shape_lens = set(list(map(lambda t: len(t.shape), tensors)))
    assert len(shape_lens) == 1, 'tensors must all have the same number of dimensions'
    shape_len = list(shape_lens)[0]
    # Normalize a negative concat axis to a positive index.
    dim = (dim + shape_len) if dim < 0 else dim
    # dims[i] holds the size of axis i across every tensor.
    dims = list(zip(*map(lambda t: list(t.shape), tensors)))
    expandable_dims = [(i, val) for i, val in enumerate(dims) if i != dim]
    # Each non-concat axis may only have two distinct sizes (1 and the target),
    # otherwise broadcasting is impossible.
    assert all([*map(lambda t: len(set(t[1])) <= 2, expandable_dims)]), 'invalid dimensions for broadcastable concatentation'
    # Target size per axis is the maximum seen across tensors.
    max_dims = list(map(lambda t: (t[0], max(t[1])), expandable_dims))
    expanded_dims = list(map(lambda t: (t[0], (t[1],) * num_tensors), max_dims))
    # Re-insert the concat axis with each tensor's own (unchanged) size.
    expanded_dims.insert(dim, (dim, dims[dim]))
    expandable_shapes = list(zip(*map(lambda t: t[1], expanded_dims)))
    tensors = list(map(lambda t: t[0].expand(*t[1]), zip(tensors, expandable_shapes)))
    return torch.cat(tensors, dim = dim)
# rotary embedding helper functions
def rotate_half(x):
    """Rotate adjacent feature pairs: (x1, x2) -> (-x2, x1) on the last axis."""
    paired = rearrange(x, '... (d r) -> ... d r', r = 2)
    first, second = paired.unbind(dim = -1)
    rotated = torch.stack((-second, first), dim = -1)
    return rearrange(rotated, '... d r -> ... (d r)')
def apply_rotary_emb(freqs, t, start_index = 0, scale = 1.):
    """
    Apply rotary embedding ``freqs`` (optionally scaled, for xpos) to the
    feature slice of ``t`` starting at ``start_index``; features outside the
    slice pass through unchanged.
    """
    freqs = freqs.to(t)
    rot_dim = freqs.shape[-1]
    end_index = start_index + rot_dim
    assert rot_dim <= t.shape[-1], f'feature dimension {t.shape[-1]} is not of sufficient size to rotate in all the positions {rot_dim}'
    left = t[..., :start_index]
    middle = t[..., start_index:end_index]
    right = t[..., end_index:]
    rotated = (middle * freqs.cos() * scale) + (rotate_half(middle) * freqs.sin() * scale)
    return torch.cat((left, rotated, right), dim = -1)
# learned rotation helpers
def apply_learned_rotations(rotations, t, start_index = 0, freq_ranges = None):
    """Expand learned per-position rotations and apply them to ``t``."""
    if freq_ranges is not None:
        # Scale each rotation by every frequency in the range, then flatten.
        rotations = einsum('..., f -> ... f', rotations, freq_ranges)
        rotations = rearrange(rotations, '... r f -> ... (r f)')
    # Duplicate each rotation so it covers an (x1, x2) feature pair.
    rotations = repeat(rotations, '... n -> ... (n r)', r = 2)
    return apply_rotary_emb(rotations, t, start_index = start_index)
# classes
class RotaryEmbedding(nn.Module):
    """
    Rotary position embedding module with optional learned frequencies,
    NTK-style theta rescaling, position interpolation, and xpos length
    extrapolation (scaled queries/keys).
    """

    def __init__(
        self,
        dim,
        custom_freqs = None,
        freqs_for = 'lang',
        theta = 10000,
        max_freq = 10,
        num_freqs = 1,
        learned_freq = False,
        use_xpos = False,
        xpos_scale_base = 512,
        interpolate_factor = 1.,
        theta_rescale_factor = 1.
    ):
        super().__init__()
        # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning
        # has some connection to NTK literature
        # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/
        theta *= theta_rescale_factor ** (dim / (dim - 2))

        # Base frequencies: user-supplied, language-model style (inverse
        # powers of theta), pixel style (linear up to max_freq/2 * pi),
        # or a constant vector of ones.
        if exists(custom_freqs):
            freqs = custom_freqs
        elif freqs_for == 'lang':
            freqs = 1. / (theta ** (torch.arange(0, dim, 2)[:(dim // 2)].float() / dim))
        elif freqs_for == 'pixel':
            freqs = torch.linspace(1., max_freq / 2, dim // 2) * pi
        elif freqs_for == 'constant':
            freqs = torch.ones(num_freqs).float()
        else:
            raise ValueError(f'unknown modality {freqs_for}')

        # Memoized frequency / scale tensors, keyed by caller-supplied keys.
        self.cache = dict()
        self.cache_scale = dict()
        self.freqs = nn.Parameter(freqs, requires_grad = learned_freq)

        # interpolation factors
        assert interpolate_factor >= 1.
        self.interpolate_factor = interpolate_factor

        # xpos
        self.use_xpos = use_xpos
        if not use_xpos:
            self.register_buffer('scale', None)
            return

        scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
        self.scale_base = xpos_scale_base
        self.register_buffer('scale', scale)

    def get_seq_pos(self, seq_len, device, dtype, offset = 0):
        """Sequence positions 0..seq_len-1 (plus offset), interpolation-scaled."""
        return (torch.arange(seq_len, device = device, dtype = dtype) + offset) / self.interpolate_factor

    def rotate_queries_or_keys(self, t, seq_dim = -2, offset = 0):
        """Apply rotary embeddings to ``t`` (queries or keys); non-xpos path."""
        assert not self.use_xpos, 'you must use `.rotate_queries_and_keys` method instead and pass in both queries and keys, for length extrapolatable rotary embeddings'
        device, dtype, seq_len = t.device, t.dtype, t.shape[seq_dim]
        freqs = self.forward(lambda: self.get_seq_pos(seq_len, device = device, dtype = dtype, offset = offset), cache_key = f'freqs:{seq_len}|offset:{offset}')
        return apply_rotary_emb(freqs, t)

    def rotate_queries_and_keys(self, q, k, seq_dim = -2):
        """Apply xpos rotary embeddings: queries get scale, keys get scale^-1."""
        assert self.use_xpos
        device, dtype, seq_len = q.device, q.dtype, q.shape[seq_dim]
        seq = self.get_seq_pos(seq_len, dtype = dtype, device = device)
        freqs = self.forward(lambda: seq, cache_key = f'freqs:{seq_len}')
        scale = self.get_scale(lambda: seq, cache_key = f'scale:{seq_len}').to(dtype)
        rotated_q = apply_rotary_emb(freqs, q, scale = scale)
        rotated_k = apply_rotary_emb(freqs, k, scale = scale ** -1)
        return rotated_q, rotated_k

    def get_scale(self, t, cache_key = None):
        """Per-position xpos scale factors for positions ``t`` (memoized)."""
        assert self.use_xpos

        if exists(cache_key) and cache_key in self.cache:
            return self.cache[cache_key]

        if callable(t):
            t = t()

        scale = 1.
        if self.use_xpos:
            # Power grows with distance from the sequence midpoint.
            power = (t - len(t) // 2) / self.scale_base
            scale = self.scale ** rearrange(power, 'n -> n 1')
            scale = torch.cat((scale, scale), dim = -1)

        if exists(cache_key):
            self.cache[cache_key] = scale

        return scale

    def forward(self, t, cache_key = None):
        """Compute rotary frequencies for positions ``t`` (memoized)."""
        if exists(cache_key) and cache_key in self.cache:
            return self.cache[cache_key]

        if callable(t):
            t = t()

        freqs = self.freqs

        # Outer product of positions and base frequencies, then duplicate each
        # frequency so it covers an (x1, x2) feature pair.
        freqs = torch.einsum('..., f -> ... f', t.type(freqs.dtype), freqs)
        freqs = repeat(freqs, '... n -> ... (n r)', r = 2)

        if exists(cache_key):
            self.cache[cache_key] = freqs

        return freqs
# Standard library modules.
import collections
import datetime
import fnmatch
import functools
import logging
import os
import re
# External dependencies.
from dateutil.relativedelta import relativedelta
from executor import execute
from humanfriendly import format_path, parse_path, Timer
from humanfriendly.text import concatenate, split
from natsort import natsort
from six.moves import configparser
import boto
from rotate_backups import Backup, RotateBackups, TIMESTAMP_PATTERN
# Semi-standard module versioning.
__version__ = '0.3'
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
GLOBAL_CONFIG_FILE = '/etc/rotate-backups-s3.ini'
"""The pathname of the system wide configuration file (a string)."""
LOCAL_CONFIG_FILE = '~/.rotate-backups-s3.ini'
"""The pathname of the user specific configuration file (a string)."""
ORDERED_FREQUENCIES = (('hourly', relativedelta(hours=1)),
('daily', relativedelta(days=1)),
('weekly', relativedelta(weeks=1)),
('monthly', relativedelta(months=1)),
('yearly', relativedelta(years=1)))
"""
A list of tuples with two values each:
- The name of a rotation frequency (a string like 'hourly', 'daily', etc.).
- A :class:`~dateutil.relativedelta.relativedelta` object.
The tuples are sorted by increasing delta (intentionally).
"""
SUPPORTED_FREQUENCIES = dict(ORDERED_FREQUENCIES)
"""
A dictionary with rotation frequency names (strings) as keys and
:class:`~dateutil.relativedelta.relativedelta` objects as values. This
dictionary is generated based on the tuples in :data:`ORDERED_FREQUENCIES`.
"""
class S3RotateBackups(RotateBackups):
"""Python API for the ``rotate-backups-s3`` program."""
def __init__(self, rotation_scheme, aws_access_key_id, aws_secret_access_key,
include_list=None, exclude_list=None, dry_run=False,
config_file=None):
"""
Construct a :class:`S3RotateBackups` object.
:param rotation_scheme: A dictionary with one or more of the keys 'hourly',
'daily', 'weekly', 'monthly', 'yearly'. Each key is
expected to have one of the following values:
- An integer gives the number of backups in the
corresponding category to preserve, starting from
the most recent backup and counting back in
time.
- The string 'always' means all backups in the
corresponding category are preserved (useful for
the biggest time unit in the rotation scheme).
By default no backups are preserved for categories
(keys) not present in the dictionary.
:param include_list: A list of strings with :mod:`fnmatch` patterns. If a
nonempty include list is specified each backup must
match a pattern in the include list, otherwise it
will be ignored.
:param exclude_list: A list of strings with :mod:`fnmatch` patterns. If a
backup matches the exclude list it will be ignored,
*even if it also matched the include list* (it's the
only logical way to combine both lists).
:param dry_run: If this is ``True`` then no changes will be made, which
provides a 'preview' of the effect of the rotation scheme
(the default is ``False``). Right now this is only useful
in the command line interface because there's no return
value.
:param io_scheduling_class: Use ``ionice`` to set the I/O scheduling class
(expected to be one of the strings 'idle',
'best-effort' or 'realtime').
:param config_file: The pathname of a configuration file (a string).
"""
self.aws_access_key_id = aws_access_key_id
self.aws_secret_access_key = aws_secret_access_key
self.conn = boto.connect_s3(aws_access_key_id, aws_secret_access_key)
super(S3RotateBackups, self).__init__(rotation_scheme,
include_list=include_list, exclude_list=exclude_list,
dry_run=dry_run, config_file=config_file)
def rotate_backups(self, bucketname):
"""
Rotate the backups in a bucket according to a flexible rotation scheme.
:param bucketname: S3 bucketthat contains backups to rotate (a string).
"""
bucket = self.conn.get_bucket(bucketname)
# Collect the backups in the given directory.
sorted_backups = self.collect_backups(bucketname)
if not sorted_backups:
logger.info("No backups found in %s.", bucketname)
return
most_recent_backup = sorted_backups[-1]
# Group the backups by the rotation frequencies.
backups_by_frequency = self.group_backups(sorted_backups)
# Apply the user defined rotation scheme.
self.apply_rotation_scheme(backups_by_frequency, most_recent_backup.timestamp)
# Find which backups to preserve and why.
backups_to_preserve = self.find_preservation_criteria(backups_by_frequency)
# Apply the calculated rotation scheme.
deleted_files = []
for backup in sorted_backups:
if backup in backups_to_preserve:
matching_periods = backups_to_preserve[backup]
logger.info("Preserving %s (matches %s retention %s) ..",
backup.pathname, concatenate(map(repr, matching_periods)),
"period" if len(matching_periods) == 1 else "periods"
)
else:
logger.info("Deleting %s %s ..", backup.type, backup.pathname)
if not self.dry_run:
logger.debug("Marking %s for deletion.", backup.pathname)
deleted_files.append(backup.pathname)
if deleted_files:
bucket.delete_keys(deleted_files)
if len(backups_to_preserve) == len(sorted_backups):
logger.info("Nothing to do! (all backups preserved)")
    def collect_backups(self, bucketname):
        """
        Collect the backups in the given S3 bucket.

        :param bucketname: Name of the S3 bucket to scan (a string).
        :returns: A sorted :class:`list` of :class:`S3Backup` objects (the
                  backups are sorted by their date).
        """
        backups = []
        bucket = self.conn.get_bucket(bucketname)
        logger.info("Scanning bucket for backups: %s", bucketname)
        for entry in natsort([key.name for key in bucket.list()]):
            # Check for a time stamp in the directory entry's name.
            match = TIMESTAMP_PATTERN.search(entry)
            if match:
                # Make sure the entry matches the given include/exclude patterns.
                if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):
                    logger.debug("Excluded %r (it matched the exclude list).", entry)
                elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):
                    logger.debug("Excluded %r (it didn't match the include list).", entry)
                else:
                    backups.append(S3Backup(
                        pathname=entry,
                        # groups('0') substitutes '0' for any timestamp
                        # component that did not participate in the match.
                        timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),
                    ))
            else:
                logger.debug("Failed to match time stamp in filename: %s", entry)
        if backups:
            logger.info("Found %i timestamped backups in %s.", len(backups), bucket)
        return sorted(backups)
class S3Backup(Backup):
    """A single timestamped backup stored as a key in an S3 bucket."""

    @property
    def type(self):
        """Get a string describing the type of backup (always ``'s3_file'`` for S3 keys)."""
        return 's3_file'
from Xlib import display
from Xlib.ext import randr
from typing import List, Tuple
# Create an X display and get the root window + its resources.
# NOTE: these are module-level singletons shared by every Display object;
# the X connection stays open for the lifetime of the process.
d = display.Display()
root = d.screen().root
res = root.xrandr_get_screen_resources()
class Display:
    """A single physical display: an XRandR output attached to a CRTC."""

    def __init__(self, output_id, crtc_id):
        self._output_id = output_id
        self._crtc_id = crtc_id

    def __repr__(self):
        return f"<'{self.device_description[0]}' object>"

    def rotate_to(self, degrees: int) -> None:
        """Rotate the display to ``degrees`` (0, 90, 180 or 270).

        Also resizes the X screen when switching between landscape and
        portrait, because the rotated output swaps its width and height.

        :raises ValueError: if ``degrees`` is not one of the four values.
        """
        if degrees == 90:
            rotation_val = randr.Rotate_90
        elif degrees == 180:
            rotation_val = randr.Rotate_180
        elif degrees == 270:
            rotation_val = randr.Rotate_270
        elif degrees == 0:
            rotation_val = randr.Rotate_0
        else:
            raise ValueError("Display can only be rotated to 0, 90, 180, or 270 degrees.")

        # NOTE: fetch the CRTC info only once, as calling self._crtc_info too
        # many times results in https://github.com/python-xlib/python-xlib/issues/241
        crtc_info = self._crtc_info

        # Set screen size, if needed (only when landscape <-> portrait flips).
        if (self.current_orientation in (0, 180) and rotation_val in (randr.Rotate_90, randr.Rotate_270)) \
                or (self.current_orientation in (90, 270) and rotation_val in (randr.Rotate_0, randr.Rotate_180)):
            # Start with the flipped extents of the display being rotated.
            max_w = crtc_info.x + crtc_info.height
            max_h = crtc_info.y + crtc_info.width
            # Grow the screen so every other display still fits.
            # (Loop variable renamed from ``display``: it shadowed the
            # imported Xlib ``display`` module.)
            for disp in get_displays():
                disp_crtc_info = disp._crtc_info
                max_w = max(max_w, disp_crtc_info.x + disp_crtc_info.width)
                max_h = max(max_h, disp_crtc_info.y + disp_crtc_info.height)
            screen_size_range = root.xrandr_get_screen_size_range()
            # NOTE: Chosen to allow xlib to omit parts of the screen that
            # overflow max screen size instead of erroring.
            width = min(max(max_w, screen_size_range.min_width), screen_size_range.max_width)
            height = min(max(max_h, screen_size_range.min_height), screen_size_range.max_height)
            # Derive the physical size from a conventional 96 DPI; X uses it
            # for reporting only.
            dpi = 96.0
            width_mm = int((25.4 * width) / dpi)
            height_mm = int((25.4 * height) / dpi)
            root.xrandr_set_screen_size(width=width, height=height,
                                        width_in_millimeters=width_mm,
                                        height_in_millimeters=height_mm)
        set_crtc_config_result = d.xrandr_set_crtc_config(
            crtc=self._crtc_id,
            rotation=rotation_val,
            x=crtc_info.x,
            y=crtc_info.y,
            mode=crtc_info.mode,
            outputs=crtc_info.outputs,
            config_timestamp=res.config_timestamp,
        )
        assert set_crtc_config_result.status == 0, f"xrandr failed to set crtc config for crtc id '{self._crtc_id}' on Display '{self}'"

    def set_landscape(self) -> None:
        self.rotate_to(0)

    def set_landscape_flipped(self) -> None:
        self.rotate_to(180)

    def set_portrait(self) -> None:
        self.rotate_to(90)

    def set_portrait_flipped(self) -> None:
        self.rotate_to(270)

    @property
    def _crtc_info(self):
        """Fresh CRTC info snapshot; see the NOTE in rotate_to about rate limits."""
        return d.xrandr_get_crtc_info(self._crtc_id, res.config_timestamp)

    @property
    def current_orientation(self) -> int:
        """Current rotation of the display in degrees (0, 90, 180 or 270)."""
        # Get the CRTC's current mode information.
        mode_info = d.xrandr_get_crtc_info(self._crtc_id, res.config_timestamp)
        rotation = mode_info.rotation
        # NOTE(review): ``rotation`` may also carry reflection bits
        # (Reflect_X / Reflect_Y); if so no branch matches and None is
        # returned implicitly -- confirm whether reflection can occur here.
        if rotation == randr.Rotate_0:
            return 0
        elif rotation == randr.Rotate_90:
            return 90
        elif rotation == randr.Rotate_180:
            return 180
        elif rotation == randr.Rotate_270:
            return 270

    @property
    def is_primary(self) -> bool:
        """Whether this output is the primary display."""
        primary_output = root.xrandr_get_output_primary().output
        return self._output_id == primary_output

    @property
    def device_description(self) -> Tuple[str, str]:
        """Return the output name and the CRTC id (both as strings)."""
        output_info = d.xrandr_get_output_info(self._output_id, res.config_timestamp)
        return output_info.name, str(self._crtc_id)

    # xrandr-style aliases (as in ``xrandr --rotate normal|inverted|left|right``).
    # BUG FIX: ``inverted`` previously aliased set_landscape (0 degrees); in
    # xrandr terminology "inverted" is a 180 degree rotation.
    normal = set_landscape
    inverted = set_landscape_flipped
    left = set_portrait
    right = set_portrait_flipped
def get_displays() -> List[Display]:
    """Return a Display object for every output currently attached to a CRTC."""
    infos = ((output_id, d.xrandr_get_output_info(output_id, res.config_timestamp))
             for output_id in res.outputs)
    return [Display(output_id, info.crtc) for output_id, info in infos if info.crtc]
def get_primary_display():
    """Return the primary Display, or None when no output is marked primary."""
    return next((disp for disp in get_displays() if disp.is_primary), None)
def get_secondary_displays() -> List[Display]:
    """Return every Display that is not the primary one."""
    secondary = []
    for disp in get_displays():
        if not disp.is_primary:
            secondary.append(disp)
    return secondary
from typing import Dict, List, Tuple
import win32api
import win32con
class Display:
    """A single physical display, identified by a Win32 monitor handle."""

    def __init__(self, hMonitor):
        self.hMonitor = hMonitor

    def __repr__(self):
        return f"<'{self.device_description[0]}' object>"

    def rotate_to(self, degrees: int) -> None:
        """Rotate the display to ``degrees`` (0, 90, 180 or 270).

        :raises ValueError: if ``degrees`` is not one of the four values.
        """
        if degrees == 90:
            rotation_val = win32con.DMDO_90
        elif degrees == 180:
            rotation_val = win32con.DMDO_180
        elif degrees == 270:
            rotation_val = win32con.DMDO_270
        elif degrees == 0:
            rotation_val = win32con.DMDO_DEFAULT
        else:
            raise ValueError("Display can only be rotated to 0, 90, 180, or 270 degrees.")

        dm = self.devicemodeW
        # DMDO_* values count quarter turns, so differing parity between the
        # current and requested orientations means we switch between
        # landscape and portrait and must swap the pixel dimensions.
        if (dm.DisplayOrientation + rotation_val) % 2 == 1:
            dm.PelsWidth, dm.PelsHeight = dm.PelsHeight, dm.PelsWidth
        dm.DisplayOrientation = rotation_val
        win32api.ChangeDisplaySettingsEx(self.device, dm)

    def set_landscape(self) -> None:
        self.rotate_to(0)

    def set_landscape_flipped(self) -> None:
        self.rotate_to(180)

    def set_portrait(self) -> None:
        self.rotate_to(90)

    def set_portrait_flipped(self) -> None:
        self.rotate_to(270)

    @property
    def current_orientation(self) -> int:
        """Current rotation in degrees (0, 90, 180 or 270)."""
        # DisplayOrientation is a quarter-turn count (DMDO_DEFAULT .. DMDO_270).
        state: int = self.devicemodeW.DisplayOrientation
        return state * 90

    @property
    def info(self) -> Dict:
        """Raw MONITORINFO dictionary for this monitor."""
        return win32api.GetMonitorInfo(self.hMonitor)

    @property
    def device(self) -> str:
        """Device name string used by the Win32 display APIs."""
        return self.info["Device"]

    @property
    def is_primary(self) -> bool:
        """Whether this monitor is the primary display.

        Masks the MONITORINFOF_PRIMARY bit (0x1, the only flag defined for
        MONITORINFO) and returns a proper bool instead of the raw flags int.
        """
        return bool(self.info["Flags"] & 1)

    @property
    def device_description(self) -> Tuple[str, str]:
        """Return the human readable device string and the device ID."""
        display_device = win32api.EnumDisplayDevices(self.device)
        return display_device.DeviceString, display_device.DeviceID

    @property
    def devicemodeW(self):
        """A fresh DEVMODE snapshot of the monitor's current settings."""
        return win32api.EnumDisplaySettings(self.device, win32con.ENUM_CURRENT_SETTINGS)
def get_displays() -> List[Display]:
    """Return a Display object for every monitor known to Windows."""
    monitors = win32api.EnumDisplayMonitors()
    return [Display(handle) for handle, _device_ctx, _rect in monitors]
def get_primary_display():
    """Return the primary Display, or None when none is flagged as primary."""
    primary = None
    for candidate in get_displays():
        if candidate.is_primary:
            primary = candidate
            break
    return primary
def get_secondary_displays() -> List[Display]:
    """Return every Display that is not the primary one."""
    return [disp for disp in get_displays() if not disp.is_primary]
# Explicit public API: ``from <module> import *`` exposes exactly these
# exception and warning classes.
__all__ = ['NotFittedError',
           'ChangedBehaviorWarning',
           'ConvergenceWarning',
           'DataConversionWarning',
           'DataDimensionalityWarning',
           'EfficiencyWarning',
           'FitFailedWarning',
           'NonBLASDotWarning',
           'UndefinedMetricWarning']
class NotFittedError(ValueError, AttributeError):
    """Exception class to raise if estimator is used before fitting.

    This class inherits from both ValueError and AttributeError to help with
    exception handling and backward compatibility.

    Examples
    --------
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import NotFittedError
    >>> try:
    ...     LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
    ... except NotFittedError as e:
    ...     print(repr(e))
    ...     # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    NotFittedError('This LinearSVC instance is not fitted yet',)
    """
class ChangedBehaviorWarning(UserWarning):
    """Warning issued to inform the user that some behavior has changed."""
class ConvergenceWarning(UserWarning):
    """Warning raised to capture convergence problems."""
class DataConversionWarning(UserWarning):
    """Warning used to notify implicit data conversions happening in the code.

    This warning occurs when some input data needs to be converted or
    interpreted in a way that may not match the user's expectations.

    For example, this warning may occur when the user

    - passes an integer array to a function which expects float input and
      will convert the input
    - requests a non-copying operation, but a copy is required to meet the
      implementation's data-type expectations;
    - passes an input whose shape can be interpreted ambiguously.
    """
class DataDimensionalityWarning(UserWarning):
    """Custom warning to notify potential issues with data dimensionality.

    For example, in random projection, this warning is raised when the
    number of components, which quantifies the dimensionality of the target
    projection space, is higher than the number of features, which quantifies
    the dimensionality of the original source space, to imply that the
    dimensionality of the problem will not be reduced.
    """
class EfficiencyWarning(UserWarning):
    """Warning raised when a computation is known to be inefficient.

    Tells the user that efficiency may not be optimal; the reason may be
    included in the warning message. More specific warning classes may
    subclass this one.
    """
class FitFailedWarning(RuntimeWarning):
    """Warning class used if there is an error while fitting the estimator.

    This Warning is used in meta estimators GridSearchCV and RandomizedSearchCV
    and the cross-validation helper function cross_val_score to warn when there
    is an error while fitting the estimator.

    Examples
    --------
    >>> from sklearn.model_selection import GridSearchCV
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.exceptions import FitFailedWarning
    >>> import warnings
    >>> warnings.simplefilter('always', FitFailedWarning)
    >>> gs = GridSearchCV(LinearSVC(), {'C': [-1, -2]}, error_score=0)
    >>> X, y = [[1, 2], [3, 4], [5, 6], [7, 8], [8, 9]], [0, 0, 0, 1, 1]
    >>> with warnings.catch_warnings(record=True) as w:
    ...     try:
    ...         gs.fit(X, y)  # This will raise a ValueError since C is < 0
    ...     except ValueError:
    ...         pass
    ...     print(repr(w[-1].message))
    ...     # doctest: +NORMALIZE_WHITESPACE
    FitFailedWarning("Classifier fit failed. The score on this train-test
    partition for these parameters will be set to 0.000000. Details:
    \\nValueError('Penalty term must be positive; got (C=-2)',)",)
    """
class NonBLASDotWarning(EfficiencyWarning):
    """Warning used when the dot operation does not use BLAS.

    This warning is used to notify the user that BLAS was not used for the
    dot operation and hence the efficiency may be affected.
    """
class UndefinedMetricWarning(UserWarning):
    """Warning raised when a metric is not defined for the given input."""
import numpy as np

from sklearn.decomposition import PCA
from sklearn.ensemble._forest import ForestClassifier
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree._tree import DTYPE
from sklearn.utils import resample, gen_batches, check_random_state
def random_feature_subsets(array, batch_size, random_state=1234):
    """Generate disjoint random subsets of the feature indices of ``array``.

    :param array: 2D array whose second axis is the feature axis.
    :param batch_size: Number of features per subset (the last subset may
        be smaller when the feature count is not a multiple).
    :param random_state: Seed, RandomState instance or None.
    :yields: Lists of feature indices; together they cover every feature once.
    """
    random_state = check_random_state(random_state)
    features = list(range(array.shape[1]))
    # BUG FIX: shuffle ``features`` in place. The previous code shuffled a
    # throwaway copy (``[x for x in features]``), so the subsets were never
    # actually randomized.
    random_state.shuffle(features)
    for batch in gen_batches(len(features), batch_size):
        yield features[batch]
class RotationTreeClassifier(DecisionTreeClassifier):
    """Decision tree trained in a randomly rotated feature space.

    A block-diagonal rotation matrix is built by fitting PCA on bootstrap
    samples of random feature subsets; the tree is then fitted and queried
    on ``X @ rotation_matrix``.
    """

    def __init__(self,
                 n_features_per_subset=3,
                 rotation_algo='pca',
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=1.0,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None,
                 presort=False):
        self.n_features_per_subset = n_features_per_subset
        self.rotation_algo = rotation_algo
        # NOTE(review): ``presort`` was removed from DecisionTreeClassifier
        # in scikit-learn 0.24; forwarding it only works on older versions.
        super(RotationTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            presort=presort)

    def rotate(self, X):
        """Project X into the rotated feature space.

        :raises NotFittedError: if called before :meth:`fit`.
        """
        if not hasattr(self, 'rotation_matrix'):
            raise NotFittedError('The estimator has not been fitted')
        return np.dot(X, self.rotation_matrix)

    def pca_algorithm(self):
        """Determine the PCA algorithm to use.

        :raises ValueError: for an unknown ``rotation_algo``.
        """
        if self.rotation_algo == 'randomized':
            return PCA(svd_solver='randomized', random_state=self.random_state)
        elif self.rotation_algo == 'pca':
            return PCA()
        else:
            raise ValueError("`rotation_algo` must be either "
                             "'pca' or 'randomized'.")

    def _fit_rotation_matrix(self, X):
        """Fit one PCA per random feature subset and assemble the
        block-diagonal rotation matrix from the principal components."""
        # (Removed an unused ``random_state = check_random_state(...)``
        # local; random_feature_subsets seeds itself from self.random_state.)
        n_samples, n_features = X.shape
        self.rotation_matrix = np.zeros((n_features, n_features),
                                        dtype=np.float32)
        for i, subset in enumerate(
                random_feature_subsets(X, self.n_features_per_subset,
                                       random_state=self.random_state)):
            # take a 75% bootstrap from the rows
            x_sample = resample(X, n_samples=int(n_samples * 0.75),
                                random_state=10 * i)
            pca = self.pca_algorithm()
            pca.fit(x_sample[:, subset])
            self.rotation_matrix[np.ix_(subset, subset)] = pca.components_

    def fit(self, X, y, sample_weight=None, check_input=True):
        """Fit the rotation matrix, then fit the tree on the rotated data.

        Returns ``self`` per the scikit-learn estimator convention (the
        original implementation returned None, which broke chaining such as
        ``clf.fit(X, y).predict(X)`` and Pipeline compatibility).
        """
        self._fit_rotation_matrix(X)
        super(RotationTreeClassifier, self).fit(self.rotate(X), y,
                                                sample_weight, check_input)
        return self

    def predict_proba(self, X, check_input=True):
        return super(RotationTreeClassifier, self).predict_proba(self.rotate(X),
                                                                 check_input)

    def predict(self, X, check_input=True):
        return super(RotationTreeClassifier, self).predict(self.rotate(X),
                                                           check_input)

    def apply(self, X, check_input=True):
        return super(RotationTreeClassifier, self).apply(self.rotate(X),
                                                         check_input)

    def decision_path(self, X, check_input=True):
        return super(RotationTreeClassifier, self).decision_path(self.rotate(X),
                                                                 check_input)
class RotationForestClassifier(ForestClassifier):
def __init__(self,
n_estimators=10,
criterion="gini",
n_features_per_subset=3,
rotation_algo='pca',
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=1.0,
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RotationForestClassifier, self).__init__(
base_estimator=RotationTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("n_features_per_subset", "rotation_algo",
"criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.n_features_per_subset = n_features_per_subset
self.rotation_algo = rotation_algo
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes | /rotation_forest-0.4-py3-none-any.whl/rotation_forest/rotation_forest.py | 0.898053 | 0.429609 | rotation_forest.py | pypi |
from typing import List, Union
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
from RotationTree import RotationTree
class RotationForest:
    """Forest with RotationTree as base estimator.

    Algorithm:
        Build ``n_estimators`` RotationTree models and aggregate their
        predictions (majority vote / averaged class probabilities).

    Args:
        n_estimators (int, optional): Number of estimators in the forest.
        k_features_subsets (int, optional): Number of feature subsets.
        random_state (int, optional): Seed of random to use. Defaults to None.

    Methods:
        fit(np.ndarray, np.ndarray): fit the forest.
        predict(np.ndarray): make predictions on new data.
        predict_proba(np.ndarray): predict class probabilities on new data.
        score(np.ndarray, np.ndarray): calculate accuracy_score.

    Example:
        >>> from sklearn.datasets import make_classification
        >>> X, y = make_classification(
        ...     n_samples=100, n_features=20, n_classes=2,
        ...     n_informative=4, n_redundant=3, n_repeated=2,
        ...     random_state=42)
        >>> rrf = RotationForest(100, 2)
        >>> rff = rrf.fit(X, y)
    """

    def __init__(
        self,
        n_estimators: int = 100,
        k_features_subsets: int = 2,
        random_state=None,
        **fit_params
    ):
        self.n_estimators = n_estimators
        self.k_features_subsets = k_features_subsets
        self.random_state = random_state
        self.fit_params = fit_params
        # Fitted RotationTree estimators; populated by fit().
        self.models = []

    def fit(self, X: np.ndarray, y: np.ndarray):
        """Fit the forest.

        Args:
            X (np.ndarray): data matrix
            y (np.ndarray): vector of true labels

        Returns:
            self
        """
        X = self.__pd_data(X)
        # NOTE(review): every tree receives the same ``random_state``; with a
        # non-None seed the trees may come out identical -- confirm intended.
        for _ in range(self.n_estimators):
            model = RotationTree(
                self.k_features_subsets,
                random_state=self.random_state,
                **self.fit_params
            )
            model.fit(X, y)
            self.models.append(model)
        return self

    def predict(self, X: np.ndarray) -> np.ndarray:
        """Majority vote over the per-tree class predictions.

        Args:
            X (np.ndarray): data matrix

        Returns:
            np.ndarray: vector of predicted class labels
        """
        predictions = []
        for model in self.models:
            predictions.append(model.predict(X))
        predictions_ = np.array(predictions)
        final_pred = []
        for i in range(len(X)):
            pred_from_all_models = np.ravel(predictions_[:, i])
            # bincount + argmax implements the majority vote (ties resolve
            # to the smallest label, as before).
            frequency = np.bincount(pred_from_all_models.astype("int"))
            final_pred.append(np.argmax(frequency))
        return np.array(final_pred)

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """Average the per-tree probability estimates.

        Args:
            X (np.ndarray): data matrix

        Returns:
            np.ndarray: matrix of probabilities, one column per class.

        Generalized to any number of classes: the mean is taken over every
        class column, whereas the original implementation only averaged
        columns 0 and 1 and returned zeros for any further classes.
        """
        stacked = np.array([model.predict_proba(X) for model in self.models])
        return stacked.mean(axis=0)

    def score(self, X: np.ndarray, y: np.ndarray):
        """Accuracy of ``predict(X)`` against ``y``.

        Args:
            X (np.ndarray): data matrix
            y (np.ndarray): y_true vector

        Returns:
            float: accuracy
        """
        pred = self.predict(X)
        return accuracy_score(y, pred)

    @staticmethod
    def __pd_data(X: Union[np.ndarray, pd.DataFrame]) -> pd.DataFrame:
        """Wrap numpy input in a DataFrame (RotationTree indexes with .iloc).

        Args:
            X (np.ndarray or pd.DataFrame): Input values.

        Returns:
            pd.DataFrame
        """
        if isinstance(X, np.ndarray):
            return pd.DataFrame(X)
        return X
if __name__ == "__main__":
    # Run the usage examples embedded in the docstrings as a smoke test.
    import doctest
    doctest.testmod()
from typing import Iterable, List
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.tree import DecisionTreeClassifier
class RotationTree:
    """Base estimator for RotationForest.

    Algorithm:
        Split the feature set into k feature subsets.
        For every subset select a bootstrap sample from X of size 75% of the
        number of objects in X, fit PCA on it and write the components into
        the block-diagonal rotation matrix.
        Then build a tree on a fresh bootstrap sample multiplied by the
        rotation matrix.

    Args:
        k_features_subsets (int, optional): Number of the feature subsets.
        random_state (int, optional): Seed of random to use. Defaults to None.

    Attrs:
        rotation_matrix (np.ndarray): rotation matrix used to rotate the data.

    Methods:
        fit(pd.DataFrame, pd.DataFrame): fitting the estimator.
        predict(pd.DataFrame): make predictions on new data.
        predict_proba(pd.DataFrame): make predictions of probability on new data.
    """

    def __init__(
        self, k_features_subsets: int = 5, random_state: int = None, **fit_params
    ):
        self.model = DecisionTreeClassifier(random_state=random_state, **fit_params)
        self.random_state = random_state
        self.k_features_subsets = k_features_subsets
        # Use a private RandomState instead of np.random.seed(): seeding the
        # global RNG leaks state into unrelated code in the same process.
        self._rng = np.random.RandomState(random_state)

    def fit(self, X: pd.DataFrame, y: pd.DataFrame) -> object:
        """Fit the estimator.

        Args:
            X (pd.DataFrame): Data
            y (pd.DataFrame): True labels

        Raises:
            ValueError: if k_features_subsets > number of features in data

        Returns:
            object: self
        """
        if self.k_features_subsets > X.shape[1]:
            raise ValueError(
                "'k_features_subsets must be less than number of features in data.'"
            )
        feature_subsets = self.get_subsets(X)
        self.rotation_matrix = np.zeros((X.shape[1], X.shape[1]), dtype=float)
        pca = PCA(random_state=self.random_state)
        for subset in feature_subsets:
            bt_sample = self.get_sample(X, subset)
            pca.fit(bt_sample)
            self.update_rotation_matrix(subset, pca.components_)
        X_train, y_train = self.get_boot_sample(X, y)
        X_transformed = X_train.dot(self.rotation_matrix)
        self.model.fit(X_transformed, y_train)
        return self

    def get_subsets(self, X: pd.DataFrame) -> list:
        """Split the shuffled feature indices into k disjoint subsets.

        Every feature is assigned to exactly one subset. (BUG FIX: the
        previous slice ``[i : k*m : k]`` truncated at ``k * (n // k)``
        features, so the ``n % k`` remainder features kept all-zero rows in
        the rotation matrix and were silently dropped from the model.)

        Args:
            X (pd.DataFrame): data

        Returns:
            list: list of k subsets of feature indices
        """
        n_features = X.shape[1]
        features_set = list(range(n_features))
        self._rng.shuffle(features_set)
        # Round-robin assignment; a step-k slice starting at i also picks up
        # the remainder features, unlike a slice stopped at k * (n // k).
        return [
            features_set[i::self.k_features_subsets]
            for i in range(self.k_features_subsets)
        ]

    def get_sample(
        self, X: pd.DataFrame, features: Iterable[int], bt_prcnt: float = 0.75
    ) -> pd.DataFrame:
        """Bootstrap sample over the given features for PCA fitting.

        Args:
            X (pd.DataFrame): data
            features (Iterable[int]): indexes of features to take
            bt_prcnt (float, optional): Fraction of rows to draw. Defaults to 0.75.

        Returns:
            pd.DataFrame: bootstrap sample
        """
        subset_obj_idx = self._rng.choice(
            list(range(X.shape[0])),
            size=int(bt_prcnt * X.shape[0]),
        )
        return X.iloc[subset_obj_idx, features]

    def update_rotation_matrix(self, subset: List[int], pca_components) -> None:
        """Write the PCA components into the rotation matrix block.

        Args:
            subset (List[int]): indexes of features to update
            pca_components: square matrix of PCA components for the subset
        """
        for i in range(0, len(pca_components)):
            for j in range(0, len(pca_components)):
                self.rotation_matrix[subset[i], subset[j]] = pca_components[i, j]

    def get_boot_sample(self, X: pd.DataFrame, y: pd.DataFrame) -> tuple:
        """Draw a bootstrap sample (with replacement) of (X, y) rows."""
        newdata = np.concatenate((X, y[:, np.newaxis]), axis=1)
        cases = self._rng.choice(
            newdata.shape[0],
            size=newdata.shape[0],
            replace=True,
        )
        samples = newdata[cases]
        return samples[:, :-1], samples[:, -1]

    def predict(self, X: pd.DataFrame):
        """Predict labels for new data.

        Args:
            X (pd.DataFrame): data

        Returns:
            np.ndarray: model output
        """
        X_transformed = X.dot(self.rotation_matrix)
        return self.model.predict(X_transformed)

    def predict_proba(self, X: pd.DataFrame):
        """Probability predictions for new data.

        Args:
            X (pd.DataFrame): data

        Returns:
            np.ndarray: matrix of probability for every class.
        """
        X_transformed = X.dot(self.rotation_matrix)
        return self.model.predict_proba(X_transformed)
from copy import deepcopy
from math import sqrt
from torch import Tensor
from torch.nn.modules import Linear
from rotational_update.layers.base import Rotatable
from rotational_update.layers.functions.static import RotationalLinearFunctionByInsertingGrad
from rotational_update.layers.functions.static import RotationalLinearFunctionByInsertingZero
class RotationalLinear(Linear, Rotatable):
    """
    Rotatable Linear class.

    Wraps an existing ``torch.nn.Linear``; on each backward pass only one
    "group" of output neurons receives gradients, and :meth:`rotate`
    advances to the next group so groups take turns being updated.
    """
    def __init__(self, linear: Linear, reduce_backward: bool = True):
        """
        Parameters
        ----------
        linear : torch.nn.modules.Linear
            Base Linear object. RotationalLinear object takes over weights and biases from base object.
        reduce_backward : bool
            When True the backward pass only computes gradients for the
            active group (RotationalLinearFunctionByInsertingGrad);
            otherwise full gradients are computed and the inactive part is
            zeroed (RotationalLinearFunctionByInsertingZero).
        """
        super().__init__(linear.in_features, linear.out_features, bias=True)
        self.reduce_backward = reduce_backward
        # Take over the weights and biases of the wrapped layer.
        # (use current weights)
        self.weight = deepcopy(linear.weight)
        self.bias = deepcopy(linear.bias)
        # [learn_l, learn_r) slice of output neurons updated this minibatch;
        # filled in on the first forward pass.
        self.learn_l = None
        self.learn_r = None
        output_features = linear.out_features
        # group size -> sqrt(output_features)
        num = int(sqrt(output_features))
        group_list = [num for _ in range(num)]
        nueron_sum = sum(group_list)
        if nueron_sum < output_features:  # make one more group with extra neurons
            group_list.append(output_features - nueron_sum)
        # cumulative sum for slicing
        group_partition = [0] + group_list
        for i in range(1, len(group_partition)):
            group_partition[i] += group_partition[i - 1]
        self.group_partition = group_partition
        self.group_i = 1

    def forward(self, input_tensor) -> Tensor:
        """
        Feed-forward method.

        Almost the same as a normal Linear layer, but saves the current
        learning-group bounds so backward can restrict the update to them.

        Parameters
        ----------
        input_tensor : torch.Tensor

        Returns
        -------
        res : Output of feed-forwarding.
        """
        self.learn_l = self.group_partition[self.group_i-1]
        self.learn_r = self.group_partition[self.group_i]
        if self.reduce_backward:
            matmul = RotationalLinearFunctionByInsertingGrad.apply
        else:
            matmul = RotationalLinearFunctionByInsertingZero.apply
        res = matmul(input_tensor, self.weight, self.bias, self.learn_l, self.learn_r)
        return res

    def rotate(self):
        """
        Change the learning group to the next one.

        Call this at every minibatch spending.
        """
        self.group_i += 1
        if self.group_i == len(self.group_partition):
            self.group_i = 1  # wrap around to the initial group
import torch
from torch.autograd import Function
from torch.nn.functional import linear
from torch import Tensor
class RotationalLinearFunction(Function):
    """Base autograd Function for rotational linear layers.

    ``forward`` performs a plain linear transform and stashes the inputs
    plus the [learn_l, learn_r) group bounds for the subclasses' backward.
    """

    @staticmethod
    def forward(ctx, *args) -> Tensor:
        x, w, b, learn_left, learn_right = args
        # Store the bounds as tensors so they survive in ctx.saved_tensors.
        learn_l = torch.as_tensor(learn_left).requires_grad_(False)
        learn_r = torch.as_tensor(learn_right).requires_grad_(False)
        ctx.save_for_backward(x, w, b, learn_l, learn_r)
        return linear(x, w, b)

    @staticmethod
    def backward(ctx, *args: Tensor) -> tuple:
        # Subclasses decide how gradients outside the active group are handled.
        raise NotImplementedError
class RotationalLinearFunctionByInsertingGrad(RotationalLinearFunction):
    """Backward builds zero-filled grads and computes only the active group.

    Weight-row / bias-entry gradients outside ``[learn_l, learn_r)`` are
    never computed; the matmul is restricted to the active slice.
    """

    @staticmethod
    def backward(ctx, *args: Tensor) -> tuple:
        grad = args[0]
        x, w, b, learn_l, learn_r = ctx.saved_tensors
        learn_l, learn_r = int(learn_l), int(learn_r)
        # Gradient for the bias: a zero vector with only the active group's
        # entries filled in.
        # BUG FIX: allocate on grad's device/dtype instead of hard-coding
        # device='cuda' and float32 -- the old code failed for tensors on a
        # non-default GPU and for float64 inputs.
        d_b = torch.zeros(grad.shape[1], dtype=grad.dtype, device=grad.device)
        d_b[learn_l:learn_r] = torch.sum(grad[:, learn_l:learn_r], dim=0)
        # Gradient for the weights: a zero matrix with only the active
        # columns computed.
        d_w = torch.zeros(x.shape[1], grad.shape[1], dtype=grad.dtype, device=grad.device)
        d_w[:, learn_l:learn_r] = torch.matmul(x.t(), grad[:, learn_l:learn_r])
        # Input gradient stays dense: grad (batch, out) @ w (out, in).
        # (The original transposed w twice, which cancelled out.)
        d_x = torch.matmul(grad, w)
        return d_x, d_w.t(), d_b, None, None
class RotationalLinearFunctionByInsertingZero(RotationalLinearFunction):
    """Backward computes full gradients, then zeroes the inactive part.

    Unlike ``RotationalLinearFunctionByInsertingGrad`` the complete matmuls
    are performed first and the entries outside ``[learn_l, learn_r)`` are
    overwritten with zeros afterwards.
    """

    # NOTE: ``forward`` is inherited from RotationalLinearFunction; the copy
    # previously defined here was byte-for-byte identical (dead duplication),
    # matching how ...ByInsertingGrad already relies on the inherited one.

    @staticmethod
    def backward(ctx, *args: Tensor) -> tuple:
        grad = args[0]
        x, w, b, learn_l, learn_r = ctx.saved_tensors
        learn_l, learn_r = int(learn_l), int(learn_r)
        # Gradient for the bias: zero the entries outside the active group.
        d_b = torch.sum(grad, dim=0)
        d_b[:learn_l] = d_b[learn_r:] = 0
        # Gradient for the weights: zero the columns outside the active group.
        d_w = torch.matmul(x.t(), grad)
        d_w[:, :learn_l] = d_w[:, learn_r:] = 0
        # Input gradient stays dense: grad (batch, out) @ w (out, in).
        # (The original transposed w twice, which cancelled out.)
        d_x = torch.matmul(grad, w)
        return d_x, d_w.t(), d_b, None, None
Rotest
------
.. image:: https://img.shields.io/pypi/v/rotest.svg
:alt: PyPI
:target: https://pypi.org/project/rotest/
.. image:: https://img.shields.io/pypi/pyversions/rotest.svg
:alt: PyPI - Python Version
:target: https://pypi.org/project/rotest/
.. image:: https://github.com/gregoil/rotest/workflows/Python%20package/badge.svg
:target: https://github.com/gregoil/rotest/actions
.. image:: https://coveralls.io/repos/github/gregoil/rotest/badge.svg?branch=master
:target: https://coveralls.io/github/gregoil/rotest
.. image:: https://img.shields.io/readthedocs/rotest/stable.svg
:alt: Read the Docs (version)
:target: http://rotest.readthedocs.io/en/stable/
`Watch the demo <https://asciinema.org/a/u3B3aMmkipUDLSgTiv1thiBpP>`_
Rotest is a resource-oriented testing framework for writing system and
integration tests.
Rotest is based on Python's `unittest` module and on the Django framework.
It enables defining simple abstracted components in the system, called
resources. The resources may be DUTs (devices under test) or they may help
the test process. The tests look very much like tests written using the
built-in module `unittest`.
Why Use Rotest?
===============
- Allowing teams to share resources without interfering with one another.
- Easily abstracting automated components in the system.
- Lots of useful features: multiprocess, filtering tests, variety of output
handlers (and the ability to create custom ones), and much more.
Examples
========
For a complete step-by-step explanation about the framework, you can read
our documentation at `Read The Docs <http://rotest.rtfd.io>`_. If you just want
to see how it looks, read further.
For our example, let's look at an example for a ``Calculator`` resource:
.. code-block:: python
import os
import rpyc
from django.db import models
from rotest.management.models import resource_data
from rotest.management import base_resource
class CalculatorData(resource_data.ResourceData):
class Meta:
app_label = "resources"
ip_address = models.IPAddressField()
class Calculator(base_resource.BaseResource):
DATA_CLASS = CalculatorData
PORT = 1357
EXECUTABLE_PATH = os.path.join(os.path.expanduser("~"),
"calc.py")
def connect(self):
self._rpyc = rpyc.classic.connect(self.data.ip_address,
self.PORT)
def calculate(self, expression):
result = self._rpyc.modules.subprocess.check_output(
["python", self.EXECUTABLE_PATH, expression])
return int(result.strip())
def finalize(self):
if self._rpyc is not None:
self._rpyc.close()
self._rpyc = None
The ``CalculatorData`` class is a standard Django model that exposes the IP
address of the calculator machine through its ``data`` attribute.
Also, we're using `rpyc` for automating the access to those machines. Except
from that, it's easy to notice how the `connect` method is making the
connection to the machine, and how the `finalize` method is cleaning
afterwards.
Now, an example for a test:
.. code-block:: python
from rotest import main
from rotest.core import TestCase
class SimpleCalculationTest(TestCase):
calculator = Calculator()
def test_simple_calculation(self):
self.assertEqual(self.calculator.calculate("1+2"), 3)
if __name__ == "__main__":
main()
The test may include the ``setUp`` and ``tearDown`` methods of `unittest` as
well, and it differs only in the request for resources.
Following, those are the options exposed when running the test:
.. code-block:: console
$ rotest -h
Run tests in a module or directory.
Usage:
rotest [<path>...] [options]
Options:
-h, --help
Show help message and exit.
--version
Print version information and exit.
-c <path>, --config <path>
Test configuration file path.
-s, --save-state
Enable saving state of resources.
-d <delta-iterations>, --delta <delta-iterations>
Enable run of failed tests only - enter the number of times the
failed tests should be run.
-p <processes>, --processes <processes>
Use multiprocess test runner - specify number of worker
processes to be created.
-o <outputs>, --outputs <outputs>
Output handlers separated by comma.
-f <query>, --filter <query>
Run only tests that match the filter expression,
e.g. 'Tag1* and not Tag13'.
-n <name>, --name <name>
Assign a name for current launch.
-l, --list
Print the tests hierarchy and quit.
-F, --failfast
Stop the run on first failure.
-D, --debug
Enter ipdb debug mode upon any test exception.
-S, --skip-init
Skip initialization and validation of resources.
-r <query>, --resources <query>
Specify resources to request by attributes,
e.g. '-r res1.group=QA,res2.comment=CI'.
| /rotest-8.3.1.tar.gz/rotest-8.3.1/README.rst | 0.862901 | 0.683525 | README.rst | pypi |
import os
import pandas as pd
from ncs.data.downloader import download_and_extract
def data_folder():
    """
    Return the path to the local data folder, creating it if necessary.

    The folder lives in the user's home directory on Linux/OS X
    (i.e. /home/username/rotman_ncs_data/ncs_data) and under APPDATA on
    Windows (i.e. C:\\Users\\username\\AppData\\Roaming\\rotman_ncs_data\\ncs_data).

    Returns:
        str: The path to the data folder.
    """
    # Windows stores per-user data under %APPDATA%; elsewhere use the home dir.
    base_dir = os.getenv('APPDATA') if os.name == 'nt' else os.path.expanduser('~')
    dest_dir = os.path.join(base_dir, 'rotman_ncs_data', 'ncs_data')
    os.makedirs(dest_dir, exist_ok=True)
    return dest_dir
def check_data_exist_and_download(data_file):
    """
    Download and extract the dataset archive unless *data_file* already exists.

    Parameters:
        data_file (str): The path to the data file.

    Returns:
        None
    """
    # Early return when the file is already on disk; otherwise fetch the
    # whole archive (the zip contains every data file, not just this one).
    if os.path.exists(data_file):
        return
    download_and_extract(
        'https://storage.googleapis.com/rotman-ncs-data-buket/ncs_data.zip',
        'rotman_ncs_data')
def load_stock_returns_on_calls(data_type='train'):
    """
    Load the stock-returns-on-calls table.

    Args:
        data_type (str): Which split to load ('train' or 'test'). Defaults to 'train'.

    Returns:
        pandas.DataFrame: Stock returns around earnings calls.
    """
    path = '/'.join([data_folder(), data_type, 'stock_return_data.parquet'])
    check_data_exist_and_download(path)
    return pd.read_parquet(path)
def load_stock_history():
    """
    Load the full stock price history table.

    Returns:
        pandas.DataFrame: The stock price history data.
    """
    path = '/'.join([data_folder(), 'all_stock_price_history.parquet'])
    check_data_exist_and_download(path)
    return pd.read_parquet(path)
def load_call_description(data_type='train'):
    """
    Load the earnings-call description table for one data split.

    Args:
        data_type (str, optional): Which split to load ('train' or 'test').
            Defaults to 'train'.

    Returns:
        pandas.DataFrame: The call description data.
    """
    path = '/'.join([data_folder(), data_type, 'call_data.parquet'])
    check_data_exist_and_download(path)
    return pd.read_parquet(path)
def load_call_statements(data_type='train'):
    """
    Load the earnings-call statements table for one data split.

    Args:
        data_type (str): Which split to load ('train' or 'test'). Defaults to 'train'.

    Returns:
        pandas.DataFrame: The call statements data.
    """
    path = '/'.join([data_folder(), data_type, 'call_statement_data.parquet'])
    check_data_exist_and_download(path)
    return pd.read_parquet(path)
from ..data import load_stock_returns_on_calls, load_call_statements
from .config import default_role_weights, default_section_weights, default_statement_type_weights, default_holding_period
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
import pickle
import warnings
warnings.filterwarnings('ignore')  # NOTE(review): silences ALL warnings process-wide at import time — consider scoping
# Module-level training data, loaded once at import (triggers a download on first use).
stock_return_data = load_stock_returns_on_calls(
    'train')[['call_uid', 'excess_return', 'holding_period']]
call_statement_data = load_call_statements('train')
def train(feature_files=None,
          role_weights=default_role_weights,
          section_weights=default_section_weights,
          statement_type_weights=default_statement_type_weights,
          sell_quantile=0.35,
          buy_quantile=0.65,
          holding_period=default_holding_period,
          classifier='logistic_regression',
          save_model='model.pkl'):
    """
    Train a model using the provided feature files and parameters.

    Parameters:
    - feature_files: A list of paths to parquet feature files indexed by
      statement_uid. Defaults to None (no extra features).
    - role_weights: A dictionary of weights for different presenter roles.
    - section_weights: A dictionary of weights for different sections.
    - statement_type_weights: A dictionary of weights for different statement types.
    - sell_quantile: The quantile of excess return below which the label is sell (-1). Defaults to 0.35.
    - buy_quantile: The quantile of excess return above which the label is buy (1). Defaults to 0.65.
    - holding_period: The holding period used to filter the stock return data.
    - classifier: The type of classifier to use for training ('logistic_regression', 'random_forest', or 'neural_network'). Defaults to 'logistic_regression'.
    - save_model: The path to save (pickle) the trained model to.

    Returns:
    None

    Raises:
    Exception: if `classifier` is not one of the supported names.
    """
    # BUG FIX: avoid a mutable default argument ([] is shared across calls).
    if feature_files is None:
        feature_files = []
    # load features: statement-level metadata joined with each feature file
    feature_df = call_statement_data[[
        'statement_uid', 'call_uid', 'presentor_role', 'section', 'statement_type']].set_index('statement_uid')
    feature_cols = []
    for feature_file in feature_files:
        feature_data = pd.read_parquet(feature_file)
        # merge on index (inner join on statement_uid)
        feature_df = pd.merge(feature_df, feature_data,
                              left_index=True, right_index=True)
        feature_cols += feature_data.columns.tolist()
    # aggregate features to call_uid level by the weights provided
    agg_feature_df = pd.DataFrame(
        columns=feature_cols, index=feature_df.call_uid.unique())
    for call_uid, call_feature in feature_df.groupby('call_uid'):
        # each statement's features are scaled by role * section * type weight,
        # then summed to a single per-call feature vector
        agg_feature = call_feature[feature_cols].multiply(
            call_feature.presentor_role.map(role_weights), axis=0).multiply(
            call_feature.section.map(section_weights), axis=0).multiply(
            call_feature.statement_type.map(statement_type_weights), axis=0).sum()
        agg_feature_df.loc[call_uid] = agg_feature
    X = agg_feature_df.sort_index()
    # quantize excess return to -1, 0, 1 based on the quantile thresholds
    hp_stock_return_data = stock_return_data[stock_return_data.holding_period == holding_period]
    sell_excess_return = hp_stock_return_data.excess_return\
        .quantile(sell_quantile)
    buy_excess_return = hp_stock_return_data.excess_return\
        .quantile(buy_quantile)
    print('sell excess return threshold: ', sell_excess_return)
    print('buy excess return threshold: ', buy_excess_return)
    actions = hp_stock_return_data.excess_return.map(
        lambda x: 1 if x > buy_excess_return else -1 if x < sell_excess_return else 0)
    y = pd.DataFrame(
        actions.values, index=hp_stock_return_data.call_uid).sort_index()
    # consolidate to the intersection of X.call_uid and y.call_uid
    intersection_call_uid = X.index.intersection(y.index)
    X = X.loc[intersection_call_uid]
    y = y.loc[intersection_call_uid]
    print('Number of calls: ', len(y))
    # Split the data into training and testing sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)
    # sklearn expects a 1-D label vector (a column DataFrame triggers a
    # DataConversionWarning and an implicit ravel)
    y_train = y_train.values.ravel()
    # Define the parameter grid for hyperparameter tuning
    if classifier == 'logistic_regression':
        param_grid = {
            'C': [0.1, 1.0, 10.0],
            'penalty': ['l1', 'l2'],
        }
        # BUG FIX: the default 'lbfgs' solver rejects the 'l1' penalty, so
        # half of the grid used to fail; 'liblinear' supports both penalties.
        classifier_obj = LogisticRegression(solver='liblinear')
    elif classifier == 'random_forest':
        param_grid = {
            'n_estimators': [100, 200, 500],
            'max_depth': [None, 5, 10],
        }
        # Create Random Forest classifier
        classifier_obj = RandomForestClassifier()
    elif classifier == 'neural_network':
        param_grid = {
            'hidden_layer_sizes': [(50, 25,), (100, 50,), (200, 100,)],
            'activation': ['logistic', 'tanh', 'relu'],
        }
        # Create Neural Network classifier
        classifier_obj = MLPClassifier()
    else:
        raise Exception('Invalid classifier')
    # Perform hyperparameter tuning using GridSearchCV with cross-validation
    print('Hyper-parameter Tuning')
    grid_search = GridSearchCV(
        estimator=classifier_obj, param_grid=param_grid, cv=5)
    grid_search.fit(X_train, y_train)
    # Re-fit a fresh estimator with the best hyperparameters
    if classifier == 'logistic_regression':
        best_C = grid_search.best_params_['C']
        best_penalty = grid_search.best_params_['penalty']
        print(f'Best C: {best_C}, penalty type: {best_penalty}')
        print('Train Logistic Regression with best hyperparameters')
        classifier_best = LogisticRegression(
            C=best_C, penalty=best_penalty, solver='liblinear')
    elif classifier == 'random_forest':
        best_n_estimators = grid_search.best_params_['n_estimators']
        best_max_depth = grid_search.best_params_['max_depth']
        print(
            f'Best n_estimators: {best_n_estimators}, max_depth: {best_max_depth}')
        print('Train Random Forest with best hyperparameters')
        classifier_best = RandomForestClassifier(
            n_estimators=best_n_estimators, max_depth=best_max_depth)
    elif classifier == 'neural_network':
        best_hidden_layer_sizes = grid_search.best_params_[
            'hidden_layer_sizes']
        best_activation = grid_search.best_params_['activation']
        print(
            f'Best hidden_layer_sizes: {best_hidden_layer_sizes}, activation: {best_activation}')
        print('Train Neural Network with best hyperparameters')
        classifier_best = MLPClassifier(
            hidden_layer_sizes=best_hidden_layer_sizes, activation=best_activation)
    else:
        raise Exception('Invalid classifier')
    classifier_best.fit(X_train, y_train)
    # Make predictions on the test set
    y_pred = classifier_best.predict(X_test)
    # Print classification report
    print('Classification Report')
    print(classification_report(y_test, y_pred))
    # Save the model
    with open(save_model, 'wb') as f:
        pickle.dump(classifier_best, f)
from .config import default_role_weights, default_section_weights, default_statement_type_weights
from ..data import load_call_statements
import pandas as pd
import os
import pickle
import warnings
warnings.filterwarnings('ignore')  # NOTE(review): silences ALL warnings process-wide at import time — consider scoping
# Directory of this module; not used below — presumably kept for external callers (TODO confirm).
cur_dir = os.path.dirname(os.path.realpath(__file__))
# Test-set statements, loaded once at import (triggers a download on first use).
call_statement_data = load_call_statements('test')
def inference(feature_files=None,
              role_weights=default_role_weights,
              section_weights=default_section_weights,
              statement_type_weights=default_statement_type_weights,
              model_file='model.pkl',
              action_file='actions.csv'):
    """load features and model to generate actions of test set

    Args:
        feature_files (list, optional): a list of parquet feature files indexed
            by statement_uid. Defaults to None (no extra features).
        role_weights (dict): per-presenter-role weights applied before aggregation.
        section_weights (dict): per-section weights applied before aggregation.
        statement_type_weights (dict): per-statement-type weights applied before aggregation.
        model_file (str, optional): file path of trained model. Defaults to 'model.pkl'.
        action_file (str, optional): file path of actions to save. Defaults to 'actions.csv'.

    Returns:
        None; saves actions to `action_file`.

    Content of action_file:
        call_uid,action
        f28cf056-f2df-4d94-ad36-4219506cd8b5,1
        f304ac10-f92c-404a-b3cc-f02ab8baa2c8,1

        call_uid: unique identifier of the call in test set
        action: 1 for buy, -1 for sell, 0 for hold
    """
    # BUG FIX: avoid a mutable default argument ([] is shared across calls).
    if feature_files is None:
        feature_files = []
    # load features: statement-level metadata joined with each feature file
    feature_df = call_statement_data[[
        'statement_uid', 'call_uid', 'presentor_role', 'section', 'statement_type']].set_index('statement_uid')
    feature_cols = []
    for feature_file in feature_files:
        feature_data = pd.read_parquet(feature_file)
        # merge on index (inner join on statement_uid)
        feature_df = pd.merge(feature_df, feature_data,
                              left_index=True, right_index=True)
        feature_cols += feature_data.columns.tolist()
    # aggregate features to call_uid level by the weights provided
    # (mirrors the aggregation used at training time)
    agg_feature_df = pd.DataFrame(
        columns=feature_cols, index=feature_df.call_uid.unique())
    for call_uid, call_feature in feature_df.groupby('call_uid'):
        agg_feature = call_feature[feature_cols].multiply(
            call_feature.presentor_role.map(role_weights), axis=0).multiply(
            call_feature.section.map(section_weights), axis=0).multiply(
            call_feature.statement_type.map(statement_type_weights), axis=0).sum()
        agg_feature_df.loc[call_uid] = agg_feature
    # load the pickled model trained by train()
    with open(model_file, 'rb') as f:
        model = pickle.load(f)
    # generate actions and persist them as CSV
    actions = model.predict(agg_feature_df)
    actions = pd.DataFrame(
        {'call_uid': agg_feature_df.index, 'action': actions})
    actions.to_csv(action_file, index=False)
import hashlib
from typing import *
from pathlib import Path
from enum import Enum
from rich.progress import track
from typer import Argument, Option
from .app import app
class InvalidHashAlgoException(Exception):
    """Raised when the requested hash algorithm is not supported."""

    def __init__(self, hash_name: str):
        message = f"{hash_name} is not supported"
        super().__init__(message)
class PathNotFoundExecption(Exception):
    """Raised when the given filesystem path does not exist.

    The (misspelled) class name is kept as-is: it is part of the public API.
    """

    def __init__(self, path: Path) -> None:
        message = f"{path.as_posix()} not found"
        super().__init__(message)
class HashAlgo(str, Enum):
    """Supported hash algorithm names; call a member to get a fresh hash object."""
    md5: str = "md5"
    sha1: str = "sha1"
    sha224: str = "sha224"
    sha256: str = "sha256"
    sha384: str = "sha384"

    def __call__(self):
        """Return a new hashlib hash object for this algorithm.

        Raises:
            InvalidHashAlgoException: if hashlib does not provide the algorithm.
        """
        # hashlib.new dispatches by name, replacing the previous hand-written
        # if-chain over every member (same objects for all defined members).
        try:
            return hashlib.new(self.value)
        except ValueError as exc:
            raise InvalidHashAlgoException(self.value) from exc
@app.command(callback=print)
def hash_path(
    path: Path = Argument(..., help="File to hash"),
    pattern: str = Option("*", help="File patern incase path is directory"),
    algo: HashAlgo = Option(HashAlgo.md5.value, help="Hash generator"),
    num_block: int = Option(16, help="Number of block for each step"),
    length: int = Option(-1, help="Output length"),
    verbose: bool = Option(True, help="show more info"),
):
    """Hash a file, or recursively hash all files matching *pattern* in a directory.

    For a directory, each file's (relative path, digest) pair is combined and
    re-hashed via ``hash_str`` so the result is order-independent but content-
    and layout-sensitive.

    Raises:
        PathNotFoundExecption: if *path* does not exist.
    """
    # NOTE(review): help-text typos ("patern", "incase") are user-facing strings
    # and are left unchanged here.
    if not path.exists():
        raise PathNotFoundExecption(path)
    path = path.resolve()
    files = []
    if path.is_file():
        files.append(path)
    if path.is_dir():
        # collect every matching file; directories themselves are skipped
        for child in path.rglob(pattern):
            if child.is_dir():
                continue
            files.append(child.resolve())
    def get_hash(file: Path) -> str:
        # Stream the file in chunks of num_block * block_size bytes
        # to keep memory bounded for large files.
        hash = algo()
        with open(file, "rb") as fin:
            while True:
                buf = fin.read(num_block * hash.block_size)
                if not buf:
                    break
                hash.update(buf)
        hash = hash.hexdigest()
        return hash
    def shorten_hash(hash: str) -> str:
        # Positive length: shorten via SHAKE-256 to length // 2 bytes,
        # i.e. ~length hex chars (odd lengths round down — TODO confirm intent).
        if length > 0:
            hash = hash.encode()
            hash = hashlib.shake_256(hash).hexdigest(length=length // 2)
        return hash
    hashes = []
    if verbose:
        # rich progress bar while hashing
        for file in track(files, description="Hashing..."):
            hashes.append((file.relative_to(path).as_posix(), get_hash(file)))
    else:
        for file in files:
            try:
                hashes.append((file.relative_to(path).as_posix(), get_hash(file)))
            except Exception as e:
                print(e, file)
                raise e
    if path.is_file():
        _, hash = hashes[0]
        return shorten_hash(hash)
    if len(hashes) == 0:
        return ""
    # sort so the combined digest does not depend on traversal order
    hashes.sort()
    # combine per-file (path, digest) pairs into a single directory digest
    return hash_str(
        "-".join(f"{file}-{hash}" for file, hash in hashes),
        algo,
        length,
    )
@app.command(callback=print)
def hash_str(
    string: str = Argument(..., help="String to hash"),
    algo: HashAlgo = Option(HashAlgo.md5.value, help="Hash generator"),
    length: int = Option(-1, help="Output length"),
):
    """Hash *string* with *algo*, optionally shortening the digest to ~*length* hex chars."""
    digest = algo()
    digest.update(string.encode())
    result = digest.hexdigest()
    # A positive length requests a shortened digest via SHAKE-256
    # (length // 2 bytes -> ~length hex characters).
    if length > 0:
        result = hashlib.shake_256(result.encode()).hexdigest(length=length // 2)
    return result
import sys
import types
import typing
# [ Imports:Third Party ]
import din
# [ Exports ]
__all__ = (
'Coro',
'RecordedCoro',
'Yielded',
'Returned',
'Raised',
'OutputType',
'ThrowableType',
'WrappableObjType',
'WrappableFuncType',
)
def __dir__() -> typing.Tuple[str, ...]:  # pragma: no cover
    """Limit `dir()` of this module to the public API in `__all__` (PEP 562 hook)."""
    return __all__
# [ Internal ]
# Type variables describing a coroutine's yield/send/return channel.
_GenericTypeVar = typing.TypeVar('_GenericTypeVar')
_YieldTypeVar = typing.TypeVar('_YieldTypeVar')
_SendTypeVar = typing.TypeVar('_SendTypeVar')
_ReturnTypeVar = typing.TypeVar('_ReturnTypeVar')
# [ API ]
# Anything wrappable by Coro: a generator object or a coroutine object.
WrappableObjType = typing.Union[
    typing.Generator[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar],
    typing.Coroutine[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar],
]
# A callable producing a wrappable object (e.g. a generator function).
WrappableFuncType = typing.Callable[..., WrappableObjType[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]]
# What may be thrown into a coroutine: an exception instance, or a
# sys.exc_info()-style (type, value, traceback) triple.
ThrowableType = typing.Union[
    BaseException,
    typing.Tuple[
        typing.Optional[typing.Type[BaseException]],
        typing.Optional[BaseException],
        typing.Optional[types.TracebackType],
    ],
]
class Yielded(din.ReprMixin, typing.Generic[_GenericTypeVar]): # pylint: disable=unsubscriptable-object
    """A value yielded by a WrappableObjType.

    Wraps a `yield` payload so callers can distinguish it from
    Returned/Raised outcomes by type.
    """
    def __init__(self, value: _GenericTypeVar) -> None:
        super().__init__()
        # the yielded payload
        self.value = value
class Returned(din.ReprMixin, typing.Generic[_GenericTypeVar]): # pylint: disable=unsubscriptable-object
    """A value returned by a WrappableObjType.

    Wraps a `return` payload so callers can distinguish it from
    Yielded/Raised outcomes by type.
    """
    def __init__(self, value: _GenericTypeVar) -> None:
        super().__init__()
        # the returned payload
        self.value = value
class Raised(din.ReprMixin):
    """An error raised by a WrappableObjType.

    Stores the full sys.exc_info()-style triple so the traceback is
    preserved for the caller.
    """
    def __init__(
        self,
        exc_type: typing.Optional[typing.Type[BaseException]],
        exc_value: typing.Optional[BaseException],
        exc_traceback: typing.Optional[types.TracebackType],
    ):
        """Store a sys.exc_info()-style (type, value, traceback) triple."""
        super().__init__()
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.exc_traceback = exc_traceback
    @property
    def value(self):
        """Return a sys.exc_info-style tuple."""
        return (self.exc_type, self.exc_value, self.exc_traceback)
OutputType = typing.Union[
Yielded[_YieldTypeVar],
Returned[_ReturnTypeVar],
Raised,
]
# pylint is wrong about 'Generic' being unsubscriptable:
# https://docs.python.org/3/library/typing.html#user-defined-generic-types
# pylint: disable=unsubscriptable-object
# XXX switch to just functions that return a coro state?
class Coro(din.ReprMixin, typing.Generic[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]):
    """
    Provides an init method on coroutines.
    `coro = Coro(func)` creates an object with an interface consistent with coroutines
    (send, throw) and adds an init. The init is equivalent to calling the function
    to get the coroutine, then sending the initial None to that coroutine.
    `yielded = Coro(func).init(*args, **kwargs)`:
    * `coro = func(*args, **kwargs)`
    * `yielded = coro.send(None)`

    All interaction methods return an OutputType (Yielded/Returned/Raised)
    instead of letting exceptions propagate.
    """
    def __init__(self, func: WrappableFuncType[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]) -> None:
        super().__init__()
        # the coroutine/generator function to wrap; the object itself is
        # created lazily in init()
        self.func = func
        self.coro: WrappableObjType[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]
        # last init() call's arguments, kept for __str__/debugging
        self.args: typing.Tuple[typing.Any, ...] = ()
        self.kwargs: typing.Dict[str, typing.Any] = {}
    def init(self, *args: typing.Any, **kwargs: typing.Any) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Initialize the coroutine and return the first yielded value."""
        self.args = args
        self.kwargs = kwargs
        self.coro = self.func(*args, **kwargs)
        # casting the send type, because you *must* send None in first, but otherwise it's invalid,
        # and unnecessarily complicates the type signatures, and I don't want to do that.
        return self.send(typing.cast(_SendTypeVar, None))
    def send(self, value: _SendTypeVar) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Send the value into the coroutine and return the value it yields."""
        try:
            return Yielded(self.coro.send(value))
        except StopIteration as return_error:
            # a finished coroutine raises StopIteration carrying its return value
            return Returned(return_error.value)
        # intentionally capturing all
        except Exception: # pylint: disable=broad-except
            # capture the full exc_info triple so the traceback is preserved
            return Raised(*sys.exc_info())
    def throw(self, throwable: ThrowableType) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Throw the exception into the coroutine and return the value it yields."""
        try:
            # accept either a sys.exc_info()-style triple or a bare exception
            if isinstance(throwable, tuple):
                return Yielded(self.coro.throw(*throwable)) # type: ignore
            # mypy insists the first arg must be typing.Type[BaseException], which isn't true
            return Yielded(self.coro.throw(throwable)) # type: ignore
        except StopIteration as return_error:
            return Returned(return_error.value)
        # intentionally capturing all
        except Exception: # pylint: disable=broad-except
            return Raised(*sys.exc_info())
    def close(self):
        """Close the coroutine and return the value it returns."""
        try:
            # generator/coroutine .close() returns None, so a clean close
            # yields Returned(None)
            return Returned(self.coro.close()) # type: ignore
        # intentionally capturing all
        except Exception: # pylint: disable=broad-except
            return Raised(*sys.exc_info())
    def __str__(self) -> str: # pragma: no cover
        lines = [
            f"[rototiller.Coro]",
            f"  func: {self.func.__module__}.{self.func.__qualname__}",
            f"  args:",
            *("\n".join(f"    {l}" for l in f"{a}".splitlines()) for a in self.args),
            f"  kwargs:",
            *("\n".join(f"    {l}" for l in f"{k}: {v}".splitlines()) for k, v in self.kwargs.items()),
        ]
        return "\n".join(lines)
# pylint: enable=unsubscriptable-object
class RecordedCoro(Coro[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]):
    """
    A Coro with recorded actions (init/send/throw/close) and responses (yield/return/raise).

    Every interaction is appended to an internal history which `__str__`
    renders in order.
    """
    def __init__(self, func: WrappableFuncType[_YieldTypeVar, _SendTypeVar, _ReturnTypeVar]) -> None:
        super().__init__(func)
        # chronological record of every interaction with the coroutine
        self._history: typing.List[typing.Dict[str, typing.Any]] = []
    def init(self, *args: typing.Any, **kwargs: typing.Any) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Initialize the coroutine and return the first yielded value."""
        output = super().init(*args, **kwargs)
        self._history.append({'args': args, 'kwargs': kwargs, 'output': output})
        return output
    def send(self, value: _SendTypeVar) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Send the value into the coroutine and return the value it yields."""
        output = super().send(value)
        self._history.append({'sent': value, 'output': output})
        return output
    def throw(self, throwable: ThrowableType) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Throw the exception into the coroutine and return the value it yields."""
        output = super().throw(throwable)
        self._history.append({'thrown': throwable, 'output': output})
        return output
    def close(self) -> OutputType[_YieldTypeVar, _ReturnTypeVar]:
        """Close the coroutine and return the value it returns."""
        output = super().close()
        self._history.append({'closed': None, 'output': output})
        return output
    def _get_state_str(self) -> typing.Iterable[str]: # pragma: no cover
        """Yield a human-readable rendering of each history entry, in order."""
        for item in tuple(self._history):
            if tuple(sorted(item.keys())) == tuple(sorted(('args', 'kwargs', 'output'))):
                yield "\n".join([
                    f"Initialized with:",
                    f"  args:",
                    *("\n".join(f"    {l}" for l in f"{a}".splitlines()) for a in item['args']),
                    f"  kwargs:",
                    *("\n".join(f"    {l}" for l in f"{k}: {v}".splitlines()) for k, v in item['kwargs'].items()),
                    f"  output: {item['output']}",
                ])
            elif tuple(sorted(item.keys())) == tuple(sorted(('sent', 'output'))):
                yield "\n".join([
                    f"Sent:",
                    f"  value: {item['sent']}",
                    f"  output: {item['output']}",
                ])
            elif tuple(sorted(item.keys())) == tuple(sorted(('thrown', 'output'))):
                yield "\n".join([
                    f"Thrown:",
                    f"  error: {item['thrown']!r}",
                    f"  output: {item['output']}",
                ])
            # BUG FIX: close() records a {'closed': None, 'output': ...} entry,
            # but there was no branch for it, so stringifying a closed
            # RecordedCoro raised RuntimeError.
            elif tuple(sorted(item.keys())) == tuple(sorted(('closed', 'output'))):
                yield "\n".join([
                    "Closed:",
                    f"  output: {item['output']}",
                ])
            else:
                raise RuntimeError(f"Cannot stringify: unrecognized item in history ({item})")
    def __str__(self) -> str: # pragma: no cover
        lines = [
            f"[rototiller.Restorable]",
            f"  func: {self.func.__module__}.{self.func.__qualname__}",
            f"  history:",
            *("\n".join(f"    {l}" for l in f"{s}".splitlines()) for s in self._get_state_str()),
        ]
        return "\n".join(lines)
# [ Vulture ]
# pylint: disable=pointless-statement
# These are all API's, and called in the tests, at least
RecordedCoro
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
def plot_rots(rots_res, fdr=0.05, type=None):
    """Plot ROTS results in one of several styles.

    Args:
        rots_res: ROTS result dict; keys used here: 'data' (DataFrame),
            'd', 'logfc', 'p', 'FDR', 'ztable', 'a1', 'cl'.
        fdr: FDR cutoff marking features as differentially expressed.
        type: one of 'volcano', 'heatmap', 'ma', 'reproducibility',
            'pvalue', 'pca'. NOTE: the parameter shadows the builtin
            `type`; name kept for API compatibility.

    Raises:
        ValueError: if `type` is missing or not one of the options above.
    """
    # Check for plot type
    if type is not None:
        if type not in ["volcano", "heatmap", "ma", "reproducibility", "pvalue", "pca"]:
            raise ValueError("Plot type not available. The options are: 'volcano', 'heatmap', 'ma', 'reproducibility', 'pvalue', 'pca'")
    else:
        raise ValueError("Plot type not selected. The options are: 'volcano', 'heatmap', 'ma', 'reproducibility', 'pvalue', 'pca'")
    # Differentially expressed features (row positions with FDR below cutoff)
    de = np.where(rots_res["FDR"] < fdr)[0]
    # Volcano plot: log fold change vs -log10(p); DE features overlaid in red
    if type == "volcano":
        plt.scatter(rots_res["logfc"], -np.log10(rots_res["p"]), color="black", s=1)
        plt.scatter(rots_res["logfc"][de], -np.log10(rots_res["p"][de]), color="red", s=1)
        plt.xlabel("log2(fold change)")
        plt.ylabel("-log10(p-value)")
        plt.show()
    # Heatmap of the DE rows only
    if type == "heatmap":
        sns.heatmap(rots_res["data"].iloc[de, :], cmap="RdBu_r", center=0)
        plt.show()
    # MA plot
    if type == "ma":
        # NOTE(review): the background uses mean*0.5 on the x-axis while the
        # DE overlay below uses the plain row mean — the two are inconsistent
        # and one is probably wrong; confirm against the R ROTS MA plot.
        plt.scatter(rots_res["data"].mean(axis=1)*0.5, rots_res["logfc"], color="black", s=1)
        plt.scatter(rots_res["data"].mean(axis=1).iloc[de], rots_res["logfc"][de], color="red", s=1)
        plt.xlabel("Mean")
        plt.ylabel("log2(fold change)")
        plt.show()
    # Reproducibility plot: Z-scores over top-list sizes for the chosen a1
    if type == "reproducibility":
        # row of the Z-table matching the optimized a1 (index is a 2-decimal string)
        z = rots_res["ztable"][rots_res["ztable"].index == f'{rots_res["a1"]:.2f}']
        k = rots_res["ztable"].columns.astype(int)
        plt.scatter(k, z, color="black", s=1)
        # highlight the top-list size with the maximal Z-score
        plt.scatter(k[np.where(z == np.max(z.values))[1]], z.iloc[0, np.where(z==np.max(z.values))[1][0]], color="red", s=1)
        plt.xlabel("Top list size")
        plt.ylabel("Reproducibility Z-score")
        plt.show()
    # Histogram of p-values
    if type == "pvalue":
        plt.hist(rots_res["p"], bins=50)
        plt.xlabel("p-value")
        plt.ylabel("Frequency")
        plt.show()
    # PCA plot of DE rows (samples projected onto the first two components)
    if type == "pca":
        if len(de) > 0:
            pca = PCA(n_components=2)
            # NaNs are replaced by 0 before PCA — presumably intentional; verify
            dt = rots_res["data"].iloc[de, :].fillna(0)
            X = pca.fit_transform(dt.T)
            plt.scatter(X[:,0], X[:,1], c=rots_res['cl'], s=1)
            plt.xlabel("Principal component 1")
            plt.ylabel("Principal component 2")
            plt.show()
import numpy as np
from numba import njit, jit
import pandas as pd
from tqdm import tqdm
import warnings
from optim_cy import optim
@jit(nopython=True, error_model='numpy')
def bootstrapSamples(B, labels, paired):
    # Draw B bootstrap resamplings of column indices, stratified by group label.
    # Returns a (B, len(labels)) float matrix of column indices.
    samples = np.zeros((B, len(labels)))
    for i in range(B):
        for label in np.unique(labels):
            pos = np.where(labels == label)[0]
            #samples[i, pos] = np.random.choice(pos, size=len(pos), replace=True)
            # numba workaround: fancy-index assignment into a 2-D row is done
            # through a 1-D row temporary instead of the commented line above
            _samp = samples[i]
            _samp[pos] = np.random.choice(pos, size=len(pos), replace=True)
            samples[i] = _samp
    if paired:
        # paired design: the non-first groups reuse the first group's
        # resampled indices, shifted into their own column range
        for i in range(B):
            for label in np.unique(labels)[1:]:
                pos = np.where(labels == label)[0]
                #samples[i, pos] = samples[i, np.where(labels == 1)[0]] + pos[0]-1
                # NOTE(review): assumes the first group's label is 1 and groups
                # occupy contiguous column blocks; the "-1" offset looks like a
                # 1-based (R) indexing leftover — TODO confirm pairing is correct
                _samp = samples[i]
                _samp[pos] = _samp[np.where(labels == 1)[0]] + pos[0]-1
                samples[i] = _samp
    return samples
@jit(nopython=True, error_model='numpy')
def permutatedSamples(B, cl):
    """Draw B random permutations of the sample indices 0..len(cl)-1.

    Returns a (B, len(cl)) float matrix; each row is one permutation.
    """
    n_cols = len(cl)
    samples = np.zeros((B, n_cols))
    for row in range(B):
        samples[row, :] = np.random.permutation(n_cols)
    return samples
def testStatistic(paired, samples):
    """Compute the per-row (d, s) test statistics for a list of group matrices.

    Args:
        paired: whether the two-group design is paired.
        samples: list of 2-D arrays (rows = features, cols = replicates),
            one per group.

    Returns:
        dict with 'd' (per-row effect) and 's' (per-row pooled SD), both 1-D.
    """
    np.seterr(divide='ignore', invalid='ignore')
    # Two groups
    if len(samples)==2:
        X = samples[0]
        Y = samples[1]
        ## Calculates the test statistic for each row.
        ## X and Y are the data matrices of the two groups.
        ## Each row of these two matrices must contain at least TWO not NA values.
        ## Thus the "variance" always exists.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=RuntimeWarning)
            ## Row means
            mX = np.nanmean(X, axis=1, keepdims=True) #rowMeans(X, na.rm=TRUE)
            mY = np.nanmean(Y, axis=1, keepdims=True) #rowMeans(Y, na.rm=TRUE)
            #mX[np.isnan(mX)] = 0
            #mY[np.isnan(mY)] = 0
            ## Pooled standard deviations for each row
            sX = np.nansum((X - mX)**2, axis=1) #rowSums((X - mX)^2, na.rm=TRUE)
            sY = np.nansum((Y - mY)**2, axis=1) #rowSums((Y - mY)^2, na.rm=TRUE)
            #sX[np.isnan(sX)] = 0
            #sY[np.isnan(sY)] = 0
            if not paired:
                ## Number of not NA values in each row
                nX = np.sum(~np.isnan(X), axis=1)
                nY = np.sum(~np.isnan(Y), axis=1)
                ## d == difference between the group means for each row (==gene)
                ## s == pooled standard deviation for each row (==gene)
                d = mY - mX
                s = np.sqrt(((sX + sY) / (nX + nY - 2)) * (1 / nX + 1 / nY))
                ## Cases with less than two non-missing values.
                ## Set d = 0, s = 1
                ind = np.where( (nY < 2) | (nX < 2) )
                d[ind] = 0
                s[ind] = 1
            if paired:
                ## Add for paired: row-wise cross-product term (covariance)
                sXY = np.nansum((X - mX)*(Y - mY), axis=1)
                ## Number of not NA values in each row
                # X*Y is NaN wherever either matrix is NaN, i.e. complete pairs only
                n = np.sum(~np.isnan(X*Y), axis=1)
                ## d == difference between the group means for each row (==gene)
                ## s == pooled standard deviation for each row (==gene)
                d = mY - mX
                s = np.sqrt(((sX + sY) / (n + n - 2)) * (2 / n) - 2/(n*n-n)*sXY)
                ## Cases with less than two non-missing values.
                ## Set d = 0, s = 1
                ind = np.where( n < 2 )
                d[ind] = 0
                s[ind] = 1
        return {'d': d.reshape(-1), 's': s.reshape(-1)}
    # Multiple groups
    if len(samples)>2:
        samples_all = np.concatenate(samples, axis=1) # do.call("cbind",samples)
        if not paired:
            sum_cols = np.sum([sample.shape[1] for sample in samples])
            prod_cols = np.prod([sample.shape[1] for sample in samples])
            f = sum_cols / prod_cols #f <- sum(sapply(samples, ncol)) / prod(sapply(samples, ncol))
            r = np.zeros(samples_all.shape[0]) #r <- vector(mode="numeric", length=nrow(samples.all))
            for k in range(len(samples)):
                r = r + (np.nanmean(samples[k], axis=1)-np.nanmean(samples_all, axis=1))**2 #r <- r + (rowMeans(samples[[k]], na.rm=TRUE)-rowMeans(samples.all, na.rm=TRUE))^2
            d = (f*r)**0.5
            f = 1/np.sum([sample.shape[1] for sample in samples]-1) * np.sum(1/[sample.shape[1] for sample in samples]) #f <- 1/sum(sapply(samples, ncol)-1) * sum(1/sapply(samples, ncol))
            s = np.zeros(samples_all.shape[0]) # s <- vector(mode="numeric", length=nrow(samples.all))
            for k in range(len(samples)):
                # NOTE(review): `samples` is a list, so `samples[k,:]` (tuple
                # indexing) should raise TypeError here; likewise the
                # `[...]-1` / `1/[...]` list arithmetic above only works if
                # samples is actually an ndarray — this branch looks untested
                # against the R original; verify before relying on it.
                s = s + np.sum(np.apply_along_axis(lambda x: (x - np.nanmean(x))**2, axis=1, arr=samples[k,:]), axis=0, where=~np.isnan(samples[k,:]).any(axis=0)) #s <- s + colSums(apply(samples[[k]], 1, function(x) (x-mean(x,na.rm=TRUE))^2), na.rm=TRUE)
            s = (f*s)**0.5
        if paired:
            raise ValueError("Multiple paired groups not supported!")
        return {'d': d.reshape(-1), 's': s.reshape(-1)}
def calculateP(observed, permuted):
    """Compute empirical p-values for observed statistics against permutations.

    Args:
        observed: 1-D array of observed test statistics.
        permuted: array of permuted statistics (flattened into one null sample).

    Returns:
        1-D array of p-values in the original order of `observed`.

    NOTE: assumes `observed` contains no NaNs (upstream sets d=0/s=1 for
    degenerate rows) — TODO confirm.
    """
    # Remember each element's rank so results can be mapped back after sorting.
    # kind='stable' keeps ties in original order, matching the previous
    # sorted(..., reverse=True) behaviour (stable sort).
    observed_order = np.argsort(-np.abs(observed), kind='stable')
    # Sort by absolute value, descending — optim.pvalue expects ordered input.
    observed_sorted = -np.sort(-np.abs(observed))
    permuted_sorted = -np.sort(-np.abs(permuted.flatten()))
    # Get p-values from the compiled helper (expects ordered vectors).
    p = optim.pvalue(observed_sorted, permuted_sorted)
    # Scatter the p-values back to the original feature order
    # (vectorized replacement of the previous element-by-element loop;
    # stray debug prints of array shapes removed).
    results = np.zeros(len(p))
    results[observed_order] = p
    return results
def calculateFDR(observed, permuted, progress):
    """Estimate per-feature FDR by comparing observed vs permuted statistics.

    Args:
        observed: 1-D array of observed test statistics.
        permuted: 2-D array, one column per permutation round.
        progress: whether to display a tqdm progress bar.

    Returns:
        1-D array of monotone FDR values in the original order of `observed`.
    """
    observed = abs(observed)
    permuted = abs(permuted)
    # descending order of |observed|, NaNs last (mergesort is stable)
    # NOTE: `ord` shadows the builtin ord() inside this function
    ord = np.argsort(-observed, kind='mergesort') #order(observed, decreasing=TRUE, na.last=TRUE)
    a = observed[ord]
    A = np.empty((len(a), permuted.shape[1]))
    A.fill(np.nan)
    if progress:
        pb = tqdm(total=A.shape[1])
    for i in range(A.shape[1]): #seq_len(ncol(A))
        # one permutation column, sorted descending with NaNs moved to the end
        sorted_column = np.sort(permuted[:,i])[::-1] #sort(permuted[,i], decreasing=TRUE, na.last=TRUE)
        a_rand = np.concatenate([sorted_column[~np.isnan(sorted_column)], sorted_column[np.isnan(sorted_column)]])
        n_bigger = biggerN(a, a_rand)
        # count of null stats above each observed stat, divided by its rank
        A[ord,i] = n_bigger/range(1, len(a)+1)
        if progress:
            pb.update()
    if progress:
        pb.close()
    # median across permutation rounds, capped at 1
    FDR = np.apply_along_axis(np.median, 1, A)
    FDR[FDR>1] = 1
    # enforce monotonicity: running minimum from the weakest statistic upward
    FDR[ord] = list(reversed([min(FDR[ord][x-1:]) for x in range(len(FDR), 0, -1)])) #rev(sapply(length(FDR):1, function(x) return(min(FDR[ord][x:length(FDR)]))))
    return FDR
def biggerN(x, y):
    """For each value of x (sorted descending), count the values of y above it.

    Port of the R helper of the same name: for value v (a first occurrence in
    the descending-sorted x) the result is (# of y strictly greater than v)
    plus 1 if v itself occurs in y. Results are in descending-x order,
    matching the sorted `a` vector passed in by calculateFDR.

    NOTE: the first-occurrence matching below is O(n*m); fine for typical
    top-list sizes, but a sorted-merge would be O(n log n) if this becomes hot.
    NOTE: NaN entries in x cannot be matched by equality and would make the
    index arrays contain None — assumes NaN-free input (TODO confirm).
    """
    # sort both inputs descending, NaNs moved to the end (R's na.last=TRUE)
    sorted_x = np.sort(x)[::-1]
    x = np.concatenate([sorted_x[~np.isnan(sorted_x)], sorted_x[np.isnan(sorted_x)]])
    sorted_y = np.sort(y)[::-1]
    y = np.concatenate([sorted_y[~np.isnan(sorted_y)], sorted_y[np.isnan(sorted_y)]])
    # a: index of the first occurrence of each x value within x
    #    == number of x elements strictly greater than it
    a = np.array([np.where(x == v)[0][0] if v in x else None for v in x])
    # b: membership indicator of each x value in y
    b = np.isin(x, y)
    # z: combined pool, sorted descending, NaNs last
    sorted_z = np.sort(np.concatenate([x, y]))[::-1]
    z = np.concatenate([sorted_z[~np.isnan(sorted_z)], sorted_z[np.isnan(sorted_z)]])
    # d: first occurrence of each x value within the combined pool
    #    == number of pooled elements strictly greater than it
    d = np.array([np.where(z == v)[0][0] if v in z else None for v in x])
    # d - a isolates the y-only count; + b adds the equality hit
    res = d - a + b
    return res
# Replaced by *optim_cy.pvalue*
@jit(nopython=True, error_model='numpy')
def pvalue(a, b):
    """Compute empirical p-values of observed statistics against a permuted
    null distribution in a single linear sweep.

    Both inputs must already be sorted in decreasing order (the caller sorts
    by absolute value); this lets a single index j walk the null vector once.

    :param a: observed statistics, sorted decreasingly.
    :param b: permuted (null) statistics, sorted decreasingly.
    :return: array of p-values with the same shape as *a*.
    """
    observed = a.ravel()
    permuted = b.ravel()
    pvalues = np.zeros(len(observed))
    j = 0
    for i in range(len(observed)):
        # Advance j past every permuted value >= the current observed value.
        # The bounds check must come FIRST: the old order indexed permuted[j]
        # before testing j < len(permuted), an out-of-range read that numba's
        # nopython mode does not bounds-check.
        while j < len(permuted) and permuted[j] >= observed[i]:
            j += 1
        # p-value: fraction of the null distribution >= observed[i].
        pvalues[i] = float(j) / len(permuted)
    return pvalues.reshape(a.shape)
def get_summary(rots_res, fdr_c=None, n_genes=None, verbose=True):
    """Summarize ROTS results as a table sorted by absolute ROTS statistic.

    :param rots_res: ROTS result dict with keys 'd', 'data', 'FDR', 'p',
        'B', 'a1', 'a2', 'k', 'R' and 'Z'.
    :param fdr_c: if given, keep only rows with FDR <= fdr_c.
    :param n_genes: if given, keep only the n_genes top-ranked rows
        (takes precedence over fdr_c).
    :param verbose: print run parameters and (up to) the first ten rows.
    :return: pandas DataFrame with columns 'Row', 'ROTS-statistic',
        'pvalue' and 'FDR'.
    """
    ## Sort ROTS-statistic values (abs)
    stat = pd.DataFrame(np.abs(rots_res['d']), index=rots_res['data'].index, columns=['statistic'])
    sorted_stat = stat.sort_values(by='statistic', ascending=False)
    ## Add feature-names to fdr and ROTS values
    fdr = pd.DataFrame(rots_res['FDR'], index=rots_res['data'].index)
    d = pd.DataFrame(rots_res['d'], index=rots_res['data'].index)
    pvalue = pd.DataFrame(rots_res['p'], index=rots_res['data'].index)
    ## Result matrix, columns are: "Row", "ROTS-statistic", "pvalue" and "FDR".
    ## Built unconditionally: previously it was only built when a filter was
    ## requested, so calling with fdr_c=None and n_genes=None crashed with
    ## UnboundLocalError on `res` below.
    res = pd.DataFrame(
        data={
            'Row': sorted_stat.index,
            'ROTS-statistic': d.loc[sorted_stat.index][0],
            'pvalue': pvalue.loc[sorted_stat.index][0],
            'FDR': fdr.loc[sorted_stat.index][0]})
    ## Show only num.gene top rows or rows whose false discovery rate <= fdr
    if n_genes is not None:
        res = res.iloc[: min(n_genes, len(res))]
    elif fdr_c is not None:
        res = res[res['FDR'] <= fdr_c]
    if verbose:
        print("ROTS results:", "\n")
        print("Number of resamplings: ", rots_res["B"], "\n")
        print("a1: ", rots_res["a1"])
        print("a2: ", rots_res["a2"])
        print("Top list size: ", rots_res["k"])
        print("Reproducibility value: ", rots_res["R"])
        print("Z-score: ", rots_res["Z"], "\n")
        print(len(res), "rows satisfy the condition.")
        ## Print only the first 10 rows
        if len(res) > 10:
            print(" Only ten first rows are", "\n",
                  "displayed, see the return value for the whole output.")
            print(res[:10])
            print("...")
        else:
            print(res)
    return res
import argparse
import math
import sys
from argparse import ArgumentParser
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import rotsim2d.dressedleaf as dl
import rotsim2d.pathways as pw
import rotsim2d.visual as vis
from asteval import Interpreter
from matplotlib.cm import get_cmap
from matplotlib.colorbar import Colorbar
from molspecutils.molecule import CH3ClAlchemyMode, COAlchemyMode
class HelpfulParser(ArgumentParser):
    """ArgumentParser subclass that prints the full help text on errors."""

    def error(self, message):
        """Report *message* to stderr, print usage help and exit with code 2."""
        sys.stderr.write('error: {:s}\n'.format(message))
        self.print_help()
        sys.exit(2)
def run():
    """Entry point: parse CLI arguments, compute the 2D peak list for CO or
    CH3Cl and show an interactive resonance map.  Clicking a resonance prints
    the pathways contributing to it on standard output."""
    # * Parse arguments
    parser = HelpfulParser(
        description='Plot 2D resonance map of 2D spectrum of CO or CH3Cl.'
        ' Clicking on a resonance will print on standard output all pathways'
        ' contributing to it.',
        add_help=False)
    parser.add_argument('molecule', choices=('CO', 'CH3Cl'),
                        help="Molecule.")
    parser.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS,
                        help='Show this help message and exit.')
    parser.add_argument('-c', '--colors', type=int, choices=(1, 2, 3), default=3,
                        help="Full spectrum with broadband pulses or limited"
                        " to one or two colors (default: %(default)d).")
    parser.add_argument('-j', '--jmax', type=int,
                        help="Maximum initial angular momentum quantum number.")
    parser.add_argument('-k', '--kmax', type=int,
                        help="Maximum projection on principal molecular axis.")
    parser.add_argument('--no-abstract', action='store_true',
                        help='Print actual J values.')
    parser.add_argument('-f', "--filter", action='append',
                        help="Filter pathways by filtering excitation tree. "
                        "Can be provided multiple times to chain multiple filters."
                        "List of filter: https://allisonlab.gitlab.io/mdcs/rotsim2d/api/pathways.html#tree-filtering-functions")
    parser.add_argument('-a', '--angles', nargs=4, default=['0.0']*4,
                        help="Three beam angles and the detection angle. "
                        "Each angle can be a Python mathematical expression"
                        " using standard arithmetic operators and math"
                        " functions from Python math module.")
    parser.add_argument('-t', '--time', type=float, default=1.0,
                        help="Waiting time in ps (default: %(default)f).")
    parser.add_argument('-D', '--dpi', type=float,
                        help="Force DPI.")
    parser.add_argument('--symmetric-log', action='store_true',
                        help="Use symmetric logarithmic scaling for color"
                        " normalization.")
    args = parser.parse_args()
    # Angles may be math expressions; evaluate them in a sandboxed interpreter.
    aeval = Interpreter(use_numpy=False, minimal=True)
    angles = [aeval(angle) for angle in args.angles]
    if args.dpi:
        mpl.rcParams['figure.dpi'] = args.dpi

    # * Vibrational mode
    print('Initializing vibrational mode')
    if args.molecule == 'CH3Cl':
        vib_mode = CH3ClAlchemyMode()
    else:
        vib_mode = COAlchemyMode()
    T = 296.0

    # * Pathways
    print('Calculating peak list')
    jmax = args.jmax
    if jmax is None:
        # Molecule-specific defaults when -j is not given.
        if args.molecule == 'CH3Cl':
            jmax = 37
        else:
            jmax = 20
    # ** Filters
    meths = []
    if args.colors == 2:
        meths.append(pw.remove_threecolor)
    elif args.colors == 1:
        meths.append(pw.only_dfwm)
    if args.filter:
        meths.extend([getattr(pw, filter) for filter in args.filter])
    # ** Calculate peaks
    if args.kmax:
        kiter_func = "range((j if j<={kmax:d} else {kmax:d})+1)".format(kmax=args.kmax)
    else:
        kiter_func = "range(j+1)"
    pws = pw.gen_pathways(
        range(jmax), meths=meths,
        rotor='symmetric' if args.molecule == 'CH3Cl' else 'linear',
        kiter_func=kiter_func)
    dressed_pws = dl.DressedPathway.from_kb_list(pws, vib_mode, T)
    peaks = dl.Peak2DList.from_dp_list(
        dressed_pws, tw=args.time*1e-12, angles=angles)
    # Symmetric color range with 10% headroom, scaled to 10^-6 units.
    vminmax = np.max(np.abs(np.array(peaks.intensities)))*1.1*1e6

    # * Visualize
    if args.symmetric_log:
        norm = colors.SymLogNorm(linthresh=vminmax/100.0,
                                 vmin=-vminmax, vmax=vminmax)
    else:
        norm = colors.Normalize(vmin=-vminmax, vmax=vminmax)
    fig = plt.figure(constrained_layout=True)
    gs = fig.add_gridspec(nrows=1, ncols=2, width_ratios=[20, 1])
    ax = fig.add_subplot(gs[0])
    sc = ax.scatter(peaks.probes, peaks.pumps, s=10.0,
                    c=-np.array(peaks.intensities)*1e6,
                    cmap=get_cmap('RdBu').reversed(), norm=norm, picker=True)
    ax.set(xlabel=r'$\Omega_3$ (cm$^{-1}$)',
           ylabel=r'$\Omega_1$ (cm$^{-1}$)')
    axcbar = fig.add_subplot(gs[-1])
    cbar = Colorbar(mappable=sc, ax=axcbar, orientation='vertical', extend='neither')
    amp_str = r"$S^{(3)}\cos \Omega_2 t_2$"
    cbar.set_label(amp_str + r" ($10^{-6}$ m$^{2}$ Hz/(V s/m)$^2$)")
    ax.set_title(str(args.filter), fontsize=10)
    # Fixed: FigureCanvasBase.set_window_title was deprecated in Matplotlib
    # 3.4 and removed in 3.6 -- the figure manager owns the window title.
    fig.canvas.manager.set_window_title(str(args.filter))
    abstract = not args.no_abstract

    def scatter_onpick(event):
        """Show information about the peak pathway."""
        if event.artist != sc:
            return
        dl.pprint_dllist(peaks[event.ind[0]].dp_list, abstract=abstract, angles=angles)

    fig.canvas.mpl_connect('pick_event', scatter_onpick)
    plt.show()
from pathlib import Path
import string
import sys
from argparse import ArgumentParser
from pprint import pprint
from typing import List, Sequence
import rotsim2d.dressedleaf as dl
import rotsim2d.pathways as pw
import rotsim2d.propagate as prop
import toml
from asteval import Interpreter
class HelpfulParser(ArgumentParser):
    """ArgumentParser subclass that prints the full help text on errors."""

    def error(self, message):
        """Report *message* to stderr, print usage help and exit with code 2."""
        sys.stderr.write('error: {:s}\n'.format(message))
        self.print_help()
        sys.exit(2)
def named_fields(s: str) -> List[str]:
    """Return the replacement-field names referenced by format string *s*.

    Automatic positional fields ('{}') appear as empty strings; literal text
    segments (field name None) are skipped.
    """
    fields = []
    for _literal, field_name, _spec, _conv in string.Formatter().parse(s):
        if field_name is not None:
            fields.append(field_name)
    return fields
def run():
    """Entry point: read each TOML parameter file and compute either a 2D
    peak list or a full 2D spectrum, saving the result to disk."""
    parser = HelpfulParser(
        description="Calculate and save to file list of 2D peaks or 2D spectrum.",
        add_help=False)
    parser.add_argument("input_paths", nargs='+',
                        help="Paths to input files.",)
    args = parser.parse_args()
    aeval = Interpreter(use_numpy=False, minimal=True)
    for input_path in args.input_paths:
        params = toml.load(input_path)
        pprint(params)
        # Polarization angles may be math expressions; evaluate them in a
        # sandboxed interpreter.
        if str in [type(x) for x in params['spectrum']['angles']]:
            params['spectrum']['angles'] = \
                [aeval(angle) for angle in params['spectrum']['angles']]
        if params['spectrum']['type'] == 'peaks':
            print("Calculating peak list...")
            peaks = dl.run_peak_list(params)
            print("Saving to {!s}...".format(params['output']['file']))
            peaks.to_file(params['output']['file'],
                          metadata=params)
        elif params['spectrum']['type'] in ('lineshapes', 'time'):
            print("Preparing DressedPathway's...")
            dls = dl.DressedPathway.from_params_dict(params['pathways'])
            print("Calculating 2D spectrum...")
            params = prop.run_update_metadata(params)
            # Fill in default output paths *before* validating them; the old
            # order raised KeyError when 'file' was absent and several
            # pressures were requested.
            if isinstance(params['spectrum']['pressure'], Sequence):
                pressures = params['spectrum']['pressure'][:]
                if 'file' not in params['output']:
                    # Named 'p' field so that .format(p=p) below fills it in;
                    # the old positional '{:.1f}' placeholder made
                    # str.format(p=p) raise IndexError.
                    params['output']['file'] = Path(input_path).stem +\
                        '_{p:.1f}.h5'
                if 'p' not in named_fields(str(params['output']['file'])):
                    raise ValueError(
                        "Format specifier with field 'p' not provided. "
                        "Data for all pressures would have been overwritten.")
            else:
                pressures = [params['spectrum']['pressure']]
                if 'file' not in params['output']:
                    # str, not Path: the value is later passed through
                    # str.format(p=...), which Path objects do not support.
                    params['output']['file'] = str(
                        Path(input_path).with_suffix('.h5'))
            for p in pressures:
                params['spectrum']['pressure'] = p
                print("Pressure = {:.2f} atm".format(p))
                fs_pu, fs_pr, spec2d = prop.run_propagate(
                    dls, params['spectrum'])
                output_file = params['output']['file'].format(p=p)
                print("Saving to {!s}...".format(output_file))
                prop.run_save(
                    output_file,
                    fs_pu, fs_pr, spec2d,
                    params)
if __name__ == '__main__':
run() | /rotsim2d_apps-0.3.2.tar.gz/rotsim2d_apps-0.3.2/rotsim2d_apps/rotsim2d_calc.py | 0.446495 | 0.213367 | rotsim2d_calc.py | pypi |
import sys
import matplotlib as mpl
import numpy as np
import PyQt5
import rotsim2d.dressedleaf as dl
import rotsim2d.pathways as pw
import rotsim2d.symbolic.functions as sym
from PyQt5 import Qt, QtCore, QtGui, QtWidgets
from .AngleWidget import Ui_AngleWidget
from .PolarizationsUI import Ui_MainWindow
class Model:
    """Polarization-dependence model for one phase-matching direction.

    Precomputes R-factor expressions for J=5 pathways of the requested
    direction ('SI', 'SII' or 'SIII') and evaluates them over a grid of
    polarization angles.
    """

    def __init__(self, direction):
        direction_filter = getattr(pw, 'only_' + direction)
        kb_list = pw.gen_pathways([5], rotor='symmetric',
                                  meths=[direction_filter],
                                  kiter_func="[1]",
                                  pump_overlap=False)
        pathways = dl.Pathway.from_kb_list(kb_list)
        rfactor_pathways = sym.RFactorPathways.from_pwlist(pathways, True, True)
        self.rfactors = []
        self.titles = []
        for rfpw in rfactor_pathways:
            self.rfactors.append(rfpw.rfactor)
            self.titles.append('peaks: ' + ','.join(rfpw.peak_labels) + '\n' +
                               'trans: ' + ','.join(rfpw.trans_labels_deg))
        # Angle grid (rad) over which the 2D maps are evaluated.
        self.angles = np.linspace(-np.pi/2, np.pi/2, 100)
        self.axes_labels = (r'$\Phi_2$', r'$\Phi_3$', r'$\Phi_4$')

    def plot_data(self, index, val):
        """Evaluate every R-factor on the angle grid with the angle at
        *index* (1-3) fixed at *val* degrees.  Returns a list of 2D arrays."""
        arguments = [0, self.angles[:, None], self.angles[None, :]]
        arguments.insert(index, val * np.pi / 180.0)
        return [rf.numeric_rel(*arguments) for rf in self.rfactors]

    def plot_axes_labels(self, index):
        """Return the two axis labels left after removing the fixed angle."""
        remaining = list(self.axes_labels)
        del remaining[index]
        return remaining
class AngleWidget(QtWidgets.QWidget, Ui_AngleWidget):
    """Widget with a labelled slider/spinbox pair for one polarization angle.

    The slider and spinbox are kept in sync; the radio button marks this
    angle as the fixed one and enables/disables its controls.
    """

    def __init__(self, label, enabled=False, parent=None):
        """*label* is rich text (e.g. HTML subscripts); *enabled* sets the
        initial state of the radio button (and hence of the controls)."""
        super(AngleWidget, self).__init__(parent)
        self.setupUi(self)
        self.label.setTextFormat(QtCore.Qt.TextFormat.RichText)
        self.label.setText(label)
        # Keep the slider and the spinbox mutually synchronized.
        self.slider.valueChanged.connect(self.spin.setValue)
        self.spin.valueChanged.connect(self.slider.setValue)
        # The radio button gates whether this angle is adjustable.
        self.radio.toggled.connect(self.slider.setEnabled)
        self.radio.toggled.connect(self.spin.setEnabled)
        # Set last, so the toggled signal applies the enabled state above.
        self.radio.setChecked(enabled)
class Polarizations(QtWidgets.QMainWindow):
    """Main window: angle controls, direction selection and the per-pathway
    polarization-dependence maps."""

    def __init__(self, parent=None):
        QtWidgets.QMainWindow.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # take a reasonable amount of screen size
        screen = QtWidgets.QDesktopWidget().availableGeometry()
        # Fixed: QWidget.resize() takes ints; PyQt5 raises TypeError when
        # given the float products.
        self.resize(int(screen.width()*0.7), int(screen.height()*0.7))
        # add angle widgets
        self._radio_group = QtWidgets.QButtonGroup()
        self._radio_group.buttonToggled.connect(
            self._radio_toggled)
        self._angle_widgets = []
        self.ui.anglesLayout.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop)
        self._add_angle_widget(AngleWidget("Φ<sub>2</sub>", True))
        self._add_angle_widget(AngleWidget("Φ<sub>3</sub>"))
        self._add_angle_widget(AngleWidget("Φ<sub>4</sub>"))
        self.ui.anglesLayout.setSpacing(10)
        # add direction radios
        self.ui.anglesLayout.insertSpacing(0, 10)
        self._dir_group = QtWidgets.QButtonGroup()
        self._dir_group.buttonToggled.connect(
            self._dir_toggled)
        self._add_dir_radio(QtWidgets.QRadioButton("SIII: k1+k2-k3"))
        btn = QtWidgets.QRadioButton("SII: k1-k2+k3")
        btn.setChecked(True)
        self._add_dir_radio(btn)
        self._add_dir_radio(QtWidgets.QRadioButton("SI: -k1+k2+k3"))
        # add model
        self.ui.statusbar.showMessage("Initializing model")
        self.model = Model('SII')
        self.ui.mplwdg.set_titles(self.model.titles)
        # self.update_plots()
        self.ui.statusbar.clearMessage()

    def _angle_index(self):
        """Index (1-3) of the angle currently fixed via its radio button."""
        index = 1
        for i in range(len(self._angle_widgets)):
            if self._angle_widgets[i].radio.isChecked():
                index += i
        return index

    def update_plots(self, val=None):
        """Recompute and redraw all maps; *val* overrides the fixed angle
        value (degrees) when given, otherwise it is read from the spinbox."""
        index = self._angle_index()
        if val is None:
            val = self._angle_widgets[index-1].spin.value()
        data = self.model.plot_data(index, val)
        self.ui.mplwdg.figure_update(data)

    def update_axes_labels(self):
        """Relabel the plot axes with the two non-fixed angles."""
        index = self._angle_index()
        labels = self.model.plot_axes_labels(index-1)
        self.ui.mplwdg.set_axes_labels(labels)

    def _radio_toggled(self, button, checked):
        # A different angle became the fixed one: refresh maps and labels.
        self.update_plots()
        self.update_axes_labels()

    def _add_angle_widget(self, wdg):
        """Register an AngleWidget: wire its spinbox to the plots and add its
        radio to the exclusive group."""
        wdg.spin.valueChanged.connect(self.update_plots)
        self._angle_widgets.append(wdg)
        self.ui.anglesLayout.addWidget(wdg)
        self._radio_group.addButton(wdg.radio)

    def _dir_toggled(self, button, checked):
        # Rebuild the model when a phase-matching direction is selected;
        # the direction name is the part of the label before ':'.
        if checked:
            direction = button.text().split(':')[0]
            self.model = Model(direction)
            self.ui.mplwdg.set_titles(self.model.titles)
            self.update_plots()

    def _add_dir_radio(self, wdg):
        """Insert a direction radio button at the top of the angles layout."""
        self.ui.anglesLayout.insertWidget(0, wdg)
        self._dir_group.addButton(wdg)
def run():
    """Application entry point: start Qt, show the main window, run the loop."""
    app = QtWidgets.QApplication(sys.argv)
    # Match matplotlib's DPI to the physical screen DPI for crisp figures.
    # NOTE(review): QDesktopWidget is deprecated in Qt5 and gone in Qt6 --
    # confirm before porting.
    mpl.rcParams['figure.dpi'] = app.desktop().physicalDpiX()
    polarizations = Polarizations()
    polarizations.show()
    sys.exit(app.exec_())
# Local Variables:
# compile-command: "make -k"
# End:
import matplotlib
import matplotlib.cm as cm
import matplotlib.colors as clrs
import numpy as np
from matplotlib.backends.backend_qt5agg import \
NavigationToolbar2QT as NavigationToolbar
from matplotlib.colorbar import Colorbar
from matplotlib.widgets import MultiCursor
from PyQt5 import QtWidgets
from ..MplCanvas import MplCanvas
nature_fontsize = 8
nature_rc = {
# 'font.sans-serif': 'Arial',
'font.size': nature_fontsize,
'axes.labelsize': nature_fontsize,
'xtick.labelsize': nature_fontsize,
'ytick.labelsize': nature_fontsize,
'legend.fontsize': nature_fontsize,
'lines.markersize': 1.0,
'lines.linewidth': 0.6,
'xtick.major.size': 3,
'xtick.minor.size': 1.5,
'xtick.major.pad': 4,
'ytick.major.size': 3,
'ytick.major.pad': 4
}
matplotlib.rcParams.update(nature_rc)
class BlitManager:
    """Manage blitted drawing of a set of 'animated' matplotlib artists.

    The static part of the figure is cached on every full draw; ``update``
    then restores that cached background and redraws only the animated
    artists, which is much faster than a full canvas redraw.
    """

    def __init__(self, canvas, animated_artists=()):
        """
        Parameters
        ----------
        canvas : FigureCanvasAgg
            The canvas to work with, this only works for sub-classes of the Agg
            canvas which have the `~FigureCanvasAgg.copy_from_bbox` and
            `~FigureCanvasAgg.restore_region` methods.
        animated_artists : Iterable[Artist]
            List of the artists to manage
        """
        self.canvas = canvas
        # Cached background (filled by the first draw_event).
        self._bg = None
        self._artists = []
        for a in animated_artists:
            self.add_artist(a)
        # grab the background on every draw
        self.cid = canvas.mpl_connect("draw_event", self.on_draw)

    def on_draw(self, event):
        """Callback to register with 'draw_event'."""
        cv = self.canvas
        if event is not None:
            # Sanity check: the event must come from the canvas we manage.
            if event.canvas != cv:
                raise RuntimeError
        # Re-cache the static background, then paint the animated artists.
        self._bg = cv.copy_from_bbox(cv.figure.bbox)
        self._draw_animated()

    def add_artist(self, art):
        """
        Add an artist to be managed.
        Parameters
        ----------
        art : Artist
            The artist to be added. Will be set to 'animated' (just
            to be safe). *art* must be in the figure associated with
            the canvas this class is managing.
        """
        if art.figure != self.canvas.figure:
            raise RuntimeError
        art.set_animated(True)
        self._artists.append(art)

    def _draw_animated(self):
        """Draw all of the animated artists."""
        fig = self.canvas.figure
        for a in self._artists:
            fig.draw_artist(a)

    def update(self):
        """Update the screen with animated artists."""
        cv = self.canvas
        fig = cv.figure
        # paranoia in case we missed the draw event,
        if self._bg is None:
            self.on_draw(None)
        else:
            # restore the background
            cv.restore_region(self._bg)
            # draw all of the animated artists
            self._draw_animated()
            # update the GUI state
            cv.blit(fig.bbox)
        # let the GUI event loop process anything it has to do
        cv.flush_events()

    def suspend(self):
        """Remove animated property from artists."""
        for art in self._artists:
            art.set_animated(False)

    def resume(self):
        """Add animated property to artists."""
        for art in self._artists:
            art.set_animated(True)
class PolarizationWidget(QtWidgets.QWidget):
    """Qt widget embedding a grid of seven polarization-dependence maps.

    Wraps a matplotlib canvas plus a navigation toolbar, and uses a
    BlitManager so only the image artists are redrawn on data updates.
    """

    def __init__(self, parent=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.canvas = MplCanvas()
        self.toolbar = NavigationToolbar(self.canvas, parent)
        self.vbl = QtWidgets.QVBoxLayout()
        self.vbl.addWidget(self.canvas)
        self.vbl.addWidget(self.toolbar)
        self.setLayout(self.vbl)
        self.fig = self.canvas.fig
        self.figure_setup()

    @staticmethod
    def main_subplots(gs):
        """Yield every gridspec cell except those in the last (colorbar) column."""
        for ss in gs:
            if ss.is_last_col():
                continue
            else:
                yield ss

    def figure_setup(self):
        """Create the 2x4 map grid, the shared colorbar and the blit/cursor
        machinery; images start out as all-zero placeholders."""
        # self.fig.patch.set_facecolor('white')
        fake_data = np.zeros((100, 100))
        self.gs = self.fig.add_gridspec(
            nrows=2, ncols=5, width_ratios=[20, 20, 20, 20, 1])
        subplots = list(self.main_subplots(self.gs))
        # Only seven maps are shown; the 4th main cell stays empty.
        del subplots[3]
        self.axes = [self.fig.add_subplot(subplots[i]) for i in range(7)]
        self.set_axes_labels((r'$\Phi_3$', r'$\Phi_4$'))
        axcbar = self.fig.add_subplot(self.gs[:, -1])
        scalar_map = cm.ScalarMappable(norm=clrs.Normalize(-1, 1),
                                       cmap=cm.get_cmap('RdBu'))
        cbar = Colorbar(mappable=scalar_map,
                        ax=axcbar, orientation='vertical', extend='neither')
        # Angle axes span -90..90 degrees in both directions.
        extent = [-90, 90, -90, 90]
        self.images = [self.axes[i].imshow(
            fake_data, aspect='auto', extent=extent, origin='lower', vmin=-1.0,
            vmax=1.0, cmap=cm.get_cmap('RdBu')) for i in range(7)]
        self.fig.set_constrained_layout_pads(
            wspace=0.02, hspace=0.02)
        # Blit only the images on data updates; the cursor blits itself.
        self.blit_manager = BlitManager(self.canvas, self.images)
        self.multicursor = MultiCursor(
            self.canvas, self.axes, useblit=True, horizOn=True, vertOn=True,
            color='gray', lw=0.7)
        self.canvas.mpl_connect('figure_leave_event', self.restore_main_blit)
        self.canvas.mpl_connect('figure_enter_event', self.disable_main_blit)

    def set_titles(self, titles):
        """Set one title per map axes (truncated to the 7 available axes)."""
        for title, ax in zip(titles, self.axes):
            ax.set_title(title, fontsize=8)

    def set_axes_labels(self, labels):
        """Apply *(ylabel, xlabel)* to every map axes."""
        ylabel, xlabel = labels
        for ax in self.axes:
            ax.set(xlabel=xlabel, ylabel=ylabel)

    def disable_main_blit(self, event):
        """On pointer entry: stop blitting the images so the multicursor's
        own blitting sees a fully drawn background."""
        self.blit_manager.suspend()
        self.canvas.draw()

    def restore_main_blit(self, event):
        """On pointer exit: resume image blitting and repaint without the cursor."""
        self.blit_manager.resume()
        self.canvas.restore_region(self.multicursor.background)
        self.canvas.blit()

    def figure_update(self, data):
        """`data` is a list of 2D arrays."""
        for i in range(7):
            self.images[i].set_data(data[i])
        self.blit_manager.update()
import click
from rotten_tomatoes_client import MovieBrowsingQuery
from data import BrowseStreamingMovieCategory, BrowseMovieInTheaterCategory, MovieService, MovieGenre, BrowseSortBy
from data.services import RottenTomatoesMoviesBrowser
from tables.builders import BrowseMovieTableBuilder
from tables.rows.builders import BrowseMovieRowBuilder
browser = RottenTomatoesMoviesBrowser()
table_builder = BrowseMovieTableBuilder(BrowseMovieRowBuilder())
# Top-level click command group; the `theaters` and `streaming` subcommands
# are attached below via movies.add_command().  (Kept as a comment rather
# than a docstring so the CLI help text is unchanged.)
@click.group()
def movies():
    pass
# CLI command: browse movies currently in theaters.
# CATEGORY selects the in-theater listing (default: opening); the options
# narrow results by Tomatometer range, certified-fresh flag, streaming
# service, genre and sort order.
@click.command()
@click.argument("category", default=BrowseMovieInTheaterCategory.opening.name,
                type=click.Choice(BrowseMovieInTheaterCategory.names()))
@click.option("--minimum_rating", "-l", default=70)
@click.option("--maximum_rating", "-h", default=100)
@click.option("--certified_fresh", "-f", is_flag=True)
@click.option("--service", "-s", default=None, multiple=True, type=click.Choice(MovieService.names()))
@click.option("--genre", "-g", default=None, multiple=True, type=click.Choice(MovieGenre.names()))
@click.option("--sort_by", default=BrowseSortBy.popularity.name, type=click.Choice(BrowseSortBy.names()))
def theaters(category, minimum_rating, maximum_rating, certified_fresh, service, genre, sort_by):
    # Translate the CLI enum name into the rotten_tomatoes_client category.
    theaters_category = BrowseMovieInTheaterCategory.category(value=category).value["client_category"]
    build(category=theaters_category, minimum_rating=minimum_rating, maximum_rating=maximum_rating,
          certified_fresh=certified_fresh, services=service, genres=genre, sort_by=sort_by)
# CLI command: browse movies available for streaming.
# CATEGORY selects the streaming listing (default: new); options mirror the
# `theaters` command.
@click.command()
@click.argument("category", default=BrowseStreamingMovieCategory.new.name,
                type=click.Choice(BrowseStreamingMovieCategory.names()))
@click.option("--minimum_rating", "-l", default=70)
@click.option("--maximum_rating", "-h", default=100)
@click.option("--certified_fresh", "-f", is_flag=True)
@click.option("--service", "-s", default=None, multiple=True, type=click.Choice(MovieService.names()))
@click.option("--genre", "-g", default=None, multiple=True, type=click.Choice(MovieGenre.names()))
@click.option("--sort_by", default=BrowseSortBy.popularity.name, type=click.Choice(BrowseSortBy.names()))
def streaming(category, minimum_rating, maximum_rating, certified_fresh, service, genre, sort_by):
    # Translate the CLI enum name into the rotten_tomatoes_client category.
    streaming_category = BrowseStreamingMovieCategory.category(value=category).value["client_category"]
    build(category=streaming_category, minimum_rating=minimum_rating, maximum_rating=maximum_rating,
          certified_fresh=certified_fresh, services=service, genres=genre, sort_by=sort_by)
def build(category, minimum_rating, maximum_rating, certified_fresh, services, genres, sort_by):
    """Run the browse query and echo the rendered results table.

    Prints "No results" when the query matched nothing.
    """
    results = browser.browse(query(category=category, minimum_rating=minimum_rating,
                                   maximum_rating=maximum_rating, certified_fresh=certified_fresh,
                                   services=services, genres=genres, sort_by=sort_by))
    if len(results) > 0:
        click.echo(table_builder.build(results))
    else:
        click.echo("No results")
def query(category, minimum_rating, maximum_rating, certified_fresh, services, genres, sort_by):
    """Build a MovieBrowsingQuery from CLI values, mapping each enum name
    onto the corresponding rotten_tomatoes_client object."""
    services = [MovieService.service(value=service).value["client_service"] for service in services]
    genres = [MovieGenre.genre(value=genre).value["client_genre"] for genre in genres]
    return MovieBrowsingQuery(minimum_rating=minimum_rating,
                              maximum_rating=maximum_rating,
                              certified_fresh=certified_fresh, services=services, genres=genres,
                              sort_by=BrowseSortBy.sort_by(value=sort_by).value["client_value"],
                              category=category)
movies.add_command(theaters)
movies.add_command(streaming) | /rotten_tomatoes_cli-0.0.3.tar.gz/rotten_tomatoes_cli-0.0.3/scripts/movies.py | 0.433022 | 0.180865 | movies.py | pypi |
from data import TvShowSearchResult, MovieSearchResult, BrowseTvShowResult, BrowseMovieResult
class TvShowSearchResultsParser:
    """Parses raw Rotten Tomatoes TV-show search results into result objects."""

    def parse(self, tv_show_results):
        """Convert a list of raw search-result dicts into TvShowSearchResult objects."""
        return [
            TvShowSearchResult(name=tv_show_result["title"],
                               start_year=tv_show_result["startYear"],
                               end_year=self.year(year=tv_show_result["endYear"]),
                               rotten_tomatoes_score=self.rotten_tomatoes_score(result=tv_show_result))
            for tv_show_result in tv_show_results
        ]

    def year(self, year):
        """Normalize a year; Rotten Tomatoes encodes the end year of a
        still-running series as 0, which becomes None."""
        return None if year == 0 else year

    def rotten_tomatoes_score(self, result):
        """Return the Tomatometer score, or None when the field is absent."""
        # dict.get replaces the hand-rolled `None if k not in d else d[k]`.
        return result.get("meterScore")
class MovieSearchResultsParser:
    """Parses raw Rotten Tomatoes movie search results into result objects."""

    def parse(self, movie_results):
        """Convert a list of raw search-result dicts into MovieSearchResult objects."""
        return [
            MovieSearchResult(name=movie_result["name"], year=movie_result["year"],
                              rotten_tomatoes_score=self.rotten_tomatoes_score(result=movie_result),
                              cast=self.cast(cast=movie_result["castItems"]))
            for movie_result in movie_results
        ]

    def cast(self, cast):
        """Extract just the actor names from the raw cast entries."""
        return [cast_member["name"] for cast_member in cast]

    def rotten_tomatoes_score(self, result):
        """Return the Tomatometer score, or None when the field is absent."""
        # dict.get replaces the hand-rolled `None if k not in d else d[k]`.
        return result.get("meterScore")
class TvShowBrowseResultsParser:
    """Parses raw Rotten Tomatoes TV-show browse results into result objects."""

    def parse(self, tv_show_results):
        """Convert a list of raw browse-result dicts into BrowseTvShowResult objects."""
        return [
            BrowseTvShowResult(title=tv_show_result["title"],
                               rotten_tomatoes_score=self.rotten_tomatoes_score(result=tv_show_result))
            for tv_show_result in tv_show_results
        ]

    def rotten_tomatoes_score(self, result):
        """Return the Tomatometer score, or None when the field is absent."""
        # dict.get replaces the hand-rolled `None if k not in d else d[k]`.
        return result.get("tomatoScore")
class MovieBrowseResultsParser:
    """Parses raw Rotten Tomatoes movie browse results into result objects."""

    def parse(self, movie_results):
        """Convert a list of raw browse-result dicts into BrowseMovieResult objects."""
        return [
            BrowseMovieResult(title=movie_result["title"],
                              rotten_tomatoes_score=self.rotten_tomatoes_score(result=movie_result),
                              synopsis=movie_result["synopsis"], runtime=self.runtime(result=movie_result),
                              theater_release_date=self.theater_release_date(result=movie_result),
                              dvd_release_date=self.dvd_release_date(result=movie_result),
                              mpaa_rating=movie_result["mpaaRating"], actors=movie_result["actors"])
            for movie_result in movie_results
        ]

    # dict.get replaces the hand-rolled `None if k not in d else d[k]`
    # pattern in the four optional-field accessors below.

    def rotten_tomatoes_score(self, result):
        """Return the Tomatometer score, or None when the field is absent."""
        return result.get("tomatoScore")

    def dvd_release_date(self, result):
        """Return the DVD release date, or None when the field is absent."""
        return result.get("dvdReleaseDate")

    def theater_release_date(self, result):
        """Return the theater release date, or None when the field is absent."""
        return result.get("theaterReleaseDate")

    def runtime(self, result):
        """Return the runtime, or None when the field is absent."""
        return result.get("runtime")
from textwrap import wrap
from termcolor import colored
from tables.utilities import RottenTomatoesScoreFormatter, MpaaRatingFormatter, convert_to_ascii, clean_html, formatted_header
class MovieSearchRowBuilder:
    """Builds table rows (name, score, year, cast) for movie search results."""

    def __init__(self):
        self.rating_formatter = RottenTomatoesScoreFormatter()

    def build(self, movie):
        """Return one table row (list of cells) for *movie*."""
        return [
            self.name(name=movie.name),
            self.rating_formatter.format(rating=movie.rotten_tomatoes_score),
            movie.year,
            self.cast(cast=movie.cast)
        ]

    def name(self, name):
        """Render the movie name in bold, wrapped to 30 columns."""
        wrapped_name = wrap(text=convert_to_ascii(text=name), width=30)
        return "\n".join([colored(value, attrs=["bold"]) for value in wrapped_name])

    def cast(self, cast):
        """One actor per line, transliterated to ASCII."""
        return "\n".join([convert_to_ascii(text=actor) for actor in cast])
class TvShowSearchRowBuilder:
    """Builds table rows (name, score, years aired) for TV-show search results."""

    def __init__(self):
        self.rating_formatter = RottenTomatoesScoreFormatter()

    def build(self, tv_show):
        """Return one table row (list of cells) for *tv_show*."""
        name_cell = self.name(name=tv_show.name)
        score_cell = self.rating_formatter.format(rating=tv_show.rotten_tomatoes_score)
        years_cell = self.format_years(start_year=tv_show.start_year,
                                       end_year=tv_show.end_year)
        return [name_cell, score_cell, years_cell]

    def format_years(self, start_year, end_year):
        """Render the aired range; a still-running show (end_year None)
        gets an open-ended range like '1999-'."""
        if end_year is None:
            end_year = ""
        return "{start_year}-{end_year}".format(start_year=start_year, end_year=end_year)

    def name(self, name):
        """Render the show name in bold, wrapped to 30 columns."""
        lines = wrap(text=convert_to_ascii(text=name), width=30)
        return "\n".join(colored(line, attrs=["bold"]) for line in lines)
class BrowseTvShowRowBuilder:
    """Builds two-column table rows (title, score) for browsed TV shows."""

    def __init__(self):
        self.rating_formatter = RottenTomatoesScoreFormatter()

    def build(self, tv_show):
        """Return one [title, formatted score] row for *tv_show*."""
        score = self.rating_formatter.format(rating=tv_show.rotten_tomatoes_score)
        return [tv_show.title, score]
class BrowseMovieRowBuilder:
    """Builds two-cell table rows (summary, details) for browsed movies."""

    def __init__(self):
        self.rotten_tomatoes_score_formatter = RottenTomatoesScoreFormatter()
        self.mpaa_rating_formatter = MpaaRatingFormatter()

    def build(self, movie):
        """Return one [summary, details] row for *movie*."""
        return [
            self.summary(movie=movie),
            self.details(movie=movie)
        ]

    def summary(self, movie):
        """Left cell: styled title followed by the wrapped synopsis."""
        return "{title}\n\n" \
               "{synopsis}"\
            .format(title=self.title(title=movie.title),
                    synopsis=self.synopsis(synopsis=movie.synopsis))

    def details(self, movie):
        """Right cell: score, rating, runtime, release dates and actors,
        each under its own formatted header."""
        return "{score_header}\n" \
               "{score}\n\n" \
               "{rating_header}\n" \
               "{rating}\n\n" \
               "{runtime_header}\n" \
               "{runtime}\n\n" \
               "{release_header}\n" \
               "{release}\n\n" \
               "{actors_header}\n" \
               "{actors}"\
            .format(score_header=formatted_header(text="Score"),
                    rating_header=formatted_header(text="Rating"),
                    runtime_header=formatted_header(text="Runtime"),
                    release_header=formatted_header(text="Release"),
                    actors_header=formatted_header(text="Actors"),
                    score=self.rotten_tomatoes_score_formatter.format(rating=movie.rotten_tomatoes_score),
                    rating=self.mpaa_rating_formatter.format(rating=movie.mpaa_rating),
                    runtime=self.runtime(runtime=movie.runtime),
                    release=self.release_dates(movie=movie),
                    actors=self.actors(actors=movie.actors))

    def release_dates(self, movie):
        """One line per known release date (theaters and/or DVD); missing
        dates are simply omitted."""
        release_dates = []
        if movie.theater_release_date is not None:
            release_dates.append("{theater_release_date} (Theaters)"
                                 .format(theater_release_date=movie.theater_release_date))
        if movie.dvd_release_date is not None:
            release_dates.append("{dvd_release_date} (DVD)".format(dvd_release_date=movie.dvd_release_date))
        return "\n".join(release_dates)

    def actors(self, actors):
        """One actor per line, transliterated to ASCII."""
        return "\n".join([convert_to_ascii(text=actor) for actor in actors])

    def runtime(self, runtime):
        """Render an unknown runtime as 'N/A'."""
        return "N/A" if runtime is None else runtime

    def synopsis(self, synopsis):
        """Strip HTML from the synopsis and wrap it to 50 columns."""
        return "\n".join(wrap(text=convert_to_ascii(text=clean_html(raw_html=synopsis)), width=50))

    def title(self, title):
        """Render the title bold and underlined, wrapped to 50 columns."""
        wrapped_title = wrap(text=convert_to_ascii(text=title), width=50)
        return "\n".join([colored(value, attrs=["bold", "underline"]) for value in wrapped_title])


[](https://badge.fury.io/py/rottentomatoes-python)


# :movie_camera: Rotten Tomatoes in Python (and API) :clapper:
> **Note**
> If at any point in your project this library stops working, returning errors for standalone functions or the `Movie` class, first try updating it with `pip install -U rottentomatoes-python`, and if it's still not working, submit an issue on this repo. 99% of the time it'll "stop working" because the Rotten Tomatoes site schema has changed, meaning some changes to web scraping and extraction under the hood are necessary to make everything work again. Tests run on this repo automatically once a day, so breaking changes to the Rotten Tomatoes site should be caught by myself or a maintainer pretty quickly.
This package allows you to easily fetch Rotten Tomatoes scores and other movie data such as genres, without the use of the official Rotten Tomatoes API. The package scrapes their website for the data. I built this because unfortunately, to get access to their API, you have to submit a special request which takes an inordinate amount of time to process, or doesn't go through at all.
The package now, by default, scrapes the Rotten Tomatoes search page to find the true URL of the first valid movie result (i.e. one that is a movie and has a Tomatometer score). This means queries that previously didn't work because their URLs had a unique identifier or a year-released prefix now work. The limitation of this new mechanism is that you only get the top result, and when searching for specific movies (sequels, by year, etc.) Rotten Tomatoes seems to return the same results as the original query. So it's difficult to craft a specific query that gets the desired movie as the top result. See #4 for more info on this.
There is now an API deployed to make querying multiple movies and getting several responses easier. The endpoint is https://rotten-tomatoes-api.ue.r.appspot.com and it's open and free to use. Visit `/docs` or `/redoc` in the browser to view the endpoints. Both endpoints live right now are browser accessible meaning you don't need an HTTP client to use the API.
- https://rotten-tomatoes-api.ue.r.appspot.com/movie/bad_boys for JSON response of the top result
- https://rotten-tomatoes-api.ue.r.appspot.com/search/bad_boys for a JSON response of all valid results
## Usage
You can either call the standalone functions `tomatometer`, `audience_score`, `genres`, etc., or use the `Movie` class to only pass the name and have each attribute be fetched automatically. If you use the `Movie` class, you can print all attributes by printing the object itself, or by accessing each attribute individually.
The weighted score is calculated using the formula $\frac{2}{3}(tomatometer) + \frac{1}{3}(audience)$. The result is then rounded to the nearest integer.
Basic usage examples:
```python
import rottentomatoes as rt
print(rt.tomatometer("happy gilmore"))
# Output: 61
# Type: int
print(rt.audience_score('top gun maverick'))
# Output: 99
# Type: int
print(rt.rating('everything everywhere all at once'))
# Output: R
# Type: str
print(rt.genres('top gun'))
# Output: ['Action', 'Adventure']
# Type: list[str]
print(rt.weighted_score('happy gilmore'))
# Output: 69
# Type: int
print(rt.year_released('happy gilmore'))
# Output: 1996
# Type: str
print(rt.actors('top gun maverick', max_actors=5))
# Output: ['Tom Cruise', 'Miles Teller', 'Jennifer Connelly', 'Jon Hamm', 'Glen Powell']
# Type: list[str]
# --- Using the Movie class ---
movie = rt.Movie('top gun')
print(movie)
# Output
# Top Gun, PG, 1h 49m.
# Released in 1986.
# Tomatometer: 58
# Weighted score: 66
# Audience Score: 83
# Genres - ['Action', 'Adventure']
# Prominent actors: Tom Cruise, Kelly McGillis, Anthony Edwards, Val Kilmer, Tom Skerritt.
# Type: str
print(movie.weighted_score)
# Output: 66
# Type: int
```
## Exceptions
If you're using this package within a larger program, it's useful to know what exceptions are raised (and when) so they can be caught and handled.
### `LookupError`
When _any_ call is made to scrape the Rotten Tomatoes website (Tomatometer, Audience Score, Genres, etc.), if a proper movie page wasn't returned (can be due to a typo in name entry, duplicate movie names, etc.), a `LookupError` is raised, printing the attempted query url.
## Performance
`v0.3.0` makes the `Movie` class 19x more efficient. Data attained from scraping Rotten Tomatoes is temporarily cached and used to parse various other attributes. To test the performance difference, I used two separate virtual environments, `old` and `venv`. `rottentomatoes-python==0.2.5` was installed on `old`, and `rottentomatoes-python==0.3.0` was installed on `venv`. I then ran the same script, shown below, using each environment (Python 3.10.4).
```python
import rottentomatoes as rt
from time import perf_counter
def test() -> None:
start = perf_counter()
movie = rt.Movie('top gun maverick')
print('\n', movie, sep='')
print(f"That took {perf_counter() - start} seconds.")
if __name__ == "__main__":
test()
```
The results:
```console
❯ deactivate && source old/bin/activate && python test.py
Top Gun Maverick, PG-13, 2h 11m.
Released in 2022.
Tomatometer: 97
Weighted score: 97
Audience Score: 99
Genres - ['Action', 'Adventure']
That took 6.506246249999094 seconds.
❯ deactivate && source venv/bin/activate && python test.py
Top Gun Maverick, PG-13, 2h 11m.
Released in 2022.
Tomatometer: 97
Weighted score: 97
Audience Score: 99
Genres - ['Action', 'Adventure']
Prominent actors: Tom Cruise, Miles Teller, Jennifer Connelly, Jon Hamm, Glen Powell.
That took 0.3400420409961953 seconds.
```
## API
The API is deployed at https://rotten-tomatoes-api.ue.r.appspot.com/. It has two endpoints currently, `/movie/{movie_name}` and `/search/{movie_name}`. The first will pull one movie, the top result. The second will pull a list of _all_ valid movie results.
The first, with `movie_name="bad boys"`:
```json
{
"name": "Bad Boys for Life",
"tomatometer": 76,
"audience_score": 96,
"weighted_score": 82,
"genres": [
"Action",
"Comedy"
],
"rating": "R",
"duration": "2h 4m",
"year": "2020",
"actors": [
"Will Smith",
"Martin Lawrence",
"Vanessa Hudgens",
"Jacob Scipio",
"Alexander Ludwig"
],
"directors": [
"Adil El Arbi",
"Bilall Fallah"
]
}
```
The second, with `movie_name="bad boys"`:
```json
{
"movies": [
{
"name": "Bad Boys for Life",
"tomatometer": 76,
"audience_score": 96,
"weighted_score": 82,
"genres": [
"Action",
"Comedy"
],
"rating": "R",
"duration": "2h 4m",
"year": "2020",
"actors": [
"Will Smith",
"Martin Lawrence",
"Vanessa Hudgens",
"Jacob Scipio",
"Alexander Ludwig"
],
"directors": [
"Adil El Arbi",
"Bilall Fallah"
]
},
{
"name": "Bad Boys II",
"tomatometer": 23,
"audience_score": 78,
"weighted_score": 41,
"genres": [
"Action",
"Comedy"
],
"rating": "R",
"duration": "2h 26m",
"year": "2003",
"actors": [
"Martin Lawrence",
"Will Smith",
"Jordi Mollà",
"Gabrielle Union",
"Peter Stormare"
],
"directors": [
"Michael Bay"
]
},
{
"name": "Bad Boys",
"tomatometer": 43,
"audience_score": 78,
"weighted_score": 54,
"genres": [
"Action",
"Comedy"
],
"rating": "R",
"duration": "1h 58m",
"year": "1995",
"actors": [
"Martin Lawrence",
"Will Smith",
"Téa Leoni",
"Tchéky Karyo",
"Theresa Randle"
],
"directors": [
"Michael Bay"
]
},
{
"name": "Bad Boys",
"tomatometer": 90,
"audience_score": 82,
"weighted_score": 87,
"genres": [
"Drama"
],
"rating": "R",
"duration": "2h 3m",
"year": "1983",
"actors": [
"Sean Penn",
"Reni Santoni",
"Esai Morales",
"Jim Moody",
"Ally Sheedy"
],
"directors": [
"Rick Rosenthal 2"
]
}
]
}
```
| /rottentomatoes-python-0.6.2.tar.gz/rottentomatoes-python-0.6.2/README.md | 0.470493 | 0.938801 | README.md | pypi |
from pydantic import ConfigDict, BaseModel, Field
class MovieQuery(BaseModel):
    """Job request, querying for a movie.

    Input schema: carries only the free-text search string used to look a
    movie up on Rotten Tomatoes.
    """
    name: str = Field(..., title="Name of the movie you're searching for.")
    # Model configuration: the example is surfaced in the generated JSON schema.
    model_config = ConfigDict(json_schema_extra={"example": {"name": "top gun"}})
class MovieAttributes(BaseModel):
    """Output, movie attributes.

    One record per movie, mirroring the fields the scraper extracts from a
    Rotten Tomatoes movie page. Note that numeric-looking fields such as
    ``year`` are deliberately strings, matching the scraped values.
    """
    name: str = Field(..., title="Name of the movie according to its page.")
    tomatometer: int = Field(..., title="Rotten Tomatoes Tomatometer.")
    audience_score: int = Field(..., title="Rotten Tomatoes audience score.")
    weighted_score: int = Field(
        ...,
        title="Internally formulated weighted score between tomatometer and audience score.",
    )
    genres: list[str] = Field(..., title="List of genres.")
    rating: str = Field(..., title="Movie viewership rating, ex. PG.")
    duration: str = Field(..., title="String represented time, ex. 2h 11m.")
    year: str = Field(..., title="String represented year, ex. 1995.")
    actors: list[str] = Field(..., title="List of featured prominent actors.")
    directors: list[str] = Field(..., title="List of directors.")
    # Model configuration: full example record shown in the generated API docs.
    model_config = ConfigDict(json_schema_extra={
        "example": {
            "name": "Bad Boys for Life",
            "tomatometer": 76,
            "audience_score": 96,
            "weighted_score": 82,
            "genres": ["Action", "Comedy"],
            "rating": "R",
            "duration": "2h 4m",
            "year": "2020",
            "actors": [
                "Will Smith",
                "Martin Lawrence",
                "Vanessa Hudgens",
                "Jacob Scipio",
                "Alexander Ludwig",
            ],
            "directors": ["Adil El Arbi", "Bilall Fallah"],
        }
    })
class Movies(BaseModel):
    """Output: a collection of ``MovieAttributes`` records.

    Wraps a list of movie records, e.g. for responses that return every
    valid result of a search query rather than a single movie.
    """
    movies: list[MovieAttributes] = Field(
        ..., title="A list of movies with attributes."
    )
    # Model configuration: full example response shown in the generated API docs.
    model_config = ConfigDict(json_schema_extra={
        "example": {
            "movies": [
                {
                    "name": "Bad Boys for Life",
                    "tomatometer": 76,
                    "audience_score": 96,
                    "weighted_score": 82,
                    "genres": ["Action", "Comedy"],
                    "rating": "R",
                    "duration": "2h 4m",
                    "year": "2020",
                    "actors": [
                        "Will Smith",
                        "Martin Lawrence",
                        "Vanessa Hudgens",
                        "Jacob Scipio",
                        "Alexander Ludwig",
                    ],
                    "directors": ["Adil El Arbi", "Bilall Fallah"],
                },
                {
                    "name": "Bad Boys II",
                    "tomatometer": 23,
                    "audience_score": 78,
                    "weighted_score": 41,
                    "genres": ["Action", "Comedy"],
                    "rating": "R",
                    "duration": "2h 26m",
                    "year": "2003",
                    "actors": [
                        "Martin Lawrence",
                        "Will Smith",
                        "Jordi Mollà",
                        "Gabrielle Union",
                        "Peter Stormare",
                    ],
                    "directors": ["Michael Bay"],
                },
                {
                    "name": "Bad Boys",
                    "tomatometer": 43,
                    "audience_score": 78,
                    "weighted_score": 54,
                    "genres": ["Action", "Comedy"],
                    "rating": "R",
                    "duration": "1h 58m",
                    "year": "1995",
                    "actors": [
                        "Martin Lawrence",
                        "Will Smith",
                        "Téa Leoni",
                        "Tchéky Karyo",
                        "Theresa Randle",
                    ],
                    "directors": ["Michael Bay"],
                },
                {
                    "name": "Bad Boys",
                    "tomatometer": 90,
                    "audience_score": 82,
                    "weighted_score": 87,
                    "genres": ["Drama"],
                    "rating": "R",
                    "duration": "2h 3m",
                    "year": "1983",
                    "actors": [
                        "Sean Penn",
                        "Reni Santoni",
                        "Esai Morales",
                        "Jim Moody",
                        "Ally Sheedy",
                    ],
                    "directors": ["Rick Rosenthal 2"],
                },
            ]
        }
    })
import requests
import re
from typing import List
from . import utils
from .exceptions import LookupError
class SearchListing:
    """One result row scraped from the Rotten Tomatoes search page."""

    def __init__(self, has_tomatometer: bool, is_movie: bool, url: str) -> None:
        self.has_tomatometer = has_tomatometer
        self.is_movie = is_movie
        self.url = str(url)

    @classmethod
    def from_html(cls, html_snippet: str) -> "SearchListing":
        """
        Takes a snippet from the search page's HTML code.

        Use `re.findall(r"<search-page-media-row(.*?)</search-page-media-row>", content)`
        to separate the html into snippets, then feed each one to this method to create
        a `SearchListing` objects.
        """
        # A populated tomatometer looks like tomatometerscore="97"; an empty
        # one is tomatometerscore="". Grab the quoted value right after the
        # attribute name (5 chars is enough for scores up to 100).
        score_marker = "tomatometerscore="
        value_start = html_snippet.find(score_marker) + len(score_marker)
        score_text = html_snippet[value_start:value_start + 5].split('"')[1]

        # The first anchor in the row links to the title's page.
        listing_url = re.findall(r'a href="(.*?)"', html_snippet)[0]

        return cls(
            has_tomatometer=bool(score_text),
            # Movie pages live under /m/; anything else (TV etc.) does not.
            is_movie="/m/" in listing_url,
            url=listing_url,
        )

    def __str__(self) -> str:
        """Represent the SearchListing object."""
        return f"Tomatometer: {self.has_tomatometer}. URL: {self.url}. Is movie: {self.is_movie}."
def _movie_search_content(name: str) -> str:
    """Raw HTML content from searching for a movie."""
    query = "%20".join(name.split())
    search_url = f"https://www.rottentomatoes.com/search?search={query}"
    raw = str(requests.get(search_url, headers=utils.REQUEST_HEADERS).content)
    # str(bytes) renders as b'...'; strip the leading b' and the trailing quote.
    return raw[2:-1]
def search_results(name: str) -> List[SearchListing]:
    """Get a list of search results."""
    page = _movie_search_content(name)
    rows = re.findall(r"<search-page-media-row(.*?)</search-page-media-row>", page)
    return [SearchListing.from_html(row) for row in rows]
def filter_searches(results: List[SearchListing]) -> List[SearchListing]:
    """Filters search results for valid movies."""
    # Valid means: it is a movie page and it actually has a tomatometer.
    return [listing for listing in results
            if listing.is_movie and listing.has_tomatometer]
def top_movie_result(name: str) -> SearchListing:
    """Get the first movie result that has a tomatometer."""
    valid_results = filter_searches(search_results(name))
    if not valid_results:
        raise LookupError("No movies found.")
    return valid_results[0]
from bs4 import BeautifulSoup
# Non-local imports
import json
import requests # interact with RT website
from typing import List
# Project modules
from .exceptions import *
from . import search
from . import utils
def _movie_url(movie_name: str) -> str:
"""Generates a target url on the Rotten Tomatoes website given
the name of a movie.
Args:
movie_name (str): Title of the movie. Any number of words.
Returns:
str: `str` url that should point to the movie's real page.
"""
movie_name = movie_name.lower()
all_words = movie_name.split(sep=' ')
underscored = '_'.join(all_words)
return 'https://www.rottentomatoes.com/m/' + underscored
def _extract(content: str, start_string: str, end_string: str) -> str:
"""Retrieves parts of the RT website data given a start string
and an end string.
Args:
content (str): The raw RT data for a movie.
start_string (str): The start of the data to be extracted.
end_string (str): The end of the data to be extracted.
Returns:
string: A part of the raw RT data, from the start string to the
end string.
"""
start_idx = content.find(start_string)
if start_idx == -1:
return None
end_idx = content.find(end_string, start_idx)
return content[start_idx+len(start_string):end_idx]
def _get_schema_json_ld(content: str) -> object:
    """Retrieves the schema.org data model for a movie. This data
    typically contains Tomatometer score, genre etc.

    Args:
        content (str): The raw RT data for a movie.

    Returns:
        object: The schema.org data model for the movie.
    """
    # The schema.org payload is embedded as a JSON-LD <script> tag.
    raw_json = _extract(
        content,
        '<script type="application/ld+json">',
        '</script>'
    )
    return json.loads(raw_json)
def _get_score_details(content: str) -> object:
    """Retrieves the scoreboard data for a movie. Scoreboard data
    typically contains audience score, ratings, duration etc.

    Args:
        content (str): The raw RT data for a movie.

    Returns:
        object: The scoreboard data for the movie.
    """
    # The scoreboard payload lives in a dedicated JSON <script> tag.
    raw_json = _extract(
        content,
        '<script id="scoreDetails" type="application/json">',
        '</script>'
    )
    return json.loads(raw_json)
def _request(movie_name: str, raw_url: bool = False, force_url: str = "") -> str:
    """Scrapes Rotten Tomatoes for the raw website data, to be
    passed to each standalone function for parsing.

    Args:
        movie_name (str): Title of the movie. Case insensitive.
        raw_url (bool): Don't search for the movie, build the url manually.
        force_url (str): Use this url to scrape the site. Don't use this.

    Raises:
        LookupError: If the movie isn't found on Rotten Tomatoes.
            This could be due to a typo in entering the movie's name,
            duplicates, or other issues.

    Returns:
        str: The raw RT website data of the given movie.
    """
    if raw_url or force_url:
        # Build the url directly from the name, unless the caller forced one.
        target = _movie_url(movie_name) if movie_name else force_url
    else:
        # Default path: use the search page to resolve the real movie url.
        target = search.top_movie_result(movie_name).url

    response = requests.get(target, headers=utils.REQUEST_HEADERS)
    if response.status_code == 404:
        raise LookupError(
            "Unable to find that movie on Rotten Tomatoes.",
            f"Try this link to source the movie manually: {target}"
        )
    return response.text
def movie_title(movie_name: str, content: str = None) -> str:
    """Search for the movie and return the queried title."""
    if content is None:
        content = _request(movie_name)
    # The canonical title sits in the og:title meta tag; grab a generous
    # window after the attribute and strip the surrounding quotes.
    marker = '<meta property="og:title" content='
    start = content.find(marker) + len(marker)
    window = content[start:start + 100]  # enough breathing room
    return window.split('>')[0][1:-1]
def tomatometer(movie_name: str, content: str = None) -> int:
    """Returns an integer of the Rotten Tomatoes tomatometer
    of `movie_name`.

    Args:
        movie_name (str): Title of the movie. Case insensitive.
        content (str): Pre-fetched page content; fetched if omitted.

    Raises:
        LookupError: If the movie isn't found on Rotten Tomatoes.

    Returns:
        int: Tomatometer of `movie_name`.
    """
    if content is None:
        content = _request(movie_name)
    scoreboard = _get_score_details(content)['scoreboard']
    return scoreboard['tomatometerScore']["value"]
def audience_score(movie_name: str, content: str = None) -> int:
    """Returns an integer of the Rotten Tomatoes audience score
    of `movie_name`.

    Args:
        movie_name (str): Title of the movie. Case insensitive.
        content (str): Pre-fetched page content; fetched if omitted.

    Raises:
        LookupError: If the movie isn't found on Rotten Tomatoes.

    Returns:
        int: Audience score of `movie_name`.
    """
    if content is None:
        content = _request(movie_name)
    scoreboard = _get_score_details(content)['scoreboard']
    return scoreboard['audienceScore']["value"]
def genres(movie_name: str, content: str = None) -> List[str]:
    """Returns the list of genres of `movie_name`, as reported by the
    page's schema.org metadata.

    Args:
        movie_name (str): Title of the movie. Case insensitive.
        content (str): Pre-fetched page content; fetched if omitted.

    Raises:
        LookupError: If the movie isn't found on Rotten Tomatoes.

    Returns:
        list[str]: List of genres.
    """
    if content is None:
        content = _request(movie_name)
    return _get_schema_json_ld(content)['genre']
def weighted_score(movie_name: str, content: str = None) -> int:
    """2/3 tomatometer, 1/3 audience score, truncated to an int."""
    if content is None:
        content = _request(movie_name)
    critic = tomatometer(movie_name, content=content)
    audience = audience_score(movie_name, content=content)
    # int() truncates rather than rounds; kept to match existing outputs.
    return int((2/3) * critic + (1/3) * audience)
def rating(movie_name: str, content: str = None) -> str:
    """Returns a `str` of PG, PG-13, R, etc."""
    if content is None:
        content = _request(movie_name)
    return _get_score_details(content)['scoreboard']['rating']
def duration(movie_name: str, content: str = None) -> str:
    """Returns the duration, ex. 1h 32m."""
    if content is None:
        content = _request(movie_name)
    info = _get_score_details(content)['scoreboard']['info']
    # The duration is the last comma-separated field of the scoreboard
    # "info" string; drop the single leading space left by the split.
    return info.split(',')[-1].replace(' ', '', 1)
def year_released(movie_name: str, content: str = None) -> str:
    """Returns a string of the year the movie was released."""
    if content is None:
        content = _request(movie_name)
    # The year is the first comma-separated field of the scoreboard "info".
    info = _get_score_details(content)['scoreboard']['info']
    return info.split(',')[0]
def actors(movie_name: str, max_actors: int = 5, content: str = None) -> List[str]:
    """
    Returns up to `max_actors` actors as listed by Rotten Tomatoes,
    in page order (default 5).
    """
    if content is None:
        content = _request(movie_name)
    soup = BeautifulSoup(content, 'html.parser')
    names = []
    # Cast entries are tagged with data-qa="cast-crew-item"; the first <p>
    # inside each entry holds the actor's name.
    for cast_item in soup.find_all('div', {'data-qa': 'cast-crew-item'}):
        if len(names) == max_actors:
            break
        names.append(cast_item.find('p').text.strip())
    return names
def directors(movie_name: str, max_directors: int = 10, content: str = None) -> List[str]:
    """Returns a list of all the directors listed
    by Rotten Tomatoes. Specify `max_directors` to only receive
    a certain number."""
    if content is None:
        content = _request(movie_name)
    entries = _get_schema_json_ld(content)["director"][:max_directors]
    names = []
    for entry in entries:
        # Derive a display name from the director's profile url slug,
        # e.g. ".../celebrity/john_mctiernan" -> "John Mctiernan".
        slug = entry["sameAs"].split("/")[-1]
        names.append(slug.replace("_", " ").title().replace("-", " "))
    return names
def image(movie_name: str, content: str = None) -> str:
    """Returns the url of the movie's poster image."""
    if content is None:
        content = _request(movie_name)
    return _get_schema_json_ld(content)['image']
def url(movie_name: str, content: str = None) -> str:
    """Returns the canonical Rotten Tomatoes url of the movie."""
    if content is None:
        content = _request(movie_name)
    return _get_schema_json_ld(content)['url']
def critics_consensus(movie_name: str, content: str = None) -> str:
    """Returns the critics-consensus blurb (None if the page has none)."""
    if content is None:
        content = _request(movie_name)
    return _extract(content, '<span data-qa="critics-consensus">', '</span>')
from . import standalone
class Movie:
    """
    Accepts the name of a movie and automatically fetches all attributes.
    Raises `exceptions.LookupError` if the movie is not found on Rotten Tomatoes.
    """
    def __init__(self, movie_title: str = "", force_url: str = "") -> None:
        """Fetch the movie page once and parse every attribute from it.

        Args:
            movie_title (str): Title to search for. Case insensitive.
            force_url (str): Scrape this exact url instead of searching.

        Raises:
            ValueError: If neither `movie_title` nor `force_url` is given.
        """
        if not movie_title and not force_url:
            raise ValueError("You must provide either a movie_title or force_url.")
        # Fetch the raw page a single time; every parser below reuses the
        # cached content instead of re-requesting it.
        if force_url:
            content = standalone._request(movie_name="", force_url=force_url)
        else:
            content = standalone._request(movie_name=movie_title)
        self.movie_title = standalone.movie_title(movie_title, content=content)
        self.tomatometer = standalone.tomatometer(self.movie_title, content=content)
        self.audience_score = standalone.audience_score(self.movie_title, content=content)
        self.weighted_score = standalone.weighted_score(self.movie_title, content=content)
        self.genres = standalone.genres(self.movie_title, content=content)
        self.rating = standalone.rating(self.movie_title, content=content)
        self.duration = standalone.duration(self.movie_title, content=content)
        self.year_released = standalone.year_released(self.movie_title, content=content)
        self.actors = standalone.actors(self.movie_title, content=content)
        self.directors = standalone.directors(self.movie_title, max_directors=5, content=content)
        self.image = standalone.image(self.movie_title, content=content)
        self.url = standalone.url(self.movie_title, content=content)
        self.critics_consensus = standalone.critics_consensus(self.movie_title, content=content)

    def __str__(self) -> str:
        return f"{self.movie_title.title()}, {self.rating}, {self.duration}.\n" \
            f"Released in {self.year_released}.\n" \
            f"Directed by {', '.join(self.directors)}.\n" \
            f"Tomatometer: {self.tomatometer}\n" \
            f"Weighted score: {self.weighted_score}\n" \
            f"Audience Score: {self.audience_score}\nGenres - {self.genres}\n" \
            f"Prominent actors: {', '.join(self.actors)}."

    def __eq__(self, other: object) -> bool:
        # Previously this accessed other.movie_title unconditionally and
        # raised AttributeError for non-Movie operands; return
        # NotImplemented so Python falls back to its default comparison.
        if not isinstance(other, Movie):
            return NotImplemented
        return self.movie_title == other.movie_title

    def __hash__(self) -> int:
        # Defining __eq__ alone sets __hash__ to None and makes instances
        # unusable in sets/dict keys; hash consistently with __eq__.
        return hash(self.movie_title)
from __future__ import absolute_import
import six
import rouge_chinese.rouge_score as rouge_score
import io
import os
import re
class FilesRouge:
    def __init__(self, *args, **kwargs):
        """See the `Rouge` class for args
        """
        self.rouge = Rouge(*args, **kwargs)

    def _check_files(self, hyp_path, ref_path):
        """Validate that both files exist and have the same line count."""
        assert os.path.isfile(hyp_path)
        assert os.path.isfile(ref_path)

        def line_count(path):
            # Binary mode: we only count lines, no decoding needed.
            with open(path, "rb") as f:
                return sum(1 for _ in f)

        assert line_count(hyp_path) == line_count(ref_path)

    def get_scores(self, hyp_path, ref_path, avg=False, ignore_empty=False):
        """Calculate ROUGE scores between each pair of
        lines (hyp_file[i], ref_file[i]).

        Args:
          * hyp_path: hypothesis file path
          * ref_path: references file path
          * avg (False): whether to get an average scores or a list
        """
        self._check_files(hyp_path, ref_path)

        # line[:-1] strips the trailing newline from each line.
        with io.open(hyp_path, encoding="utf-8", mode="r") as hyp_file:
            hyps = [line[:-1] for line in hyp_file]
        with io.open(ref_path, encoding="utf-8", mode="r") as ref_file:
            refs = [line[:-1] for line in ref_file]

        return self.rouge.get_scores(hyps, refs, avg=avg,
                                     ignore_empty=ignore_empty)
class Rouge:
    """Computes ROUGE metrics between hypothesis and reference texts.

    Texts are split into sentences with :meth:`cut_sent` (aware of Chinese
    sentence-final punctuation) before being scored by the functions in
    ``rouge_chinese.rouge_score``.
    """
    # Metrics used when the caller does not request a specific subset.
    DEFAULT_METRICS = ["rouge-1", "rouge-2", "rouge-l"]
    AVAILABLE_METRICS = {
        "rouge-1": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 1, **k),
        "rouge-2": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 2, **k),
        "rouge-3": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 3, **k),
        "rouge-4": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 4, **k),
        "rouge-5": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 5, **k),
        "rouge-l": lambda hyp, ref, **k:
            rouge_score.rouge_l_summary_level(hyp, ref, **k),
    }
    # "r" = recall, "p" = precision, "f" = F-measure.
    DEFAULT_STATS = ["r", "p", "f"]
    AVAILABLE_STATS = ["r", "p", "f"]

    def __init__(self, metrics=None, stats=None, return_lengths=False,
                 raw_results=False, exclusive=True):
        """Configure which metrics and statistics are computed.

        :param metrics: iterable of metric names (case-insensitive keys of
            ``AVAILABLE_METRICS``); defaults to ``DEFAULT_METRICS``.
        :param stats: iterable from {"r", "p", "f"}. Ignored when
            ``raw_results`` is true, in which case raw "hyp"/"ref"/"overlap"
            counts are reported instead.
        :param return_lengths: also report hyp/ref token counts per score.
        :param raw_results: report raw counts rather than r/p/f statistics.
        :param exclusive: forwarded to the underlying scoring functions.
        :raises ValueError: on an unknown metric or stat name.
        """
        self.return_lengths = return_lengths
        self.raw_results = raw_results
        self.exclusive = exclusive

        if metrics is not None:
            self.metrics = [m.lower() for m in metrics]

            for m in self.metrics:
                if m not in Rouge.AVAILABLE_METRICS:
                    raise ValueError("Unknown metric '%s'" % m)
        else:
            self.metrics = Rouge.DEFAULT_METRICS

        if self.raw_results:
            # Raw mode reports counts, not statistics.
            self.stats = ["hyp", "ref", "overlap"]
        else:
            if stats is not None:
                self.stats = [s.lower() for s in stats]

                for s in self.stats:
                    if s not in Rouge.AVAILABLE_STATS:
                        raise ValueError("Unknown stat '%s'" % s)
            else:
                self.stats = Rouge.DEFAULT_STATS

    def cut_sent(self, para):
        """Split a paragraph into sentences.

        Newlines are inserted after sentence terminators (。!?), runs of
        six dots, doubled ellipsis characters, and terminators followed by
        a closing quote; the text is then split on those newlines.
        """
        para = re.sub('([。!?\?])([^”’])', r"\1\n\2", para)  # ordinary terminators
        para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # "......" ellipsis
        para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # "……" ellipsis
        # A terminator directly followed by a closing quote also ends a sentence.
        para = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', para)
        para = para.rstrip()  # avoid an empty trailing sentence
        return para.split("\n")

    def get_scores(self, hyps, refs, avg=False, ignore_empty=False):
        """Score each (hyp, ref) pair; return per-pair scores or the average.

        :param hyps: hypothesis string or list of strings.
        :param refs: reference string or list of strings (same length).
        :param avg: if true, return one dict of averaged scores instead of
            a list of per-pair dicts.
        :param ignore_empty: drop pairs where either side is empty.
        """
        if isinstance(hyps, six.string_types):
            hyps, refs = [hyps], [refs]

        if ignore_empty:
            # Filter out hyps of 0 length
            hyps_and_refs = zip(hyps, refs)
            hyps_and_refs = [_ for _ in hyps_and_refs
                             if len(_[0]) > 0
                             and len(_[1]) > 0]
            hyps, refs = zip(*hyps_and_refs)

        assert(isinstance(hyps, type(refs)))
        assert(len(hyps) == len(refs))

        if not avg:
            return self._get_scores(hyps, refs)
        return self._get_avg_scores(hyps, refs)

    def _get_scores(self, hyps, refs):
        """Return a list of per-pair score dicts, one per (hyp, ref)."""
        scores = []
        for hyp, ref in zip(hyps, refs):
            sen_score = {}

            # Sentence-split each side and normalise internal whitespace.
            hyp = [" ".join(_.split()) for _ in self.cut_sent(hyp) if len(_) > 0]
            ref = [" ".join(_.split()) for _ in self.cut_sent(ref) if len(_) > 0]

            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(
                    hyp,
                    ref,
                    raw_results=self.raw_results,
                    exclusive=self.exclusive)
                sen_score[m] = {s: sc[s] for s in self.stats}

            if self.return_lengths:
                lengths = {
                    "hyp": len(" ".join(hyp).split()),
                    "ref": len(" ".join(ref).split())
                }
                sen_score["lengths"] = lengths

            scores.append(sen_score)
        return scores

    def _get_avg_scores(self, hyps, refs):
        """Return one dict with each stat averaged over all pairs."""
        scores = {m: {s: 0 for s in self.stats} for m in self.metrics}
        if self.return_lengths:
            scores["lengths"] = {"hyp": 0, "ref": 0}

        count = 0
        for (hyp, ref) in zip(hyps, refs):
            # Same per-pair preprocessing as _get_scores.
            hyp = [" ".join(_.split()) for _ in self.cut_sent(hyp) if len(_) > 0]
            ref = [" ".join(_.split()) for _ in self.cut_sent(ref) if len(_) > 0]

            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref, exclusive=self.exclusive)
                # Accumulate; divided by `count` below.
                scores[m] = {s: scores[m][s] + sc[s] for s in self.stats}

            if self.return_lengths:
                scores["lengths"]["hyp"] += len(" ".join(hyp).split())
                scores["lengths"]["ref"] += len(" ".join(ref).split())

            count += 1

        avg_scores = {
            m: {s: scores[m][s] / count for s in self.stats}
            for m in self.metrics
        }

        if self.return_lengths:
            avg_scores["lengths"] = {
                k: scores["lengths"][k] / count
                for k in ["hyp", "ref"]
            }

        return avg_scores
import os
import re
import shutil
import subprocess
import sys
from glob import glob
from tempfile import mkdtemp
from typing import Dict, List, Optional
from rouge_metric import perl_cmd
if sys.version_info < (3,):
    # Python 2's os.makedirs has no `exist_ok` keyword; emulate it.
    def makedirs(name, mode=0o777, exist_ok=False):
        if not os.path.isdir(name):
            os.makedirs(name, mode)
        elif not exist_ok:
            # A bare `raise` here (as in the previous version) is invalid
            # outside an `except` block and itself raised
            # "RuntimeError: No active exception to re-raise".
            raise OSError("directory already exists: %s" % name)
else:
    from os import makedirs
class PerlRouge(object):
    """A Python wrapper for the ROUGE-1.5.5.pl script.

    :param rouge_n_max: Compute ROUGE-N from N=1 to ``rouge_n_max``
    :param rouge_l: If true, compute longest common subsequence (LCS)
        co-occurrence (ROUGE-L).
    :param rouge_w: If true, compute Weighted-LCS (WLCS) co-occurrence
        (ROUGE-W).
    :param rouge_w_weight: The weight w of the weighting function
        :math:`f(x) = x^w` to emphasize consecutive matches in ROUGE-W.
    :param rouge_s: If true, compute skip-bigram co-occurrence (ROUGE-S).
    :param rouge_su: If true, compute skip-bigram with unigram co-occurrence
        (ROUGE-SU).
    :param skip_gap: The maximum gap between two words in skip-bigram.
    :param multi_ref_mode: The method to combine the scores between a
        hypothesis and its multiple references. Choose from {average, best}.
    :param alpha: The balance factor between recall and precision. Favors
        recall if close to 1, precision if close to 0.
    :param stemming: If true, stem summaries using Porter stemmer.
    :param remove_stopwords: Remove stopwords in summaries before evaluation.
    :param word_limit: Only use the first n words for evaluation.
    :param byte_limit: Only use the first n bytes for evaluation.
    :param confidence: The confidence level (%) for the confidence interval.
    :param resampling: Number of sampling points in bootstrap resampling.
    :param temp_dir: The directory to hold temporary files.
    :param clean_up: If true, clean up temporary files after evaluation.

    Example:
        ::

            >>> from rouge_metric import PerlRouge
            >>> hypotheses = ['Police killed the gunman']
            >>> references = [['The gunman killed the policeman']]
            >>> PerlRouge().evaluate(hypotheses, references)
            {
                'rouge-1': {
                    'r': 0.6, 'r_conf_int': (0.6, 0.6),
                    'p': 0.75, 'p_conf_int': (0.75, 0.75),
                    'f': 0.66667, 'f_conf_int': (0.66667, 0.66667)
                },
                'rouge-2': {
                    'r': 0.5, 'r_conf_int': (0.5, 0.5),
                    'p': 0.66667, 'p_conf_int': (0.66667, 0.66667),
                    'f': 0.57143, 'f_conf_int': (0.57143, 0.57143)
                },
                'rouge-l': {
                    'r': 0.4, 'r_conf_int': (0.4, 0.4),
                    'p': 0.5, 'p_conf_int': (0.5, 0.5),
                    'f': 0.44444, 'f_conf_int': (0.44444, 0.44444)
                }
            }

    .. warning::
        Only for summaries in English. For non-English summaries, use
        :class:`rouge_metric.evaluate` instead, or convert the tokens to
        integers separated by space before evaluation.
    """

    def __init__(self,
                 rouge_n_max=2,  # type: int
                 rouge_l=True,  # type: bool
                 rouge_w=False,  # type: bool
                 rouge_w_weight=1.2,  # type: float
                 rouge_s=False,  # type: bool
                 rouge_su=False,  # type: bool
                 skip_gap=None,  # type: Optional[int]
                 multi_ref_mode='average',  # type: str
                 alpha=0.5,  # type: float
                 stemming=False,  # type: bool
                 remove_stopwords=False,  # type: bool
                 word_limit=None,  # type: Optional[int]
                 byte_limit=None,  # type: Optional[int]
                 confidence=95,  # type: int
                 resampling=1000,  # type: int
                 temp_dir='./.rouge_metric/',  # type: Optional[str]
                 clean_up=True  # type: bool
                 ):
        # Ensure the WordNet exception DB needed by the Perl script exists
        # before any evaluation is attempted.
        perl_cmd.create_wordnet_db()
        self.rouge_n_max = rouge_n_max
        self.rouge_l = rouge_l
        self.rouge_w = rouge_w
        self.rouge_w_weight = rouge_w_weight
        self.rouge_s = rouge_s
        self.rouge_su = rouge_su
        self.skip_gap = skip_gap
        self.multi_ref_mode = multi_ref_mode
        self.alpha = alpha
        self.stemming = stemming
        self.remove_stopwords = remove_stopwords
        self.word_limit = word_limit
        self.byte_limit = byte_limit
        self.confidence = confidence
        self.resampling = resampling
        self.temp_root = temp_dir
        self.clean_up = clean_up

    @staticmethod
    def _write_summaries(summaries, references, hyp_dir, ref_dir):
        # type: (List[str], List[List[str]], str, str) -> None
        """Write hypotheses and references to disk in the layout that
        :meth:`_write_config` expects: hyp ``{i}.txt`` matched with
        refs ``{i}.{j}.txt``."""
        makedirs(hyp_dir, exist_ok=True)
        makedirs(ref_dir, exist_ok=True)
        for i, hyp in enumerate(summaries):
            hyp_path = os.path.join(hyp_dir, '{}.txt'.format(i))
            with open(hyp_path, 'w') as f:
                f.write(hyp)
        for i, multi_ref in enumerate(references):
            for j, ref in enumerate(multi_ref):
                ref_path = os.path.join(ref_dir, '{}.{}.txt'.format(i, j))
                with open(ref_path, 'w') as f:
                    f.write(ref)

    @staticmethod
    def _write_config(config_path, hyp_dir, ref_dir):
        # type: (str, str, str) -> None
        """Write the ROUGE-EVAL XML configuration file that maps each
        hypothesis file to its reference files (matched by filename stem)."""
        xml = '<ROUGE-EVAL version="1.5.5">'
        for n, hyp in enumerate(glob(os.path.join(hyp_dir, '*'))):
            if not os.path.isfile(hyp):
                continue
            hyp_name = os.path.basename(hyp)
            hyp_stem, _ = os.path.splitext(hyp_name)
            # "A" is the single peer (system) ID used for all hypotheses.
            hyp_str = '<P ID="{}">{}</P>'.format('A', hyp_name)

            # A reference belongs to this hypothesis iff its filename starts
            # with the hypothesis stem, e.g. "3.txt" matches "3.0.txt".
            ref_paths = glob(os.path.join(ref_dir, '{}.*'.format(hyp_stem)))
            ref_paths = [ref for ref in ref_paths if os.path.isfile(ref)]
            if not ref_paths:
                raise RuntimeError('Reference not found for {}'.format(hyp))
            ref_str = '\n'.join(
                '<M ID="{}">{}</M>'.format(idx, os.path.basename(ref))
                for idx, ref in enumerate(ref_paths))

            xml += """
<EVAL ID="{eval_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SPL">
</INPUT-FORMAT>
<PEERS>
{peers}
</PEERS>
<MODELS>
{models}
</MODELS>
</EVAL>""".format(eval_id=n + 1, model_root=ref_dir, peer_root=hyp_dir,
                  peers=hyp_str, models=ref_str)
        xml += '</ROUGE-EVAL>'

        with open(config_path, 'w') as f:
            f.write(xml)

    @staticmethod
    def _parse_output(output):
        # type: (str) -> Dict[str, Dict[str, float]]
        """Parse the plain-text output of ROUGE-1.5.5.pl into
        ``{metric: {stat: value, stat_conf_int: (lo, hi)}}``."""
        result = {}
        # A ROUGE-SU4 Average_P: 1.00000 (95%-conf.int. 1.00000 - 1.00000)
        pattern = (r'A (ROUGE-.+?) Average_([FPR]): ([\d.]+) '
                   r'\(.+?-conf.int. ([\d.]+) - ([\d.]+)\)')
        lines = re.findall(pattern, output)
        for rouge, metric, value, conf_begin, conf_end in lines:
            rouge = rouge.lower()
            metric = metric.lower()
            value = float(value)
            conf_int = (float(conf_begin), float(conf_end))
            result.setdefault(rouge, {})
            result[rouge][metric] = value
            result[rouge]['{}_conf_int'.format(metric)] = conf_int
        return result

    def evaluate(self, hypotheses, multi_references):
        # type: (List[str], List[List[str]]) -> Dict[str, Dict[str, float]]
        """Compute ROUGE scores between hypothesis and reference summaries.

        The hypotheses and multi_references should follow the below format.

        ::

            hypotheses = [summary1, summary2, ...]
            multi_references = [
                [summary1_ref1, summary1_ref2, ...],
                [summary2_ref1, summary2_ref2, ...],
                ...
            ]

        Since the ROUGE-1.5.5.pl script will tokenize sentences before
        evaluation, the summary here is a ``str`` with multiple lines, separated
        by ``\\n``. Each line represents a sentence.

        :param hypotheses: A list of hypothesis summaries.
        :param multi_references: A double list of reference summaries.
        :return: All computed ROUGE scores.
        """
        if len(hypotheses) != len(multi_references):
            raise ValueError('Hypotheses and references must be the same size')

        # Materialize the summaries on disk, then delegate to the file-based
        # entry point.
        makedirs(self.temp_root, exist_ok=True)
        temp_dir = mkdtemp(dir=self.temp_root)
        hyp_dir = os.path.join(temp_dir, 'hyp')
        ref_dir = os.path.join(temp_dir, 'ref')
        self._write_summaries(hypotheses, multi_references, hyp_dir, ref_dir)

        result = self.evaluate_from_files(hyp_dir, ref_dir)
        if self.clean_up:
            shutil.rmtree(temp_dir)
        return result

    def evaluate_from_files(self, hypothesis_dir, reference_dir):
        # type: (str, str) -> Dict[str, Dict[str, float]]
        """Compute ROUGE scores from existing files.

        :param hypothesis_dir: The directory containing hypothesis summaries.
            Example hypothesis file names:

            * summary1.txt
            * summary2.txt
            * ...

        :param reference_dir: The directory containing reference summaries.
            To match the hypothesis and reference, the basename of the
            hypothesis file should be the prefix of the corresponding reference
            file name. Example reference file names:

            * summary1.1.txt
            * summary1.2.txt
            * ...
            * summary2.1.txt
            * summary2.2.txt
            * ...

        :return: All computed ROUGE scores.
        """
        makedirs(self.temp_root, exist_ok=True)
        temp_dir = mkdtemp(dir=self.temp_root)
        config_path = os.path.join(temp_dir, 'config.xml')
        self._write_config(config_path, hypothesis_dir, reference_dir)
        # Build the full ROUGE-1.5.5.pl command line from the configured
        # options and run it; scores are parsed from its stdout.
        cmd = perl_cmd.get_command(
            config_path, self.rouge_n_max, self.rouge_l, self.rouge_w,
            self.rouge_w_weight, self.rouge_s, self.rouge_su, self.skip_gap,
            self.alpha, self.stemming, self.remove_stopwords, self.confidence,
            self.multi_ref_mode, self.word_limit, self.byte_limit,
            self.resampling
        )
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        result = self._parse_output(output.decode('utf-8'))
        if self.clean_up:
            shutil.rmtree(temp_dir)
        return result
import os
import subprocess
from typing import List, Optional
# Locations of the bundled ROUGE-1.5.5 perl distribution and its data files.
HERE = os.path.dirname(__file__)
ROUGE_HOME = os.path.join(HERE, 'RELEASE-1.5.5')
ROUGE_EXEC = os.path.join(ROUGE_HOME, 'ROUGE-1.5.5.pl')
ROUGE_DATA_HOME = os.path.join(ROUGE_HOME, 'data')
# WordNet exception DB, built on demand by create_wordnet_db().
ROUGE_DB = os.path.join(ROUGE_DATA_HOME, 'WordNet-2.0.exc.db')
ROUGE_WORDNET_DIR = os.path.join(ROUGE_DATA_HOME, 'WordNet-2.0-Exceptions')
# NOTE: 'buildExeptionDB.pl' (sic) is the script's actual upstream file name.
ROUGE_BUILD_DB_SCRIPT = os.path.join(ROUGE_WORDNET_DIR, 'buildExeptionDB.pl')
ROUGE_SMART_COMMON_WORDS = os.path.join(ROUGE_DATA_HOME,
                                        'smart_common_words.txt')
# Accepted config-file formats for the ROUGE-1.5.5.pl '-z' option.
CONFIG_FORMATS = ('SEE', 'SPL', 'ISI', 'SIMPLE')
# Maps the public scoring-formula names to the script's '-f' option letters.
SCORING_FORMULA_MAP = {'average': 'A', 'best': 'B'}
# Counting units for the '-t' option.
COUNT_SENTENCE = 0
COUNT_TOKEN = 1
COUNT_TOKEN_WITH_RAW_COUNTS = 2
COUNTING_UNITS = (
    COUNT_SENTENCE,
    COUNT_TOKEN,
    COUNT_TOKEN_WITH_RAW_COUNTS,
)
# Basic Elements scoring modes for the '-3' option.
BE_H = 0  # head only scoring (does not applied to Minipar-based BEs).
BE_HM = 1  # head and modifier pair scoring.
BE_HMR = 2  # head, modifier and relation triple scoring.
BE_HM1 = 3  # H and HM scoring (same as HM for Minipar-based BEs).
BE_HMR1 = 4  # HM and HMR scoring (same as HMR for Minipar-based BEs).
BE_HMR2 = 5  # H, HM and HMR scoring (same as HMR for Minipar-based BEs).
BASIC_ELEMENTS = (
    BE_H, BE_HM, BE_HMR,
    BE_HM1, BE_HMR1, BE_HMR2,
)
def create_wordnet_db():
    """Build the WordNet exception DB used by ROUGE-1.5.5.pl, if missing.

    The build is skipped when the DB file already exists, so repeated calls
    are cheap no-ops.

    :raises RuntimeError: If the ``perl`` binary cannot be executed.
    """
    try:
        # subprocess.DEVNULL avoids leaking the file handle that the old
        # ``open(os.devnull, 'w')`` never closed. CalledProcessError is also
        # caught so a broken perl install gets the same friendly message.
        subprocess.check_call(['perl', '--version'],
                              stdout=subprocess.DEVNULL,
                              stderr=subprocess.STDOUT)
    except (IOError, subprocess.CalledProcessError):
        raise RuntimeError('Perl is not correctly installed on your machine. '
                           'Please make sure its binary is in PATH')
    if not os.path.exists(ROUGE_DB):
        subprocess.call(['perl', ROUGE_BUILD_DB_SCRIPT, ROUGE_WORDNET_DIR,
                         ROUGE_SMART_COMMON_WORDS, ROUGE_DB])
def get_command(
        config_path, # type: str
        rouge_n_max=None, # type: Optional[int]
        rouge_l=True, # type: bool
        rouge_w=False, # type: bool
        rouge_w_weight=1.2, # type: float
        rouge_s=False, # type: bool
        rouge_su=False, # type: bool
        skip_distance=None, # type: Optional[int]
        alpha=None, # type: Optional[float]
        stemming=False, # type: bool
        remove_stopwords=False, # type: bool
        confidence=None, # type: Optional[int]
        scoring_formula=None, # type: Optional[str]
        word_limit=None, # type: Optional[int]
        byte_limit=None, # type: Optional[int]
        resampling_points=None, # type: Optional[int]
        basic_element=None, # type: Optional[int]
        print_each_eval=False, # type: bool
        env=None, # type: Optional[str]
        counting_unit=None, # type: Optional[int]
        config_format=None, # type: Optional[int]
        system_id=None, # type: Optional[str]
        verbose=False # type: bool
): # type: (...) -> List[str]
    """Assemble the command line to invoke the ROUGE-1.5.5.pl perl script.

    :param config_path: The XML configuration files that specifies the path of
        peer and model summaries.
    :param rouge_n_max: Compute ROUGE-N up to `rouge_n_max`. If negative, do not
        compute ROUGE-N.
    :param rouge_l: Whether compute LCS co-occurrence (ROUGE-L).
    :param rouge_w: Whether compute WLCS co-occurrence (ROUGE-W).
    :param rouge_w_weight: Compute ROUGE-W that gives consecutive matches of
        length L in an LCS a weight of 'L^weight' instead of just 'L' as in LCS.
        Typically this is set to 1.2 or other number greater than 1.
    :param rouge_s: Whether compute skip bigram (ROGUE-S) co-occurrence.
    :param rouge_su: Whether compute skip bigram co-occurrence including unigram
        (ROGUE-SU).
    :param skip_distance: The maximum gap between two words (skip bi-gram) in
        ROUGE-S or ROUGE-SU.
    :param alpha: Relative importance of recall and precision. Alpha -> 1 favors
        precision, Alpha -> 0 favors recall.
    :param stemming: Stem both model and system summaries using Porter stemmer.
    :param remove_stopwords: Remove stopwords in model and system summaries
        before computing various statistics.
    :param confidence: Specify CF% (0 <= CF <= 100) confidence interval to
        compute. The default is 95% (i.e. CF=95).
    :param scoring_formula: Method to combine multi-reference results. Choose
        from (average, best).
    :param word_limit: Only use the first n words for evaluation.
    :param byte_limit: Only use the first n bytes for evaluation.
    :param resampling_points: The number of sampling point in bootstrap
        resampling (default is 1000).
    :param basic_element: Compute BE score.

        * H -> head only scoring (does not applied to Minipar-based BEs).
        * HM -> head and modifier pair scoring.
        * HMR -> head, modifier and relation triple scoring.
        * HM1 -> H and HM scoring (same as HM for Minipar-based BEs).
        * HMR1 -> HM and HMR scoring (same as HMR for Minipar-based BEs).
        * HMR2 -> H, HM and HMR scoring (same as HMR for Minipar-based BEs).

    :param print_each_eval: Print per evaluation average score for each system.
    :param env: Specify the directory where the ROUGE data files can be found.
    :param counting_unit: Compute average ROUGE by averaging over the whole test
        corpus instead of sentences (units).

        * 0: use sentence as counting unit
        * 1: use token as counting unit
        * 2: same as 1 but output raw counts instead of precision, recall, \
        and f-measure scores. Useful when computation of the final, precision, \
        recall, and f-measure scores will be conducted later.

    :param config_format: A list of peer-model pair per line in the specified
        format (SEE|SPL|ISI|SIMPLE)
    :param system_id: Specify the system in the config file for evaluation.
        If None, evaluate all systems.
    :param verbose: Print debugging information for diagnostic purpose.
    :return: An executable command line with the given options
    """
    # --- Validate arguments before touching the option list. ---
    if basic_element is not None and basic_element not in BASIC_ELEMENTS:
        raise ValueError('Invalid basic_element {}: expected {}'.format(
            basic_element, BASIC_ELEMENTS))
    if confidence is not None and not 0 <= confidence <= 100:
        raise ValueError(
            'Invalid confidence {}: expected between [0, 100]'.format(
                confidence))
    if scoring_formula is not None:
        if scoring_formula in SCORING_FORMULA_MAP:
            # Translate the public name ('average'/'best') to 'A'/'B'.
            scoring_formula = SCORING_FORMULA_MAP[scoring_formula]
        else:
            raise ValueError(
                'Invalid scoring_formula {}: expected (average, best)'.format(
                    scoring_formula))
    if counting_unit is not None and counting_unit not in COUNTING_UNITS:
        raise ValueError('Invalid counting_unit {}: expected {}'.format(
            counting_unit, COUNTING_UNITS))
    if config_format is not None and config_format not in CONFIG_FORMATS:
        raise ValueError('Invalid config_format {}: expected {}'.format(
            config_format, CONFIG_FORMATS))
    if rouge_w and not rouge_w_weight >= 1:
        raise ValueError('Invalid rouge_w_weight {}: expected >= 1'.format(
            rouge_w_weight))
    if alpha is not None and not 0 <= alpha <= 1:
        raise ValueError('Invalid alpha {}: expected between [0, 1]'.format(
            alpha))
    if word_limit is not None and byte_limit is not None:
        raise ValueError('Cannot specify both word_limit and byte_limit')
    # -1 tells the perl script to use an unlimited skip distance.
    if skip_distance is None:
        skip_distance = -1
    env = env or ROUGE_DATA_HOME
    # --- Assemble the option list (flags mirror ROUGE-1.5.5.pl's getopts). ---
    options = ['perl', ROUGE_EXEC]
    if system_id is None:
        options.append('-a')
    if confidence is not None:
        options.extend(['-c', confidence])
    if print_each_eval:
        options.append('-d')
    options.extend(['-e', env])
    if word_limit is not None:
        options.extend(['-l', word_limit])
    if byte_limit is not None:
        options.extend(['-b', byte_limit])
    if stemming:
        options.append('-m')
    if rouge_n_max is not None:
        options.extend(['-n', rouge_n_max])
    if remove_stopwords:
        options.append('-s')
    if resampling_points is not None:
        options.extend(['-r', resampling_points])
    if rouge_s:
        if rouge_su:
            options.extend(['-2', skip_distance, '-U'])
        else:
            options.extend(['-2', skip_distance])
    else:
        if rouge_su:
            options.extend(['-2', skip_distance, '-u'])
    if basic_element is not None:
        options.extend(['-3', basic_element])
    if rouge_w:
        options.extend(['-w', rouge_w_weight])
    if verbose:
        options.append('-v')
    if not rouge_l:
        # ROUGE-L is on by default in the perl script; '-x' disables it.
        options.append('-x')
    if scoring_formula is not None:
        options.extend(['-f', scoring_formula])
    if alpha is not None:
        options.extend(['-p', alpha])
    if counting_unit is not None:
        options.extend(['-t', counting_unit])
    options.append(config_path)
    if system_id is not None:
        options.append(system_id)
    # Numeric option values are converted to str for subprocess.
    return [str(opt) for opt in options]
from __future__ import division
import collections
import itertools
from typing import (List, Dict, Callable, Tuple, Iterable, Set, Counter, Union,
Optional)
# Type aliases used throughout the module.
NGramsType = Counter[Tuple[str]]
ScoreType = Dict[str, float]
RougeType = Dict[str, Dict[str, float]]
# math.isclose exists on Python 3.5+; fall back to PEP 485's reference
# implementation on older interpreters.
try:
    from math import isclose
except ImportError:
    def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
        # type: (float, float, float, float) -> bool
        return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
"""Precision Recall & F-score"""
def _format_score(fscore, precision, recall):
# type: (float, float, float) -> Dict[str, float]
return {'r': recall, 'p': precision, 'f': fscore}
def _f_score(precision, recall, alpha):
# type: (float, float, float) -> float
if not 0 <= alpha <= 1:
raise ValueError(
'Invalid alpha {}: expected between [0, 1]'.format(alpha))
if isclose(precision, 0) or isclose(recall, 0):
return 0.0
return recall * precision / (alpha * recall + (1 - alpha) * precision)
def _div_or_zero(dividend, divisor):
# type: (float, float) -> float
if isclose(divisor, 0):
return 0.0
else:
return dividend / divisor
def _f_p_r_score(match_score, hyp_score, ref_score, alpha):
    # type: (float, float, float, float) -> Dict[str, float]
    """Turn raw match statistics into a precision/recall/f-score dict."""
    p = _div_or_zero(match_score, hyp_score)
    r = _div_or_zero(match_score, ref_score)
    return _format_score(_f_score(p, r, alpha), p, r)
def _flatten(sentences):
# type: (List[List[str]]) -> List[str]
return list(itertools.chain.from_iterable(sentences))
"""Match statistics"""
class _Match(collections.namedtuple('BaseMatch', 'matches hyp_size ref_size')):
    """Raw co-occurrence statistics: match count plus both corpus sizes."""

    def __add__(self, other):
        # type: (Union[_Match, int]) -> _Match
        # The int 0 is accepted so instances can be combined with sum().
        if isinstance(other, int) and other == 0:
            return self
        elif isinstance(other, _Match):
            return _Match(self.matches + other.matches,
                          self.hyp_size + other.hyp_size,
                          self.ref_size + other.ref_size)
        else:
            raise ValueError('Unexpected addend {}'.format(other))

    def __radd__(self, other):
        # type: (Union[_Match, int]) -> _Match
        # sum() starts with the int 0, which triggers this reflected add.
        return self.__add__(other)

    def to_score(self, alpha):
        # type: (float) -> Dict[str, float]
        """Convert the raw counts into a precision/recall/f dict."""
        return _f_p_r_score(self.matches, self.hyp_size, self.ref_size, alpha)

    def to_weighted_score(self, alpha, weight):
        # type: (float, float) -> Dict[str, float]
        """Like to_score, but un-weights ROUGE-W ratios via x**(1/weight)."""
        inv_weight_func = _get_weight_func(weight, inverse=True)
        precision = inv_weight_func(_div_or_zero(self.matches, self.hyp_size))
        recall = inv_weight_func(_div_or_zero(self.matches, self.ref_size))
        fscore = _f_score(precision, recall, alpha)
        return _format_score(fscore, precision, recall)
class _MatchAggregator(object):
    """Strategy interface: combine per-reference _Match statistics."""

    def aggregate(self, matches):
        # type: (Iterable[_Match]) -> _Match
        raise NotImplementedError


class _AverageMatchAggregator(_MatchAggregator):
    """Sum all matches, i.e. micro-average over the references."""

    def aggregate(self, matches):
        # type: (Iterable[_Match]) -> _Match
        result = sum(matches)
        # sum() of an empty iterable yields the int 0 instead of a _Match.
        if result == 0:
            raise ValueError('Average on empty sequence')
        return result


class _BestMatchAggregator(_MatchAggregator):
    """Keep the single reference whose match has the highest recall."""

    def aggregate(self, matches):
        # type: (Iterable[_Match]) -> _Match
        return max(matches, key=lambda x: _div_or_zero(x.matches, x.ref_size))


def _build_match_aggregator(multi_ref_mode):
    # type: (str) -> _MatchAggregator
    """Map a multi_ref_mode name ('average'/'best') to its aggregator."""
    if multi_ref_mode == 'average':
        return _AverageMatchAggregator()
    elif multi_ref_mode == 'best':
        return _BestMatchAggregator()
    else:
        raise ValueError(
            'Invalid multi_ref_mode {}: expected (average, best)'.format(
                multi_ref_mode))
"""ROUGE-N scores"""
def _build_ngrams(sent, n):
# type: (List[str], int) -> NGramsType
ngrams = collections.Counter()
for i in range(len(sent) - n + 1):
ngrams[tuple(sent[i:i + n])] += 1
return ngrams
def _count_ngrams(ngrams):
# type: (NGramsType) -> int
return sum(ngrams.values())
def _intersect_ngrams(hyp_ngrams, ref_ngrams):
# type: (NGramsType, NGramsType) -> NGramsType
return hyp_ngrams & ref_ngrams
def _union_ngrams(ngrams, other):
# type: (NGramsType, NGramsType) -> NGramsType
return ngrams | other
def _rouge_n_sentence_level(hyp, ref, n):
    # type: (List[str], List[str], int) -> _Match
    """Sentence-level ROUGE-N: clipped n-gram overlap counts."""
    hyp_ngrams = _build_ngrams(hyp, n)
    ref_ngrams = _build_ngrams(ref, n)
    match_ngrams = _intersect_ngrams(hyp_ngrams, ref_ngrams)
    return _Match(_count_ngrams(match_ngrams), _count_ngrams(hyp_ngrams),
                  _count_ngrams(ref_ngrams))


def _rouge_n_summary_level(hyps, refs, n):
    # type: (List[List[str]], List[List[str]], int) -> _Match
    """Summary-level ROUGE-N: treat the whole summary as one token stream."""
    return _rouge_n_sentence_level(_flatten(hyps), _flatten(refs), n)


def _rouge_n_multi_ref(hyps, multi_refs, n, multi_ref_mode, alpha):
    # type: (List[List[str]], List[List[List[str]]], int, str, float) -> ScoreType
    """ROUGE-N score of one hypothesis against multiple references."""
    agg = _build_match_aggregator(multi_ref_mode)
    match = agg.aggregate(
        _rouge_n_summary_level(hyps, refs, n) for refs in multi_refs)
    return match.to_score(alpha)
"""ROUGE-L scores"""
def _lcs_table(a, b):
# type: (List[str], List[str]) -> List[List[int]]
m = len(a)
n = len(b)
table = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if a[i - 1] == b[j - 1]:
table[i][j] = table[i - 1][j - 1] + 1
else:
table[i][j] = max(table[i - 1][j], table[i][j - 1])
return table
def _lcs_length(a, b):
    # type: (List[str], List[str]) -> int
    """Length of the longest common subsequence of a and b."""
    table = _lcs_table(a, b)
    return table[-1][-1]


def _lcs_elements(a, b, table):
    # type: (List[str], List[str], List[List[float]]) -> List[Tuple[int, int]]
    """Backtrack through an (W)LCS table, returning the matched index pairs
    ordered from the start of the sequences."""
    s = []
    i = len(a)
    j = len(b)
    while i > 0 and j > 0:
        if a[i - 1] == b[j - 1]:
            i -= 1
            j -= 1
            s.append((i, j))
        elif table[i][j] == table[i][j - 1]:
            # Tie-break: step left first, mirroring the table construction.
            j -= 1
        else:
            i -= 1
    # Collected while walking backwards; restore forward order.
    s.reverse()
    return s


def _lcs_union(hyps, ref):
    # type: (List[List[str]], List[str]) -> Set[int]
    """Union of reference token indices covered by any hypothesis LCS."""
    lcs_union = set()
    for hyp in hyps:
        lcs_elem = _lcs_elements(hyp, ref, _lcs_table(hyp, ref))
        lcs_union = lcs_union.union(ref_idx for _, ref_idx in lcs_elem)
    return lcs_union
def _rouge_l_sentence_level(hyp, ref):
    # type: (List[str], List[str]) -> _Match
    """Sentence-level ROUGE-L: LCS length over the two sentence lengths."""
    return _Match(_lcs_length(hyp, ref), len(hyp), len(ref))


def _rouge_l_summary_level(hyps, refs):
    # type: (List[List[str]], List[List[str]]) -> _Match
    """Summary-level ROUGE-L with unigram clipping.

    Each matched reference token consumes one occurrence from the
    hypothesis unigram budget, so a hypothesis token cannot be credited
    more often than it occurs.
    """
    hyp_unigram = _build_ngrams(_flatten(hyps), 1)
    match_size = 0
    for ref in refs:
        lcs_union = _lcs_union(hyps, ref)
        for ref_idx in lcs_union:
            unigram = (ref[ref_idx],)
            if hyp_unigram.get(unigram, 0) > 0:
                hyp_unigram[unigram] -= 1
                match_size += 1
    ref_len = sum(len(ref) for ref in refs)
    hyp_len = sum(len(hyp) for hyp in hyps)
    return _Match(match_size, hyp_len, ref_len)


def _rouge_l_multi_ref(hyps, multi_refs, multi_ref_mode, alpha):
    # type: (List[List[str]], List[List[List[str]]], str, float) -> ScoreType
    """ROUGE-L score of one hypothesis against multiple references."""
    agg = _build_match_aggregator(multi_ref_mode)
    match = agg.aggregate(
        _rouge_l_summary_level(hyps, refs) for refs in multi_refs)
    return match.to_score(alpha)
"""ROUGE-W scores"""
def _wlcs_table(a, b, weight):
# type: (List[str], List[str], Callable[[float], float]) -> List[List[float]]
m = len(a)
n = len(b)
wlen = [[0.0 for _ in range(n + 1)] for _ in range(m + 1)]
continuous_matches = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
for i in range(1, m + 1):
for j in range(1, n + 1):
if a[i - 1] == b[j - 1]:
k = continuous_matches[i - 1][j - 1]
wlen[i][j] = wlen[i - 1][j - 1] + weight(k + 1) - weight(k)
continuous_matches[i][j] = k + 1
else:
wlen[i][j] = max(wlen[i - 1][j], wlen[i][j - 1])
continuous_matches[i][j] = 0
return wlen
def _wlcs_union(hyps, ref, weight_func):
    # type: (List[List[str]], List[str], Callable[[float], float]) -> List[int]
    """Sorted reference indices covered by any hypothesis weighted LCS."""
    wlcs_union = set()
    for hyp in hyps:
        wlcs_elem = _lcs_elements(hyp, ref, _wlcs_table(hyp, ref, weight_func))
        wlcs_union = wlcs_union.union(ref_idx for _, ref_idx in wlcs_elem)
    # Sorted so callers can detect consecutive runs of matched indices.
    return sorted(wlcs_union)


def _rouge_w_sentence_level(hyp, ref, weight):
    # type: (List[str], List[str], float) -> _Match
    """Sentence-level ROUGE-W, delegating to the summary-level routine."""
    return _rouge_w_summary_level([hyp], [ref], weight)
def _get_weight_func(weight, inverse):
# type: (float, bool) -> Callable[[float], float]
if weight < 1:
raise ValueError('Invalid weight {}: expected >= 1'.format(weight))
if inverse:
weight = 1 / weight
return lambda x: x ** weight
def _rouge_w_summary_level(hyps, refs, weight):
    # type: (List[List[str]], List[List[str]], float) -> _Match
    """Summary-level ROUGE-W match statistics (weighted LCS)."""
    weight_func = _get_weight_func(weight, inverse=False)
    hyp_flat = _flatten(hyps)
    hyp_unigrams = _build_ngrams(hyp_flat, 1)
    # NOTE(review): ref_score applies the weight twice (per-reference and
    # again on the sum) while hyp_score applies it once -- confirm against
    # ROUGE-1.5.5.pl before changing.
    ref_score = weight_func(sum(weight_func(len(ref)) for ref in refs))
    hyp_score = weight_func(sum(len(hyp) for hyp in hyps))
    match_score = 0
    for ref in refs:
        wlcs_union = _wlcs_union(hyps, ref, weight_func)
        consecutive_matches = 0
        for ref_idx in wlcs_union:
            token = (ref[ref_idx],)
            # Clip credit by the hypothesis unigram budget.
            if hyp_unigrams[token] > 0:
                hyp_unigrams[token] -= 1
                consecutive_matches += 1
            # Close and score the run when the consecutive stretch ends.
            if ref_idx == len(ref) - 1 or ref_idx + 1 not in wlcs_union:
                match_score += weight_func(consecutive_matches)
                consecutive_matches = 0
    return _Match(match_score, hyp_score, ref_score)


def _rouge_w_multi_ref(hyps, multi_refs, weight, multi_ref_mode, alpha):
    # type: (List[List[str]], List[List[List[str]]], float, str, float) -> ScoreType
    """ROUGE-W score of one hypothesis against multiple references."""
    agg = _build_match_aggregator(multi_ref_mode)
    match = agg.aggregate(
        _rouge_w_summary_level(hyps, refs, weight) for refs in multi_refs)
    # Weighted counts must be un-weighted via x**(1/weight) when converted.
    return match.to_weighted_score(alpha, weight)
"""ROUGE-S scores"""
def _skip_bigrams(sent, skip_gap):
# type: (List[str], Optional[int]) -> NGramsType
bigrams = collections.Counter()
if skip_gap is None or skip_gap < 0:
skip_gap = len(sent)
for lo in range(len(sent)):
for hi in range(lo + 1, min(len(sent), lo + skip_gap + 2)):
bigrams[(sent[lo], sent[hi])] += 1
return bigrams
def _rouge_s_or_su(hyp, ref, skip_gap, include_unigram):
    # type: (List[str], List[str], Optional[int], bool) -> _Match
    """Shared core of ROUGE-S and ROUGE-SU (skip-bigram overlap)."""
    hyp_skip = _skip_bigrams(hyp, skip_gap)
    ref_skip = _skip_bigrams(ref, skip_gap)
    if include_unigram:
        # NOTE(review): unigrams of all but the last token are folded in,
        # presumably emulating ROUGE-SU's begin-of-sentence marker -- confirm
        # the [:-1] against ROUGE-1.5.5.pl behavior.
        hyp_skip = _union_ngrams(hyp_skip, _build_ngrams(hyp[:-1], 1))
        ref_skip = _union_ngrams(ref_skip, _build_ngrams(ref[:-1], 1))
    match_skip = _intersect_ngrams(hyp_skip, ref_skip)
    return _Match(_count_ngrams(match_skip), _count_ngrams(hyp_skip),
                  _count_ngrams(ref_skip))


def _rouge_s_sentence_level(hyp, ref, skip_gap):
    # type: (List[str], List[str], Optional[int]) -> _Match
    """Sentence-level ROUGE-S (skip-bigrams only)."""
    return _rouge_s_or_su(hyp, ref, skip_gap, False)


def _rouge_s_summary_level(hyps, refs, skip_gap):
    # type: (List[List[str]], List[List[str]], Optional[int]) -> _Match
    """Summary-level ROUGE-S over the flattened token streams."""
    return _rouge_s_sentence_level(_flatten(hyps), _flatten(refs), skip_gap)


def _rouge_s_multi_ref(hyps, multi_refs, skip_gap, multi_ref_mode, alpha):
    # type: (List[List[str]], List[List[List[str]]], Optional[int], str, float) -> ScoreType
    """ROUGE-S score of one hypothesis against multiple references."""
    agg = _build_match_aggregator(multi_ref_mode)
    match = agg.aggregate(
        _rouge_s_summary_level(hyps, refs, skip_gap) for refs in multi_refs)
    return match.to_score(alpha)


def _rouge_su_sentence_level(hyp, ref, skip_gap):
    # type: (List[str], List[str], Optional[int]) -> _Match
    """Sentence-level ROUGE-SU (skip-bigrams plus unigrams)."""
    return _rouge_s_or_su(hyp, ref, skip_gap, True)


def _rouge_su_summary_level(hyps, refs, skip_gap):
    # type: (List[List[str]], List[List[str]], Optional[int]) -> _Match
    """Summary-level ROUGE-SU over the flattened token streams."""
    return _rouge_su_sentence_level(_flatten(hyps), _flatten(refs), skip_gap)


def _rouge_su_multi_ref(hyps, multi_refs, skip_gap, multi_ref_mode, alpha):
    # type: (List[List[str]], List[List[List[str]]], Optional[int], str, float) -> ScoreType
    """ROUGE-SU score of one hypothesis against multiple references."""
    agg = _build_match_aggregator(multi_ref_mode)
    match = agg.aggregate(
        _rouge_su_summary_level(hyps, refs, skip_gap) for refs in multi_refs)
    return match.to_score(alpha)
"""All ROUGE scores"""
def _rouge_scores_multi_ref(
        hyp, # type: List[List[str]]
        multi_ref, # type: List[List[List[str]]]
        rouge_n, # type: Union[int, Iterable[int]]
        rouge_l, # type: bool
        rouge_w, # type: bool
        rouge_w_weight, # type: float
        rouge_s, # type: bool
        rouge_su, # type: bool
        skip_gap, # type: Optional[int]
        multi_ref_mode, # type: str
        alpha, # type: float
): # type: (...) -> Dict[str, Dict[str, float]]
    """Compute every requested ROUGE variant for a single hypothesis."""
    # An int N means "ROUGE-1 .. ROUGE-N"; an iterable lists exact orders.
    if isinstance(rouge_n, int):
        rouge_n = range(1, 1 + rouge_n)
    # Metric names embed the skip gap; '*' marks an unlimited gap.
    skip_suffix = str(skip_gap) if skip_gap and skip_gap >= 0 else '*'
    result = {}
    for n in rouge_n:
        result['rouge-{}'.format(n)] = _rouge_n_multi_ref(
            hyp, multi_ref, n, multi_ref_mode, alpha)
    if rouge_l:
        result['rouge-l'] = _rouge_l_multi_ref(
            hyp, multi_ref, multi_ref_mode, alpha)
    if rouge_w:
        result['rouge-w-{}'.format(rouge_w_weight)] = _rouge_w_multi_ref(
            hyp, multi_ref, rouge_w_weight, multi_ref_mode, alpha)
    if rouge_s:
        result['rouge-s{}'.format(skip_suffix)] = _rouge_s_multi_ref(
            hyp, multi_ref, skip_gap, multi_ref_mode, alpha)
    if rouge_su:
        result['rouge-su{}'.format(skip_suffix)] = _rouge_su_multi_ref(
            hyp, multi_ref, skip_gap, multi_ref_mode, alpha)
    return result
class _RougeAggregator(object):
    """Strategy interface: combine per-document ROUGE results."""

    def aggregate(self, scores):
        # type: (Iterable[RougeType]) -> Union[List[RougeType], RougeType]
        raise NotImplementedError


class _IndividualRougeAggregator(_RougeAggregator):
    """Return the per-document scores unchanged, as a list."""

    def aggregate(self, scores):
        # type: (Iterable[RougeType]) -> List[RougeType]
        return list(scores)


class _AverageRougeAggregator(_RougeAggregator):
    """Macro-average precision/recall over documents; f is recomputed."""

    def __init__(self, alpha):
        # alpha: recall/precision balance used when recomputing f-scores.
        self.alpha = alpha

    def aggregate(self, scores):
        # type: (Iterable[RougeType]) -> RougeType
        scores = list(scores)
        if len(scores) == 0:
            return {}
        results = {}
        # Assumes every document reports the same set of metric keys.
        for key in scores[0].keys():
            results[key] = self.average_score(score[key] for score in scores)
        return results

    def average_score(self, scores):
        # type: (Iterable[ScoreType]) -> ScoreType
        """Average p and r; the f-score is recomputed from the averages."""
        total_p = 0
        total_r = 0
        count = 0
        for score in scores:
            total_p += score['p']
            total_r += score['r']
            count += 1
        precision = _div_or_zero(total_p, count)
        recall = _div_or_zero(total_r, count)
        fscore = _f_score(precision, recall, self.alpha)
        return _format_score(fscore, precision, recall)


def _build_rouge_aggregator(mode, alpha):
    # type: (str, float) -> _RougeAggregator
    """Map a mode name ('individual'/'average') to its aggregator."""
    if mode == 'individual':
        return _IndividualRougeAggregator()
    if mode == 'average':
        return _AverageRougeAggregator(alpha)
    raise ValueError(
        'Invalid mode {}: expected (individual, average)'.format(mode))
class PyRouge(object):
    """Compute ROUGE scores between multiple hypothesis and reference summaries.

    :param rouge_n: Compute N-gram co-occurrence (ROUGE-N). Given an integer N,
        compute ROUGE-1 to ROUGE-N. Given a list of integers, compute ROUGE-N if
        N is on the list.
    :param rouge_l: If true, compute longest common subsequence (LCS)
        co-occurrence (ROUGE-L).
    :param rouge_w: If true, compute Weighted-LCS (WLCS) co-occurrence
        (ROUGE-W).
    :param rouge_w_weight: The weight w of the weighting function
        :math:`f(x) = x^w` to emphasize consecutive matches in ROUGE-W.
    :param rouge_s: If true, compute skip-bigram co-occurrence (ROUGE-S).
    :param rouge_su: If true, compute skip-bigram with unigram co-occurrence
        (ROUGE-SU).
    :param skip_gap: The maximum gap between two words in skip-bigram.
    :param multi_ref_mode: The method to combine the scores between a
        hypothesis and its multiple references. Choose from {average, best}.
    :param alpha: The balance factor between recall and precision. Favors recall
        if close to 1, precision if close to 0.
    :param mode: The method to combine the scores on multiple documents.
        Choose from {average, individual}.

    Example:
    ::

        >>> from rouge_metric import PyRouge
        >>> hypotheses = ['Police killed the gunman'.lower()]
        >>> references = [['The gunman killed the policeman'.lower()]]
        >>> PyRouge().evaluate(hypotheses, references)
        {
            'rouge-1': {'r': 0.6, 'p': 0.75, 'f': 0.666666667},
            'rouge-2': {'r': 0.5, 'p': 0.666666667, 'f': 0.571428571},
            'rouge-l': {'r': 0.4, 'p': 0.5, 'f': 0.444444444}
        }
        >>> hypotheses = [['Police killed the gunman'.lower().split()]]
        >>> references = [[['The gunman killed the policeman'.lower().split()]]]
        >>> PyRouge().evaluate_tokenized(hypotheses, references)
        {
            'rouge-1': {'r': 0.6, 'p': 0.75, 'f': 0.666666667},
            'rouge-2': {'r': 0.5, 'p': 0.666666667, 'f': 0.571428571},
            'rouge-l': {'r': 0.4, 'p': 0.5, 'f': 0.444444444}
        }
    """

    def __init__(self,
                 rouge_n=(1, 2), # type: Union[int, Iterable[int]]
                 rouge_l=True, # type: bool
                 rouge_w=False, # type: bool
                 rouge_w_weight=1.2, # type: float
                 rouge_s=False, # type: bool
                 rouge_su=False, # type: bool
                 skip_gap=None, # type: Optional[int]
                 multi_ref_mode='average', # type: str
                 alpha=0.5, # type: float
                 mode='average', # type: str
                 ):
        # Configuration is stored verbatim and consumed by
        # evaluate_tokenized(); see the class docstring for the semantics.
        self.rouge_n = rouge_n
        self.rouge_l = rouge_l
        self.rouge_w = rouge_w
        self.rouge_w_weight = rouge_w_weight
        self.rouge_s = rouge_s
        self.rouge_su = rouge_su
        self.skip_gap = skip_gap
        self.multi_ref_mode = multi_ref_mode
        self.alpha = alpha
        self.mode = mode

    @staticmethod
    def _default_sentencizer(text):
        # type: (str) -> List[str]
        """Split a summary into sentences on newlines."""
        return text.split('\n')

    @staticmethod
    def _default_tokenizer(sent):
        # type: (str) -> List[str]
        """Split a sentence into tokens on whitespace."""
        return sent.split()

    def evaluate_tokenized(
            self,
            hypotheses, # type: List[List[List[str]]]
            multi_references, # type: List[List[List[List[str]]]]
    ):
        # type: (...) -> Union[RougeType, List[RougeType]]
        """Compute ROUGE scores between tokenized hypotheses and references.

        Multiple reference summaries can be specified for a hypothesis summary.
        The input should follow the below format so that we know how to match a
        hypothesis with its references.
        ::

            hypotheses = [
                doc1_hyp_summary, # Hypothesis summary for document 1
                doc2_hyp_summary, # Hypothesis summary for document 2
                ...
            ]
            multi_references = [
                [
                    doc1_ref1_summary, # Reference summary 1 for document 1
                    doc1_ref2_summary, # Reference summary 2 for document 1
                    ...
                ],
                [
                    doc2_ref1_summary, # Reference summary 1 for document 2
                    doc2_ref2_summary, # Reference summary 2 for document 2
                    ...
                ],
            ]

        Note that a summary is represented by a list of sentences, and a
        sentence is represented by a list of tokens. A token is a basic element
        here, represented by a ``str``. i.e.,
        ::

            summary = [
                [sent1_token1, sent1_token2, ...], # sentence 1
                [sent2_token1, sent2_token2, ...], # sentence 2
            ]

        :param hypotheses: A list of predicted summaries for multiple documents.
            Each summary contains multiple sentences, and each sentence contains
            multiple tokens.
        :param multi_references: A list of gold standard summaries for multiple
            documents. Each document corresponds to multiple reference
            summaries. Each summary contains multiple sentences, and each
            sentence contains multiple tokens.
        :return: All computed ROUGE scores.
        """
        if len(hypotheses) != len(multi_references):
            raise ValueError('Hypotheses and references must be the same size')
        # Score each document, then combine per self.mode
        # ('average' -> one dict, 'individual' -> list of dicts).
        aggregator = _build_rouge_aggregator(self.mode, self.alpha)
        result = aggregator.aggregate(
            _rouge_scores_multi_ref(
                hyp, multi_ref, self.rouge_n, self.rouge_l, self.rouge_w,
                self.rouge_w_weight, self.rouge_s, self.rouge_su, self.skip_gap,
                self.multi_ref_mode, self.alpha
            ) for hyp, multi_ref in zip(hypotheses, multi_references)
        )
        return result

    def evaluate(self,
                 hypotheses, # type: List[str]
                 multi_references, # type: List[List[str]]
                 sentencizer=None, # type: Optional[Callable[[str], List[str]]]
                 tokenizer=None # type: Optional[Callable[[str], List[str]]]
                 ):
        # type: (...) -> Union[RougeType, List[RougeType]]
        """Compute ROUGE scores between hypothesis and reference summaries.

        The hypotheses and multi_references should follow the below format.
        ::

            hypotheses = [summary1, summary2, ...]
            multi_references = [
                [summary1_ref1, summary1_ref2, ...],
                [summary2_ref1, summary2_ref2, ...],
                ...
            ]

        A summary here is a ``str`` with multiple lines, separated by ``\\n``.
        Each line represents a sentence.

        :param hypotheses: A list of hypothesis summaries.
        :param multi_references: A double list of reference summaries.
        :param sentencizer: A function to split a paragraph into sentences.
        :param tokenizer: A function to split a sentence into tokens.
        :return: All computed ROUGE scores.
        """
        if sentencizer is None:
            sentencizer = self._default_sentencizer
        if tokenizer is None:
            tokenizer = self._default_tokenizer
        # Tokenize everything up front, then delegate to evaluate_tokenized.
        tokenized_hyp = [[tokenizer(sent) for sent in sentencizer(hyp)]
                         for hyp in hypotheses]
        tokenized_multi_ref = [[[tokenizer(sent) for sent in sentencizer(ref)]
                                for ref in multi_ref]
                               for multi_ref in multi_references]
        return self.evaluate_tokenized(tokenized_hyp, tokenized_multi_ref)
from __future__ import absolute_import
import six
import rouge_mongolian.rouge_score as rouge_score
import io
import os
import re
class FilesRouge:
    """Compute ROUGE scores between two line-aligned files of summaries."""

    def __init__(self, *args, **kwargs):
        """See the `Rouge` class for args
        """
        self.rouge = Rouge(*args, **kwargs)

    def _check_files(self, hyp_path, ref_path):
        """Verify both paths exist and contain the same number of lines.

        NOTE: uses ``assert``, so these checks vanish under ``python -O``.
        """
        assert(os.path.isfile(hyp_path))
        assert(os.path.isfile(ref_path))

        def line_count(path):
            # Count lines in binary mode to avoid decoding errors.
            count = 0
            with open(path, "rb") as f:
                for line in f:
                    count += 1
            return count

        hyp_lc = line_count(hyp_path)
        ref_lc = line_count(ref_path)
        assert(hyp_lc == ref_lc)

    def get_scores(self, hyp_path, ref_path, avg=False, ignore_empty=False):
        """Calculate ROUGE scores between each pair of
        lines (hyp_file[i], ref_file[i]).

        Args:
          * hyp_path: hypothesis file path
          * ref_path: references file path
          * avg (False): whether to get an average scores or a list
        """
        self._check_files(hyp_path, ref_path)

        # rstrip('\n') instead of line[:-1]: the old slice chopped the last
        # character of a final line that has no trailing newline.
        with io.open(hyp_path, encoding="utf-8", mode="r") as hyp_file:
            hyps = [line.rstrip('\n') for line in hyp_file]

        with io.open(ref_path, encoding="utf-8", mode="r") as ref_file:
            refs = [line.rstrip('\n') for line in ref_file]

        return self.rouge.get_scores(hyps, refs, avg=avg,
                                     ignore_empty=ignore_empty)
class Rouge:
    """Compute ROUGE metrics between hypothesis and reference texts.

    Paragraphs are first split into sentences on sentence-final punctuation
    (see :meth:`cut_sent`) before summary-level scoring.
    """

    DEFAULT_METRICS = ["rouge-1", "rouge-2", "rouge-l"]
    AVAILABLE_METRICS = {
        "rouge-1": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 1, **k),
        "rouge-2": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 2, **k),
        "rouge-3": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 3, **k),
        "rouge-4": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 4, **k),
        "rouge-5": lambda hyp, ref, **k: rouge_score.rouge_n(hyp, ref, 5, **k),
        "rouge-l": lambda hyp, ref, **k:
            rouge_score.rouge_l_summary_level(hyp, ref, **k),
    }
    DEFAULT_STATS = ["r", "p", "f"]
    AVAILABLE_STATS = ["r", "p", "f"]

    def __init__(self, metrics=None, stats=None, return_lengths=False,
                 raw_results=False, exclusive=True):
        """Configure which metrics and statistics to report.

        :param metrics: subset of AVAILABLE_METRICS keys (default: 1, 2, l).
        :param stats: subset of 'r'/'p'/'f' (ignored when raw_results).
        :param return_lengths: also report hyp/ref token counts.
        :param raw_results: report raw counts instead of p/r/f stats.
        :param exclusive: forwarded to the underlying scoring functions.
        """
        self.return_lengths = return_lengths
        self.raw_results = raw_results
        self.exclusive = exclusive

        if metrics is not None:
            self.metrics = [m.lower() for m in metrics]
            for m in self.metrics:
                if m not in Rouge.AVAILABLE_METRICS:
                    raise ValueError("Unknown metric '%s'" % m)
        else:
            self.metrics = Rouge.DEFAULT_METRICS

        if self.raw_results:
            self.stats = ["hyp", "ref", "overlap"]
        else:
            if stats is not None:
                self.stats = [s.lower() for s in stats]
                for s in self.stats:
                    if s not in Rouge.AVAILABLE_STATS:
                        raise ValueError("Unknown stat '%s'" % s)
            else:
                self.stats = Rouge.DEFAULT_STATS

    def cut_sent(self, para):
        """Split a paragraph into sentences.

        A newline is inserted after each sentence-final mark (Mongolian full
        stop / question / exclamation marks or '?') that is not followed by a
        closing quote, then the paragraph is split on newlines.
        """
        para = re.sub('([᠃︖︕\?])([^”’])', r"\1\n\2", para)
        para = para.rstrip()
        # Fixed: a leftover debug ``print(para)`` used to echo every
        # paragraph to stdout here.
        return para.split("\n")

    def get_scores(self, hyps, refs, avg=False, ignore_empty=False):
        """Score each hypothesis against its reference.

        :param hyps: a hypothesis string or a list of strings.
        :param refs: a reference string or list of strings (same length).
        :param avg: return one averaged dict instead of a per-pair list.
        :param ignore_empty: silently drop pairs where either side is empty.
        """
        if isinstance(hyps, six.string_types):
            hyps, refs = [hyps], [refs]

        if ignore_empty:
            # Filter out hyps of 0 length
            hyps_and_refs = zip(hyps, refs)
            hyps_and_refs = [_ for _ in hyps_and_refs
                             if len(_[0]) > 0
                             and len(_[1]) > 0]
            hyps, refs = zip(*hyps_and_refs)

        assert(isinstance(hyps, type(refs)))
        assert(len(hyps) == len(refs))

        if not avg:
            return self._get_scores(hyps, refs)
        return self._get_avg_scores(hyps, refs)

    def _get_scores(self, hyps, refs):
        """Per-pair scores: one dict of metric -> stat -> value per pair."""
        scores = []
        for hyp, ref in zip(hyps, refs):
            sen_score = {}
            # Sentence-split, then normalize whitespace in each sentence.
            hyp = [" ".join(_.split()) for _ in self.cut_sent(hyp) if len(_) > 0]
            ref = [" ".join(_.split()) for _ in self.cut_sent(ref) if len(_) > 0]

            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(
                    hyp,
                    ref,
                    raw_results=self.raw_results,
                    exclusive=self.exclusive)
                sen_score[m] = {s: sc[s] for s in self.stats}

            if self.return_lengths:
                lengths = {
                    "hyp": len(" ".join(hyp).split()),
                    "ref": len(" ".join(ref).split())
                }
                sen_score["lengths"] = lengths

            scores.append(sen_score)
        return scores

    def _get_avg_scores(self, hyps, refs):
        """Scores averaged over all hypothesis/reference pairs."""
        scores = {m: {s: 0 for s in self.stats} for m in self.metrics}
        if self.return_lengths:
            scores["lengths"] = {"hyp": 0, "ref": 0}

        count = 0
        for (hyp, ref) in zip(hyps, refs):
            hyp = [" ".join(_.split()) for _ in self.cut_sent(hyp) if len(_) > 0]
            ref = [" ".join(_.split()) for _ in self.cut_sent(ref) if len(_) > 0]

            for m in self.metrics:
                fn = Rouge.AVAILABLE_METRICS[m]
                sc = fn(hyp, ref, exclusive=self.exclusive)
                scores[m] = {s: scores[m][s] + sc[s] for s in self.stats}

            if self.return_lengths:
                scores["lengths"]["hyp"] += len(" ".join(hyp).split())
                scores["lengths"]["ref"] += len(" ".join(ref).split())

            count += 1

        avg_scores = {
            m: {s: scores[m][s] / count for s in self.stats}
            for m in self.metrics
        }

        if self.return_lengths:
            avg_scores["lengths"] = {
                k: scores["lengths"][k] / count
                for k in ["hyp", "ref"]
            }

        return avg_scores
from collections import defaultdict
import logging
def filter_graphalignments(file_name, min_mapq=50):
    """Pick one best alignment per read, assign a mapping quality, and print
    kept alignments as tab-separated ``read_name  alignment  score`` lines.

    Expected input: tab-separated lines ``read_id  alignment  n_mismatches``;
    a read may occur on multiple lines (one per candidate alignment).

    :param file_name: path to the graph-alignment file
    :param min_mapq: alignments scoring below this threshold are suppressed
    """
    alignments = defaultdict(list)  # read id to list of (alignment, n_mismatches)
    # Use a context manager so the input file is always closed
    # (the handle was previously leaked).
    with open(file_name) as f:
        for i, line in enumerate(f):
            if i % 1000000 == 0:
                logging.info("Read %d lines" % i)
            l = line.split("\t")
            alignments[l[0]].append((l[1], int(l[2])))
    # Counters for the summary logged at the end.
    n_single = 0
    n_skipped_multiple_good = 0
    n_skipped_multiple_good_unique = 0
    n_kept_multiple_good = 0
    n_kept_multiple_good_unique = 0
    n_filtered_bad_score = 0
    # NOTE(review): this counter is logged below but never incremented.
    n_filtered_linear_many_alternative = 0
    for read_name, hits in alignments.items():
        # Best (fewest mismatches) candidate first.
        sorted_hits = sorted(hits, key=lambda x: x[1])
        if sorted_hits[0][1] >= 7:
            # Skip because too bad score
            n_filtered_bad_score += 1
            continue
        if len(sorted_hits) == 1:
            best_hit = sorted_hits[0]
            score = 60 - best_hit[1] * 2
            n_single += 1
        else:
            # Only keep if second best has strictly more mismatches
            n_mismatches = sorted_hits[0][1]
            if sorted_hits[1][1] <= n_mismatches + 0:
                # skip: at least two equally good alignments (ambiguous)
                n_skipped_multiple_good_unique += 1
                n_skipped_multiple_good += len(sorted_hits)
                continue
            else:
                best_hit = sorted_hits[0]
                n_kept_multiple_good_unique += 1
                n_kept_multiple_good += len(sorted_hits)
                # Base score shrinks with the number of alternative hits...
                score = 5
                if len(hits) == 2:
                    score = 58
                elif len(hits) == 3:
                    score = 56
                elif len(hits) >= 4:
                    score = 54
                # ...and with mismatches / closeness of the runner-up.
                score -= best_hit[1] * 1 + (sorted_hits[1][1] - sorted_hits[0][1])
        if score >= min_mapq:
            print("%s\t%s\t%s" % (read_name, best_hit[0], score))
    logging.info("%d reads filtered because multimapping on linear ref" % n_filtered_linear_many_alternative)
    logging.info("%d reads filtered because too low score" % n_filtered_bad_score)
    logging.info("%d reads have only a single good alignment" % n_single)
    logging.info("%d reads filtered out because they have multiple good mappings (%d in total)" %
                 (n_skipped_multiple_good_unique, n_skipped_multiple_good))
    logging.info("%d reads with multiple alignments kept, because second best is bad (%d in total)" %
                 (n_kept_multiple_good_unique, n_kept_multiple_good))
import os
from .util import split_sam_by_chromosomes, run_bwa_mem, run_hybrid_between_bwa_and_minimap
from multiprocessing import Process
import logging
from .sam_to_graph_aligner import SamToGraphAligner
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
def map_single_chromosome(file_base_name, graph_dir, chromosome, minimum_mapq_to_graphalign=60):
    """Realign one chromosome's per-chromosome SAM file onto its graph."""
    sam_path = "{}_chr{}.sam".format(file_base_name, chromosome)
    SamToGraphAligner(graph_dir, chromosome, sam_path, minimum_mapq_to_graphalign).align_sam()
class LinearToGraphMapper:
    """Map reads to per-chromosome graphs by first aligning them to a linear
    reference (BWA MEM, optionally merged with minimap2 for mapq adjustment),
    splitting the resulting SAM by chromosome, and realigning each chromosome
    in a separate process."""

    def __init__(self, fasta_file_name, linear_reference_file_name, graph_dir, chromosomes, minimum_mapq_to_graphalign=60, write_final_alignments_to_file=None, n_threads=3,
                 skip_mapq_adjustment=False):
        """Run the full pipeline immediately on construction.

        :param fasta_file_name: reads to map (FASTA); its basename is reused
            for all intermediate files
        :param linear_reference_file_name: linear reference FASTA
        :param graph_dir: directory containing the per-chromosome graphs
        :param chromosomes: chromosome names to process
        :param minimum_mapq_to_graphalign: mapq threshold forwarded to the
            per-chromosome graph aligner
        :param write_final_alignments_to_file: if set, merged graph alignments
            are written here instead of printed to stdout
        :param n_threads: threads passed to BWA MEM / minimap2
        :param skip_mapq_adjustment: if True, only BWA MEM is run (no
            minimap2-based mapq adjustment)
        """
        self.chromosomes = chromosomes
        self.graph_dir = graph_dir
        self.minimum_mapq_to_graphalign = minimum_mapq_to_graphalign
        self.write_final_alignments_to_file=write_final_alignments_to_file
        # NOTE(review): attribute name has a typo ("adjustmnet"); kept as-is in
        # case external code reads it -- confirm before renaming.
        self.skip_mapq_adjustmnet = skip_mapq_adjustment
        # Basename without extension; all intermediate files derive from it.
        self.base_name = '.'.join(fasta_file_name.split(".")[:-1])
        # First align to linear reference
        if not skip_mapq_adjustment:
            run_hybrid_between_bwa_and_minimap(linear_reference_file_name, fasta_file_name, self.base_name + ".sam",
                                               bwa_arguments="-t %d -h 10000000 -D 0.05" % n_threads,
                                               minimap_arguments="-t %d -k19 -w11 --sr --frag=yes -A2 -B8 -O12,32 -E2,1 -r50 -p.5 -f90000,180000 -n2 -m20 -s40 -g200 -2K50m --heap-sort=yes -N 7 -a" % n_threads)
        else:
            # Only run bwa mem
            logging.warning("Skipping mapq adjustment. Will only run BWA, not minimap2")
            run_bwa_mem(linear_reference_file_name, fasta_file_name, self.base_name + ".sam", arguments="-t %d -h 1000000 -D 0.05" % n_threads)
        assert os.path.isfile(self.base_name + ".sam"), "No sam file generated. Did BWA MEM or minimap fail?"
        # Split sam by chromosome
        split_sam_by_chromosomes(self.base_name + ".sam", chromosomes)
        self.map_all_chromosomes()

    def map_all_chromosomes(self):
        """Graph-align every chromosome in parallel, then merge the results.

        Spawns one process per chromosome and either prints the merged
        graph alignments to stdout or writes them to
        ``write_final_alignments_to_file``."""
        processes = []
        for chromosome in self.chromosomes:
            process = Process(target=map_single_chromosome, args=(self.base_name, self.graph_dir, chromosome, self.minimum_mapq_to_graphalign))
            process.start()
            processes.append(process)
        # Wait for all per-chromosome aligners to finish before merging.
        for p in processes:
            p.join()
        # Merge results from all chromosomes
        out_file = None
        out_file_name = None
        if self.write_final_alignments_to_file is not None:
            out_file_name = self.write_final_alignments_to_file
            out_file = open(out_file_name, "w")
        for chromosome in self.chromosomes:
            with open(self.base_name + "_chr" + chromosome + ".sam.graphalignments", "r") as f:
                for line in f:
                    if self.write_final_alignments_to_file is not None:
                        out_file.writelines([line])
                    else:
                        print(line.strip())
        if self.write_final_alignments_to_file is not None:
            logging.info("Merged all graphalignments into file %s" % out_file_name)
            out_file.close()
        logging.info("Done mapping reads")
if __name__ == "__main__":
    # Example/manual invocation: map simulated reads against the graphs for
    # chromosomes 1-22 and X (paths point at developer-local test data).
    mapper = LinearToGraphMapper("sim_2m.fa", "../data/human_full/testreference.fa", "../data/human_full/",
                                 [str(chrom) for chrom in range(1, 23)] + ["X"])
import ctypes
import numpy as np
from .dlpack import _c_str_dltensor, DLManagedTensor, DLTensor
# Declare restype/argtypes for the CPython PyCapsule C-API entry points so
# that ctypes marshals the pointer arguments and results correctly.
ctypes.pythonapi.PyCapsule_IsValid.restype = ctypes.c_int
ctypes.pythonapi.PyCapsule_IsValid.argtypes = [ctypes.py_object, ctypes.c_char_p]
ctypes.pythonapi.PyCapsule_GetPointer.restype = ctypes.c_void_p
ctypes.pythonapi.PyCapsule_GetPointer.argtypes = [ctypes.py_object, ctypes.c_char_p]
def _array_interface_from_dl_tensor(dlt):
    """Build a NumPy ``__array_interface__`` dict from a `dlpack.DLTensor`
    descriptor."""
    assert isinstance(dlt, DLTensor)
    ndim = dlt.ndim
    shape = tuple(dlt.shape[axis] for axis in range(ndim))
    itemsize = dlt.dtype.lanes * dlt.dtype.bits // 8
    if dlt.strides:
        # Strides are given in elements; convert them to byte strides.
        strides = tuple(dlt.strides[axis] * itemsize for axis in range(ndim))
    else:
        # No strides means a compact C-contiguous layout: build the byte
        # strides as running products of the trailing extents.
        reversed_strides = []
        running = itemsize
        for extent in reversed(shape):
            reversed_strides.append(running)
            running *= extent
        strides = tuple(reversed(reversed_strides))
    # First character of the type-code name ('i', 'u', 'f', ...) selects the
    # NumPy kind; '|' marks the byte order as not applicable.
    typestr = "|" + str(dlt.dtype.type_code)[0] + str(itemsize)
    return dict(
        version=3,
        shape=shape,
        strides=strides,
        data=(dlt.data, True),
        offset=dlt.byte_offset,
        typestr=typestr,
    )
class _Holder:
    """Pairs a NumPy ``__array_interface__`` dict with the PyCapsule whose
    memory it describes, so the capsule stays alive for as long as NumPy
    holds a reference to this object.

    Parameters
    ----------
    array_interface : dict
        A description of the underlying memory.
    pycapsule : PyCapsule
        A wrapper around the dlpack tensor that will be converted to numpy.
    """

    def __init__(self, array_interface, pycapsule) -> None:
        # Keeping the capsule as an attribute ties its lifetime to ours.
        self._pycapsule = pycapsule
        self.__array_interface__ = array_interface
def to_numpy(pycapsule) -> np.ndarray:
    """Convert a dlpack tensor into a numpy array without copying.

    Parameters
    ----------
    pycapsule : PyCapsule
        A pycapsule wrapping a dlpack tensor that will be converted.

    Returns
    -------
    np_array : np.ndarray
        A new numpy array that uses the same underlying memory as the input
        pycapsule.
    """
    # The capsule must still carry the standard "dltensor" name.
    assert ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor)
    dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(
        pycapsule, _c_str_dltensor
    )
    dl_managed_tensor_ptr = ctypes.cast(dl_managed_tensor, ctypes.POINTER(DLManagedTensor))
    dl_managed_tensor = dl_managed_tensor_ptr.contents
    # The holder keeps the capsule alive for as long as NumPy references the
    # returned array.
    holder = _Holder(_array_interface_from_dl_tensor(dl_managed_tensor.dl_tensor), pycapsule)
    return np.ctypeslib.as_array(holder)
import ctypes
# PyCapsule name identifying an unconsumed DLPack tensor capsule, as required
# by the DLPack protocol.
_c_str_dltensor = b"dltensor"
class DLDeviceType(ctypes.c_int):
    """The enum that encodes the type of the device where
    DLTensor memory is allocated.

    The numeric values mirror the ``DLDeviceType`` enum of the DLPack
    C header.
    """
    kDLCPU = 1
    kDLCUDA = 2
    kDLCUDAHost = 3
    kDLOpenCL = 4
    kDLVulkan = 7
    kDLMetal = 8
    kDLVPI = 9
    kDLROCM = 10
    kDLROCMHost = 11
    kDLCUDAManaged = 13
    kDLOneAPI = 14
    def __str__(self):
        # Map the raw integer value to a readable device name; raises
        # KeyError for values not listed above.
        return {
            self.kDLCPU : "CPU",
            self.kDLCUDA: "CUDA",
            self.kDLCUDAHost: "CUDAHost",
            self.kDLOpenCL: "OpenCL",
            self.kDLVulkan: "Vulkan",
            self.kDLMetal: "Metal",
            self.kDLVPI: "VPI",
            self.kDLROCM: "ROCM",
            self.kDLROCMHost: "ROMCHost",
            self.kDLCUDAManaged: "CUDAManaged",
            self.kDLOneAPI: "oneAPI",
        }[self.value]
class DLDevice(ctypes.Structure):
    """Represents the device where DLTensor memory is allocated.
    The device is represented by the pair of fields:
        device_type: DLDeviceType
        device_id: c_int
    """
    # Field order matches the DLDevice C struct ABI; do not reorder.
    _fields_ = [
        ("device_type", DLDeviceType),
        ("device_id", ctypes.c_int),
    ]
class DLDataTypeCode(ctypes.c_uint8):
    """An integer that encodes the category of DLTensor elements' data type.

    The numeric values mirror the ``DLDataTypeCode`` enum of the DLPack
    C header.
    """
    kDLInt = 0
    kDLUInt = 1
    kDLFloat = 2
    kDLOpaquePointer = 3
    kDLBfloat = 4
    kDLComplex = 5
    def __str__(self):
        # Map the raw value to a readable category name; raises KeyError for
        # unknown codes.
        return {
            self.kDLInt: "int",
            self.kDLUInt: "uint",
            self.kDLFloat: "float",
            self.kDLBfloat: "bfloat",
            self.kDLComplex: "complex",
            self.kDLOpaquePointer: "void_p"
        }[self.value]
class DLDataType(ctypes.Structure):
    """Descriptor of data type for elements of DLTensor.
    The data type is described by a triple, `DLDataType.type_code`,
    `DLDataType.bits`, and `DLDataType.lanes`.
    The element is understood as packed `lanes` repetitions of
    elements from `type_code` data-category of width `bits`.
    """
    # Field order matches the DLDataType C struct ABI; do not reorder.
    _fields_ = [
        ("type_code", DLDataTypeCode),
        ("bits", ctypes.c_uint8),
        ("lanes", ctypes.c_uint16),
    ]
    # numpy dtype name -> (type_code, bits, lanes); ctypes accepts such a
    # tuple when it is assigned to a DLDataType-typed structure field.
    TYPE_MAP = {
        # NOTE(review): "bool" is encoded as a 1-bit unsigned int here;
        # recent DLPack revisions define a separate kDLBool code -- confirm
        # what consumers expect.
        "bool": (DLDataTypeCode.kDLUInt, 1, 1),
        "int8": (DLDataTypeCode.kDLInt, 8, 1),
        "int16": (DLDataTypeCode.kDLInt, 16, 1),
        "int32": (DLDataTypeCode.kDLInt, 32, 1),
        "int64": (DLDataTypeCode.kDLInt, 64, 1),
        "uint8": (DLDataTypeCode.kDLUInt, 8, 1),
        "uint16": (DLDataTypeCode.kDLUInt, 16, 1),
        "uint32": (DLDataTypeCode.kDLUInt, 32, 1),
        "uint64": (DLDataTypeCode.kDLUInt, 64, 1),
        "float16": (DLDataTypeCode.kDLFloat, 16, 1),
        "float32": (DLDataTypeCode.kDLFloat, 32, 1),
        "float64": (DLDataTypeCode.kDLFloat, 64, 1),
        "complex64": (DLDataTypeCode.kDLComplex, 64, 1),
        "complex128": (DLDataTypeCode.kDLComplex, 128, 1)
    }
class DLTensor(ctypes.Structure):
    """Structure describing strided layout of DLTensor.
    Fields are:
        data: void pointer
        device: DLDevice
        ndim: number of indices needed to reference an
              element of the tensor
        dtype: data type descriptor
        shape: tuple with lengths of the corresponding
               tensor dimensions
        strides: tuple of numbers of array elements to
                 step in each dimension when traversing
                 the tensor
        byte_offset: data + byte_offset gives the address of
                     tensor element with index (0,) * ndim
    """
    # Field order matches the DLTensor C struct ABI; do not reorder.
    _fields_ = [
        ("data", ctypes.c_void_p),
        ("device", DLDevice),
        ("ndim", ctypes.c_int),
        ("dtype", DLDataType),
        ("shape", ctypes.POINTER(ctypes.c_int64)),
        ("strides", ctypes.POINTER(ctypes.c_int64)),
        ("byte_offset", ctypes.c_uint64),
    ]
class DLManagedTensor(ctypes.Structure):
    """Structure storing the pointer to the tensor descriptor,
    deleter callable for the tensor descriptor, and pointer to
    some additional data. These are stored in fields `dl_tensor`,
    `deleter`, and `manager_ctx`."""
    # Field order matches the DLManagedTensor C struct ABI; do not reorder.
    _fields_ = [
        ("dl_tensor", DLTensor),
        ("manager_ctx", ctypes.c_void_p),
        ("deleter", ctypes.CFUNCTYPE(None, ctypes.c_void_p)),
    ]
from typing import Callable
import numpy as np
import ctypes
from .dlpack import DLManagedTensor, DLDevice, DLDataType, _c_str_dltensor
# Declare signatures for the CPython raw-memory and capsule helpers used
# below so that ctypes passes and returns pointers correctly.
ctypes.pythonapi.PyMem_RawMalloc.restype = ctypes.c_void_p
ctypes.pythonapi.PyMem_RawFree.argtypes = [ctypes.c_void_p]
ctypes.pythonapi.PyCapsule_New.restype=ctypes.py_object
ctypes.pythonapi.PyCapsule_New.argtypes=[ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
class _Holder:
    """A wrapper around a numpy array to keep track of references to the underlying memory.
    Parameters
    ----------
    np_array : np.ndarray
        The numpy array that will be converted to a DLPack tensor and must be managed.
    """
    def __init__(self, np_array: np.ndarray) -> None:
        self.np_array = np_array
        self.data = np_array.ctypes.data_as(ctypes.c_void_p)
        self.shape = np_array.ctypes.shape_as(ctypes.c_int64)
        self.strides = np_array.ctypes.strides_as(ctypes.c_int64)
        # numpy reports strides in bytes while DLPack expects element counts,
        # so divide each stride by the item size.
        for i in range(np_array.ndim):
            self.strides[i] //= np_array.itemsize
    def _as_manager_ctx(self) -> ctypes.c_void_p:
        """Return a void* context pointer for DLManagedTensor.manager_ctx.

        Takes two strong references (one on this holder, one on the pointer
        object) that are released later by ``_numpy_array_deleter``.
        """
        py_obj = ctypes.py_object(self)
        py_obj_ptr = ctypes.pointer(py_obj)
        ctypes.pythonapi.Py_IncRef(py_obj)
        ctypes.pythonapi.Py_IncRef(ctypes.py_object(py_obj_ptr))
        return ctypes.cast(py_obj_ptr, ctypes.c_void_p)
@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _numpy_array_deleter(handle: ctypes.c_void_p) -> None:
    """A function to deallocate the memory of a numpy array."""
    dl_managed_tensor = DLManagedTensor.from_address(handle)
    py_obj_ptr = ctypes.cast(
        dl_managed_tensor.manager_ctx, ctypes.POINTER(ctypes.py_object)
    )
    py_obj = py_obj_ptr.contents
    # Release the two references taken in _Holder._as_manager_ctx ...
    ctypes.pythonapi.Py_DecRef(py_obj)
    ctypes.pythonapi.Py_DecRef(ctypes.py_object(py_obj_ptr))
    # ... then free the DLManagedTensor struct allocated in from_numpy.
    ctypes.pythonapi.PyMem_RawFree(handle)
@ctypes.CFUNCTYPE(None, ctypes.c_void_p)
def _numpy_pycapsule_deleter(handle: ctypes.c_void_p) -> None:
    """A function to deallocate a pycapsule that wraps a numpy array."""
    pycapsule: ctypes.py_object = ctypes.cast(handle, ctypes.py_object)
    # Only clean up if the capsule still carries the "dltensor" name, i.e.
    # it was never consumed; a consumer renames the capsule and takes over
    # ownership of the managed tensor.
    if ctypes.pythonapi.PyCapsule_IsValid(pycapsule, _c_str_dltensor):
        dl_managed_tensor = ctypes.pythonapi.PyCapsule_GetPointer(
            pycapsule, _c_str_dltensor
        )
        _numpy_array_deleter(dl_managed_tensor)
        # Clear the destructor so it cannot run a second time.
        ctypes.pythonapi.PyCapsule_SetDestructor(pycapsule, None)
def from_numpy(np_array: np.ndarray):
    """Convert a numpy array to another type of dlpack compatible array.

    Parameters
    ----------
    np_array : np.ndarray
        The source numpy array that will be converted.

    Returns
    -------
    pycapsule : PyCapsule
        A pycapsule containing a DLManagedTensor that can be converted
        to other array formats without copying the underlying memory.
    """
    holder = _Holder(np_array)
    # Allocate the DLManagedTensor with the raw allocator; it is freed by
    # _numpy_array_deleter once the consumer is done with it.
    size = ctypes.c_size_t(ctypes.sizeof(DLManagedTensor))
    dl_managed_tensor = DLManagedTensor.from_address(
        ctypes.pythonapi.PyMem_RawMalloc(size)
    )
    dl_managed_tensor.dl_tensor.data = holder.data
    dl_managed_tensor.dl_tensor.device = DLDevice(1, 0)  # kDLCPU, device 0
    dl_managed_tensor.dl_tensor.ndim = np_array.ndim
    dl_managed_tensor.dl_tensor.dtype = DLDataType.TYPE_MAP[str(np_array.dtype)]
    dl_managed_tensor.dl_tensor.shape = holder.shape
    dl_managed_tensor.dl_tensor.strides = holder.strides
    dl_managed_tensor.dl_tensor.byte_offset = 0
    dl_managed_tensor.manager_ctx = holder._as_manager_ctx()
    dl_managed_tensor.deleter = _numpy_array_deleter
    pycapsule = ctypes.pythonapi.PyCapsule_New(
        ctypes.byref(dl_managed_tensor),
        _c_str_dltensor,
        _numpy_pycapsule_deleter,
    )
    return pycapsule
from collections import namedtuple
import os
import re
# Separator used inside C++ #include directives (always '/', regardless of OS).
CPP_SEP = '/'
# One #include directive: the included path and the line it occurs on.
Include = namedtuple('Include', ['path', 'line_no'])
class Path(list):
    """Represents a file path as a list of its components.

    Accepts either a single list of components or any number of path
    strings; strings are split on both the OS separator and '/', and
    '..' segments pop the previous component.
    """

    def __init__(self, *args):
        super().__init__()
        parts = args[0] if len(args) > 0 and type(args[0]) is list else args
        for part in parts:
            self.append(part)

    def append(self, sub: str):
        """Append *sub*, splitting it into components ('..' climbs one level)."""
        pieces = sub.split(os.sep)
        if len(pieces) == 1:
            pieces = sub.split(CPP_SEP)
        for piece in pieces:
            if piece == '..':
                # Go up a path
                self.pop()
            else:
                super().append(piece)

    def copy(self):
        """Return an independent copy of this path."""
        return Path(list(self))

    def join(self, sub: str):
        """Return a new path with *sub* appended; self is left unchanged."""
        joined = self.copy()
        joined.append(sub)
        return joined

    def dirname(self) -> str:
        """Return the first element of the path ('' when empty)."""
        return self[0] if self else ''

    def ext(self) -> str:
        """Return the text after the last '.' of the final component ('' when empty)."""
        return self[-1].split('.')[-1] if self else ''

    def __str__(self):
        return self[0] + '/' if len(self) == 1 else '/'.join(self)

    def __hash__(self):
        return hash(str(self))
def header_list(files: list) -> list:
    '''
    Given a list of files, compute the list of header files in the order in
    which they should be included to avoid conflicts (a header appears only
    after every header it locally #includes).

    Raises RuntimeError if the include graph contains a cycle or references
    a header outside *files* (previously this looped forever).
    '''
    dependencies = {}
    headers = []
    # Collect the local-include lists for every .hpp/.h file.
    for file in files:
        file_ext = file.ext()
        if (file_ext == 'hpp' or file_ext == 'h'):
            dependencies[file] = [d.path for d in get_dependencies(file)['local']]
    while dependencies:
        progress = False
        for file in list(dependencies.keys()):
            # Remove includes we've already included
            dependencies[file] = [i for i in dependencies[file] if i not in headers]
            # If no more dependencies, add file
            if not dependencies[file]:
                headers.append(file)
                dependencies.pop(file)
                progress = True
        if not progress:
            # No file could be emitted this pass: the remaining headers
            # depend on each other (or on missing files). Fail loudly
            # instead of spinning forever.
            raise RuntimeError(
                "Circular or unresolvable #include dependencies: "
                + ", ".join(str(f) for f in dependencies))
    return headers
def get_dependencies(file: Path) -> dict:
    ''' Parse a .cpp/.hpp file for its system and local dependencies.

    Returns {"system": [Include, ...], "local": [Include, ...]}; local
    include paths are resolved relative to the file's directory.
    '''
    directory = Path(file[:-1])
    headers = {"system": [], "local": []}
    system_pattern = re.compile('^#include <(?P<file>.*)>')
    local_pattern = re.compile('^#include "(?P<file>.*)"')
    with open(str(file), mode='r') as infile:
        for line_no, line in enumerate(infile):
            system_match = system_pattern.search(line)
            if system_match:
                headers["system"].append(
                    Include(path=system_match.group('file'), line_no=line_no))
                continue
            local_match = local_pattern.search(line)
            if local_match:
                headers["local"].append(
                    Include(path=directory.join(local_match.group('file')),
                            line_no=line_no))
    return headers
def file_strip(file: Path) -> str:
    """Strip local #include statements and '#pragma once' declarations from a
    source file, rewriting '#define CSV_INLINE' so the macro expands to
    ``inline`` in the amalgamated header."""
    drop_patterns = ('#include "(?P<file>.*)"', '#pragma once')
    kept = []
    with open(str(file), mode='r') as infile:
        for line in infile:
            if any(re.search(pattern, line) is not None for pattern in drop_patterns):
                continue
            # Change "#define CSV_INLINE" to "#define CSV_INLINE inline"
            if ('#define CSV_INLINE' in line):
                line = "#define CSV_INLINE inline\n"
            kept.append(line)
    return ''.join(kept)
'''
Collate header files by using this following algorithm:
- Given a list of header files (HEADERS) ordered such that the first file
  has no internal dependencies, and the last file is the most dependent
- Reverse the list
- Maintain these data structures:
  - A set of header files (PROCESSED) that were processed
  - A set of header files (MISSING_INCLUDES) that we are looking for
  - The collation of header source code (HEADER_CONCAT)
- Go through each FILE in list of headers in reverse order (starting with
  the headers at the highest level of the dependency tree)
  - If FILE is not in MISSING_INCLUDES, then concatenate source verbatim to HEADER_CONCAT
  - Otherwise, there is one or more #include statements in HEADER_CONCAT which references FILE
    - Replace the first #include statement with the source of FILE, and remove the rest
'''
def header_collate(headers: list):
    """Concatenate header sources in dependency order, splicing each file's
    text into the first position where it was #included (see the algorithm
    description above)."""
    headers.reverse()
    # Placeholder for includes to be inserted
    splice_template = "__INSERT_HEADER_HERE__({})\n"
    header_concat = ''
    processed = set()
    missing_includes = set()
    def process_file(path: Path):
        # Read one header, replacing its not-yet-seen local includes with
        # placeholders and dropping '#pragma once' lines. Includes that were
        # already processed are dropped entirely (their text appears earlier).
        source = ''
        with open(str(path), mode='r') as infile:
            for line in infile:
                # Add local includes to MISSING_INCLUDES
                local_include = re.search('^#include "(?P<file>.*)"', line)
                if local_include:
                    dir = Path(path[:-1])
                    include_path = dir.join(local_include.group('file'))
                    if str(include_path) not in processed:
                        missing_includes.add(str(include_path))
                        source += splice_template.format(str(include_path))
                elif '#pragma once' in line:
                    continue
                else:
                    source += line
        return source
    for path in headers:
        processed.add(str(path))
        if str(path) in missing_includes:
            source = process_file(path)
            splice_phrase = splice_template.format(str(path))
            # Replace the first placeholder with the source ...
            header_concat = header_concat.replace(
                splice_phrase,
                source + '\n', 1)
            # ... and delete any remaining duplicates of it.
            header_concat = header_concat.replace(splice_phrase, '')
            missing_includes.remove(str(path))
        else:
            header_concat += process_file(path)
    return header_concat
if __name__ == "__main__":
    ''' Iterate over every .cpp and .hpp file '''
    headers = []
    sources = []
    system_includes = set()
    # Generate a list of header and source file locations
    for dir in os.walk('include'):
        files = dir[2]
        for file in files:
            fname = Path(dir[0], file)
            if (file[-4:] == '.hpp' or file[-2:] == '.h'):
                headers.append(fname)
            elif (file[-4:] == '.cpp'):
                sources.append(fname)
    # Rearrange header order to avoid compilation conflicts
    headers = header_list(sorted(headers))
    # Get system includes
    # NOTE(review): system_includes is collected but never emitted below --
    # confirm whether the <...> includes should be printed into the header.
    for file in sources + headers:
        for include in get_dependencies(file)['system']:
            system_includes.add(include.path)
    # Collate header and source files
    header_concat = header_collate(headers)
    source_collate = ''
    for cpp in sources:
        source_collate += file_strip(cpp) + '\n'
    # Generate hpp file: force CSV_INLINE to `inline` and splice the
    # collated sources into the marker inside the collated headers.
    print("#pragma once")
    print(header_concat.replace(
        "#define CSV_INLINE", "#define CSV_INLINE inline").replace(
        "/** INSERT_CSV_SOURCES **/", source_collate))
# Vince's CSV Parser
[](https://travis-ci.com/vincentlaucsb/csv-parser)
* [Motivation](#motivation)
* [Documentation](#documentation)
* [Integration](#integration)
* [C++ Version](#c-version)
* [Single Header](#single-header)
* [CMake Instructions](#cmake-instructions)
* [Features & Examples](#features--examples)
* [Reading an Arbitrarily Large File (with Iterators)](#reading-an-arbitrarily-large-file-with-iterators)
* [Memory Mapped Files vs. Streams](#memory-mapped-files-vs-streams)
* [Indexing by Column Names](#indexing-by-column-names)
* [Numeric Conversions](#numeric-conversions)
* [Specifying the CSV Format](#specifying-the-csv-format)
* [Trimming Whitespace](#trimming-whitespace)
* [Handling Variable Numbers of Columns](#handling-variable-numbers-of-columns)
* [Setting Column Names](#setting-column-names)
* [Converting to JSON](#converting-to-json)
* [Parsing an In-Memory String](#parsing-an-in-memory-string)
* [Writing CSV Files](#writing-csv-files)
* [Contributing](#contributing)
## Motivation
There are plenty of other CSV parsers in the wild, but I had a hard time finding what I wanted. Inspired by Python's `csv` module, I wanted a library with **simple, intuitive syntax**. Furthermore, I wanted support for special use cases such as calculating statistics on very large files. Thus, this library was created with these following goals in mind.
### Performance and Memory Requirements
With the deluge of large datasets available, a performant CSV parser is a necessity. By using overlapped threads, memory mapped IO, and
efficient data structures, this parser can quickly tackle large CSV files. Furthermore, this parser has a minimal memory footprint and
can handle larger-than-RAM files.
#### Show me the numbers
On my computer (Intel Core i7-8550U @ 1.80GHz/Toshiba XG5 SSD), this parser can read
* the [69.9 MB 2015_StateDepartment.csv](https://github.com/vincentlaucsb/csv-data/tree/master/real_data) in 0.26 seconds (269 MBps)
* a [1.4 GB Craigslist Used Vehicles Dataset](https://www.kaggle.com/austinreese/craigslist-carstrucks-data/version/7) in 2.1 seconds (667 MBps)
* a [1.24GB Car Accidents Dataset](https://www.kaggle.com/sobhanmoosavi/us-accidents) in 5 seconds (248 MBps)
### Robust Yet Flexible
#### RFC 4180 and Beyond
This CSV parser is much more than a fancy string splitter, and parses all files following [RFC 4180](https://www.rfc-editor.org/rfc/rfc4180.txt).
However, in reality we know that RFC 4180 is just a suggestion, and there's many "flavors" of CSV such as tab-delimited files. Thus, this library has:
* Automatic delimiter guessing
* Ability to ignore comments in leading rows and elsewhere
* Ability to handle rows of different lengths
By default, rows of variable length are silently ignored, although you may elect to keep them or throw an error.
#### Encoding
This CSV parser is encoding-agnostic and will handle ANSI and UTF-8 encoded files.
It does not try to decode UTF-8, except for detecting and stripping UTF-8 byte order marks.
### Well Tested
This CSV parser has an extensive test suite and is checked for memory safety with Valgrind. If you still manage to find a bug,
do not hesitate to report it.
## Documentation
In addition to the [Features & Examples](#features--examples) below, a [fully-fledged online documentation](https://vincentlaucsb.github.io/csv-parser/html/) contains more examples, details, interesting features, and instructions for less common use cases.
## Integration
This library was developed with Microsoft Visual Studio and is compatible with g++ 7.5+ and clang.
All of the code required to build this library, aside from the C++ standard library, is contained under `include/`.
### C++ Version
While C++17 is recommended, C++11 is the minimum version required. This library makes extensive use of string views, and uses
[Martin Moene's string view library](https://github.com/martinmoene/string-view-lite) if `std::string_view` is not available.
### Single Header
This library is available as a single `.hpp` file under [`single_include/csv.hpp`](single_include/csv.hpp).
### CMake Instructions
If you're including this in another CMake project, you can simply clone this repo into your project directory,
and add the following to your CMakeLists.txt:
```
# Optional: Defaults to C++ 17
# set(CSV_CXX_STANDARD 11)
add_subdirectory(csv-parser)
# ...
add_executable(<your program> ...)
target_link_libraries(<your program> csv)
```
## Features & Examples
### Reading an Arbitrarily Large File (with Iterators)
With this library, you can easily stream over a large file without reading its entirety into memory.
**C++ Style**
```cpp
# include "csv.hpp"
using namespace csv;
...
CSVReader reader("very_big_file.csv");
for (CSVRow& row: reader) { // Input iterator
for (CSVField& field: row) {
// By default, get<>() produces a std::string.
// A more efficient get<string_view>() is also available, where the resulting
// string_view is valid as long as the parent CSVRow is alive
std::cout << field.get<>() << ...
}
}
...
```
**Old-Fashioned C Style Loop**
```cpp
...
CSVReader reader("very_big_file.csv");
CSVRow row;
while (reader.read_row(row)) {
// Do stuff with row here
}
...
```
#### Memory-Mapped Files vs. Streams
By default, passing in a file path string to the constructor of `CSVReader`
causes memory-mapped IO to be used. In general, this option is the most
performant.
However, `std::ifstream` may also be used as well as in-memory sources via `std::stringstream`.
**Note**: Currently CSV guessing only works for memory-mapped files. The CSV dialect
must be manually defined for other sources.
```cpp
CSVFormat format;
// custom formatting options go here
CSVReader mmap("some_file.csv", format);
std::ifstream infile("some_file.csv", std::ios::binary);
CSVReader ifstream_reader(infile, format);
std::stringstream my_csv;
CSVReader sstream_reader(my_csv, format);
```
### Indexing by Column Names
Retrieving values using a column name string is a cheap, constant time operation.
```cpp
# include "csv.hpp"
using namespace csv;
...
CSVReader reader("very_big_file.csv");
double sum = 0;
for (auto& row: reader) {
// Note: Can also use index of column with [] operator
sum += row["Total Salary"].get<double>();
}
...
```
### Numeric Conversions
If your CSV has lots of numeric values, you can also have this parser (lazily)
convert them to the proper data type.
* Type checking is performed on conversions to prevent undefined behavior and integer overflow
* Negative numbers cannot be blindly converted to unsigned integer types
* `get<float>()`, `get<double>()`, and `get<long double>()` are capable of parsing numbers written in scientific notation.
* **Note:** Conversions to floating point types are not currently checked for loss of precision.
```cpp
# include "csv.hpp"
using namespace csv;
...
CSVReader reader("very_big_file.csv");
for (auto& row: reader) {
if (row["timestamp"].is_int()) {
// Can use get<>() with any integer type, but negative
// numbers cannot be converted to unsigned types
row["timestamp"].get<int>();
// You can also attempt to parse hex values
int value;
if (row["hexValue"].try_parse_hex(value)) {
std::cout << "Hex value is " << value << std::endl;
}
// ..
}
}
```
### Converting to JSON
You can serialize individual rows as JSON objects, where the keys are column names, or as
JSON arrays (which don't contain column names). The outputted JSON contains properly escaped
strings with minimal whitespace and no quoting for numeric values. How these JSON fragments are
assembled into a larger JSON document is an exercise left for the user.
```cpp
# include <sstream>
# include "csv.hpp"
using namespace csv;
...
CSVReader reader("very_big_file.csv");
std::stringstream my_json;
for (auto& row: reader) {
my_json << row.to_json() << std::endl;
my_json << row.to_json_array() << std::endl;
// You can pass in a vector of column names to
// slice or rearrange the outputted JSON
my_json << row.to_json({ "A", "B", "C" }) << std::endl;
my_json << row.to_json_array({ "C", "B", "A" }) << std::endl;
}
```
### Specifying the CSV Format
Although the CSV parser has a decent guessing mechanism, in some cases it is preferable to specify the exact parameters of a file.
```cpp
# include "csv.hpp"
# include ...
using namespace csv;
CSVFormat format;
format.delimiter('\t')
.quote('~')
.header_row(2); // Header is on 3rd row (zero-indexed)
// .no_header(); // Parse CSVs without a header row
// .quote(false); // Turn off quoting
// Alternatively, we can use format.delimiter({ '\t', ',', ... })
// to tell the CSV guesser which delimiters to try out
CSVReader reader("wierd_csv_dialect.csv", format);
for (auto& row: reader) {
// Do stuff with rows here
}
```
#### Trimming Whitespace
This parser can efficiently trim off leading and trailing whitespace. Of course,
make sure you don't include your intended delimiter or newlines in the list of characters
to trim.
```cpp
CSVFormat format;
format.trim({ ' ', '\t' });
```
#### Handling Variable Numbers of Columns
Sometimes, the rows in a CSV are not all of the same length. Whether this was intentional or not,
this library is built to handle all use cases.
```cpp
CSVFormat format;
// Default: Silently ignoring rows with missing or extraneous columns
format.variable_columns(false); // Short-hand
format.variable_columns(VariableColumnPolicy::IGNORE);
// Case 2: Keeping variable-length rows
format.variable_columns(true); // Short-hand
format.variable_columns(VariableColumnPolicy::KEEP);
// Case 3: Throwing an error if variable-length rows are encountered
format.variable_columns(VariableColumnPolicy::THROW);
```
#### Setting Column Names
If a CSV file does not have column names, you can specify your own:
```cpp
std::vector<std::string> col_names = { ... };
CSVFormat format;
format.column_names(col_names);
```
### Parsing an In-Memory String
```cpp
# include "csv.hpp"
using namespace csv;
...
// Method 1: Using parse()
std::string csv_string = "Actor,Character\r\n"
"Will Ferrell,Ricky Bobby\r\n"
"John C. Reilly,Cal Naughton Jr.\r\n"
"Sacha Baron Cohen,Jean Giard\r\n";
auto rows = parse(csv_string);
for (auto& r: rows) {
// Do stuff with row here
}
// Method 2: Using _csv operator
auto rows = "Actor,Character\r\n"
"Will Ferrell,Ricky Bobby\r\n"
"John C. Reilly,Cal Naughton Jr.\r\n"
"Sacha Baron Cohen,Jean Giard\r\n"_csv;
for (auto& r: rows) {
// Do stuff with row here
}
```
### Writing CSV Files
```cpp
# include "csv.hpp"
# include ...
using namespace csv;
using namespace std;
...
stringstream ss; // Can also use ofstream, etc.
auto writer = make_csv_writer(ss);
// auto writer = make_tsv_writer(ss); // For tab-separated files
// DelimWriter<stringstream, '|', '"'> writer(ss); // Your own custom format
// set_decimal_places(2); // How many places after the decimal will be written for floats
writer << vector<string>({ "A", "B", "C" })
<< deque<string>({ "I'm", "too", "tired" })
<< list<string>({ "to", "write", "documentation." });
writer << array<string, 2>({ "The quick brown", "fox", "jumps over the lazy dog" });
writer << make_tuple(1, 2.0, "Three");
...
```
You can pass in arbitrary types into `DelimWriter` by defining a conversion function
for that type to `std::string`.
## Contributing
Bug reports, feature requests, and so on are always welcome. Feel free to leave a note in the Issues section.
| /RoughPy-0.0.1.tar.gz/RoughPy-0.0.1/external/csv-parser/README.md | 0.638046 | 0.873161 | README.md | pypi |
.. figure:: https://github.com/pybind/pybind11/raw/master/docs/pybind11-logo.png
:alt: pybind11 logo
**pybind11 — Seamless operability between C++11 and Python**
|Latest Documentation Status| |Stable Documentation Status| |Gitter chat| |GitHub Discussions| |CI| |Build status|
|Repology| |PyPI package| |Conda-forge| |Python Versions|
`Setuptools example <https://github.com/pybind/python_example>`_
• `Scikit-build example <https://github.com/pybind/scikit_build_example>`_
• `CMake example <https://github.com/pybind/cmake_example>`_
.. start
**pybind11** is a lightweight header-only library that exposes C++ types
in Python and vice versa, mainly to create Python bindings of existing
C++ code. Its goals and syntax are similar to the excellent
`Boost.Python <http://www.boost.org/doc/libs/1_58_0/libs/python/doc/>`_
library by David Abrahams: to minimize boilerplate code in traditional
extension modules by inferring type information using compile-time
introspection.
The main issue with Boost.Python—and the reason for creating such a
similar project—is Boost. Boost is an enormously large and complex suite
of utility libraries that works with almost every C++ compiler in
existence. This compatibility has its cost: arcane template tricks and
workarounds are necessary to support the oldest and buggiest of compiler
specimens. Now that C++11-compatible compilers are widely available,
this heavy machinery has become an excessively large and unnecessary
dependency.
Think of this library as a tiny self-contained version of Boost.Python
with everything stripped away that isn't relevant for binding
generation. Without comments, the core header files only require ~4K
lines of code and depend on Python (3.6+, or PyPy) and the C++
standard library. This compact implementation was possible thanks to
some of the new C++11 language features (specifically: tuples, lambda
functions and variadic templates). Since its creation, this library has
grown beyond Boost.Python in many ways, leading to dramatically simpler
binding code in many common situations.
Tutorial and reference documentation is provided at
`pybind11.readthedocs.io <https://pybind11.readthedocs.io/en/latest>`_.
A PDF version of the manual is available
`here <https://pybind11.readthedocs.io/_/downloads/en/latest/pdf/>`_.
And the source code is always available at
`github.com/pybind/pybind11 <https://github.com/pybind/pybind11>`_.
Core features
-------------
pybind11 can map the following core C++ features to Python:
- Functions accepting and returning custom data structures per value,
reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended
in Python
Goodies
-------
In addition to the core functionality, pybind11 provides some extra
goodies:
- Python 3.6+, and PyPy3 7.3 are supported with an implementation-agnostic
interface (pybind11 2.9 was the last version to support Python 2 and 3.5).
- It is possible to bind C++11 lambda functions with captured
variables. The lambda capture data is stored inside the resulting
Python function object.
- pybind11 uses C++11 move constructors and move assignment operators
whenever possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
Python's buffer protocols. This is handy e.g. for fast conversion
between C++ matrix classes like Eigen and NumPy without expensive
copy operations.
- pybind11 can automatically vectorize functions so that they are
transparently applied to all entries of one or more NumPy array
arguments.
- Python's slice-based access and assignment operations can be
supported with just a few lines of code.
- Everything is contained in just a few header files; there is no need
to link against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11
conversion of PyRosetta, an enormous Boost.Python binding project,
`reported <https://graylab.jhu.edu/Sergey/2016.RosettaCon/PyRosetta-4.pdf>`_
a binary size reduction of **5.4x** and compile time reduction by
**5.8x**.
- Function signatures are precomputed at compile time (using
``constexpr``), leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled
similar to regular Python objects.
Supported compilers
-------------------
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or
newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2017 or newer
4. Intel classic C++ compiler 18 or newer (ICC 20.2 tested in CI)
5. Cygwin/GCC (previously tested on 2.5.1)
6. NVCC (CUDA 11.0 tested in CI)
7. NVIDIA PGI (20.9 tested in CI)
About
-----
This project was created by `Wenzel
Jakob <http://rgl.epfl.ch/people/wjakob>`_. Significant features and/or
improvements to the code were contributed by Jonas Adler, Lori A. Burns,
Sylvain Corlay, Eric Cousineau, Aaron Gokaslan, Ralf Grosse-Kunstleve, Trent Houliston, Axel
Huebl, @hulucc, Yannick Jadoul, Sergey Lyskov, Johan Mabille, Tomasz Miąsko,
Dean Moldovan, Ben Pritchard, Jason Rhinelander, Boris Schäling, Pim
Schellart, Henry Schreiner, Ivan Smirnov, Boris Staletic, and Patrick Stewart.
We thank Google for a generous financial contribution to the continuous
integration infrastructure used by this project.
Contributing
~~~~~~~~~~~~
See the `contributing
guide <https://github.com/pybind/pybind11/blob/master/.github/CONTRIBUTING.md>`_
for information on building and contributing to pybind11.
License
~~~~~~~
pybind11 is provided under a BSD-style license that can be found in the
`LICENSE <https://github.com/pybind/pybind11/blob/master/LICENSE>`_
file. By using, distributing, or contributing to this project, you agree
to the terms and conditions of this license.
.. |Latest Documentation Status| image:: https://readthedocs.org/projects/pybind11/badge?version=latest
:target: http://pybind11.readthedocs.org/en/latest
.. |Stable Documentation Status| image:: https://img.shields.io/badge/docs-stable-blue.svg
:target: http://pybind11.readthedocs.org/en/stable
.. |Gitter chat| image:: https://img.shields.io/gitter/room/gitterHQ/gitter.svg
:target: https://gitter.im/pybind/Lobby
.. |CI| image:: https://github.com/pybind/pybind11/workflows/CI/badge.svg
:target: https://github.com/pybind/pybind11/actions
.. |Build status| image:: https://ci.appveyor.com/api/projects/status/riaj54pn4h08xy40?svg=true
:target: https://ci.appveyor.com/project/wjakob/pybind11
.. |PyPI package| image:: https://img.shields.io/pypi/v/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |Conda-forge| image:: https://img.shields.io/conda/vn/conda-forge/pybind11.svg
:target: https://github.com/conda-forge/pybind11-feedstock
.. |Repology| image:: https://repology.org/badge/latest-versions/python:pybind11.svg
:target: https://repology.org/project/python:pybind11/versions
.. |Python Versions| image:: https://img.shields.io/pypi/pyversions/pybind11.svg
:target: https://pypi.org/project/pybind11/
.. |GitHub Discussions| image:: https://img.shields.io/static/v1?label=Discussions&message=Ask&color=blue&logo=github
:target: https://github.com/pybind/pybind11/discussions
| /RoughPy-0.0.1.tar.gz/RoughPy-0.0.1/external/pybind11/README.rst | 0.921996 | 0.731562 | README.rst | pypi |
import typing as t
import urllib.parse
import functools
import horseman.parsers
import horseman.types
import horseman.http
import horseman.meta
from dataclasses import dataclass
from roughrider.routing.meta import Route
class Request(horseman.meta.Overhead):
    """WSGI environ wrapper ("overhead") representing an HTTP request.

    Lazily exposes the parsed query string, cookies, content type and
    request body, caching each value after the first access. The Ellipsis
    sentinel ``...`` marks "not computed yet", since ``None`` is a valid
    cached result (e.g. no Content-Type header).
    """

    __slots__ = (
        '_content_type',
        '_cookies',
        '_data',
        '_query',
        'app',
        'environ',
        'method',
        'route',
        'script_name',
    )

    app: horseman.meta.Node
    content_type: t.Optional[horseman.http.ContentType]
    cookies: horseman.http.Cookies
    environ: horseman.types.Environ
    method: horseman.types.HTTPMethod
    query: horseman.http.Query
    route: t.Optional[Route]
    script_name: str
    _data: t.Optional[horseman.parsers.Data]

    def __init__(self,
                 app: horseman.meta.Node,
                 environ: horseman.types.Environ,
                 route: t.Optional[Route] = None):
        # ``...`` (Ellipsis) is the "not yet computed" sentinel for the
        # lazily-populated attributes below.
        self._content_type = ...
        self._cookies = ...
        self._data = ...
        self._query = ...
        self.app = app
        self.environ = environ
        self.method = environ['REQUEST_METHOD'].upper()
        self.route = route
        self.script_name = urllib.parse.quote(environ['SCRIPT_NAME'])

    def extract(self) -> horseman.parsers.Data:
        """Parse and return the request body (cached after first call).

        Returns ``None`` when the request carries no Content-Type header
        (previously the ``...`` sentinel could leak to the caller; the
        parsed value is now always cached, including the ``None`` case,
        to honour the ``Optional[Data]`` annotation).
        """
        if self._data is ...:
            if self.content_type:
                self._data = horseman.parsers.parser(
                    self.environ['wsgi.input'], self.content_type)
            else:
                self._data = None
        return self._data

    @property
    def query(self):
        # Parsed query string, computed on first access.
        if self._query is ...:
            self._query = horseman.http.Query.from_environ(self.environ)
        return self._query

    @property
    def cookies(self):
        # Request cookies, computed on first access.
        if self._cookies is ...:
            self._cookies = horseman.http.Cookies.from_environ(self.environ)
        return self._cookies

    @property
    def content_type(self):
        # Parsed Content-Type header, or None if the header is absent.
        if self._content_type is ...:
            if 'CONTENT_TYPE' in self.environ:
                self._content_type = \
                    horseman.http.ContentType.from_http_header(
                        self.environ['CONTENT_TYPE'])
            else:
                self._content_type = None
        return self._content_type

    @functools.cached_property
    def application_uri(self):
        """Return the application root URI (scheme://host[:port]/script).

        Default ports (80 for http, 443 for https) are omitted, following
        the usual WSGI URL reconstruction recipe.
        """
        scheme = self.environ['wsgi.url_scheme']
        http_host = self.environ.get('HTTP_HOST')
        if not http_host:
            server = self.environ['SERVER_NAME']
            port = self.environ.get('SERVER_PORT', '80')
        elif ':' in http_host:
            server, port = http_host.split(':', 1)
        else:
            server = http_host
            port = '80'
        if (scheme == 'http' and port == '80') or \
           (scheme == 'https' and port == '443'):
            return f'{scheme}://{server}{self.script_name}'
        return f'{scheme}://{server}:{port}{self.script_name}'

    def uri(self, include_query=True):
        """Return the full URI of the request, optionally with query string."""
        url = self.application_uri
        path_info = urllib.parse.quote(self.environ.get('PATH_INFO', ''))
        if include_query:
            # Default to '' so a missing QUERY_STRING key does not make
            # urllib.parse.quote() raise TypeError on None.
            qs = urllib.parse.quote(self.environ.get('QUERY_STRING', ''))
            if qs:
                return f"{url}{path_info}?{qs}"
        return f"{url}{path_info}"
from typing import Optional, Iterable, NamedTuple, Literal, Tuple, Iterator
Header = Tuple[str, str]
Headers = Iterator[Header]

HTTPVerb = Literal[
    "GET", "HEAD", "PUT", "DELETE", "PATCH", "POST", "OPTIONS"]


class CORSPolicy(NamedTuple):
    """Immutable CORS policy able to emit response headers.

    ``headers`` yields the headers for a regular cross-origin response;
    ``preflight`` yields the headers answering an OPTIONS preflight.
    """
    origin: str = "*"
    methods: Optional[Iterable[HTTPVerb]] = None
    allow_headers: Optional[Iterable[str]] = None
    expose_headers: Optional[Iterable[str]] = None
    credentials: Optional[bool] = None
    max_age: Optional[int] = None

    def headers(self) -> Headers:
        """Yield the CORS headers for a regular (non-preflight) response."""
        yield "Access-Control-Allow-Origin", self.origin
        if self.methods is not None:
            yield "Access-Control-Allow-Methods", ", ".join(self.methods)
        if self.allow_headers is not None:
            yield ("Access-Control-Allow-Headers",
                   ", ".join(self.allow_headers))
        if self.expose_headers is not None:
            yield ("Access-Control-Expose-Headers",
                   ", ".join(self.expose_headers))
        if self.max_age is not None:
            yield "Access-Control-Max-Age", str(self.max_age)
        if self.credentials:
            yield "Access-Control-Allow-Credentials", "true"

    def preflight(self,
                  origin: Optional[str] = None,
                  acr_method: Optional[str] = None,
                  acr_headers: Optional[str] = None) -> Headers:
        """Yield the headers answering a CORS preflight (OPTIONS) request.

        ``acr_method`` / ``acr_headers`` echo the client's
        Access-Control-Request-* values when the policy does not pin its
        own methods / allowed headers. A non-wildcard origin mismatch
        yields no Allow-Origin header at all.
        """
        if not origin:
            # No Origin header sent: answer with the configured origin
            # and mark the response as origin-dependent for caches.
            yield "Access-Control-Allow-Origin", self.origin
            yield "Vary", "Origin"
        elif self.origin == '*':
            yield "Access-Control-Allow-Origin", '*'
        elif origin == self.origin:
            # Echo the matching origin; caches must key on Origin.
            yield "Access-Control-Allow-Origin", origin
            yield "Vary", "Origin"

        if self.methods is not None:
            yield "Access-Control-Allow-Methods", ", ".join(self.methods)
        elif acr_method:
            yield "Access-Control-Allow-Methods", acr_method

        if self.allow_headers is not None:
            yield ("Access-Control-Allow-Headers",
                   ", ".join(self.allow_headers))
        elif acr_headers:
            yield "Access-Control-Allow-Headers", acr_headers

        if self.expose_headers is not None:
            yield ("Access-Control-Expose-Headers",
                   ", ".join(self.expose_headers))
from frozendict import frozendict
from typing import cast, Optional, Union, Any, Mapping, List, NoReturn, Iterable, Tuple, Dict
Pairs = Iterable[Tuple[str, Any]]


class FormData(Dict[str, List[Any]]):
    """Multi-dict mapping each key to the list of submitted values.

    Accepts another FormData (copied as-is), a mapping, or an iterable
    of ``(key, value)`` pairs.
    """

    def __init__(self, data: Optional[Union['FormData', Dict, Pairs]] = None):
        if data is not None:
            if isinstance(data, FormData):
                # Already in multi-dict shape: plain dict copy.
                super().__init__(data)
            elif isinstance(data, Mapping):
                for key, value in data.items():
                    self.add(key, value)
            else:
                value = cast(Pairs, data)
                for key, value in value:
                    self.add(key, value)

    def get(self, name: str, default: Optional[Any] = None) -> Any:
        """Return the first value of the list found under ``name``,
        or ``default`` when the key is absent.
        """
        return super().get(name, [default])[0]

    def getlist(self, name: str, default: Optional[Any] = None) -> List[Any]:
        """Return the whole value list for ``name`` (``default`` as-is
        when the key is absent).
        """
        return super().get(name, default)

    def pairs(self) -> Pairs:
        """Iterate over flattened ``(key, value)`` pairs."""
        for key, values in self.items():
            for value in values:
                yield key, value

    def add(self, name: str, value: Any) -> None:
        """Append ``value`` to the list stored under ``name``.

        (Annotation fixed: the method returns None; ``NoReturn`` means
        "never returns".)
        """
        if name in self:
            self[name].append(value)
        else:
            self[name] = [value]

    def to_dict(self, frozen=True):
        """Collapse single-value lists and return a (frozen)dict.

        Uses a conditional expression instead of the previous
        ``and``/``or`` chain, so falsy single values (0, '', None,
        False) are unwrapped instead of staying inside their list.
        """
        impl = frozendict if frozen else dict
        return impl(
            {k: (v[0] if len(v) == 1 else v) for k, v in self.items()}
        )
class TypeCastingDict(FormData):
    """FormData variant whose getters cast stored string values."""

    TRUE_STRINGS = {'t', 'true', 'yes', '1', 'on'}
    FALSE_STRINGS = {'f', 'false', 'no', '0', 'off'}
    NONE_STRINGS = {'n', 'none', 'null'}

    def bool(self, key: str, default=...):
        """Cast the first value under ``key`` to a boolean (or None)."""
        value = self.get(key, default)
        if value in (True, False, None):
            # Already a boolean-ish constant: return untouched.
            return value
        value = value.lower()
        if value in self.TRUE_STRINGS:
            return True
        if value in self.FALSE_STRINGS:
            return False
        if value in self.NONE_STRINGS:
            return None
        raise ValueError(f"Can't cast {value!r} to boolean.")

    def int(self, key: str, default=...):
        """Cast the first value under ``key`` to an int."""
        return int(self.get(key, default))

    def float(self, key: str, default=...):
        """Cast the first value under ``key`` to a float."""
        return float(self.get(key, default))
import sys
from abc import ABC, abstractmethod
from typing import TypeVar
from horseman.response import Response
from horseman.http import HTTPError
from horseman.types import (
WSGICallable, Environ, StartResponse, ExceptionInfo)
Data = TypeVar('Data')
class Overhead(ABC):
    """WSGI Environ Overhead aka Request representation.

    This object contains everything needed to handle a request.
    It can carry DB connectors, parsed data and other utils.
    """

    # Parsed request payload (see `extract`).
    data: Data
    # The raw WSGI environ mapping for this request.
    environ: Environ

    @abstractmethod
    def extract(self) -> Data:
        """Extracts the data from the incoming HTTP request.

        Implementations parse the request body and return it; they may
        also cache it on the instance for subsequent calls.
        """
class APIView:
    """Dispatch a request to the method named after its HTTP verb.

    Method names of the class must be valid uppercase HTTP METHOD
    names, e.g. OPTIONS, GET, POST.
    """

    def __call__(self, overhead: Overhead) -> Response:
        verb = overhead.environ['REQUEST_METHOD'].upper()
        handler = getattr(self, verb, None)
        if handler:
            return handler(overhead)
        # No handler declared for this verb: 405 Method Not Allowed.
        return Response(405)
class Node(WSGICallable):
    """WSGI application node: resolves a path and delegates the response."""

    @abstractmethod
    def resolve(self, path_info: str, environ: Environ) -> WSGICallable:
        """Resolves the path into a wsgi callable (eg. Response).

        If nothing was found, returns None or a WSGI callable corresponding
        to the HTTP Error (404, 405, 406).
        """

    def __call__(self, environ: Environ, start_response: StartResponse):
        # according to PEP 3333 the native string representing PATH_INFO
        # (and others) can only contain unicode codepoints from 0 to 255,
        # which is why we need to decode to latin-1 instead of utf-8 here.
        # We transform it back to UTF-8
        path_info = environ['PATH_INFO'].encode('latin-1').decode('utf-8')
        try:
            response = self.resolve(path_info, environ)
            if response is None:
                # Nothing matched the path: 404 Not Found.
                response = Response(404)
        except HTTPError as error:
            # FIXME: Log.
            # Turn a raised HTTPError into a regular HTTP response.
            response = Response(error.status, error.body)
        return response(environ, start_response)
class SentryNode(Node):
    """Node wrapper reporting any exception raised while serving a request."""

    @abstractmethod
    def handle_exception(self, exc_info: ExceptionInfo, environ: Environ):
        """This method handles exceptions happening while the
        application is trying to render/process/interpret the request.
        """

    def __call__(self, environ: Environ, start_response: StartResponse):
        iterable = None
        try:
            iterable = super().__call__(environ, start_response)
            # Stream the response body; exceptions raised during
            # iteration are reported below before being re-raised.
            yield from iterable
        except Exception:
            exc_info = sys.exc_info()
            self.handle_exception(exc_info, environ)
            exc_info = None  # drop the frame reference (traceback cycle)
            raise
        finally:
            if hasattr(iterable, 'close'):
                # PEP 3333: close() must be called on the response
                # iterable; errors raised by close() are reported too.
                try:
                    iterable.close()
                except Exception:
                    exc_info = sys.exc_info()
                    self.handle_exception(exc_info, environ)
                    exc_info = None
                    raise
try:
    # Optional backend: everything below requires the PyFilesystem2
    # package (`fs`); when it is missing this module degrades to a
    # no-op instead of failing at import time.
    from typing import Iterable, BinaryIO, Type, ClassVar
    from pathlib import Path
    from fs.base import FS
    from roughrider.storage.meta import FileInfo, Storage, ChecksumAlgorithm

    class PyFSStorage(Storage):
        """Storage backend persisting files through a PyFilesystem2 FS."""

        # Underlying PyFilesystem2 filesystem object.
        fs: FS
        # hashlib-based algorithm used to checksum stored files.
        checksum_algorithm: ChecksumAlgorithm

        def __init__(self, name: str, fs: FS, algorithm='md5'):
            """Initialize the storage.

            :param name: storage namespace name.
            :param fs: PyFilesystem2 filesystem used for persistence.
            :param algorithm: hashlib algorithm name used for checksums.
            :raises LookupError: if `algorithm` is not a known name.
            """
            self.name = name
            self.fs = fs
            try:
                self.checksum_algorithm = ChecksumAlgorithm[algorithm]
            except KeyError:
                raise LookupError(f'Unknown algorithm: `{algorithm}`.')

        def retrieve(self, ticket: str) -> Iterable[bytes]:
            """Return a lazy bytes iterator over the stored file.

            :raises FileNotFoundError: if no file exists for `ticket`.
            """
            path = self.ticket_to_uri(ticket)
            if not self.fs.exists(str(path)):
                raise FileNotFoundError(path)

            def file_iterator(path: Path, chunk=4096):
                # Stream the file in `chunk`-sized blocks.
                with self.fs.openbin(str(path)) as reader:
                    while True:
                        data = reader.read(chunk)
                        if not data:
                            break
                        yield data

            return file_iterator(path)

        def delete(self, ticket: str) -> bool:
            """Remove the file stored for `ticket`; True on success.

            (Return annotation corrected from Iterable[bytes] to bool.)
            """
            path = self.ticket_to_uri(ticket)
            try:
                self.fs.remove(str(path))
                return True
            except FileNotFoundError:
                raise  # we need to propagate.
            return False  # NOTE(review): unreachable — the except re-raises.

        def store(self, data: BinaryIO, **metadata) -> FileInfo:
            """Write `data` through the FS and return its FileInfo.

            Size and checksum are computed in one streaming pass.
            """
            ticket = self.generate_ticket()
            path = self.ticket_to_uri(ticket)
            self.fs.makedirs(str(path.parent), recreate=True)
            size = 0
            fhash = self.checksum_algorithm.value()
            with self.fs.openbin(str(path), mode='w+') as target:
                for block in iter(lambda: data.read(4096), b""):
                    size += target.write(block)
                    fhash.update(block)
            return FileInfo(
                namespace=self.name,
                ticket=ticket,
                size=size,
                checksum=(fhash.name, fhash.hexdigest()),
                metadata=metadata
            )

except ImportError:
    pass
import enum
import hashlib
from abc import ABC, abstractmethod
from functools import partial
from pathlib import Path
from typing import Optional, BinaryIO, Mapping, Iterable, Tuple
from typing_extensions import TypedDict
# Enum of every checksum algorithm hashlib supports on this platform;
# each member's value is a zero-argument factory (hashlib.new bound to
# the algorithm name) producing a fresh hash object.
ChecksumAlgorithm = enum.Enum(
    'Algorithm', {
        name: partial(hashlib.new, name)
        for name in hashlib.algorithms_available
    }
)


class FileInfo(TypedDict):
    """Descriptor of a stored file, as returned by Storage.store."""
    ticket: str  # unique file identifier within its namespace
    size: int  # number of bytes stored
    checksum: Tuple[str, str]  # (algorithm, value)
    namespace: str  # name of the Storage holding the file
    # NOTE(review): PEP 589 disallows default values on TypedDict
    # fields; `metadata` likely wants total=False / NotRequired — confirm.
    metadata: Optional[dict] = None
class Storage(ABC):
    """Abstract interface of a file storage backend ("namespace")."""

    # Namespace name under which this storage is registered.
    name: str
    # Root location of the stored files.
    root: Path

    @abstractmethod
    def generate_ticket(self) -> str:
        """Return a new unique ticket (file identifier)."""
        pass

    @abstractmethod
    def ticket_to_uri(self, uid: str) -> Path:
        """Translate a ticket into the concrete storage path."""
        pass

    @abstractmethod
    def retrieve(self, ticket: str) -> Iterable[bytes]:
        """Return an iterator over the stored file's bytes."""
        pass

    @abstractmethod
    def store(self, data: BinaryIO, **metadata) -> FileInfo:
        """Persist `data` and return the FileInfo describing it."""
        pass

    @abstractmethod
    def delete(self, ticket: str) -> bool:
        """Delete the stored file; return True if it was removed."""
        pass
class StorageCenter:
    """Registry dispatching store/retrieve/delete calls to named Storages."""

    __slots__ = ('namespaces',)

    # Mapping of namespace name -> Storage backend.
    namespaces: Mapping[str, Storage]

    def __init__(self, namespaces=None):
        if namespaces is None:
            namespaces = {}
        self.namespaces = namespaces

    def _storage(self, namespace: str) -> Storage:
        # Single resolution point so every public method shares the
        # same lookup/error behavior (was duplicated three times).
        storage = self.namespaces.get(namespace)
        if storage is None:
            raise LookupError(f'Namespace `{namespace}` is unknown.')
        return storage

    def __getitem__(self, info: FileInfo) -> Iterable[bytes]:
        """Retrieve the file described by `info`."""
        return self.retrieve(info['namespace'], info['ticket'])

    def __delitem__(self, info: FileInfo):
        """Delete the file described by `info`."""
        return self.delete(info['namespace'], info['ticket'])

    def register(self, storage: Storage):
        """Register a Storage under its own name; names must be unique."""
        if storage.name in self.namespaces:
            raise NameError(f'Namespace `{storage.name}` already exists.')
        self.namespaces[storage.name] = storage

    def store(self, namespace: str, data: BinaryIO, **metadata) -> FileInfo:
        """Store `data` in the given namespace and return its FileInfo."""
        return self._storage(namespace).store(data, **metadata)

    def retrieve(self, namespace: str, ticket: str) -> Iterable[bytes]:
        """Return an iterator over the stored file's bytes."""
        return self._storage(namespace).retrieve(ticket)

    def delete(self, namespace: str, ticket: str) -> bool:
        """Delete the file identified by `ticket` in `namespace`."""
        return self._storage(namespace).delete(ticket)
from typing import Iterable, BinaryIO
from pathlib import Path
from roughrider.storage.meta import FileInfo, Storage, ChecksumAlgorithm
class FilesystemStorage(Storage):
    """Storage backend persisting files on the local filesystem.

    Files are written under `root` at the path derived from their
    ticket, and checksummed on the fly while being stored.
    """

    checksum_algorithm: ChecksumAlgorithm

    def __init__(self, name: str, root: Path, algorithm='md5'):
        """Initialize the storage.

        :param name: storage namespace name.
        :param root: base directory holding the stored files.
        :param algorithm: hashlib algorithm name used for checksums.
        :raises LookupError: if `algorithm` is not a known name.
        """
        self.name = name
        self.root = root
        try:
            self.checksum_algorithm = ChecksumAlgorithm[algorithm]
        except KeyError:
            raise LookupError(f'Unknown algorithm: `{algorithm}`.')

    @staticmethod
    def file_iterator(path: Path, chunk=4096):
        """Lazily yield the file at `path` in `chunk`-sized blocks."""
        with path.open('rb') as reader:
            while True:
                data = reader.read(chunk)
                if not data:
                    break
                yield data

    def retrieve(self, ticket: str) -> Iterable[bytes]:
        """Return a bytes iterator over the stored file.

        :raises FileNotFoundError: if no file exists for `ticket`.
        """
        path = self.ticket_to_uri(ticket)
        if not path.exists():
            raise FileNotFoundError(path)
        return self.file_iterator(path)

    def delete(self, ticket: str) -> bool:
        """Delete the file for `ticket`; returns True on success.

        A missing file raises FileNotFoundError. (Return annotation
        corrected to bool; the unreachable ``return False`` after the
        re-raise was removed.)
        """
        path = self.ticket_to_uri(ticket)
        try:
            path.unlink()
            return True
        except FileNotFoundError:
            raise  # we need to propagate.

    def store(self, data: BinaryIO, **metadata) -> FileInfo:
        """Write `data` to disk and return its FileInfo descriptor.

        Size and checksum are computed in a single streaming pass.
        """
        ticket = self.generate_ticket()
        path = self.ticket_to_uri(ticket)
        # A collision here means generate_ticket() produced a duplicate.
        # NOTE: `assert` statements are stripped under `python -O`.
        assert not path.exists()  # this happens on ticket conflicts.
        depth = len(path.relative_to(self.root).parents)
        if depth > 1:
            # Ticket paths may nest; create the parent directories.
            path.parent.mkdir(mode=0o755, parents=True, exist_ok=False)
        size = 0
        fhash = self.checksum_algorithm.value()
        with path.open('wb+') as target:
            for block in iter(lambda: data.read(4096), b""):
                size += target.write(block)
                fhash.update(block)
        return FileInfo(
            namespace=self.name,
            ticket=ticket,
            size=size,
            checksum=(fhash.name, fhash.hexdigest()),
            metadata=metadata
        )
import copy
import logging
import pandas as pd
from pandas import DataFrame, Series
class RoughSetSI:
    """Class RoughSet to model an Information System SI = (X, A).

    SI = (X, A),
    where:
    X - objects of the universe (rows of a pandas DataFrame),
    A - attributes describing the objects of X (columns).
    """

    def __init__(self, X: DataFrame, ind_index_name="IND_INDEX"):
        """Initialize object of class RoughSetSI.

        Parameters
        ----------
        X: DataFrame
            Objects of the universe, as a pandas DataFrame of nominal values.
        ind_index_name: string, default 'IND_INDEX'
            Name of a special column storing the index of the
            indiscernibility relation computed by
            `get_indiscernibility_relations`.

        Raises
        ------
        Exception
            If X is not a pandas DataFrame.
        ValueError
            If X contains the reserved column name "index".

        References
        ----------
        pandas arrays: https://pandas.pydata.org/docs/reference/arrays.html
        """
        # Reserved pandas column name produced by reset_index(); used to
        # keep/restore the original row numbering of X.
        self.ind_rel_column_index_name = "index"
        # Cache placeholders (not used by the current implementation).
        self.__indiscrenibility_relation: DataFrame = None
        self.__R: DataFrame = None
        self.logger_name = __name__
        self.logger = logging.getLogger(self.logger_name)
        if not isinstance(X, DataFrame):
            raise Exception("X must be a type of Pandas DataFrame. See more: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html")
        self.X = X
        if self.ind_rel_column_index_name in self.X.columns:
            raise ValueError(f"You can not use {self.ind_rel_column_index_name} as a column name.")
        # Name of the helper column for the indiscernibility relation.
        self.ind_index_name = ind_index_name

    def get_deepcopy(self):
        """Get a deep copy of the object.

        reference: https://docs.python.org/3/library/copy.html
        """
        return copy.deepcopy(self)

    def get_X(self) -> DataFrame:
        """Return the universe X as a DataFrame."""
        return self.X

    @property
    def __column_names_X(self) -> list:
        # Column names of X as a plain Python list.
        return self.X.columns.values.tolist()

    def __get_empty_X(self, columns=None) -> DataFrame:
        """Get an empty DataFrame shaped like X (or like `columns`)."""
        if columns is None:
            columns = self.__column_names_X
        return DataFrame(columns=columns)

    @property
    def __rows_count(self):
        """Number of objects (rows) in X."""
        return len(self.X.index)

    @property
    def is_empty(self):
        """True when the universe X contains no objects."""
        return self.__rows_count == 0

    def _is_trivial_subset(self, subset) -> bool:
        """Return True when `subset` is None/empty or names all columns of X.

        The comparison is done on plain lists: comparing a list directly
        against a pandas Index is element-wise, which made the previous
        ``subset == self.X.columns`` check raise "The truth value of an
        array is ambiguous" whenever a full-width subset was passed.
        """
        if subset is None or len(subset) == 0:
            return True
        return list(subset) == list(self.X.columns)

    def get_indiscernibility_relations(self, subset=None, return_indiscernibility_index: bool = True):
        """Compute the indiscernibility relations of X.

        According to Rough Set theory, objects indiscernible with
        respect to the chosen attributes collapse into one relation
        (one distinct row).

        Parameters
        ----------
        subset: column label or sequence of labels, optional
            Attributes used to discern objects; by default all columns.
        return_indiscernibility_index: bool, default True
            Whether to include the extra column indexing the relations.

        Returns
        -------
        DataFrame
            The distinct rows of X (over `subset`), optionally with an
            index column named `self.ind_index_name`.
        """
        if self._is_trivial_subset(subset):
            subset = None
            df = self.X
        else:
            df = self.X[subset]
        # drop_duplicates -> one row per relation; the second
        # reset_index materializes the relation id as column "index".
        IND_OF_X = df.drop_duplicates().reset_index(drop=True).reset_index()
        IND_OF_X.rename(columns={self.ind_rel_column_index_name: self.ind_index_name}, inplace=True)
        if not return_indiscernibility_index:
            ind = IND_OF_X[IND_OF_X.columns.drop(self.ind_index_name)]
        else:
            ind = IND_OF_X
        return ind

    def get_X_with_indiscernibility_relations_index(self, subset=None):
        """Return the pair (X_IND, IND_OF_X).

        X_IND is X (original row numbering preserved) extended with the
        column `self.ind_index_name` linking every row to its
        indiscernibility relation in IND_OF_X.
        """
        if self._is_trivial_subset(subset):
            subset = None
        IND_OF_X = self.get_indiscernibility_relations(subset=subset)
        # Join X with IND_OF_X to attach the relation id to each row;
        # reset_index(drop=False) keeps the original row number in the
        # "index" column so it can be restored after the merge.
        _on_columns = subset if subset is not None else self.X.columns.values.tolist()
        X_IND = self.X.reset_index(
            drop=False
        ).merge(
            IND_OF_X, how="inner", on=_on_columns
        ).sort_values(
            by=self.ind_rel_column_index_name
        )
        # Restore the original row numbering of X.
        X_IND = X_IND.set_index(self.ind_rel_column_index_name)
        X_IND.index.name = None
        return X_IND, IND_OF_X
import copy
import logging
import pandas as pd
from pandas import DataFrame, Series
from roughsets_base.roughset_si import RoughSetSI
class RoughSetDT(RoughSetSI):
"""Class RoughSet to model a decision table (DT).
DT = f(X, A, y),
where:
X - objects of universe,
A - attributes describing objects of X,
y - a decision attribute related to X.
"""
    def __init__(self, X: DataFrame, y: Series = None, ind_index_name="IND_INDEX"):
        """Initialize object of class RoughSetDT.

        Parameters
        ----------
        X: DataFrame
            Objects of universe of type: pandas DataFrame
        y: Series
            Decision attribute related to X. A plain Python list is also
            accepted and is wrapped into a Series named 'target'.
        ind_index_name: string, default 'IND_INDEX'
            Name of a special column to store index of discernibility relation,
            computed by the function: get_indiscernibility_relations function.

        Note: X and y are computed as data structures with nominal values.

        References
        ----------
        pandas array: https://pandas.pydata.org/docs/reference/arrays.html
        """
        super().__init__(X, ind_index_name)
        # Default name given to y when it is passed as a plain list.
        self.default_class_attr = "target"
        if isinstance(y, list):
            y = pd.Series(y, name=self.default_class_attr)
        self.__assert_X_y(X, y)
        self.y = y
        # Re-checked here although the parent __init__ already validates it.
        if self.ind_rel_column_index_name in self.X.columns:
            raise ValueError(f"You can not use {self.ind_rel_column_index_name} as a column name.")
        # Name of the helper column for the indiscernibility relation.
        self.ind_index_name = ind_index_name
    def __assert_X_y(self, X, y):
        """Validate that X is a DataFrame, y is a Series, and lengths match.

        :raises Exception: on any type mismatch or length mismatch.
        """
        if not isinstance(y, Series):
            raise Exception("y must be a type of list or Pandas Series. See more: https://pandas.pydata.org/docs/reference/api/pandas.Series.html")
        if not isinstance(X, DataFrame):
            raise Exception("X must be a type of Pandas DataFrame. See more: https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.html")
        if not len(X.index) == len(y.index):
            raise Exception("Number of objects in X does not match number of decisions in y.")
def concat_X_and_y(self, X, y) -> DataFrame:
"""Add y series as a column to X DataFrame"""
Xy = pd.concat([X, y], axis=1)
return Xy
    def get_Xy(self) -> DataFrame:
        """
        Get X and y as one DataFrame (y appended as the last column).
        """
        return self.concat_X_and_y(self.X, self.y)
    @property
    def __column_name_y(self) -> str:
        # Name of the decision attribute column (the Series' name).
        return self.y.name
def __get_empty_X(self, columns=None) -> DataFrame:
"""Get empty X"""
if columns is None:
columns = self.__column_names_X
return DataFrame(columns=columns)
    def __get_empty_y(self) -> Series:
        """Get an empty Series named like the decision attribute y."""
        column = self.__column_name_y
        return Series(data=[], name=column)
    def get_all_concepts(self):
        """Return the distinct decision values (concepts) of y,
        re-indexed from 0."""
        return self.y.drop_duplicates().reset_index(drop=True)
def get_Xy_with_indiscernibility_relations_index(self, subset=None):
X_IND, IND_OF_X = self.get_X_with_indiscernibility_relations_index(subset)
# Add column with an IDrelation: X_IND -> y
y_IND = pd.DataFrame(self.y.copy())
y_IND[self.ind_index_name] = X_IND[self.ind_index_name]
# Zliczenie ilości klas dla każdej relacji nieodróżnialności
# i dodanie do IND_OF_X
# Count classes for each indiscernibility_relation
y_class_count = y_IND.drop_duplicates().groupby(
[self.ind_index_name]
)[self.y.name].count().reset_index(
name="count"
)["count"]
# Add number of classes to each indiscernibility_relation
IND_OF_X["y_class_count"] = y_class_count
return X_IND, y_IND, IND_OF_X
def get_approximation_indices(self, concepts=None, subset=None):
    """
    Get Pandas DataFrame indices which describe approximations boundaries.

    Parameters
    ----------
    concepts: list of decisions for which approximations boundaries must be evaluated.
        If None, computation will be done for all decisions.
    subset: column label or sequence of labels, optional
        Only consider certain columns for identifying duplicates,
        by default use all of the columns.

    Returns
    -------
    Tuple: lower_approximation_of_X, boundary_region_of_X, upper_approximation_of_X, negative_region_of_X
        (each a sorted pandas Index of row labels of X)
    """
    # NOTE(review): `concepts == pd.Series.empty` compares against the
    # unbound `empty` property object, not an empty Series, so it is almost
    # certainly always False — confirm the intended emptiness check.
    if concepts is None or concepts == [] or concepts == pd.Series.empty:
        concepts = self.get_all_concepts()
    if not isinstance(concepts, Series):
        concepts = pd.Series(concepts)
    # IND_OF_X_EXT - indiscernibilty relations (extended with columns: y_class_count and <self.ind_index_name>)
    X_IND, y_IND, IND_OF_X_EXT = self.get_Xy_with_indiscernibility_relations_index(
        subset=subset
    )
    # Get indexes of indiscernibilty relations, related to concepts
    IND_concept = y_IND[
        y_IND[self.y.name].isin(concepts)
    ][self.ind_index_name].drop_duplicates()
    # Get indexes of IND_OF_X_EXT which belong to concept
    IND_OF_X_by_concept = IND_OF_X_EXT[IND_OF_X_EXT[self.ind_index_name].isin(IND_concept)]
    # Get a lower approximation (if only one concept) or sum of lower approximations (if more than one concept)
    # (DataFrame's indexes of dataset X)
    # A relation with exactly one decision class is consistent -> lower approximation.
    ind_index_of_lower_approximation: Series = IND_OF_X_by_concept[
        IND_OF_X_by_concept["y_class_count"] == 1
    ][self.ind_index_name]
    lower_approximation_of_X = X_IND[
        X_IND[self.ind_index_name].isin(ind_index_of_lower_approximation)
    ].index
    # Get a boundary region (DataFrame's indexes of dataset X)
    # A relation with more than one decision class is inconsistent -> boundary region.
    ind_index_of_boundary_region: Series = IND_OF_X_by_concept[
        IND_OF_X_by_concept["y_class_count"] > 1
    ][self.ind_index_name]
    boundary_region_of_X = X_IND[
        X_IND[self.ind_index_name].isin(ind_index_of_boundary_region)
    ].index
    # Get a upper approximation (if only one concept) or sum of upper approximations (if more than one concept)
    # (DataFrame's indexes of dataset X)
    upper_approximation_of_X = lower_approximation_of_X.append(boundary_region_of_X)
    # Get a negative region (DataFrame's indexes of dataset X)
    negative_region_of_X = self.X.index.difference(upper_approximation_of_X)
    # Get a negative region (DataFrame's indexes of dataset X) (method 2)
    # IND_OF_X_negative_by_concept = IND_OF_X_EXT[
    #     ~IND_OF_X_EXT[self.ind_index_name].isin(IND_concept)
    # ]
    # ind_index_of_negative_region: Series = IND_OF_X_negative_by_concept[self.ind_index_name]
    # negative_region_of_X = X_IND[
    #     X_IND[self.ind_index_name].isin(ind_index_of_negative_region)
    # ].index
    return lower_approximation_of_X.sort_values(), boundary_region_of_X.sort_values(), upper_approximation_of_X.sort_values(), negative_region_of_X.sort_values()
def get_approximation_objects(self, approximation_indices) -> (DataFrame, Series):
    """Return the rows of X and y whose index labels appear in approximation_indices."""
    mask = self.y.index.isin(approximation_indices)
    return self.X[mask], self.y[mask]
roughviz is a python visualization library for creating sketchy/hand-drawn styled charts.
### Available Charts
<ul>
<li>Bar (<code>roughviz.bar</code>) </li>
<li>Horizontal Bar (<code>roughviz.barh</code>) </li>
<li>Pie (<code>roughviz.pie</code>) </li>
<li>Donut (<code>roughviz.donut</code>) </li>
</ul>
### Installation
```
pip install roughviz
```
<h2 id="API">API</h2>
### <code id="Bar">roughviz.bar</code>
Required
- `labels`: Labels with which to construct chart.
- `values`: Values with which to construct chart.
```
roughviz.bar(df["ABC"], df["XYZ"])
```
Optional
- `axisFontSize` [string]: Font-size for axes' labels. Default: `'1rem'`.
- `axisRoughness` [number]: Roughness for x & y axes. Default: `0.5`.
- `axisStrokeWidth` [number]: Stroke-width for x & y axes. Default: `0.5`.
- `bowing` [number]: Chart bowing. Default: `0`.
- `color` [string]: Color for each bar. Default: `'skyblue'`.
- `fillStyle` [string]: Bar fill-style.
- `fillWeight` [number]: Weight of inner paths' color. Default: `0.5`.
- `font`: Font-family to use. You can use `0` or `gaegu` to use `Gaegu`, or `1` or `indie flower` to use `Indie Flower`. Or feed it something else. Default: `Gaegu`.
- `highlight` [string]: Color for each bar on hover. Default: `'coral'`.
- `innerStrokeWidth` [number]: Stroke-width for paths inside bars. Default: `1`.
- `labelFontSize` [string]: Font-size for axes' labels. Default: `'1rem'`.
- `padding` [number]: Padding between bars. Default: `0.1`.
- `roughness` [number]: Roughness level of chart. Default: `1`.
- `simplification` [number]: Chart simplification. Default `0.2`.
- `stroke` [string]: Color of bars' stroke. Default: `black`.
- `strokeWidth` [number]: Size of bars' stroke. Default: `1`.
- `title` [string]: Chart title. Optional.
- `titleFontSize` [string]: Font-size for chart title. Default: `'1rem'`.
- `tooltipFontSize` [string]: Font-size for tooltip. Default: `'0.95rem'`.
- `xLabel` [string]: Label for x-axis.
- `yLabel` [string]: Label for y-axis.
- `width` [number]: Width of the chart (in pixels).
- `height` [number]: Height of the chart (in pixels).
### <code id="BarH">roughviz.barh</code>
Required
- `labels`: Labels with which to construct chart.
- `values`: Values with which to construct chart.
```
roughviz.barh(df["ABC"], df["XYZ"])
```
Optional
- `axisFontSize` [string]: Font-size for axes' labels. Default: `'1rem'`.
- `axisRoughness` [number]: Roughness for x & y axes. Default: `0.5`.
- `axisStrokeWidth` [number]: Stroke-width for x & y axes. Default: `0.5`.
- `bowing` [number]: Chart bowing. Default: `0`.
- `color` [string]: Color for each bar. Default: `'skyblue'`.
- `fillStyle` [string]: Bar fill-style.
- `fillWeight` [number]: Weight of inner paths' color. Default: `0.5`.
- `font`: Font-family to use. You can use `0` or `gaegu` to use `Gaegu`, or `1` or `indie flower` to use `Indie Flower`. Or feed it something else. Default: `Gaegu`.
- `highlight` [string]: Color for each bar on hover. Default: `'coral'`.
- `innerStrokeWidth` [number]: Stroke-width for paths inside bars. Default: `1`.
- `labelFontSize` [string]: Font-size for axes' labels. Default: `'1rem'`.
- `padding` [number]: Padding between bars. Default: `0.1`.
- `roughness` [number]: Roughness level of chart. Default: `1`.
- `simplification` [number]: Chart simplification. Default `0.2`.
- `stroke` [string]: Color of bars' stroke. Default: `black`.
- `strokeWidth` [number]: Size of bars' stroke. Default: `1`.
- `title` [string]: Chart title. Optional.
- `titleFontSize` [string]: Font-size for chart title. Default: `'1rem'`.
- `tooltipFontSize` [string]: Font-size for tooltip. Default: `'0.95rem'`.
- `xLabel` [string]: Label for x-axis.
- `yLabel` [string]: Label for y-axis.
- `width` [number]: Width of the chart (in pixels).
- `height` [number]: Height of the chart (in pixels).
### <code id="Donut">roughviz.donut</code>
Required
- `labels`: Labels with which to construct chart.
- `values`: Values with which to construct chart.
```
roughviz.donut(df["ABC"], df["XYZ"])
```
Optional
- `bowing` [number]: Chart bowing. Default: `0`.
- `fillStyle` [string]: Chart fill-style.
- `fillWeight` [number]: Weight of inner paths' color. Default: `0.85`.
- `font`: Font-family to use. You can use `0` or `gaegu` to use `Gaegu`, or `1` or `indie flower` to use `Indie Flower`. Or feed it something else. Default: `Gaegu`.
- `highlight` [string]: Color for each arc on hover. Default: `'coral'`.
- `innerStrokeWidth` [number]: Stroke-width for paths inside arcs. Default: `0.75`.
- `roughness` [number]: Roughness level of chart. Default: `1`.
- `simplification` [number]: Chart simplification. Default `0.2`.
- `strokeWidth` [number]: Size of bars' stroke. Default: `1`.
- `title` [string]: Chart title. Optional.
- `titleFontSize` [string]: Font-size for chart title. Default: `'1rem'`.
- `tooltipFontSize` [string]: Font-size for tooltip. Default: `'0.95rem'`.
### <code id="Pie">roughviz.pie</code>
Required
- `labels`: Labels with which to construct chart.
- `values`: Values with which to construct chart.
```
roughviz.pie(df["ABC"], df["XYZ"])
```
Optional
- `bowing` [number]: Chart bowing. Default: `0`.
- `fillStyle` [string]: Chart fill-style.
- `fillWeight` [number]: Weight of inner paths' color. Default: `0.85`.
- `font`: Font-family to use. You can use `0` or `gaegu` to use `Gaegu`, or `1` or `indie flower` to use `Indie Flower`. Or feed it something else. Default: `Gaegu`.
- `highlight` [string]: Color for each arc on hover. Default: `'coral'`.
- `innerStrokeWidth` [number]: Stroke-width for paths inside arcs. Default: `0.75`.
- `roughness` [number]: Roughness level of chart. Default: `1`.
- `simplification` [number]: Chart simplification. Default `0.2`.
- `strokeWidth` [number]: Size of bars' stroke. Default: `1`.
- `title` [string]: Chart title. Optional.
- `titleFontSize` [string]: Font-size for chart title. Default: `'1rem'`.
- `tooltipFontSize` [string]: Font-size for tooltip. Default: `'0.95rem'`.
### Future Plans
- [ ] Exception Handling
- [ ] Add Chart: Histogram
- [ ] Add Chart: Scatter
- [ ] Add Chart: Bubble Chart
- [ ] Add Chart: Line
- [ ] Advanced CSS control capabilities
### Based on
<a href="https://github.com/jwilber/roughViz"><img src="https://raw.githubusercontent.com/jwilber/random_data/master/roughViz_Title.png" width="350" alt="roughViz.js"></a>
### License
MIT License
Copyright (c) 2019 Hannan Satopay
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| /roughviz-4.0.0.tar.gz/roughviz-4.0.0/README.md | 0.553747 | 0.882782 | README.md | pypi |
import pandas as pd
from sklearn.model_selection import train_test_split
from time import time
def prepare_data_for_training(
    df: pd.core.frame.DataFrame,
    target: str,
    index_column: str = None,
    validation_test_size: float = 0.2,
    verbose: bool = False,
):
    """Set the index, separate the target column and return a random split.

    Args:
        df (pd.core.frame.DataFrame): input data
        target (str): target column name
        index_column (str): index column name, defaults to None (no indexing)
        validation_test_size (float): test set size in [0.0, 1.0]; if 0,
            all rows go to the training set. Defaults to 0.2.
        verbose (bool): prints out the shape of test and train datasets

    Returns:
        _x, _y, test_x, test_y: train features (DataFrame), train target
        values (ndarray), validation features (DataFrame), validation
        target values (ndarray). Note the docstring previously claimed a
        different ordering than what was actually returned.

    Raises:
        KeyError: if index_column or target is not a column of df.
    """
    if index_column:
        if index_column in df.columns:
            _df = df.set_index(index_column)
        else:
            raise KeyError("{} not in dataframe columns: [{}]".format(
                index_column,
                df.columns
            ))
    else:
        _df = df
    # Check against _df so a target consumed as the index is reported clearly.
    if target in _df.columns:
        _target = _df[target]
        _data = _df.drop(target, axis=1)
    else:
        raise KeyError("{} not in dataframe columns: [{}]".format(
            target,
            df.columns
        ))
    if validation_test_size == 0:
        # No validation split requested: everything is training data.
        _x, test_x, _y, test_y = _data, pd.DataFrame(), _target, pd.Series(dtype="float64")
    else:
        _x, test_x, _y, test_y = train_test_split(
            _data,
            _target,
            test_size=validation_test_size,
            random_state=int(time()),
        )
    if verbose:
        print("shape of training data = {}".format(_x.shape))
        print("shape of training data target = {}".format(_y.shape))
        print("shape of validation data = {}".format(test_x.shape))
        print(
            "shape of validation data target = {}\n".format(
                test_y.shape))
    return _x, _y.values, test_x, test_y.values
import numpy as np
from collections import namedtuple
from sklearn.metrics import classification_report, confusion_matrix
# Lightweight record describing a trained model's metadata.
Doc = namedtuple(
    "Doc",
    ["version", "type", "algo", "param", "cv"],
)
def compress_regression_results(l, true_condition=lambda x: x >= 0.6):
    """Binarize a sequence: 1 where true_condition holds, 0 elsewhere."""
    return [1 if true_condition(value) else 0 for value in list(l)]
def generate_model_documentation(
    model_version: str,
    model_type: str,
    model_algorithm: str,
    model_parameter_tuning: str = None,
    model_cv: str = None,
):
    """Render a human-readable, multi-line summary of a model's details."""
    lines = [
        "\n---------- Model Details:\n",
        "Model Version == {}".format(model_version),
        "Model Type == {}".format(model_type),
        "Model Algorithm == {}".format(model_algorithm),
        "Model Parameter Tuning == {}".format(model_parameter_tuning),
        "Model CV == {}".format(model_cv),
    ]
    return "\n".join(lines) + "\n"
def create_classification_report(y_real, y_pred):
    """Print a per-sample comparison plus classification metrics.

    Both y_real and y_pred are regression-style scores; they are first
    binarized with compress_regression_results (threshold 0.6) before the
    classification metrics are computed.
    """
    p = compress_regression_results(list(y_pred))
    r = compress_regression_results(list(y_real))
    for y_p, y_r_p, y_r, y_r_r in zip(
        p,
        list(y_pred),
        r,
        list(y_real),
    ):
        print("Predicted {rp} ~ {p} for result {rr} ~ {r}".format(
            p=y_p,
            rp=y_r_p,
            r=y_r,
            rr=y_r_r,
        ))
    print("\n{}".format(
        classification_report(
            r,
            p,
            target_names=["bottom_tier", "top_tier"],
        )
    )
    )
    # sklearn's confusion_matrix expects (y_true, y_pred); the previous
    # call passed the prediction first, silently swapping fp and fn and
    # corrupting the printed Precision/Recall below.
    tn, fp, fn, tp = confusion_matrix(r, p).ravel()
    print(
        "tn = {} \n".format(tn / len(p)) +
        "tp = {} \n".format(tp / len(p)) +
        "fn = {} \n".format(fn / len(p)) +
        "fp = {} \n".format(fp / len(p))
    )
    print(
        "Precision = {}\n".format(round(tp / (tp + fp), 2)) +
        "Recall = {}\n".format(round(tp / (tp + fn), 2))
    )
def min_max_norm(y: np.ndarray) -> np.ndarray:
    """Rescale y linearly so its minimum maps to 0 and its maximum to 1."""
    lo, hi = y.min(), y.max()
    return (y - lo) / (hi - lo)
def is_regression_metric(metric: callable) -> bool:
    """Sanity-check that metric behaves like a regression error function.

    A valid error metric scores identical vectors as exactly 0.0 and grows
    monotonically as the prediction drifts away from the ground truth.

    Returns:
        bool: True only when both conditions hold.
    """
    real = np.asarray([0.1, 0.33, 0.44])
    pred_close = np.asarray([0.11, 0.34, 0.45])
    pred_far = np.asarray([0.3, 0.6, 0.9])
    # The previous check used `not A and not B`, which accepted a metric
    # satisfying either condition alone; a real error metric must satisfy both.
    return (metric(real, real) == 0.0 and
            metric(real, real) < metric(real, pred_close) < metric(real, pred_far))
def is_binary_classification_metric(metric: callable) -> bool:
    """Sanity-check that metric behaves like a binary-classification error.

    A valid error metric scores identical label vectors as exactly 0.0 and
    grows as the predicted labels diverge from the ground truth.

    Returns:
        bool: True only when both conditions hold.
    """
    real = np.asarray([1, 1, 0])
    pred_close = np.asarray([1, 0, 0])
    pred_far = np.asarray([0, 0, 1])
    # The previous check used `not A and not B`, which accepted a metric
    # satisfying either condition alone; a real error metric must satisfy both.
    return (metric(real, real) == 0.0 and
            metric(real, real) < metric(real, pred_close) < metric(real, pred_far))
import os
from typing import Union
import random
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
from roulette.builder.data_prep import prepare_data_for_training
from roulette.builder.save_load_model import load_model
from roulette.builder.utils import is_regression_metric, is_binary_classification_metric
from roulette.evaluation import MonteCarloSimulation
from roulette.evaluation.metrics import get_regression_metric, get_binary_classification_metric
from roulette.evaluation.norms import get_normalizer
from roulette.logger import Logger
# Sub-directory (inside the model folder) where build artifacts are saved.
BUILD_DIR_NAME = "build"
class Builder(object):
    """Base builder: runs Monte Carlo train/evaluate experiments for a model.

    Subclasses (regression / binary classification) select the experiment
    type and metric; this class handles data splitting, repeated training,
    metric aggregation and persisting the final model.
    """

    logger = Logger("Builder").get_logger()

    def __init__(
        self,
        path_to_model: str,
        exp_type: str,
        data: pd.core.frame.DataFrame,
        target: str,
        metric: callable,
        index: str = None
    ):
        """Base builder class, handles the MC iteration

        Args:
            path_to_model(str): path to a folder containing model.py file
            exp_type(str): experiment type forwarded to MonteCarloSimulation
            data(pd.DataFrame): input dataset
            target(str): name of column in data to target for
            metric(callable): metric used by the subclass to score experiments
            index(str): name of column in data to use as index, default None
        """
        self.data = data
        self.path_to_model = path_to_model
        self.target = target
        self.index_column = index
        self.final_model = None
        self.result = None
        self.MC_simulation = MonteCarloSimulation(exp_type)

    def _build(
        self,
        n_experiments: int,
        metric,
    ):
        """Build and evaluate the loaded model over repeated experiments.

        Args:
            n_experiments(int): number of experiments to execute
            metric(callable): metric used to score each experiment
        """
        self.logger.info("Initiating {} Epochs".format(n_experiments))
        Model = load_model(self.path_to_model)
        with tqdm(total=n_experiments, desc=" Training Model") as bar:
            for _ in range(n_experiments):
                # Fresh random 15%-25% validation split per experiment.
                X, y, v_X, v_y = prepare_data_for_training(
                    df=self.data,
                    target=self.target,
                    index_column=self.index_column,
                    validation_test_size=random.uniform(0.15, 0.25),
                )
                this_model = Model()
                this_model.fit(X, y)
                this_prediction = this_model.predict(v_X)
                self.MC_simulation.load_experiment(
                    v_y,
                    y,
                    this_prediction
                )
                bar.update(1)
        self.MC_simulation.digest(metric=metric)
        self.result = self.MC_simulation.metrics_as_dict()

    def finalize_model(self):
        """trains the model on the entire dataset"""
        X, y, _, _ = prepare_data_for_training(
            df=self.data,
            target=self.target,
            index_column=self.index_column,
            validation_test_size=0,
        )
        self.logger.info("Finalizing model")
        Model = load_model(self.path_to_model)
        self.logger.info("Training model on all data")
        # Train exactly once; the previous version trained a throwaway copy
        # and saved it to a stray "playground" directory before retraining.
        self.final_model = Model()
        self.final_model.fit(X, y)

    def get_results(self) -> dict:
        """returns the building stage results

        Returns:
            results(dict): a dictionary with the model building results

        Raises:
            RuntimeError: if build() has not been run yet.
        """
        if self.result:
            return self.result
        else:
            raise RuntimeError("You must use build() to get results")

    def plot(self, title=None):
        """plots the simulation histogram summery to screen

        Args:
            title(str): plot's title
        """
        plt.clf()
        self.MC_simulation.plot(title=title)
        plt.show()

    def save(self, plot=True, summery=False, data=False):
        """Saves the model to the model directory

        Args:
            plot(bool): saves the simulation histogram as png default True
            summery(bool): saves the summry of all experiments ran default False
            data(bool): saves the data used in traininig default False

        Returns:
            model_dir(str): directory the model was saved to.

        Raises:
            RuntimeError: if the model was not finalized, or build() was
                never run.
        """
        if self.final_model:
            # Debug print statements replaced with proper logging.
            self.logger.info("saving model")
            model_dir = self.final_model.save(
                os.path.join(self.path_to_model, BUILD_DIR_NAME))
            self.logger.info("saved model to {}".format(model_dir))
        else:
            raise RuntimeError(
                "You did not finalize model thus no model will be saved, use .finalize_model() method to save model")
        if self.result:
            self.logger.info("saving model metrics")
            self.MC_simulation.metrics_to_json(os.path.join(
                model_dir, "{}_metadata.json".format(self.final_model.model_name)))
            if plot:
                self.logger.info("saving simulation plot")
                self.MC_simulation.plot(path=model_dir)
            else:
                self.logger.info("plot=False will not save evaluation plot")
        else:
            raise RuntimeError("You must use build() to save")
        if summery:
            self.logger.info("saving experiment summery")
            self.MC_simulation.save_experiment_summery(os.path.join(
                model_dir, "{}_summery.json".format(self.final_model.model_name)))
        else:
            self.logger.info(
                "summery = False, will not save experiment summery")
        if data:
            self.logger.info("saving input data")
            self.data.to_csv(os.path.join(
                model_dir, "{}_data.csv".format(self.final_model.model_name)))
        else:
            self.logger.info("data = False, will not save experiment data")
        return model_dir
class RegressionBuilder(Builder):
    """Builder for regression models, with optional target normalization."""

    def __init__(
        self,
        path_to_model: str,
        data: pd.core.frame.DataFrame,
        target: str,
        metric: Union[str, callable],
        normalizer: Union[str, callable] = None,
        index: str = None
    ):
        """
        Args:
            path_to_model(str): path to a folder containing model.py file
            data(pd.DataFrame): input dataset
            target(str): name of column in data to target for
            metric: metric name (str) or a callable regression metric
            normalizer: normalizer name (str) or callable applied to the target
            index(str): name of column in data to use as index, default None

        Raises:
            ValueError: if normalizer or metric is neither str nor callable.
        """
        if normalizer:
            self.logger.info(
                "normalizing data target, this will duplicate data space in mem")
            norm_data = data.copy()
            if hasattr(normalizer, "__call__"):
                norm_data[target] = normalizer(norm_data[target])
            elif isinstance(normalizer, str):
                norm_data[target] = get_normalizer(
                    normalizer)(norm_data[target])
            else:
                raise ValueError("normalizer should be either str or callable")
            # Forward the index column; it was previously accepted but dropped.
            super().__init__(path_to_model, "reg", norm_data, target, metric, index)
        else:
            super().__init__(path_to_model, "reg", data, target, metric, index)
        if hasattr(metric, "__call__"):
            assert is_regression_metric(metric)
            self._metric = metric
        elif isinstance(metric, str):
            self._metric = get_regression_metric(metric)
        else:
            raise ValueError(
                "metric should be str or callable, not {}".format(
                    type(metric)))

    def build(
        self,
        n_experiments: int,
    ):
        """Run n_experiments Monte Carlo experiments with the configured metric."""
        self._build(n_experiments, self._metric)
class BinaryClassificationBuilder(Builder):
    """Builder for binary classification models."""

    def __init__(
        self,
        path_to_model: str,
        data: pd.core.frame.DataFrame,
        target: str,
        metric: Union[str, callable],
        index: str = None
    ):
        """
        Args:
            path_to_model(str): path to a folder containing model.py file
            data(pd.DataFrame): input dataset
            target(str): name of column in data to target for
            metric: metric name (str) or a callable binary-classification metric
            index(str): name of column in data to use as index, default None

        Raises:
            ValueError: if metric is neither str nor callable.
        """
        # Forward the index column; it was previously accepted but dropped.
        super().__init__(path_to_model, "binary", data, target, metric, index)
        if hasattr(metric, "__call__"):
            assert is_binary_classification_metric(metric)
            self._metric = metric
        elif isinstance(metric, str):
            self._metric = get_binary_classification_metric(metric)
        else:
            raise ValueError(
                "metric should be str or callable, not {}".format(
                    type(metric)))

    def build(
        self,
        n_experiments: int,
    ):
        """Run n_experiments Monte Carlo experiments with the configured metric."""
        self._build(n_experiments, self._metric)
import random
from time import time
from collections import namedtuple
import numpy as np
from roulette.evaluation.simulation_data import ExperimentData, Score
from roulette.evaluation.utils import validate_multiple_lists_length
from roulette.evaluation.metrics import WD
from roulette.evaluation.constants import ExperimentConstants
# Seed the stdlib RNG from the current time (mod 10^4) so runs differ.
random.seed(int(time()) % 10**4)
def length_error(data_length):
    """Return a ValueError describing a data-length mismatch.

    Returning (rather than raising) the exception lets call sites use the
    idiomatic ``raise length_error(n)`` form; previously this helper raised
    internally, making the caller's own ``raise`` statement unreachable.
    """
    return ValueError(
        "all data should be of the same length - {}".format(data_length))
def _divergence_by_wd(dist, ref):
"""calculated the divergence between two distributions, meaning:
the inverse of the distance between them, the more they are evenly
distributed == divergence is higher
Args:
dist(array-like): original distribution
ref(array-like): refrence distribution
Returns:
abs_wd
"""
abs_wd = 1 / abs(WD(dist, ref))
if abs_wd > 0.0:
return abs_wd
else:
return np.inf
def reg_mean(y, size):
    """Constant regression baseline: `size` copies of mean(y)."""
    return np.repeat(np.asarray(y).mean(), size)
def binary_mean(y, size):
    """Constant classification baseline: `size` copies of the majority label of y."""
    majority = np.bincount(y).argmax()
    return np.repeat(majority, size)
def choice_rand(y, size):
    """Random baseline: `size` values sampled (with replacement) from y."""
    return np.random.choice(a=y, size=size)
# Baseline "models" per experiment type: a mean/majority predictor and a
# random predictor, used as reference distributions when scoring the model.
BASE_DIST = {
    "reg": {
        "mean": reg_mean,
        "rand": choice_rand
    },
    "binary": {
        "mean": binary_mean,
        "rand": choice_rand
    }
}
class Experiment(object):
    """
    gets monte carlo exp results and returns metrics
    """

    # NOTE(review): these class-level namedtuples shadow the ExperimentData
    # and Score names imported from simulation_data at module level; inside
    # the methods below the bare names resolve to the module-level imports,
    # so these class attributes appear unused — confirm before removing.
    ExperimentData = namedtuple('ExperimentData', [
        "Real",
        'Model',
        'Rand',
        'Mean',
        # 'OtherModels',
    ])
    Score = namedtuple('Score', [
        'Model',
        'Rand',
        'Mean',
    ])

    def __init__(
        self,
        exp_type: str,
        real: list,
        real_trained: np.ndarray,
        model: list,
    ):
        """Create and load a single experiment.

        Args:
            exp_type(str): one of ExperimentConstants.TYPES
            real(list): ground-truth results of the test set
            real_trained(np.ndarray): target vector of the training set
            model(list): predictions of the model under test

        Raises:
            ValueError: if exp_type is unknown, or (via _load) if the
                result vectors have mismatched lengths.
        """
        self.experiment_data = None
        if exp_type in ExperimentConstants.TYPES:
            self.exp_type = exp_type
        else:
            raise ValueError("exp_type must be one of: [{}]".format(
                ExperimentConstants.TYPES))
        self._load(
            real_results=real,
            real_trained_results=real_trained,
            model_prediction=model,
        )
        self.experiment_results = None

    def _load(
        self,
        real_results: list,
        real_trained_results: np.ndarray,
        model_prediction: list,
    ):
        """loads experiment data into ExperimentData object

        Args:
            real_results(list): real results of this experiment
            real_trained_results(np.ndarray): target vector of the trained data in this experiment
            model_prediction(list): predictions of the model

        Raises:
            ValueError: if there is a mismatch in length of any of the arguments
        """
        if validate_multiple_lists_length(
            real_results,
            model_prediction,
        ):
            size = len(real_results)
            # Baseline predictions derived from the training targets.
            random_data = BASE_DIST[self.exp_type]["rand"](
                real_trained_results, size)
            mean_data = BASE_DIST[self.exp_type]["mean"](
                real_trained_results, size)
            self.experiment_data = ExperimentData(
                real_results, model_prediction, random_data,
                mean_data)
        else:
            # length_error produces the ValueError describing the mismatch.
            raise length_error(len(real_results))

    def score(self, metric) -> Score:
        """calculates the score of this model based on the metrics

        Args:
            metric(callable): metric computing the error between two result sets

        Returns:
            score(Score): score object with all the metric calculated scores
        """
        self.experiment_results = Score(
            metric(self.experiment_data.Real, self.experiment_data.Model),
            metric(self.experiment_data.Real, self.experiment_data.Rand),
            metric(self.experiment_data.Real, self.experiment_data.Mean),
        )
        return self.experiment_results
import scipy as sp
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error, roc_auc_score
from roulette.evaluation.constants import MetricsConstants
from roulette.evaluation.utils import close_enough,\
validate_multiple_lists_length,\
samples_to_bin_numbers,\
is_binary
# Convenience aliases for the error functions used throughout this module.
MSE = mean_squared_error
ABS_ERR = mean_absolute_error
# Supported error types for weighted_interpolated_error().
ERR_TYPES = ["mse", "abs"]
# Wasserstein (earth mover's) distance between two 1-D distributions.
WD = sp.stats.wasserstein_distance
def discriminability(
    a,
    mean,
    rand,
):
    """calculates the discrimination of model given error distribution a,
    against random and mean `models`

    Args:
        a (array-like): the error distribution of the model
        mean (array-like): the error distribution of a `guess the mean` model
        rand (array-like): the error distribution of a `guess at random` model

    Returns:
        d(float): the rate of discrimination (0.0 when the model is on
            average worse than the mean baseline)
    """
    if np.mean(a) > np.mean(mean):
        return 0.0
    zeros = np.asarray([0.0] * len(a))
    best_to_mean = WD(zeros, mean)
    model_to_mean = WD(a, mean)
    mean_to_rand = WD(mean, rand)
    return (model_to_mean / mean_to_rand) / (best_to_mean / mean_to_rand)
def certainty(
    a,
    rand,
):
    """the inverse of the standard deviation of the error distribution of a model

    Args:
        a (array-like): the error distribution of the model
        rand (array-like): the error distribution of a `guess at random` model

    Returns:
        c (float): std(rand) / std(a), or -1 when std(a) is numerically zero
    """
    std_a = np.std(a)
    # Degenerate case: a model with zero error spread gets the sentinel -1.
    if close_enough(std_a, 0.0, 16):
        return -1
    return float(np.std(rand) / std_a)
def divergency(s):
    """the average distribution distance between the ground truth and the
    predictions vectors

    Args:
        s (array-like): vector of per-sample distances

    Returns:
        d (float): mean distance
    """
    return float(np.asarray(s).mean())
def _weighted_error(
    bins,
    weights: np.ndarray,
    how: str,
):
    """returns an error calculating function according to weights of samples
    given by the weights matrix

    Args:
        bins (array-like): the bin edges for grouping the r, p vectors
        weights (np.ndarray): an N x N matrix (N == len(bins) - 1) giving the
            cost of predicting in bin j when the real value falls in bin i
        how (str): 'abs' for absolute per-sample error; anything else uses
            squared error

    Returns:
        _calc_metric (callable): a function that gets two vectors and returns
            the weighted error computed with accordance to bins, weights, how

    Raises:
        IndexError: if weights is not (len(bins)-1) x (len(bins)-1)
    """
    if weights.shape == (len(bins) - 1, len(bins) - 1):
        # Per-sample difference: absolute or squared.
        _step = (lambda i, j: abs(i - j)) if how == 'abs' else (
            lambda i, j: ((i - j)**2))

        def _calc_metric(r, p):
            # Weighted mean of per-sample differences; each sample's weight
            # is looked up by its (real-bin, predicted-bin) pair.
            if validate_multiple_lists_length(r, p):
                weighted_sum = 0.0
                sum_of_weights = 0.0
                r_bins, p_bins = samples_to_bin_numbers(r, p, bins=bins)
                for i, j, ib, jb in zip(r, p, r_bins, p_bins):
                    w = weights[ib, jb]
                    diff = _step(i, j)
                    weighted_sum += diff * w
                    sum_of_weights += w
                return (weighted_sum / sum_of_weights)
            else:
                raise ValueError(
                    "r, p should be of same length but len(r) = {lr}, len(p) = {lp}"
                    .format(lr=len(r), lp=len(p)))
        return _calc_metric
    else:
        raise IndexError(
            "size of weights matrix should be same as len(bins) - 1: {} X {}".
            format(len(bins) - 1,
                   len(bins) - 1))
def _resize_matrix_function(mx: np.ndarray, kind='linear'):
    """creates an interpolation function with accordance to the matrix mx

    Args:
        mx (np.ndarray): a square matrix
        kind (str): type of interpolation, gets values in linear, cubic

    Returns:
        f (callable): an interpolation function f(x, y) over the unit square

    NOTE(review): scipy.interpolate.interp2d is deprecated and removed in
    SciPy >= 1.14 — this needs migrating (e.g. to RegularGridInterpolator);
    confirm the SciPy version pinned by this project still provides it.
    """
    x = np.linspace(0, 1, mx.shape[0])
    y = np.linspace(0, 1, mx.shape[1])
    f = sp.interpolate.interp2d(x, y, mx, kind=kind)
    return f
def _resize_vector_function(v):
"""creates an interpolation function with accordance to the vector v.
with which we could expand the vector v
Args:
v (array-like): a 1d vector
Returns:
f (callable): an interpolation function in 1d
"""
_v = np.asarray(v)
f = sp.interpolate.interp1d(
np.linspace(_v.min(), _v.max(), len(v)), v, kind='cubic')
return f
def _interpolate_weights(
    w: np.ndarray,
    new_size: int,
):
    """extends a square weight matrix to new size

    Args:
        w (np.ndarray): a square matrix of weights
        new_size (int): the size of the output matrix

    Returns:
        _w (np.ndarray): a square weight matrix of shape (new_size, new_size)
            whose diagonal is reset to 1 (same-bin predictions keep weight 1)
    """
    if new_size == w.shape[0]:
        return w
    else:
        _w = w
        # Matrices of 3x3 or smaller are first expanded linearly to 4x4 so
        # that cubic interpolation (which needs >= 4 points) can be applied.
        if max(w.shape) <= 3:
            _w = _resize_matrix_function(w)(np.linspace(0, 1, 4),
                                            np.linspace(0, 1, 4))
        _w = _resize_matrix_function(
            _w, kind='cubic')(np.linspace(0, 1, new_size),
                              np.linspace(0, 1, new_size))
        for i in range(new_size):
            _w[i, i] = 1
        return _w
def _interpolate_bins(bins, new_size: int):
    """extends a bins vector to a new resolution

    Args:
        bins (array-like): bin edges vector
        new_size (int): number of edges in the output

    Returns:
        new_bins (array-like): interpolated bin edges of length new_size
    """
    edges = np.asarray(bins)
    target_x = np.linspace(edges.min(), edges.max(), new_size)
    return _resize_vector_function(bins)(target_x)
def get_weight_metric(bins, weights: np.ndarray,
                      new_size) -> (callable, np.ndarray):
    """returns an interpolated weight metric

    Args:
        bins(array-like): array of bin edges
        weights(np.ndarray): a small (3X3 usually) matrix of bin-weight errors
        new_size(int): interpolation new size ( > len(bins)-1)

    Returns:
        T(tuple[callable, np.ndarray]): a tuple where the first arg is a function to calculate
        weights between two scores and the second is matrix of new weights
    """
    new_bins = _interpolate_bins(bins, new_size + 1)
    new_weights = _interpolate_weights(weights, new_size)

    def _sample_weight(a, b):
        # Look up the interpolated weight for one (real, predicted) pair.
        a_bins, b_bins = samples_to_bin_numbers(
            [a], [b], bins=new_bins
        )  # TODO: change samples to bins to handle list and scalar
        return new_weights[a_bins[0], b_bins[0]]
    return _sample_weight, new_weights
def weighted_interpolated_error(
    size,
    bins,
    weights: np.ndarray,
    error_type: str,
):
    """returns an error calculation function over interpolated bins/weights

    Args:
        size(int): requested resolution; the effective bin count is max(9, size)
        bins(list): bin edges vector
        weights(np.ndarray): matrix of weights between bins
        error_type(str): type of error to use, one of ERR_TYPES ('abs'/'mse')

    Returns:
        _calc_metric (callable): a function that gets two vectors and returns
            the weighted error computed with the interpolated bins and weights

    Raises:
        ValueError: if error_type is not one of ERR_TYPES
    """
    if error_type in ERR_TYPES:
        new_size = max(9, size)
        new_bins = _interpolate_bins(bins, new_size + 1)
        new_weights = _interpolate_weights(weights, new_size)
        return _weighted_error(new_bins, new_weights, how=error_type)
    else:
        raise ValueError("error type must be in {}".format(ERR_TYPES))
def inverse_accuracy(y_real, y_pred):
    """Fraction of misclassified samples (1 - accuracy).

    y_pred may be hard labels, a 1-d probability vector, or an (n, 2)
    probability matrix; probabilities are converted to hard labels first.

    Raises:
        ValueError: if y_real and y_pred differ in length (previously the
            mismatch was silently ignored and None was returned).
    """
    def inverse_acc(y_r, y_p):
        # Count pairwise label disagreements.
        ineq_sum = 0
        for i, j in zip(y_r, y_p):
            if i != j:
                ineq_sum += 1
        return ineq_sum / len(y_real)
    if len(y_pred) != len(y_real):
        raise ValueError(
            "y_real and y_pred must be of the same length: {} != {}".format(
                len(y_real), len(y_pred)))
    if is_binary(y_pred):
        return inverse_acc(y_real, y_pred)
    # Guard .shape access for plain-list inputs.
    y_pred = np.asarray(y_pred)
    if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
        # (n, 2) class-probability matrix -> argmax labels.
        return inverse_acc(y_real, np.argmax(y_pred, axis=1))
    # 1-d probability vector -> round to the nearest label.
    return inverse_acc(y_real, np.round(y_pred))
def inverse_roc_auc(y_real, y_pred):
    """ROC-AUC score computed from probability predictions.

    Args:
        y_real: ground-truth binary labels
        y_pred: (n, 2) class-probability matrix, or a 1-d probability vector
            (assumes column 0 / the complement is the relevant class
            probability — TODO confirm against the model's output layout)

    Raises:
        ValueError: if y_pred contains hard binary labels instead of
            probabilities.
    """
    if is_binary(y_pred):
        raise ValueError("y_pred should be (n, 2) shaped probability vector")
    if len(y_pred.shape) == 2:
        return roc_auc_score(y_real, y_pred[:, 0])
    else:
        return roc_auc_score(y_real, 1 - y_pred)
# Registry of regression error metrics selectable by name.
REGRESSION_METRICS = {
    "mse": mean_squared_error,
    "abs": mean_absolute_error
}
# Registry of binary-classification error metrics selectable by name.
BINARY_CLASSIFICATION_METRICS = {
    "acc": inverse_accuracy,
    # "roc_auc": inverse_roc_auc
}
def get_regression_metric(metric: str) -> callable:
    """Look up a regression metric by name.

    Raises:
        ValueError: for an unknown metric name (previously None was
            returned silently, deferring the failure to the call site).
    """
    if metric in MetricsConstants.REGRESSION_METRICS:
        return REGRESSION_METRICS[metric]
    raise ValueError(
        "metric must be one of {}".format(MetricsConstants.REGRESSION_METRICS))
def get_binary_classification_metric(metric: str) -> callable:
    """Look up a binary-classification metric by name.

    Raises:
        ValueError: for an unknown metric name (previously None was
            returned silently, deferring the failure to the call site).
    """
    if metric in MetricsConstants.BINARY_METRICS:
        return BINARY_CLASSIFICATION_METRICS[metric]
    raise ValueError(
        "metric must be one of {}".format(MetricsConstants.BINARY_METRICS))
# Public API of this module; get_binary_classification_metric was missing
# even though its regression counterpart was exported.
__all__ = [
    'discriminability', 'certainty', 'divergency',
    'weighted_interpolated_error', 'get_regression_metric',
    'get_binary_classification_metric'
]
import json
import numpy as np
from roulette.evaluation.utils import parse_ndarray_as_float_list
from roulette.evaluation.experiment import Experiment
from roulette.evaluation.simulation_data import Metrics
from roulette.evaluation.metrics import discriminability, certainty
from roulette.evaluation.plotting.hist import single_hist
from roulette.evaluation.plotting.result_data import ResultData
class MonteCarloSimulation(object):
    """Facilitates the experiments conducted and calculates the metrics.

    Attributes:
        experiments (list): Experiment instances loaded so far.
        scores (dict): per-source score lists ("model"/"rand"/"mean"),
            populated by digest().
        metrics (Metrics): simulation metrics, populated by digest().
        exp_type (str): experiment type forwarded to every Experiment.
    """
    def __init__(
            self,
            exp_type: str
    ):
        """Initiates a Monte Carlo simulation.

        Args:
            exp_type(str): experiment type forwarded to each Experiment
                created by load_experiment().
        """
        self.experiments = []
        self.scores = {}
        self.metrics = None
        self.exp_type = exp_type

    def load_experiment(self,
                        real: list,
                        real_trained: np.ndarray,
                        model: list,
                        ):
        """Loads a single experiment into the simulation.

        Args:
            real(list): ground truth results of the test set.
            real_trained(np.ndarray): ground truth results of the training set.
            model(list): subjected-model predictions.
        """
        self.experiments.append(
            Experiment(self.exp_type, real, real_trained, model))

    def digest(self, metric):
        """Calculates the full simulation results on the experiments
        loaded thus far.

        Args:
            metric(callable): the metric to calculate results on,
                with signature (array-like, array-like) -> float.
        """
        experiment_scores = [exp.score(metric) for exp in self.experiments]
        self.scores["model"] = [s.Model for s in experiment_scores]
        self.scores["rand"] = [s.Rand for s in experiment_scores]
        self.scores["mean"] = [s.Mean for s in experiment_scores]
        self.metrics = Metrics(
            discriminability(self.scores["model"], self.scores["mean"],
                             self.scores["rand"]),
            certainty(self.scores["model"], self.scores["rand"]),
        )

    def get_metrics(self):
        """Returns the Metrics namedtuple (None before digest() ran).

        Returns:
            metrics (Metrics)
        """
        return self.metrics

    def metrics_as_dict(self):
        """Returns the metrics as a dict, or None before digest() ran.

        Returns:
            metrics (dict): dictionary of metrics, or None.
        """
        if self.metrics:
            return {
                "discriminability": self.metrics.Discriminability,
                "certainty": self.metrics.Certainty,
            }
        return None

    def metrics_to_json(
        self,
        path: str,
    ):
        """Saves MC result metrics as a .json file.

        Args:
            path(str): path of the json file to write.
        """
        with open(path, 'w+') as output_file:
            output_file.write(json.dumps(self.metrics_as_dict()))

    def save_experiment_summery(
        self,
        path: str,
    ):
        """Saves a summary report of the experiments.

        (Method name kept as "summery" for backward compatibility.)

        Args:
            path(str): path of the summary report json file to write.
        """
        experiment_summery = {}
        for i, exp in enumerate(self.experiments):
            experiment_summery["experiment_{}".format(i)] = {
                "real": parse_ndarray_as_float_list(exp.experiment_data.Real),
                "model": parse_ndarray_as_float_list(exp.experiment_data.Model),
                "mean": parse_ndarray_as_float_list(exp.experiment_data.Mean),
                "rand": parse_ndarray_as_float_list(exp.experiment_data.Rand),
            }
        with open(path, 'w+') as output_file:
            output_file.write(json.dumps(experiment_summery))

    def plot(self, path=None, title=None):
        """Plots simulation histograms.

        Args:
            path(str): optional path to save the plot to; when omitted
                the figure is returned instead of being saved.
            title(str): optional plot title.
        """
        # The old ``try: ... except Exception as e: raise e`` wrapper was a
        # no-op and has been removed; exceptions propagate unchanged.
        max_scores = [max(v) for v in self.scores.values()]
        bins = np.linspace(0, min(1.0, max(max_scores)),
                           max(int(len(self.scores["model"]) / 10), 100))
        plots = [ResultData(k, v, None) for k, v in self.scores.items()]
        if path:
            single_hist(data=plots, bins=bins, path=path, title=title)
        else:
            return single_hist(data=plots, bins=bins, title=title)
import random
from enum import Enum
class Color(Enum):
    """Possible colors of a roulette slot."""

    Green = 0
    Red = 1
    Black = 2
class Slot:
    """A single wheel slot: a color paired with a number."""

    def __init__(self, color, number):
        """
        Args:
            color: color of the slot (e.g. a Color member).
            number (int): number printed on the slot.
        """
        # Bug fix: the constructor previously discarded ``color`` and stored
        # None, forcing callers to patch ``.color`` afterwards.
        self.color = color
        self.number = number
class Wheel:
    """A roulette wheel: a pool of colored, numbered slots to draw from.

    NOTE(review): the construction happens inline in __init__ below while
    create_wheel() is still an empty stub — presumably the inline code was
    meant to move there; confirm before refactoring.
    """
    def __init__(self, size=36, number_of_reds=18, number_of_blacks=18, number_of_greens=2):
        # NOTE(review): ``size`` is stored but wheel_size is derived from the
        # color counts instead — confirm whether ``size`` is still needed.
        self.size = size
        self.number_of_reds = number_of_reds
        self.number_of_blacks = number_of_blacks
        self.number_of_greens = number_of_greens
        self.wheel_size = number_of_reds + number_of_blacks + number_of_greens
        self.black_slots = []
        self.red_slots = []
        self.green_slots = []
        self.slots = []
        # Currently a no-op (see create_wheel below).
        self.create_wheel()
        """
        :brief: A very ugly construction.
        :details: The model is made up of 36 slots. Each slot is made up of a color and a number.
        1.) Every other slot starting from 1 is a red slot.
        2.) Every other slot starting from 2 is a black slot.
        3.) The
        Combine all these lists into one giant list representing the model. That's where random will draw from.
        :TODO: For custom wheels, it's unsafe to assume the number of green slots.
        """
        # Odd numbers (1, 3, 5, ...) become red slots.
        for i in list( range( 1, self.wheel_size, 2 ) ) :
            self.red_slots.append( Slot( Color.Red, i ) )
        # TODO: find a better way to do this
        # Works around Slot.__init__ storing None instead of the color.
        for i in self.red_slots:
            i.color = Color.Red
        # Even numbers (0, 2, 4, ...) become black slots.
        for i in list( range( 0, self.wheel_size, 2 ) ) :
            self.black_slots.append( Slot( Color.Black, i ) )
        # TODO: find a better way to do this
        for i in self.black_slots:
            i.color = Color.Black
        # Slot 0 and the highest red slot are repainted green;
        # green_slots itself stays empty.
        self.black_slots[0].color = Color.Green
        self.red_slots[-1].color = Color.Green
        self.slots = self.black_slots + self.red_slots + self.green_slots
    def random_spin(self):
        # Uniformly pick one slot, as a physical spin would.
        return random.choice(self.slots)
    def create_wheel( self ) :
        # TODO I doubt this will work
        pass
import random
import time
import locale
import sys
import argparse
from tabulate import tabulate
from .vars.numbers import *
from .vars.bets import *
from .utils import config
# Currency locale: adopt the user's default locale so amounts are
# formatted with their local currency symbol and grouping.
locale.setlocale(locale.LC_ALL, '')
def showBank():
    """Print the player's current bank balance."""
    global currentBank
    print('* Current bank: ' + amountToCurrency(currentBank))
def updateBank(amount):
    """Apply a win or loss to the bank, persist it, and report the result.

    Args:
        amount (float): positive for winnings, negative for losses.
    """
    global currentBank
    currentBank += amount
    word = 'winnings' if amount >= 0 else 'loss'
    # Persist the new balance so it survives between sessions.
    config.update('bank', currentBank)
    print('* After accounting for your %s of %s, your bank is now %s' %
          (word, amountToCurrency(amount), amountToCurrency(currentBank)))
def checkBankStatus():
    """Exit the program with a hint message when the bank is empty."""
    global currentBank
    if currentBank > 0:
        return
    print()
    print('You are out of money!')
    print('You can add money to your bank with the flag `--bank [amount]`')
    print()
    sys.exit()
def amountToCurrency(amount):
    """Format ``amount`` using the user's currency locale.

    Falls back to a plain ``$`` prefix when the active locale does not
    support currency formatting (e.g. the C locale).

    Args:
        amount (float): monetary amount.

    Returns:
        str: formatted currency string.
    """
    try:
        return locale.currency(amount, grouping=True)
    except ValueError:  # the locale does not support currency formatting
        # Bug fix: '$' + str(round(x, 2)) printed "$5.5"; always show two
        # decimals and thousands grouping, matching the locale path above.
        return '${:,.2f}'.format(amount)
def getMaxPossibleBet(maximum_bet):
    """Return the table maximum, capped by what the player can afford.

    Args:
        maximum_bet (float): maximum bet allowed for this game.

    Returns:
        float: the smaller of the bank and the table maximum.
    """
    global currentBank
    return min(currentBank, maximum_bet)
def wheel():
    """Build the full ball path for one spin.

    The returned list starts at a random slot, loops around the wheel a
    random number of times, and ends on the winning slot, e.g.::

        [(2, 'black'), (25, 'red'), ..., (16, 'red'), (33, 'black')]
         ^ ball initial hit          ^ looping              ^ winner
    """
    global withColors
    slot_count = len(withColors)
    start = random.randint(1, slot_count - 1)
    runs = random.randint(2, 5)
    winner = random.randint(1, slot_count)
    path = []
    for run in range(runs):
        if run == 0:
            path.extend(withColors[start:])   # first turn: from the start slot
        elif run == runs - 1:
            path.extend(withColors[:winner])  # last turn: stop at the winner
        else:
            path.extend(withColors)           # full intermediate loop
    return path
def game():
    """
    Spin the wheel, show the ball position and return the winning position
    Return example: `(10, 'black')`
    """
    # Get random wheel
    w = wheel()
    # Spinner characters cycled to animate the ball in place (via `\r`).
    seq = ('/', '-', '\\', '_')
    for i, item in enumerate(w):
        numer, color = item
        print(' %s %s %s' %
              (str(seq[i % 4]), getColorIcon(color), str(numer)), end='\r')
        # Slow the animation down as the ball approaches the final slot.
        sleep(i, len(w))
    # Hide game: overwrite the animation line with spaces.
    print(' ' * 20, end='\r')
    # Get winning position: by construction the last entry is the winner.
    winning = w[-1]
    return winning
def getOutcome(betAmount, bet, specificChoice=None):
    """
    Initiate a game with a bet amount and a bet type and calculate the outcome

    Args:
        betAmount (int): amount wagered.
        bet (dict): bet definition with `winningSpaces` and `payout` keys.
        specificChoice: optional single wheel number for "pick one" bets.
    """
    # Spin the wheel and get the winning position
    winning = game()
    number, color = winning
    print()
    print('* Winning position: %s %s' % (getColorIcon(color), str(number)))
    # print()
    # A "pick one" bet wins only on the exact number; other bets win when
    # the number lands in the bet's winning spaces.
    if (specificChoice and number == specificChoice) or (specificChoice is None and number in bet['winningSpaces']):
        if isUnicodeSupported():
            print(' ... you won! ' + u"\U0001F4B0" ' ' * 3)
        else:
            print(' ... you won!')
        # Payout is a ratio (a to b): stake * a / b is credited.
        a, b = bet['payout']
        updateBank(betAmount * a / b)
    else:
        print(' ... you lost!')
        # Update bank
        updateBank(betAmount * -1)
def getColorIcon(color):
    """Return an icon for the number color when unicode is available,
    otherwise the color's first letter uppercased (B, R or G)."""
    if isUnicodeSupported():
        icons = {
            'red': u"\U0001F534",
            'black': u"\u2B24",
            'green': u"\U0001F49A",
        }
        if color in icons:
            return icons[color]
    return color[:1].upper()
def sleep(iteration, total):
    """Pause between displayed wheel positions, sleeping longer and longer
    to simulate the wheel slowing down at the end of the game."""
    # Percentage of the wheel rotation completed so far.
    pct = iteration / total * 100
    if iteration == total - 2:
        delay = 0.4   # second-to-last position: dramatic pause
    elif iteration == total - 1:
        delay = 1     # final position: longest pause
    else:
        delay = 0.25  # default
        # Progressively longer delays as the spin completes.
        for threshold, value in ((50, 0.05), (70, 0.09), (85, 0.1),
                                 (90, 0.15), (95, 0.18), (99, 0.2)):
            if pct < threshold:
                delay = value
                break
    time.sleep(delay)
def isUnicodeSupported():
    """Return `True` when stdout reports a UTF encoding."""
    encoding = sys.stdout.encoding
    return bool(encoding) and encoding.lower().startswith('utf-')
def betsTable():
    """Print a human readable table of the available bets."""
    global bets
    rows = []
    for index, bet in enumerate(bets):
        numerator, denominator = bet['payout']
        rows.append([
            index + 1,                 # bets are displayed 1-based
            bet['name'],
            '{} to {}'.format(numerator, denominator),
        ])
    # Show bets table
    print(tabulate(rows, headers=['#', 'Bet', 'Payout']))
def isBetTypeValid(betNumber):
    """Return True when ``betNumber`` maps to an existing bet."""
    global bets
    try:
        # Raises ValueError (not a number) or IndexError (out of range).
        getBet(betNumber)
    except (ValueError, IndexError):
        return False
    return True
def getBet(betNumber):
    """
    Returns a bet dict for a specific bet

    Args:
        betNumber (str|int): 1-based bet number, as shown in the bets table.

    Returns:
        dict: the corresponding entry of `bets`.
    """
    # Bets are displayed 1-based but stored 0-based.
    return bets[int(betNumber) - 1]
def isBetAmountValid(betAmount, minimum_bet, maximum_bet):
    """Check if a bet amount is between the minimum and maximum allowed
    amounts.

    Args:
        betAmount (float): amount the player wants to bet (falsy -> invalid).
        minimum_bet (float): table minimum.
        maximum_bet (float): table maximum (further capped by the bank).

    Returns:
        bool: True when the amount is within limits.
    """
    # Idiom fix: return the condition directly instead of if/return True.
    return bool(
        betAmount
        and float(minimum_bet) <= betAmount <= getMaxPossibleBet(maximum_bet)
    )
def isSpecificChoiceValid(choice, type_):
    """Return True when ``choice`` is a real position on the chosen wheel."""
    global american, french
    # Convert to int except for `00`, which only exists as a string.
    if choice != '00':
        choice = int(choice)
    if type_ == 'french':
        return choice in list(french)
    if type_ == 'american':
        return choice in list(american)
    return False
def play(type_='french', minimum_bet=1, maximum_bet=10000, break_=False):
    """
    Initiate a game

    Args:
        type_ (str): wheel type, 'french' or 'american'.
        minimum_bet (int): minimum bet allowed.
        maximum_bet (int): maximum bet allowed (capped by the bank).
        break_ (bool): when True, stop after a single round.
    """
    # Check bank status
    checkBankStatus()
    # Default vars
    previousBetNumber = previousBetAmount = None
    try:
        # Show bets table
        print()
        betsTable()
        # Show bank
        print()
        showBank()
        while True:
            # Choose a bet number (loop until the input is valid)
            valid = False
            while valid is False:
                if previousBetNumber:
                    previousBet = getBet(previousBetNumber)
                    betNumber = input(
                        '* Choose a bet number (just press [ENTER] to play again `%s`): ' % (previousBet['name']))
                    # Default to previous bet
                    if betNumber == '':
                        betNumber = previousBetNumber
                else:
                    betNumber = input('* Choose a bet number: ')
                # Check if the bet type is valid
                valid = isBetTypeValid(betNumber)
            # Display bet name
            bet = getBet(betNumber)
            print('* Bet chosen: %s' % (bet['name']))
            # Optionally pick a specific wheel position
            specificChoice = None
            if bet['type'] == 'pickone':
                valid = False
                while valid is False:
                    specificChoice = input('* Pick a number from the wheel: ')
                    # Check if the chosen position is valid
                    valid = isSpecificChoiceValid(specificChoice, type_)
                # NOTE(review): specificChoice stays a string here while the
                # wheel numbers are ints; confirm getOutcome's equality check
                # can ever match for "pick one" bets.
            # Choose a bet amount (loop until the input is valid)
            valid = False
            while valid is False:
                if previousBetAmount and previousBetAmount < getMaxPossibleBet(maximum_bet):
                    betAmount = input('* Place your bets: (min: %s, max: %s) (just press [ENTER] to play again %s): ' % (
                        amountToCurrency(minimum_bet), amountToCurrency(getMaxPossibleBet(maximum_bet)), amountToCurrency(previousBetAmount)))
                    # Default to previous bet
                    if betAmount == '':
                        betAmount = previousBetAmount
                else:
                    betAmount = input('* Place your bets: (min: %s, max: %s): ' % (
                        amountToCurrency(minimum_bet), amountToCurrency(getMaxPossibleBet(maximum_bet))))
                # Check if the bet amount is valid
                if betAmount:
                    valid = isBetAmountValid(
                        float(betAmount), minimum_bet, maximum_bet)
            # Initiate the game
            # NOTE(review): int() truncates fractional bet amounts — confirm
            # whether cent-level bets should be preserved.
            getOutcome(int(betAmount), bet, specificChoice)
            # Start another game
            time.sleep(2)
            # Set vars for re-play
            previousBetNumber = betNumber
            previousBetAmount = float(betAmount)
            # Break if requested
            if break_:
                break
    except KeyboardInterrupt:
        # Ctrl-C ends the session gracefully; the bank was already persisted.
        print()
        showBank()
        print('* Your bank is saved.')
        print('* Game interrupted')
def firstPlay(bank=None, type_='french', minimum_bet=1, maximum_bet=10000, break_=False):
    """Bootstrap a session: read the config, set the bank and start playing.

    Args:
        bank (int, optional): when given, overrides the saved bank amount.
        type_ (str): wheel type, 'french' or 'american'.
        minimum_bet (int): minimum bet allowed.
        maximum_bet (int): maximum bet allowed.
        break_ (bool): when True, play a single round only.
    """
    global currentBank, withColors
    # Get config
    conf = config.getConfig()
    # Override bank if necessary
    if bank:
        config.update('bank', bank)
        # NOTE(review): `conf` was read before this override; unless
        # config.update mutates the same dict, the new bank amount only
        # takes effect next session — confirm.
    # Vars
    currentBank = float(conf['bank'])  # Set the default bank
    # Create the roulette wheel with colors
    withColors = addColors(french if type_ == 'french' else american)
    play(type_=type_,
         minimum_bet=minimum_bet,
         maximum_bet=maximum_bet,
         break_=break_)
def main():
    """Command-line entry point: parse arguments and start a session."""
    # Parse arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--type", type=str, help="Roulette type",
                        choices=['french', 'american'], default='french')
    parser.add_argument("-b", "--bank", type=int, help="Set bank amount")
    parser.add_argument("-i", "--minimum_bet", type=int,
                        help="Minimum bet allowed", default=1)
    parser.add_argument("-x", "--maximum_bet", type=int,
                        help="Maximum bet allowed", default=10000)
    args = parser.parse_args()
    firstPlay(bank=args.bank,
              type_=args.type,
              minimum_bet=args.minimum_bet,
              maximum_bet=args.maximum_bet)


if __name__ == '__main__':
    main()
from .util import _carb
from .util import _fat
from .util import _sod_pot
from .util import parse_quantity
from .util import round_increment
def calories(quantity: "int|str") -> str:
    """Round a calories quantity.

    Values under 5 display as 0, values up to 50 round to the nearest 5,
    and larger values round to the nearest 10.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded calories quantity.
    """
    value, unit = parse_quantity(quantity)
    if value is None or value < 5:
        return f"0{unit}"
    increment = 5 if value <= 50 else 10
    return f"{round_increment(value, increment)}{unit}"
def total_fat(quantity: "int|str") -> str:
    """Round a total fat quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded total fat quantity.
    """
    # All fat quantities share the same rounding rule (see util._fat).
    return _fat(quantity)


tot_fat = total_fat  # short alias


def sat_fat(quantity: "int|str") -> str:
    """Round a saturated fat quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded saturated fat quantity.
    """
    return _fat(quantity)


def trans_fat(quantity: "int|str") -> str:
    """Round a trans-unsaturated (trans) fat quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded trans fat quantity.
    """
    return _fat(quantity)


def poly_fat(quantity: "int|str") -> str:
    """Round a polyunsaturated fat quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded polyunsaturated fat quantity.
    """
    return _fat(quantity)


def mono_fat(quantity: "int|str") -> str:
    """Round a monounsaturated fat quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded monounsaturated fat quantity.
    """
    return _fat(quantity)
def cholesterol(quantity: "int|str", minimal: bool = False) -> str:
    """Round a cholesterol quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded cholesterol quantity.
    """
    value, unit = parse_quantity(quantity)
    if not unit.strip():
        unit = "mg"  # default unit for cholesterol
    if value < 2:
        return f"0{unit}"
    if value < 5:
        # Small amounts display as "less than 5" ("<5" in minimal form).
        return f"<5{unit}" if minimal else f"less than 5{unit}"
    return f"{round_increment(value, 5)}{unit}"
def sodium(quantity: "int|str") -> str:
    """Round a sodium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded sodium quantity.
    """
    # Sodium and potassium share the same rounding rule (util._sod_pot).
    return _sod_pot(quantity)


def potassium(quantity: "int|str") -> str:
    """Round a potassium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded potassium quantity.
    """
    return _sod_pot(quantity)
def total_carb(quantity: "int|str", minimal: bool = False) -> str:
    """Round a total carbohydrate quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded total carbohydrate quantity.
    """
    # All carbohydrate quantities share the same rounding rule (util._carb).
    return _carb(quantity, minimal)


tot_carb = total_carb  # short alias


def dietary_fiber(quantity: "int|str", minimal: bool = False) -> str:
    """Round a dietary fiber quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded dietary fiber quantity.
    """
    return _carb(quantity, minimal)


diet_fiber = dietary_fiber  # short alias


def soluble_fiber(quantity: "int|str", minimal: bool = False) -> str:
    """Round a soluble fiber quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded soluble fiber quantity.
    """
    return _carb(quantity, minimal)


sol_fiber = soluble_fiber  # short alias


def insoluble_fiber(quantity: "int|str", minimal: bool = False) -> str:
    """Round an insoluble fiber quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded insoluble fiber quantity.
    """
    return _carb(quantity, minimal)


insol_fiber = insoluble_fiber  # short alias


def total_sugars(quantity: "int|str", minimal: bool = False) -> str:
    """Round a total sugars quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded total sugars quantity.
    """
    return _carb(quantity, minimal)


tot_sugars = total_sugars  # short alias


def added_sugars(quantity: "int|str", minimal: bool = False) -> str:
    """Round an added sugars quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded added sugars quantity.
    """
    return _carb(quantity, minimal)


add_sugars = added_sugars  # short alias


def sugar_alcohol(quantity: "int|str", minimal: bool = False) -> str:
    """Round a sugar alcohol quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded sugar alcohol quantity.
    """
    return _carb(quantity, minimal)


sugar_alc = sugar_alcohol  # short alias


def other_carb(quantity: "int|str", minimal: bool = False) -> str:
    """Round an other-carbohydrate quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded other-carbohydrate quantity.
    """
    return _carb(quantity, minimal)
def protein(quantity: "int|str", minimal: bool = False) -> str:
    """Round a protein quantity.

    Args:
        quantity (int|str): The quantity to be rounded.
        minimal (bool, optional): Indicate whether to return in minimal format. Defaults to False.

    Returns:
        str: The rounded protein quantity.
    """
    value, unit = parse_quantity(quantity)
    if not unit.strip():
        unit = "g"  # default unit for protein
    if value < 0.5:
        return f"0{unit}"
    if value < 1:
        # Amounts under a gram display as "less than 1" ("1" in minimal form).
        return f"1{unit}" if minimal else f"less than 1{unit}"
    return f"{round_increment(value, 1)}{unit}"
from .util import _vmo
def calcium(quantity: "int|str") -> str:
    """Round a calcium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded calcium quantity.
    """
    # Rounded to the nearest 10 mg.
    return _vmo(quantity, 10, "mg")


def potassium(quantity: "int|str") -> str:
    """Round a potassium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded potassium quantity.
    """
    # Rounded to the nearest 10 mg.
    return _vmo(quantity, 10, "mg")


def phosphorus(quantity: "int|str") -> str:
    """Round a phosphorus quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded phosphorus quantity.
    """
    # Rounded to the nearest 10 mg.
    return _vmo(quantity, 10, "mg")


def magnesium(quantity: "int|str") -> str:
    """Round a magnesium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded magnesium quantity.
    """
    # Rounded to the nearest 5 mg.
    return _vmo(quantity, 5, "mg")


def iron(quantity: "int|str") -> str:
    """Round a iron quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded iron quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


def zinc(quantity: "int|str") -> str:
    """Round a zinc quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded zinc quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


def copper(quantity: "int|str") -> str:
    """Round a copper quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded copper quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


def manganese(quantity: "int|str") -> str:
    """Round a manganese quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded manganese quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


def iodine(quantity: "int|str") -> str:
    """Round a iodine quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded iodine quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")


def selenium(quantity: "int|str") -> str:
    """Round a selenium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded selenium quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")


def chromium(quantity: "int|str") -> str:
    """Round a chromium quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded chromium quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")


def molybdenum(quantity: "int|str") -> str:
    """Round a molybdenum quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded molybdenum quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")
from .util import _vmo
def vitamin_c(quantity: "int|str") -> str:
    """Round a vitamin C quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin C quantity.
    """
    # Rounded to the nearest 1 mg.
    return _vmo(quantity, 1, "mg")


vit_c = vitamin_c  # short alias


def vitamin_e(quantity: "int|str") -> str:
    """Round a vitamin E quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin E quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


vit_e = vitamin_e  # short alias


def thiamine(quantity: "int|str") -> str:
    """Round a thiamine/vitamin B1 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded thiamine/vitamin B1 quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


vitamin_b1 = vit_b1 = thiamine  # aliases


def riboflavin(quantity: "int|str") -> str:
    """Round a riboflavin/vitamin B2 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded riboflavin/vitamin B2 quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


vitamin_b2 = vit_b2 = riboflavin  # aliases


def niacin(quantity: "int|str") -> str:
    """Round a niacin/vitamin B3 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded niacin/vitamin B3 quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


vitamin_b3 = vit_b3 = niacin  # aliases


def vitamin_b6(quantity: "int|str") -> str:
    """Round a vitamin B6 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin B6 quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


vit_b6 = vitamin_b6  # short alias


def pantothenic_acid(quantity: "int|str") -> str:
    """Round a pantothenic acid/vitamin B5 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded pantothenic acid/vitamin B5 quantity.
    """
    # Rounded to the nearest 0.1 mg.
    return _vmo(quantity, 0.1, "mg")


panto_acid = vitamin_b5 = vit_b5 = pantothenic_acid  # aliases


def vitamin_a(quantity: "int|str") -> str:
    """Round a vitamin A quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin A quantity.
    """
    # Rounded to the nearest 10 mcg.
    return _vmo(quantity, 10, "mcg")


vit_a = vitamin_a  # short alias


def folate(quantity: "int|str") -> str:
    """Round a folate/vitamin B9 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded folate/vitamin B9 quantity.
    """
    # Rounded to the nearest 5 mcg.
    return _vmo(quantity, 5, "mcg")


vitamin_b9 = vit_b9 = folate  # aliases


def vitamin_k(quantity: "int|str") -> str:
    """Round a vitamin K quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin K quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")


vit_k = vitamin_k  # short alias


def biotin(quantity: "int|str") -> str:
    """Round a biotin/vitamin B7 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded biotin/vitamin B7 quantity.
    """
    # Rounded to the nearest 1 mcg.
    return _vmo(quantity, 1, "mcg")


vitamin_b7 = vit_b7 = biotin  # aliases


def vitamin_d(quantity: "int|str") -> str:
    """Round a vitamin D quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin D quantity.
    """
    # Rounded to the nearest 0.1 mcg.
    return _vmo(quantity, 0.1, "mcg")


vit_d = vitamin_d  # short alias


def vitamin_b12(quantity: "int|str") -> str:
    """Round a vitamin B12 quantity.

    Args:
        quantity (int|str): The quantity to be rounded.

    Returns:
        str: The rounded vitamin B12 quantity.
    """
    # NOTE(review): a 10 mcg increment is coarse compared to vitamin D's
    # 0.1 mcg — confirm this matches the intended rounding table.
    return _vmo(quantity, 10, "mcg")


vit_b12 = vitamin_b12  # short alias
from round_robin_tournament.participant import Participant
class Match:
    """
    A match represents a single match in a tournament, between 2 participants.
    It adds empty participants as placeholders for the winner and loser,
    so they can be accessed as individual object pointers.
    """
    def __init__(self, left_participant, right_participant):
        self.__left_participant = left_participant
        self.__right_participant = right_participant
        # Placeholders resolved in place by set_winner() once decided.
        self.__winner = Participant()
        self.__loser = Participant()

    def set_winner(self, competitor):
        """
        When the match is over, set the winner competitor here and the loser
        will be set too.

        Raises:
            ValueError: if ``competitor`` belongs to neither participant.
        """
        if competitor == self.__left_participant.get_competitor():
            self.__winner.set_competitor(competitor)
            self.__loser.set_competitor(self.__right_participant.get_competitor())
        elif competitor == self.__right_participant.get_competitor():
            self.__winner.set_competitor(competitor)
            self.__loser.set_competitor(self.__left_participant.get_competitor())
        else:
            # More specific than the bare Exception raised previously;
            # still caught by any caller handling Exception.
            raise ValueError("Invalid competitor")

    def get_winner_participant(self):
        """
        Returns the winner placeholder Participant. Its competitor stays
        None until set_winner() has been called.
        """
        return self.__winner

    def get_loser_participant(self):
        """
        Returns the loser placeholder Participant. Its competitor stays
        None until set_winner() has been called.
        """
        return self.__loser

    def get_participants(self):
        """
        Get the left and right participants in a list.
        """
        return [self.__left_participant, self.__right_participant]

    def is_ready_to_start(self):
        """
        This returns True if both of the participants coming in have their
        competitors "resolved" (the matches they come from are finished)
        and this match's winner has not been set yet.
        """
        is_left_resolved = self.__left_participant.get_competitor() is not None
        is_right_resolved = self.__right_participant.get_competitor() is not None
        is_winner_resolved = self.__winner.get_competitor() is not None
        return is_left_resolved and is_right_resolved and not is_winner_resolved
import math
import itertools
from round_robin_tournament.match import Match
from round_robin_tournament.participant import Participant
class Tournament:
    """
    This is a round-robin tournament where each match is between 2 competitors.
    It takes in a list of competitors, which can be strings or any type of
    Python object, but they should be unique. They should be ordered by a
    seed, with the first entry being the most skilled and the last being the
    least. They can also be randomized before creating the instance.

    Optional options dict fields:
        winners_to_take: How many players will be counted as winners once
            the games have been played.
    """
    def __init__(self, competitors_list, options=None):
        """Create one match for every unordered pair of competitors.

        Args:
            competitors_list: unique competitors, ordered by seed.
            options (dict, optional): see class docstring.
        """
        assert len(competitors_list) > 1
        # Bug fix: ``options={}`` was a shared mutable default argument.
        options = {} if options is None else options
        self.__wins = {}
        self.__winners_to_take = 1
        if 'winners_to_take' in options:
            self.__winners_to_take = options['winners_to_take']
        participants = list(map(Participant, competitors_list))
        # Round-robin: one match per unordered pair, in seed order.
        self.__matches = [Match(left, right)
                          for left, right in itertools.combinations(participants, 2)]

    def __iter__(self):
        return iter(self.__matches)

    def get_active_matches(self):
        """
        Returns a list of all matches that are ready to be played.
        """
        return [match for match in self.get_matches() if match.is_ready_to_start()]

    def get_matches(self):
        """
        Returns a list of all matches for the tournament.
        """
        return self.__matches

    def get_active_matches_for_competitor(self, competitor):
        """
        Given the string or object of the competitor that was supplied when
        creating the tournament instance, returns the list of active matches
        they are currently playing in (empty when they are not up to play).
        """
        return [match for match in self.get_active_matches() if competitor in _get_match_competitors(match)]

    def get_winners(self):
        """
        Returns None while matches are still to be played; otherwise the top
        ``winners_to_take`` competitors ordered by number of wins.
        """
        if len(self.get_active_matches()) > 0:
            return None
        # NOTE(review): competitors with zero wins never enter __wins, so
        # fewer than winners_to_take entries may be returned — confirm.
        winners_asc = [x[0] for x in sorted(self.__wins.items(), key=lambda kv: kv[1])]
        winners_asc.reverse()
        return winners_asc[0:self.__winners_to_take]

    def add_win(self, match, competitor):
        """
        Set the victor of a match, given the competitor string/object and match.
        """
        match.set_winner(competitor)
        if competitor not in self.__wins:
            self.__wins[competitor] = 0
        self.__wins[competitor] += 1
def _get_match_competitors(match):
    """Return the competitor objects of both participants in ``match``."""
    return [p.get_competitor() for p in match.get_participants()]
def numbers_rndwitherr(value, error, errdig=2):
    """Return rounded floating points for `value` and `error`.

    The value is rounded to the least significant digit kept in the error.
    This function duplicates how numbers are rounded internally; because of
    the way floating point numbers are printed, the result may not display
    proper significant figures — use the string-returning functions for
    guaranteed significant figures.

    Parameters
    ----------
    value: float
        The value in floating point.
    error: float
        The error/uncertainty in floating point (must be >= 0).
    errdig: int, optional
        Number of digits to keep in the error (default 2).

    Returns
    -------
    value: float
        The value rounded based on the error.
    error: float
        The error rounded to ``errdig`` digits.

    Examples
    --------
    >>> numbers_rndwitherr(0.002345, 0.0072)
    (0.002, 0.007)
    >>> numbers_rndwitherr(2.345864, 0.0072)
    (2.3459, 0.0072)
    >>> numbers_rndwitherr(83e-4, 0)
    (0.0083, 0)
    """
    import math

    if error < 0:
        raise ValueError('Errors are expected to be >= 0.')

    magnitude = 0
    if value != 0:
        magnitude = math.floor(math.log(math.fabs(value), 10))

    if error == 0:
        # No error given: keep ~12 significant digits, beyond which we run
        # up against 64-bit float precision.
        round_digit = int(magnitude - 12)
    elif error < math.fabs(value) or value == 0:
        # Round to the least significant digit kept in the error.
        round_digit = int(math.floor(math.log(error, 10) - errdig + 1))
    else:
        # The error dominates the value: round both to the value's magnitude.
        round_digit = magnitude

    return round(value, -round_digit), round(error, -round_digit)
def rndwitherr(value, error, errdig=2, lowmag=-1, highmag=2):
    r"""Return strings representing a reasonably rounded value and error.

    Similar to the error-rounding functionality of the `sigfig` package,
    but additionally switches between decimal and scientific notation
    automatically. By default decimal notation is used when the value's
    magnitude lies in [lowmag, highmag] (i.e. roughly 0.1 to 1000);
    outside that range scientific notation is used.

    Parameters
    ----------
    value: float
        The value to be rounded.
    error: float
        The error in the value to be rounded.
    errdig: int, optional
        (default = 2) number of significant figures to keep on the error.
        The value is rounded to the least significant digit in the error.
    lowmag: int, optional
        (default = -1) magnitude below which scientific notation is used.
    highmag: int, optional
        (default = 2) magnitude above which scientific notation is used.

    Returns
    -------
    valuestr: str
        rounded value.
    errstr: str
        rounded error.
    pwroftenstr: str
        string for scientific notation exponent. Empty string if values
        returned as decimals.

    Examples
    ========
    >>> rndwitherr(12.345, 0.23)
    ('12.35', '0.23', '')
    >>> rndwitherr(983.4, 34)
    ('983', '34', '')
    >>> rndwitherr(1247.325, 1.23)
    ('1.2473', '0.0012', '3')
    >>> rndwitherr(0.2345, 0.0125)
    ('0.234', '0.013', '')
    >>> rndwitherr(3.53e-2, 2.24e-3)
    ('3.53', '0.22', '-2')
    >>> rndwitherr(83e-4, 0)
    ('8.300000000000', '0.000000000000', '-3')
    >>> rndwitherr(0, 3452)
    ('0', '3500', '')
    >>> rndwitherr(1222, 343789)
    ('1', '344', '3')
    >>> rndwitherr(1247.325, 1.23, errdig = 1, highmag = 3)
    ('1247', '1', '')
    >>> rndwitherr(3.53e-2, 2.24e-3, errdig = 1, lowmag = -2)
    ('0.035', '0.002', '')
    >>> rndwitherr(-2, -0.00034)
    Traceback (most recent call last):
    ...
    ValueError: Errors are expected to be >= 0.
    """
    import math

    if error < 0:
        raise ValueError('Errors are expected to be >= 0.')
    # Order of magnitude of the value (0 when the value itself is zero).
    magnitude = 0
    if value != 0:
        magnitude = math.floor(math.log(math.fabs(value), 10))
    if error == 0:
        # No uncertainty: keep digits down to the limit of 64-bit precision.
        round_digit = int(magnitude - 12)
    elif error < math.fabs(value) or value == 0:
        # Round to the least significant digit kept in the error.
        round_digit = int(math.floor(math.log(error, 10) - errdig + 1))
    else:
        # Error larger than the value: round both at the value's magnitude.
        round_digit = magnitude
    mantissa_value = value
    mantissa_error = error
    exponent_str = ''
    if magnitude < lowmag or magnitude > highmag:
        # Scientific notation: scale both numbers into mantissas and record
        # the shared power of ten for the caller to display.
        scale = 10 ** (-magnitude)
        mantissa_value = value * scale
        mantissa_error = error * scale
        round_digit -= magnitude
        exponent_str = str(magnitude)
    mantissa_value = round(mantissa_value, -round_digit)
    mantissa_error = round(mantissa_error, -round_digit)
    # Build a fixed-point format with exactly the number of decimals kept,
    # so trailing significant zeros are not lost on output.
    decimals = -round_digit if round_digit < 0 else 0
    fmt = '%.' + str(decimals) + 'f'
    return fmt % mantissa_value, fmt % mantissa_error, exponent_str
def output_rndwitherr(value, error, errdig=2, lowmag=-1, highmag=2,
                      style='latex'):
    r"""Render the result of ``rndwitherr`` as a single formatted string.

    Accepts the same inputs as ``rndwitherr()`` plus an output ``style``.

    Parameters
    ----------
    value: float
    error: float
    errdig: int, optional, default = 2
    lowmag: int, optional, default = -1
    highmag: int, optional, default = 2
    style: str, optional, default = 'latex', alternative 'text'

    Returns
    -------
    String representation of the value +/- the error properly rounded and in
    the format specified by `style`.

    Examples
    ========
    >>> output_rndwitherr(3.53e-2,2.24e-3)
    '(3.53\\pm0.22)\\times 10^{-2}'
    >>> output_rndwitherr(3.53e-2,2.24e-3, style = "text")
    '(3.53 +/- 0.22) X 10^-2'
    >>> output_rndwitherr(3.53e-2,2.24e-3, errdig = 1, lowmag=-2, style = "text")
    '0.035 +/- 0.002'
    >>> output_rndwitherr(3.53e-2,2.24e-3, errdig = 1, lowmag=-2, style = "string")
    Traceback (most recent call last):
    ...
    ValueError: style parameter must be either "latex" or "text".
    """
    # Validate the style before doing any rounding work.
    if style not in ('latex', 'text'):
        raise ValueError('style parameter must be either "latex" or "text".')
    valstr, errstr, expstr = rndwitherr(value, error, errdig, lowmag, highmag)
    pm = r'\pm' if style == 'latex' else ' +/- '
    if expstr:
        # Scientific notation: parenthesize the mantissa pair and append
        # the power of ten in the requested dialect.
        if style == 'latex':
            power = r'\times 10^{' + expstr + '}'
        else:
            power = ' X 10^' + expstr
        return '(' + valstr + pm + errstr + ')' + power
    return valstr + pm + errstr
def latex_rndwitherr(value, error, errdig=2, lowmag = -1, highmag = 2):
r"""
This is a convenience function to render the output of `rndwitherr()`
as a latex string. Equivalent to a call to `output_rndwitherr()` with the
style = "latex"`.
Parameters
----------
value: float
error: float
errdig: int, optional, default = 2
lowmag: int, optional, default = -1
highmag: int, optional, default = 2
Returns
-------
String for latex representation of rounded value +/- error.
Examples
========
>>> latex_rndwitherr(3.53e-2,2.24e-3, errdig = 1, lowmag=-2)
'0.035\\pm0.002'
>>> latex_rndwitherr(3.53e-2,2.24e-3)
'(3.53\\pm0.22)\\times 10^{-2}'
>>> latex_rndwitherr(1247.325, 1.23)
'(1.2473\\pm0.0012)\\times 10^{3}'
To view typeset latex output in Jupyter use:
```
from IPython.display import Math
Math(latex_rndwitherr(value, error))
```
"""
return output_rndwitherr(value, error, errdig, lowmag, highmag,
style='latex')
def text_rndwitherr(value, error, errdig=2, lowmag = -1, highmag = 2):
"""
This is a convenience function to render the output of `rndwitherr()`
as a text string. Equivalent to a call to `output_rndwitherr()` with the
style = "text".
Parameters
----------
value: float
error: float
errdig: int, optional, default = 2
lowmag: int, optional, default = -1
highmag: int, optional, default = 2
Returns
-------
String representation of rounded value +/- error.
Examples
========
>>> text_rndwitherr(3.53e-2,2.24e-3, errdig = 1, lowmag=-2)
'0.035 +/- 0.002'
>>> text_rndwitherr(3.53e-2,2.24e-3)
'(3.53 +/- 0.22) X 10^-2'
>>> text_rndwitherr(1247.325, 1.23)
'(1.2473 +/- 0.0012) X 10^3'
"""
return output_rndwitherr(value, error, errdig, lowmag, highmag,
style='text') | /round_using_error-1.2.0-py3-none-any.whl/round_using_error/round_using_error.py | 0.924133 | 0.9455 | round_using_error.py | pypi |
# Move a file in the safest way possible::
# >>> from RoundBox.core.files.move import file_move_safe
# >>> file_move_safe("/tmp/old_file", "/tmp/new_file")
import errno
import os
from shutil import copystat
from RoundBox.core.files import locks
__all__ = ["file_move_safe"]
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, "samefile"):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return os.path.normcase(os.path.abspath(src)) == os.path.normcase(os.path.abspath(dst))
def file_move_safe(old_file_name, new_file_name, chunk_size=1024 * 64, allow_overwrite=False):
"""
Move a file from one location to another in the safest way possible.
First, try ``os.rename``, which is simple but will break across filesystems.
If that fails, stream manually from one file to another in pure Python.
If the destination file exists and ``allow_overwrite`` is ``False``, raise
``FileExistsError``.
"""
# There's no reason to move if we don't have to.
if _samefile(old_file_name, new_file_name):
return
try:
if not allow_overwrite and os.access(new_file_name, os.F_OK):
raise FileExistsError(
"Destination file %s exists and allow_overwrite is False." % new_file_name
)
os.rename(old_file_name, new_file_name)
return
except OSError:
# OSError happens with os.rename() if moving to another filesystem or
# when moving opened files on certain operating systems.
pass
# first open the old file, so that it won't go away
with open(old_file_name, "rb") as old_file:
# now open the new file, not forgetting allow_overwrite
fd = os.open(
new_file_name,
(
os.O_WRONLY
| os.O_CREAT
| getattr(os, "O_BINARY", 0)
| (os.O_EXCL if not allow_overwrite else 0)
),
)
try:
locks.lock(fd, locks.LOCK_EX)
current_chunk = None
while current_chunk != b"":
current_chunk = old_file.read(chunk_size)
os.write(fd, current_chunk)
finally:
locks.unlock(fd)
os.close(fd)
try:
copystat(old_file_name, new_file_name)
except PermissionError as e:
# Certain filesystems (e.g. CIFS) fail to copy the file's metadata if
# the type of the destination filesystem isn't the same as the source
# filesystem; ignore that.
if e.errno != errno.EPERM:
raise
try:
os.remove(old_file_name)
except PermissionError as e:
# Certain operating systems (Cygwin and Windows)
# fail when deleting opened files, ignore it. (For the
# systems where this happens, temporary files will be auto-deleted
# on close anyway.)
if getattr(e, "winerror", 0) != 32:
raise | /RoundBox-2022.4.21b0-py3-none-any.whl/RoundBox/core/files/move.py | 0.425963 | 0.204719 | move.py | pypi |
# Portable file locking utilities.
# Based partially on an example by Jonathan Feignberg in the Python
# Cookbook [1] (licensed under the Python Software License) and a ctypes port by
# Anatoly Techtonik for Roundup [2] (license [3]).
# [1] https://code.activestate.com/recipes/65203/
# [2] https://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py # NOQA
# [3] https://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
# Example Usage::
# >>> from RoundBox.core.files import locks
# >>> with open('./file', 'wb') as f:
# ... locks.lock(f, locks.LOCK_EX)
# ... f.write('Django')
import os
__all__ = ("LOCK_EX", "LOCK_SH", "LOCK_NB", "lock", "unlock")
def _fd(f):
"""Get a file descriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, "fileno") else f
if os.name == "nt":
    # Windows: advisory locking via the Win32 LockFileEx / UnlockFileEx
    # API, reached through ctypes.
    import msvcrt
    from ctypes import (
        POINTER,
        Structure,
        Union,
        byref,
        c_int64,
        c_ulong,
        c_void_p,
        sizeof,
        windll,
    )
    from ctypes.wintypes import BOOL, DWORD, HANDLE

    LOCK_SH = 0  # the default
    LOCK_NB = 0x1  # LOCKFILE_FAIL_IMMEDIATELY
    LOCK_EX = 0x2  # LOCKFILE_EXCLUSIVE_LOCK

    # --- Adapted from the pyserial project ---
    # detect size of ULONG_PTR (64-bit vs 32-bit process)
    if sizeof(c_ulong) != sizeof(c_void_p):
        ULONG_PTR = c_int64
    else:
        ULONG_PTR = c_ulong
    PVOID = c_void_p

    # --- Union inside Structure by stackoverflow:3480240 ---
    class _OFFSET(Structure):
        _fields_ = [("Offset", DWORD), ("OffsetHigh", DWORD)]

    class _OFFSET_UNION(Union):
        _anonymous_ = ["_offset"]
        _fields_ = [("_offset", _OFFSET), ("Pointer", PVOID)]

    class OVERLAPPED(Structure):
        _anonymous_ = ["_offset_union"]
        _fields_ = [
            ("Internal", ULONG_PTR),
            ("InternalHigh", ULONG_PTR),
            ("_offset_union", _OFFSET_UNION),
            ("hEvent", HANDLE),
        ]

    LPOVERLAPPED = POINTER(OVERLAPPED)

    # --- Define function prototypes for extra safety ---
    LockFileEx = windll.kernel32.LockFileEx
    LockFileEx.restype = BOOL
    LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
    UnlockFileEx = windll.kernel32.UnlockFileEx
    UnlockFileEx.restype = BOOL
    UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]

    def lock(f, flags):
        """Lock *f*; return True on success, False otherwise."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        # Lock a 0xFFFF0000-byte region; must match the range in unlock().
        ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)

    def unlock(f):
        """Release a lock previously taken with lock()."""
        hfile = msvcrt.get_osfhandle(_fd(f))
        overlapped = OVERLAPPED()
        ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
        return bool(ret)

else:
    try:
        import fcntl

        LOCK_SH = fcntl.LOCK_SH  # shared lock
        LOCK_NB = fcntl.LOCK_NB  # non-blocking
        LOCK_EX = fcntl.LOCK_EX
    except (ImportError, AttributeError):
        # File locking is not supported.
        LOCK_EX = LOCK_SH = LOCK_NB = 0

        # Dummy functions that don't do anything.
        def lock(f, flags):
            # File is not locked
            return False

        def unlock(f):
            # File is unlocked
            return True

    else:

        def lock(f, flags):
            """Lock *f*; return False if LOCK_NB was given and it would block."""
            try:
                fcntl.flock(_fd(f), flags)
                return True
            except BlockingIOError:
                return False

        def unlock(f):
            """Release a lock previously taken with lock()."""
            fcntl.flock(_fd(f), fcntl.LOCK_UN)
            return True | /RoundBox-2022.4.21b0-py3-none-any.whl/RoundBox/core/files/locks.py | 0.68616 | 0.269374 | locks.py | pypi |
# Message severity levels (numerically aligned with the stdlib logging levels).
DEBUG = 10
INFO = 20
WARNING = 30
ERROR = 40
CRITICAL = 50


class CheckMessage:
    """A single message produced by the system check framework."""

    def __init__(self, level, msg: str, hint: str = None, obj=None, _id=None):
        """
        :param level: The severity of the message. Use one of the predefined values:
            DEBUG, INFO, WARNING, ERROR, CRITICAL. If the level is greater or equal
            to ERROR, then RoundBox will prevent management commands from executing.
            Messages with level lower than ERROR (i.e. warnings) are reported to
            the console, but can be silenced.
        :param msg: A short (less than 80 characters) string describing the problem.
            The string should not contain newlines.
        :param hint: A single-line string providing a hint for fixing the problem,
            or None when no hint is needed.
        :param obj: Optional object providing context for the message; anything
            defining a __str__() method. Its text precedes the message.
        :param _id: Optional unique identifier for the issue, following the
            pattern applabel.X001 where X is one of the letters CEWID.
        """
        if not isinstance(level, int):
            raise TypeError("The first argument should be level.")
        self.level = level
        self.msg = msg
        self.hint = hint
        self.obj = obj
        self.id = _id

    def _as_tuple(self):
        # Canonical ordering of the attributes that define equality.
        return (self.level, self.msg, self.hint, self.obj, self.id)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self._as_tuple() == other._as_tuple()

    def __str__(self):
        obj = "?" if self.obj is None else str(self.obj)
        _id = f"({self.id}) " if self.id else ""
        hint = f"\n\tHINT: {self.hint}" if self.hint else ""
        return f"{obj}: {_id}{self.msg}{hint}"

    def __repr__(self):
        attrs = ", ".join(
            f"{name}={getattr(self, name)!r}"
            for name in ("level", "msg", "hint", "obj", "id")
        )
        return f"<{self.__class__.__name__}: {attrs}>"

    def is_serious(self, level=ERROR):
        """Return True if this message is at or above *level*."""
        return self.level >= level

    def is_silenced(self):
        """Return True if this message's id is listed in SILENCED_SYSTEM_CHECKS."""
        from RoundBox.conf.project_settings import settings

        return self.id in settings.SILENCED_SYSTEM_CHECKS
class Debug(CheckMessage):
    """Check message with DEBUG severity."""

    def __init__(self, *args, **kwargs):
        super().__init__(DEBUG, *args, **kwargs)


class Info(CheckMessage):
    """Check message with INFO severity."""

    def __init__(self, *args, **kwargs):
        super().__init__(INFO, *args, **kwargs)


class Warning(CheckMessage):
    """Check message with WARNING severity.

    NOTE: deliberately shadows the builtin ``Warning`` inside this module.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(WARNING, *args, **kwargs)


class Error(CheckMessage):
    """Check message with ERROR severity (prevents command execution)."""

    def __init__(self, *args, **kwargs):
        super().__init__(ERROR, *args, **kwargs)


class Critical(CheckMessage):
    """Check message with CRITICAL severity (prevents command execution)."""

    def __init__(self, *args, **kwargs):
        super().__init__(CRITICAL, *args, **kwargs) | /RoundBox-2022.4.21b0-py3-none-any.whl/RoundBox/core/checks/messages.py | 0.662469 | 0.299912 | messages.py | pypi |
import logging
from dataclasses import dataclass
from datetime import date, datetime
from typing import Final, TypedDict
from RoundBox.core.hass.helpers.typing import StateType
from RoundBox.utils.backports.strenum.enum import StrEnum
logger: Final = logging.getLogger(__name__)
class ExtraOptions(TypedDict):
    """Extra options or information for creating a more customizable interface."""

    # No keys defined yet; placeholder for integration-specific options.
    pass


class DeviceInfo(TypedDict):
    """Entity device information for device registry."""

    # NOTE(review): every key is declared required here; if callers build
    # partial dicts this presumably needs ``total=False`` — confirm usage.
    default_manufacturer: str
    default_model: str
    default_name: str
    name: str | None
    alias: str | None
    model: str | None
    serial_number: str | None
    manufacturer: str | None
    sw_version: str | None
    hw_version: str | None


class SensorDeviceClass(StrEnum):
    """Device class for sensors.

    Each value is the canonical string identifier for the quantity a
    sensor measures; the comment on each member gives the typical unit.
    """

    # apparent power (VA)
    APPARENT_POWER = "apparent_power"

    # Air Quality Index
    AQI = "aqi"

    # % of battery that is left
    BATTERY = "battery"

    # ppm (parts per million) Carbon Monoxide gas concentration
    CO = "carbon_monoxide"

    # ppm (parts per million) Carbon Dioxide gas concentration
    CO2 = "carbon_dioxide"

    # current (A)
    CURRENT = "current"

    # date (ISO8601)
    DATE = "date"

    # energy (Wh, kWh, MWh)
    ENERGY = "energy"

    # frequency (Hz, kHz, MHz, GHz)
    FREQUENCY = "frequency"

    # gas (m³ or ft³)
    GAS = "gas"

    # % of humidity in the air
    HUMIDITY = "humidity"

    # current light level (lx/lm)
    ILLUMINANCE = "illuminance"

    # Amount of money (currency)
    MONETARY = "monetary"

    # Amount of NO2 (µg/m³)
    NITROGEN_DIOXIDE = "nitrogen_dioxide"

    # Amount of NO (µg/m³)
    NITROGEN_MONOXIDE = "nitrogen_monoxide"

    # Amount of N2O (µg/m³)
    NITROUS_OXIDE = "nitrous_oxide"

    # Amount of O3 (µg/m³)
    OZONE = "ozone"

    # Particulate matter <= 0.1 μm (µg/m³)
    PM1 = "pm1"

    # Particulate matter <= 10 μm (µg/m³)
    PM10 = "pm10"

    # Particulate matter <= 2.5 μm (µg/m³)
    PM25 = "pm25"

    # power factor (%)
    POWER_FACTOR = "power_factor"

    # power (W/kW)
    POWER = "power"

    # pressure (hPa/mbar)
    PRESSURE = "pressure"

    # reactive power (var)
    REACTIVE_POWER = "reactive_power"

    # signal strength (dB/dBm)
    SIGNAL_STRENGTH = "signal_strength"

    # Amount of SO2 (µg/m³)
    SULPHUR_DIOXIDE = "sulphur_dioxide"

    # temperature (C/F)
    TEMPERATURE = "temperature"

    # timestamp (ISO8601)
    TIMESTAMP = "timestamp"

    # Amount of VOC (µg/m³)
    VOLATILE_ORGANIC_COMPOUNDS = "volatile_organic_compounds"

    # voltage (V)
    VOLTAGE = "voltage"


class SensorStateClass(StrEnum):
    """State class for sensors."""

    # The state represents a measurement in present time
    MEASUREMENT = "measurement"

    # The state represents a total amount, e.g. net energy consumption
    TOTAL = "total"

    # The state represents a monotonically increasing total, e.g. an amount of consumed gas
    TOTAL_INCREASING = "total_increasing"


@dataclass
class SensorEntityDescription:
    """A class that describes sensor entities."""

    # Unique key identifying the described sensor; the only required field.
    key: str
    device_class: SensorDeviceClass | str | None = None
    name: str | None = None
    native_unit_of_measurement: str | None = None
    state_class: SensorStateClass | str | None = None
    unit_of_measurement: str | None = None
class SensorEntity:
"""Base class for sensor entities."""
entity_description: SensorEntityDescription
# Entity Properties
_attr_extra_options: ExtraOptions | str | None = None
_attr_device_info: DeviceInfo | None = None
_attr_name: str | None
_attr_unique_id: str | None = None
_attr_unit_of_measurement: str | None
_attr_device_class: SensorDeviceClass | str | None
_attr_native_unit_of_measurement: str | None
_attr_native_value: StateType | date | datetime = None
_attr_state_class: SensorStateClass | str | None
_temperature_conversion_reported = False
_sensor_option_unit_of_measurement: str | None = None
@property
def native_value(self) -> StateType | date | datetime:
"""Return the value reported by the sensor."""
return self._attr_native_value
@property
def native_unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of the sensor, if any."""
if hasattr(self, "_attr_native_unit_of_measurement"):
return self._attr_native_unit_of_measurement
if hasattr(self, "entity_description"):
return self.entity_description.native_unit_of_measurement
return None
@property
def unit_of_measurement(self) -> str | None:
"""Return the unit of measurement of this entity, if any.
:return:
"""
if hasattr(self, "_attr_unit_of_measurement"):
return self._attr_unit_of_measurement
if hasattr(self, "entity_description"):
return self.entity_description.unit_of_measurement
return None
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}>"
@property
def unique_id(self) -> str | None:
"""Return a unique ID."""
return self._attr_unique_id
@property
def name(self) -> str | None:
"""Return the name of the entity.
:return:
"""
if hasattr(self, "_attr_name"):
return self._attr_name
if hasattr(self, "entity_description"):
return self.entity_description.name
return None
@property
def device_info(self) -> DeviceInfo | None:
"""Return device specific attributes.
Implemented by platform classes.
:return:
"""
return self._attr_device_info
@property
def device_class(self) -> str | None:
"""Return the class of this device, from component DEVICE_CLASSES.
:return:
"""
if hasattr(self, "_attr_device_class"):
return self._attr_device_class
if hasattr(self, "entity_description"):
return self.entity_description.device_class
return None | /RoundBox-2022.4.21b0-py3-none-any.whl/RoundBox/core/hass/components/sensor/__init__.py | 0.910754 | 0.203391 | __init__.py | pypi |
import argparse
import os
import sys
from argparse import ArgumentParser, HelpFormatter
from io import TextIOBase
from RoundBox.const import __version__
from RoundBox.core import checks
from RoundBox.core.cliparser.color import color_style, no_style
# Sentinel value for requires_system_checks meaning "run every registered check".
ALL_CHECKS = "__all__"
class CommandError(Exception):
    """
    Exception class indicating a problem while executing a management
    command.

    If this exception is raised during the execution of a management
    command, it will be caught and turned into a nicely-printed error
    message to the appropriate output stream (i.e., stderr); as a
    result, raising this exception (with a sensible description of the
    error) is the preferred way to indicate that something has gone
    wrong in the execution of a command.
    """

    def __init__(self, *args, returncode=1, **kwargs):
        # Exit status the process should terminate with (used by run_from_argv).
        self.returncode = returncode
        super().__init__(*args, **kwargs)


class SystemCheckError(CommandError):
    """
    The system check framework detected unrecoverable errors.
    """

    pass


class CommandParser(ArgumentParser):
    """
    Customized ArgumentParser class to improve some error messages and prevent
    SystemExit in several occasions, as SystemExit is unacceptable when a
    command is called programmatically.
    """

    def __init__(self, *, missing_args_message=None, called_from_command_line=None, **kwargs):
        self.missing_args_message = missing_args_message
        self.called_from_command_line = called_from_command_line
        super().__init__(**kwargs)

    def parse_args(self, args=None, namespace=None):
        # Catch missing arguments for a better error message. The previous
        # condition (``not (args or any(... for arg in args))``) iterated
        # ``args`` even when it was None (argparse's default), raising
        # TypeError instead of the intended error; ``not args`` behaves
        # identically for sequences and handles None gracefully.
        if self.missing_args_message and not args:
            self.error(self.missing_args_message)
        return super().parse_args(args, namespace)

    def error(self, message):
        """Raise CommandError instead of exiting when called programmatically."""
        if self.called_from_command_line:
            super().error(message)
        else:
            raise CommandError("Error: %s" % message)
def handle_default_options(options):
    """
    Include any default options that all commands should accept here
    so that ManagementUtility can handle them before searching for
    user commands.
    """
    settings_module = options.settings
    if settings_module:
        os.environ["ROUNDBOX_SETTINGS_MODULE"] = settings_module
    extra_path = options.pythonpath
    if extra_path:
        # Give the user-supplied directory priority over everything else.
        sys.path.insert(0, extra_path)
class RoundBoxHelpFormatter(HelpFormatter):
    """
    Customized formatter so that command-specific arguments appear in the
    --help output before arguments common to all commands.
    """

    show_last = {
        "--version",
        "--verbosity",
        "--traceback",
        "--settings",
        "--pythonpath",
        "--no-color",
        "--force-color",
        "--skip-checks",
    }

    def _reordered_actions(self, actions):
        # Stable sort: actions whose option strings intersect show_last sink
        # to the end; everything else keeps its relative order.
        def sorts_last(action):
            return not self.show_last.isdisjoint(action.option_strings)

        return sorted(actions, key=sorts_last)

    def add_usage(self, usage, actions, *args, **kwargs):
        super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs)

    def add_arguments(self, actions):
        super().add_arguments(self._reordered_actions(actions))
class OutputWrapper(TextIOBase):
    """
    Wrapper around stdout/stderr that appends a line ending to each message
    and optionally pipes the text through a styling function before writing.
    """

    @property
    def style_func(self):
        return self._style_func

    @style_func.setter
    def style_func(self, style_func):
        # Only style output headed for an interactive terminal; otherwise
        # fall back to the identity function.
        self._style_func = style_func if style_func and self.isatty() else (lambda x: x)

    def __init__(self, out, ending="\n"):
        # _out must be assigned before style_func: the setter calls isatty().
        self._out = out
        self.style_func = None
        self.ending = ending

    def __getattr__(self, name):
        # Delegate any other attribute access to the wrapped stream.
        return getattr(self._out, name)

    def flush(self):
        if hasattr(self._out, "flush"):
            self._out.flush()

    def isatty(self):
        return hasattr(self._out, "isatty") and self._out.isatty()

    def write(self, msg="", style_func=None, ending=None):
        terminator = self.ending if ending is None else ending
        if terminator and not msg.endswith(terminator):
            msg += terminator
        apply_style = style_func or self.style_func
        self._out.write(apply_style(msg))
class BaseCommand:
"""The base class from which all management commands ultimately
derive.
Use this class if you want access to all the mechanisms which
parse the command-line arguments and work out what code to call in
response; if you don't need to change any of that behavior,
consider using one of the subclasses defined in this file.
If you are interested in overriding/customizing various aspects of
the command-parsing and -execution behavior, the normal flow works
as follows:
1. ``manage.py`` loads the command class
and calls its ``run_from_argv()`` method.
2. The ``run_from_argv()`` method calls ``create_parser()`` to get
an ``ArgumentParser`` for the arguments, parses them, performs
any environment changes requested by options like
``pythonpath``, and then calls the ``execute()`` method,
passing the parsed arguments.
3. The ``execute()`` method attempts to carry out the command by
calling the ``handle()`` method with the parsed arguments; any
output produced by ``handle()`` will be printed to standard
output and, if the command is intended to produce a block of
SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``.
4. If ``handle()`` or ``execute()`` raised any exception (e.g.
``CommandError``), ``run_from_argv()`` will instead print an error
message to ``stderr``.
Thus, the ``handle()`` method is typically the starting point for
subclasses; many built-in commands and command types either place
all of their logic in ``handle()``, or perform some additional
parsing work in ``handle()`` and then delegate from it to more
specialized methods as needed.
Several attributes affect behavior at various steps along the way:
``help``
A short description of the command, which will be printed in
help messages.
``requires_system_checks``
A list or tuple of tags, e.g. [Tags.files, Tags.version]. System
checks registered in the chosen tags will be checked for errors prior
to executing the command. The value '__all__' can be used to specify
that all system checks should be performed. Default value is '__all__'.
To validate an individual application's models
rather than all applications' models, call
``self.check(app_configs)`` from ``handle()``, where ``app_configs``
is the list of application's configuration provided by the
app registry.
``stealth_options``
A tuple of any options the command uses which aren't defined by the
argument parser.
"""
# Metadata about this command.
help = ""
# Configuration shortcuts that alter various logic.
_called_from_command_line = False
requires_system_checks = "__all__"
# Arguments, common to all commands, which aren't defined by the argument parser.
base_stealth_options = ("stderr", "stdout")
# Command-specific options not defined by the argument parser.
stealth_options = ()
suppressed_base_arguments = set()
    def __init__(self, stdout=None, stderr=None, no_color=False, force_color=False):
        """
        Wrap the output streams and configure terminal coloring.

        :param stdout: stream for normal output (defaults to sys.stdout).
        :param stderr: stream for error output (defaults to sys.stderr).
        :param no_color: disable colorized output.
        :param force_color: force colorization even for non-TTY streams.
        :raises CommandError: if both no_color and force_color are given.
        :raises TypeError: if requires_system_checks is not a list/tuple
            or the '__all__' sentinel.
        """
        self.stdout = OutputWrapper(stdout or sys.stdout)
        self.stderr = OutputWrapper(stderr or sys.stderr)
        if no_color and force_color:
            raise CommandError("'no_color' and 'force_color' can't be used together.")
        if no_color:
            self.style = no_style()
        else:
            self.style = color_style(force_color)
            # Errors are rendered with the style's ERROR coloring.
            self.stderr.style_func = self.style.ERROR
        if (
            not isinstance(self.requires_system_checks, (list, tuple))
            and self.requires_system_checks != ALL_CHECKS
        ):
            raise TypeError("requires_system_checks must be a list or tuple.")

    def get_version(self):
        """
        Return the RoundBox version, which should be correct for all built-in
        RoundBox commands. User-supplied commands can override this method to
        return their own version.
        """
        return __version__

    def create_parser(self, prog_name, subcommand, **kwargs):
        """
        Create and return the ``ArgumentParser`` which will be used to
        parse the arguments to this command.

        :param prog_name: name of the executable (e.g. ``manage.py``).
        :param subcommand: name of the subcommand being run.
        :param kwargs: extra keyword arguments forwarded to CommandParser.
        """
        parser = CommandParser(
            prog="%s %s" % (os.path.basename(prog_name), subcommand),
            description=self.help or None,
            formatter_class=RoundBoxHelpFormatter,
            missing_args_message=getattr(self, "missing_args_message", None),
            called_from_command_line=getattr(self, "_called_from_command_line", None),
            **kwargs,
        )
        # Common options go through add_base_argument() so that subclasses
        # can hide them via suppressed_base_arguments.
        self.add_base_argument(
            parser,
            "--version",
            action="version",
            version=self.get_version(),
            help="Show program's version number and exit.",
        )
        self.add_base_argument(
            parser,
            "-v",
            "--verbosity",
            default=1,
            type=int,
            choices=[0, 1, 2, 3],
            help=(
                "Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, "
                "3=very verbose output"
            ),
        )
        self.add_base_argument(
            parser,
            "--settings",
            help=(
                "The Python path to a settings module, e.g. "
                '"myproject.settings.main". If this isn\'t provided, the '
                "ROUND_BOX_SETTINGS_MODULE environment variable will be used."
            ),
        )
        self.add_base_argument(
            parser,
            "--pythonpath",
            help="A directory to add to the Python path, e.g. " '"/home/RoundBox/myproject".',
        )
        self.add_base_argument(
            parser,
            "--traceback",
            action="store_true",
            help="Raise on CommandError exceptions.",
        )
        self.add_base_argument(
            parser,
            "--no-color",
            action="store_true",
            help="Don't colorize the command output.",
        )
        self.add_base_argument(
            parser,
            "--force-color",
            action="store_true",
            help="Force colorization of the command output.",
        )
        # --skip-checks only makes sense when checks would otherwise run.
        if self.requires_system_checks:
            parser.add_argument(
                "--skip-checks",
                action="store_true",
                help="Skip system checks.",
            )
        self.add_arguments(parser)
        return parser

    def add_arguments(self, parser):
        """
        Entry point for subclassed commands to add custom arguments.
        """
        pass
def add_base_argument(self, parser, *args, **kwargs):
"""
Call the parser's add_argument() method, suppressing the help text
according to BaseCommand.suppressed_base_arguments.
"""
for arg in args:
if arg in self.suppressed_base_arguments:
kwargs["help"] = argparse.SUPPRESS
break
parser.add_argument(*args, **kwargs)
    def print_help(self, prog_name, subcommand):
        """
        Print the help message for this command, derived from
        ``self.usage()``.

        :param prog_name: name of the executable.
        :param subcommand: subcommand the help text is generated for.
        """
        parser = self.create_parser(prog_name, subcommand)
        parser.print_help()
def run_from_argv(self, argv):
"""
Set up any environment changes requested (e.g., Python path
and RoundBox settings), then run this command. If the
command raises a ``CommandError``, intercept it and print it sensibly
to stderr. If the ``--traceback`` option is present or the raised
``Exception`` is not ``CommandError``, raise it.
"""
self._called_from_command_line = True
parser = self.create_parser(argv[0], argv[1])
options = parser.parse_args(argv[2:])
cmd_options = vars(options)
# Move positional args out of options to mimic legacy optparse
args = cmd_options.pop("args", ())
handle_default_options(options)
try:
self.execute(*args, **cmd_options)
except CommandError as e:
if options.traceback:
raise
# SystemCheckError takes care of its own formatting.
if isinstance(e, SystemCheckError):
self.stderr.write(str(e), lambda x: x)
else:
self.stderr.write("%s: %s" % (e.__class__.__name__, e))
sys.exit(e.returncode)
finally:
pass
    def execute(self, *args, **options):
        """
        Try to execute this command, performing system checks if needed (as
        controlled by the ``requires_system_checks`` attribute, except if
        force-skipped).

        :param args: positional arguments forwarded to ``handle()``.
        :param options: parsed options; ``force_color``, ``no_color``,
            ``stdout``, ``stderr`` and ``skip_checks`` are consumed here,
            everything else is forwarded to ``handle()``.
        :return: whatever ``handle()`` returns.
        :raises CommandError: if --no-color and --force-color are combined.
        """
        if options["force_color"] and options["no_color"]:
            raise CommandError("The --no-color and --force-color options can't be used together.")
        if options["force_color"]:
            self.style = color_style(force_color=True)
        elif options["no_color"]:
            # Disable styled stdout output and drop the stderr style function.
            self.style = no_style()
            self.stderr.style_func = None
        # Allow callers (e.g. programmatic invocation) to redirect the streams.
        if options.get("stdout"):
            self.stdout = OutputWrapper(options["stdout"])
        if options.get("stderr"):
            self.stderr = OutputWrapper(options["stderr"])

        if self.requires_system_checks and not options["skip_checks"]:
            if self.requires_system_checks == ALL_CHECKS:
                self.check()
            else:
                # Restrict the checks to the tags the command declared.
                self.check(tags=self.requires_system_checks)
        output = self.handle(*args, **options)
        return output
    def check(
        self,
        app_configs=None,
        tags=None,
        display_num_errors=False,
        include_deployment_checks=False,
        fail_level=checks.ERROR,
    ):
        """
        Use the system check framework to validate entire RoundBox project.
        Raise CommandError for any serious message (error or critical errors).
        If there are only light messages (like warnings), print them to stderr
        and don't raise an exception.

        :param app_configs: limit checks to these app configs (None = all apps).
        :param tags: limit checks to the given check tags.
        :param display_num_errors: append a "System check identified ..." summary.
        :param include_deployment_checks: also run deployment-only checks.
        :param fail_level: minimum severity considered serious enough to raise.
        :raises SystemCheckError: when any non-silenced issue reaches ``fail_level``.
        """
        all_issues = checks.run_checks(
            app_configs=app_configs,
            tags=tags,
            include_deployment_checks=include_deployment_checks,
        )
        header, body, footer = "", "", ""
        visible_issue_count = 0  # excludes silenced warnings

        if all_issues:
            # Bucket the non-silenced issues into severity bands.
            debugs = [e for e in all_issues if e.level < checks.INFO and not e.is_silenced()]
            infos = [
                e
                for e in all_issues
                if checks.INFO <= e.level < checks.WARNING and not e.is_silenced()
            ]
            warnings = [
                e
                for e in all_issues
                if checks.WARNING <= e.level < checks.ERROR and not e.is_silenced()
            ]
            errors = [
                e
                for e in all_issues
                if checks.ERROR <= e.level < checks.CRITICAL and not e.is_silenced()
            ]
            criticals = [
                e for e in all_issues if checks.CRITICAL <= e.level and not e.is_silenced()
            ]
            # Most severe group is rendered first.
            sorted_issues = [
                (criticals, "CRITICALS"),
                (errors, "ERRORS"),
                (warnings, "WARNINGS"),
                (infos, "INFOS"),
                (debugs, "DEBUGS"),
            ]
            for issues, group_name in sorted_issues:
                if issues:
                    visible_issue_count += len(issues)
                    # Serious issues get error styling, the rest warning styling.
                    formatted = (
                        self.style.ERROR(str(e)) if e.is_serious() else self.style.WARNING(str(e))
                        for e in issues
                    )
                    formatted = "\n".join(sorted(formatted))
                    body += "\n%s:\n%s\n" % (group_name, formatted)
        if visible_issue_count:
            header = "System check identified some issues:\n"
        if display_num_errors:
            if visible_issue_count:
                footer += "\n"
            footer += "System check identified %s (%s silenced)." % (
                "no issues"
                if visible_issue_count == 0
                else "1 issue"
                if visible_issue_count == 1
                else "%s issues" % visible_issue_count,
                len(all_issues) - visible_issue_count,
            )
        if any(e.is_serious(fail_level) and not e.is_silenced() for e in all_issues):
            # At least one non-silenced issue reached fail_level: abort loudly.
            msg = self.style.ERROR("SystemCheckError: %s" % header) + body + footer
            raise SystemCheckError(msg)
        else:
            msg = header + body + footer
            if msg:
                if visible_issue_count:
                    # Styling is already embedded above, so write stderr unstyled.
                    self.stderr.write(msg, lambda x: x)
                else:
                    self.stdout.write(msg)
def handle(self, *args, **options):
"""
The actual logic of the command. Subclasses must implement
this method.
"""
raise NotImplementedError("subclasses of BaseCommand must provide a handle() method")
class AppCommand(BaseCommand):
    """
    A management command which takes one or more installed application labels
    as arguments, and does something with each of them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_app_config()``, which will be called once for each application.
    """

    missing_args_message = "Enter at least one application label."

    def add_arguments(self, parser):
        """Register the required positional ``app_label`` arguments."""
        parser.add_argument(
            "args",
            metavar="app_label",
            nargs="+",
            help="One or more application label.",
        )

    def handle(self, *app_labels, **options):
        """Resolve each label to an AppConfig and run ``handle_app_config()``.

        :param app_labels: labels of installed applications.
        :return: the per-app outputs joined with newlines.
        :raises CommandError: if a label does not match an installed app.
        """
        from RoundBox.apps import apps

        try:
            app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
        except (LookupError, ImportError) as e:
            # Chain the original error so the root cause shows in tracebacks.
            raise CommandError(
                "%s. Are you sure your INSTALLED_APPS setting is correct?" % e
            ) from e
        output = []
        for app_config in app_configs:
            app_output = self.handle_app_config(app_config, **options)
            if app_output:
                output.append(app_output)
        return "\n".join(output)

    def handle_app_config(self, app_config, **options):
        """
        Perform the command's actions for app_config, an AppConfig instance
        corresponding to an application label given on the command line.
        """
        raise NotImplementedError(
            "Subclasses of AppCommand must provide a handle_app_config() method."
        )
class LabelCommand(BaseCommand):
    """
    A management command which takes one or more arbitrary arguments
    (labels) on the command line, and does something with each of
    them.

    Rather than implementing ``handle()``, subclasses must implement
    ``handle_label()``, which will be called once for each label.

    If the arguments should be names of installed applications, use
    ``AppCommand`` instead.
    """

    label = "label"
    missing_args_message = "Enter at least one %s." % label

    def add_arguments(self, parser):
        """Register the required positional label arguments."""
        parser.add_argument("args", metavar=self.label, nargs="+")

    def handle(self, *labels, **options):
        """Run ``handle_label()`` for every label, joining truthy results."""
        pieces = [self.handle_label(label, **options) for label in labels]
        return "\n".join(piece for piece in pieces if piece)

    def handle_label(self, label, **options):
        """
        Perform the command's actions for ``label``, which will be the
        string as given on the command line.
        """
        raise NotImplementedError(
            "subclasses of LabelCommand must provide a handle_label() method"
        )
import importlib.util
import os
import sys
from imp import find_module  # NOQA: unused here; ``imp`` was removed in Python 3.12
from typing import Optional  # NOQA

from RoundBox.apps import apps
# Module-level cache placeholder for discovered jobs.
# NOTE(review): get_jobs() assigns a *local* ``_jobs`` and never writes this
# global back, so this cache appears unused — confirm intended behavior.
_jobs = None
def noneimplementation(meth):
    """Decorator that strips an implementation entirely.

    :param meth: the decorated method (ignored).
    :return: always ``None``, replacing the decorated attribute.
    """
    return None
class JobError(Exception):
    """Raised when a job module cannot be imported or lacks a ``Job`` class."""

    pass
class BaseJob:
    """Common base class for jobs.

    Subclasses set ``when`` to a schedule name (e.g. ``"daily"``) and
    implement ``execute()``.
    """

    # Human-readable description shown by print_jobs().
    help = "undefined job description."
    when = None  # type: Optional[str]

    def execute(self):
        """Run the job's work; subclasses must override."""
        raise NotImplementedError("Job needs to implement the execute method")
# Schedule marker subclasses: subclass the one matching how often the job
# should run; the ``when`` value is what get_jobs()/import_job() match on.
class MinutelyJob(BaseJob):
    when = "minutely"


class QuarterHourlyJob(BaseJob):
    when = "quarter_hourly"


class HourlyJob(BaseJob):
    when = "hourly"


class DailyJob(BaseJob):
    when = "daily"


class WeeklyJob(BaseJob):
    when = "weekly"


class MonthlyJob(BaseJob):
    when = "monthly"


class YearlyJob(BaseJob):
    when = "yearly"
def my_import(name):
    """Import a dotted module path and return the leaf module.

    :param name: dotted module path, e.g. ``"myapp.jobs.daily.job1"``.
    :return: the imported leaf module object.
    :raises JobError: if the import fails.
    """
    try:
        module = __import__(name)
    except ImportError as err:
        raise JobError("Failed to import %s with error %s" % (name, err))
    # __import__ returns the top-level package; walk down to the leaf.
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
def find_jobs(jobs_dir):
    """List candidate job module names (``.py`` files) in ``jobs_dir``.

    :param jobs_dir: directory to scan.
    :return: module names without the ``.py`` suffix, skipping files that
        start with ``_``; an empty list when the directory is unreadable.
    """
    try:
        entries = os.listdir(jobs_dir)
    except OSError:
        return []
    names = []
    for entry in entries:
        if entry.endswith(".py") and not entry.startswith('_'):
            names.append(entry[:-3])
    return names
def find_job_module(app_name, when=None):
    """Locate the directory of an app's ``jobs`` (sub)package.

    :param app_name: dotted path of the application package.
    :param when: optional schedule subpackage name (e.g. ``"daily"``).
    :return: filesystem path of the ``jobs`` package directory (or of the
        ``jobs.<when>`` subpackage when ``when`` is given).
    :raises ImportError: if the package cannot be found.
    """
    # ``imp.find_module`` was removed in Python 3.12; use importlib instead.
    parts = app_name.split('.')
    parts.append('jobs')
    if when:
        parts.append(when)
    modname = '.'.join(parts)
    spec = importlib.util.find_spec(modname)
    if spec is None:
        # find_spec() returns None for a missing leaf; keep the old
        # ImportError contract so get_jobs() continues scanning.
        raise ImportError("No module named %s" % modname)
    if spec.submodule_search_locations:
        # A package: return its directory so it can be scanned for job files.
        return list(spec.submodule_search_locations)[0]
    return spec.origin
def import_job(app_name, name, when=None):
    """Import a job module and return its ``Job`` class.

    :param app_name: dotted application package name.
    :param name: job module name inside the ``jobs`` package.
    :param when: optional schedule subpackage the job lives in.
    :return: the module's ``Job`` attribute.
    :raises JobError: if the module lacks ``Job`` or its schedule mismatches.
    """
    modpath = "%s.jobs.%s%s" % (app_name, when and "%s." % when or "", name)
    module = my_import(modpath)
    # todo: more friendly message for AttributeError if job_mod does not exist
    try:
        job = module.Job
    except AttributeError:
        raise JobError("Job module %s does not contain class instance named 'Job'" % modpath)
    if when and not (job.when == when or job.when is None):
        raise JobError("Job %s is not a %s job." % (modpath, when))
    return job
def get_jobs(when=None, only_scheduled=False):
    """Return a dictionary mapping of job names together with their respective
    application class.

    :param when: restrict discovery to a single schedule name (e.g. ``"daily"``).
    :param only_scheduled: drop jobs whose ``when`` is None.
    :return: dict mapping ``(app_name, job_name)`` to the job class.
    """
    # FIXME: HACK: make sure the project dir is on the path when executed as ./manage.py
    try:
        cpath = os.path.dirname(os.path.realpath(sys.argv[0]))
        ppath = os.path.dirname(cpath)
        if ppath not in sys.path:
            sys.path.append(ppath)
    except Exception:
        pass
    # NOTE(review): this local shadows the module-level ``_jobs`` cache, so
    # discovery reruns on every call — confirm whether caching was intended.
    _jobs = {}

    for app_name in [app.name for app in apps.get_app_configs()]:
        # None scans the ``jobs`` package itself (unscheduled jobs); the rest
        # scan the schedule-specific subpackages.
        scandirs = (
            None,
            'minutely',
            'quarter_hourly',
            'hourly',
            'daily',
            'weekly',
            'monthly',
            'yearly',
        )
        if when:
            scandirs = None, when
        for subdir in scandirs:
            try:
                path = find_job_module(app_name, subdir)
                for name in find_jobs(path):
                    if (app_name, name) in _jobs:
                        raise JobError("Duplicate job %s" % name)
                    job = import_job(app_name, name, subdir)
                    if only_scheduled and job.when is None:
                        # only include jobs which are scheduled
                        continue
                    if when and job.when != when:
                        # generic job not in same schedule
                        continue
                    _jobs[(app_name, name)] = job
            except ImportError:
                # No job module -- continue scanning
                pass
    return _jobs
def get_job(app_name, job_name):
    """Look up a single job class by name.

    :param app_name: application name, or a falsy value to search all apps.
    :param job_name: name of the job module.
    :return: the job class.
    :raises KeyError: if no matching job exists.
    """
    jobs = get_jobs()
    if app_name:
        return jobs[(app_name, job_name)]
    for app, name in jobs.keys():
        if name == job_name:
            return jobs[(app, name)]
    raise KeyError("Job not found: %s" % job_name)
def print_jobs(
    when=None, only_scheduled=False, show_when=True, show_appname=False, show_header=True
):
    """Print a formatted table of registered jobs to stdout.

    :param when: restrict listing to a single schedule name.
    :param only_scheduled: skip jobs without a schedule.
    :param show_when: include the schedule column.
    :param show_appname: include the application-name column.
    :param show_header: print column headers before the rows.
    """
    jobmap = get_jobs(when, only_scheduled=only_scheduled)

    print("Job List: %i jobs" % len(jobmap))

    jlist = sorted(jobmap.keys())
    if not jlist:
        return

    # Size each column to its widest entry.
    appname_spacer = "%%-%is" % max(len(e[0]) for e in jlist)
    name_spacer = "%%-%is" % max(len(e[1]) for e in jlist)
    when_spacer = "%%-%is" % max(len(e.when) for e in jobmap.values() if e.when)

    if show_header:
        header_line = " "
        if show_appname:
            header_line += appname_spacer % "appname" + " - "
        header_line += name_spacer % "jobname"
        if show_when:
            header_line += " - " + when_spacer % "when"
        header_line += " - help"
        print(header_line)
        print("-" * 80)

    for app_name, job_name in jlist:
        job = jobmap[(app_name, job_name)]
        row = " "
        if show_appname:
            row += appname_spacer % app_name + " - "
        row += name_spacer % job_name
        if show_when:
            row += " - " + when_spacer % (job.when or "")
        row += " - " + job.help
        print(row)
import functools
import inspect
@functools.lru_cache(maxsize=512)
def _get_func_parameters(func, remove_first):
parameters = tuple(inspect.signature(func).parameters.values())
if remove_first:
parameters = parameters[1:]
return parameters
def _get_callable_parameters(meth_or_func):
    """Return signature parameters for a function or bound method.

    For a bound method the underlying function is inspected and the
    implicit ``self`` parameter is stripped.
    """
    if inspect.ismethod(meth_or_func):
        return _get_func_parameters(meth_or_func.__func__, remove_first=True)
    return _get_func_parameters(meth_or_func, remove_first=False)
def get_func_args(func):
    """Return the names of ``func``'s positional-or-keyword parameters."""
    return [
        param.name
        for param in _get_callable_parameters(func)
        if param.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD
    ]
def get_func_full_args(func):
    """
    Return a list of (argument name, default value) tuples. If the argument
    does not have a default value, omit it in the tuple. Arguments such as
    *args and **kwargs are also included.
    """
    args = []
    for param in _get_callable_parameters(func):
        if param.name == "self":
            # Ignore the implicit instance argument.
            continue
        # Prefix var-positional/var-keyword names the way they are declared.
        prefix = {
            inspect.Parameter.VAR_POSITIONAL: "*",
            inspect.Parameter.VAR_KEYWORD: "**",
        }.get(param.kind, "")
        name = prefix + param.name
        if param.default != inspect.Parameter.empty:
            args.append((name, param.default))
        else:
            args.append((name,))
    return args
def func_accepts_kwargs(func):
    """Return True if function 'func' accepts keyword arguments **kwargs.

    :param func: function or bound method to inspect.
    :return: whether a ``**kwargs``-style parameter is declared.
    """
    for param in _get_callable_parameters(func):
        if param.kind == param.VAR_KEYWORD:
            return True
    return False
def func_accepts_var_args(func):
    """
    Return True if function 'func' accepts positional arguments *args.
    """
    for param in _get_callable_parameters(func):
        if param.kind == param.VAR_POSITIONAL:
            return True
    return False
def method_has_no_args(meth):
    """Return True if a method only accepts 'self'."""
    count = sum(
        1 for p in _get_callable_parameters(meth) if p.kind == p.POSITIONAL_OR_KEYWORD
    )
    # Bound methods already have 'self' stripped; plain functions still list it.
    return count == 0 if inspect.ismethod(meth) else count == 1
def func_supports_parameter(func, name):
    """Return True if ``func`` declares a parameter called ``name``."""
    return name in {param.name for param in _get_callable_parameters(func)}
import re
from collections.abc import Callable, Iterable, KeysView, Mapping
from datetime import datetime
from typing import Any, TypeVar
import slugify as unicode_slug
from .dt import as_local
# Generic input/output type variables used by convert().
_T = TypeVar("_T")
_U = TypeVar("_U")

# Character sequences considered unsafe in filenames and paths respectively.
RE_SANITIZE_FILENAME = re.compile(r"(~|\.\.|/|\\)")
RE_SANITIZE_PATH = re.compile(r"(~|\.(\.)+)")
def raise_if_invalid_filename(filename: str) -> None:
    """
    Check if a filename is valid.

    Raises a ValueError if the filename is invalid.
    """
    if RE_SANITIZE_FILENAME.sub("", filename) != filename:
        # Bug fix: the message used to read "(unknown) is not a safe filename";
        # interpolate the offending name, mirroring raise_if_invalid_path().
        raise ValueError(f"{filename} is not a safe filename")
def raise_if_invalid_path(path: str) -> None:
    """
    Check if a path is valid.

    Raises a ValueError if the path is invalid.
    """
    stripped = RE_SANITIZE_PATH.sub("", path)
    if stripped != path:
        raise ValueError(f"{path} is not a safe path")
def slugify(text: str | None, *, separator: str = "_") -> str:
    """Slugify a given text.

    :param text: text to slugify; None or "" yield "".
    :param separator: separator placed between words in the slug.
    :return: the slug, or ``"unknown"`` when slugification produced nothing.
    """
    if text == "" or text is None:
        return ""
    slugged = unicode_slug.slugify(text, separator=separator)
    return slugged if slugged else "unknown"
def repr_helper(inp: Any) -> str:
    """Help creating a more readable string representation of objects."""
    if isinstance(inp, Mapping):
        # Render mappings recursively as "key=value, key=value".
        pairs = (f"{repr_helper(key)}={repr_helper(value)}" for key, value in inp.items())
        return ", ".join(pairs)
    if isinstance(inp, datetime):
        # Datetimes are shown in the local timezone as ISO-8601.
        return as_local(inp).isoformat()
    return str(inp)
def convert(value: _T | None, to_type: Callable[[_T], _U], default: _U | None = None) -> _U | None:
"""Convert value to to_type, returns default if fails."""
try:
return default if value is None else to_type(value)
except (ValueError, TypeError):
# If value could not be converted
return default
def ensure_unique_string(
preferred_string: str, current_strings: Iterable[str] | KeysView[str]
) -> str:
"""Return a string that is not present in current_strings.
If preferred string exists will append _2, _3, ..
"""
test_string = preferred_string
current_strings_set = set(current_strings)
tries = 1
while test_string in current_strings_set:
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string | /RoundBox-2022.4.21b0-py3-none-any.whl/RoundBox/utils/__init__.py | 0.777933 | 0.162746 | __init__.py | pypi |
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/home-assistant/core/blob/dev/LICENSE.md
import asyncio
import threading
from collections.abc import Callable, Coroutine
from datetime import datetime, timedelta
from functools import wraps
from typing import Any, TypeVar
from .dt import utcnow
class Throttle:
    """A class for throttling the execution of tasks.

    This method decorator adds a cooldown to a method to prevent it from being
    called more than 1 time within the timedelta interval `min_time` after it
    returned its result.

    Calling a method a second time during the interval will return None.

    Pass keyword argument `no_throttle=True` to the wrapped method to make
    the call not throttled.

    Decorator takes in an optional second timedelta interval to throttle the
    'no_throttle' calls.

    Adds a datetime attribute `last_call` to the method.
    """

    def __init__(self, min_time: timedelta, limit_no_throttle: timedelta | None = None) -> None:
        """Initialize the throttle.

        :param min_time: cooldown interval for normal calls.
        :param limit_no_throttle: optional cooldown enforced even on
            ``no_throttle=True`` calls.
        """
        self.min_time = min_time
        self.limit_no_throttle = limit_no_throttle

    def __call__(self, method: Callable) -> Callable:
        """Caller for the throttle.

        :param method: function, method, or coroutine function to wrap.
        :return: the throttling wrapper around ``method``.
        """
        # Make sure we return a coroutine if the method is async.
        if asyncio.iscoroutinefunction(method):

            async def throttled_value() -> None:
                """Stand-in function for when real func is being throttled."""
                return None

        else:

            def throttled_value() -> None:  # type: ignore[misc]
                """Stand-in function for when real func is being throttled."""
                return None

        if self.limit_no_throttle is not None:
            # Re-wrap so even forced calls respect the secondary interval.
            method = Throttle(self.limit_no_throttle)(method)

        # Different methods that can be passed in:
        #  - a function
        #  - an unbound function on a class
        #  - a method (bound function on a class)

        # We want to be able to differentiate between function and unbound
        # methods (which are considered functions).
        # All methods have the classname in their qualname separated by a '.'
        # Functions have a '.' in their qualname if defined inline, but will
        # be prefixed by '.<locals>.' so we strip that out.
        is_func = (
            not hasattr(method, "__self__")
            and "." not in method.__qualname__.split(".<locals>.")[-1]
        )

        @wraps(method)
        def wrapper(*args: Any, **kwargs: Any) -> Callable | Coroutine:
            """Wrap that allows wrapped to be called only once per min_time.

            If we cannot acquire the lock, it is running so return None.

            :param args: forwarded positional arguments.
            :param kwargs: forwarded keyword arguments; ``no_throttle`` is consumed here.
            :return: the wrapped result, or the throttled stand-in value.
            """
            # Pick where throttle state lives: the bound instance, the wrapper
            # itself (plain function), or the first positional argument.
            if hasattr(method, "__self__"):
                host = getattr(method, "__self__")
            elif is_func:
                host = wrapper
            else:
                host = args[0] if args else wrapper

            # pylint: disable=protected-access # to _throttle
            if not hasattr(host, "_throttle"):
                host._throttle = {}

            if id(self) not in host._throttle:
                # Per-Throttle state: [lock, timestamp-of-last-call-or-None].
                host._throttle[id(self)] = [threading.Lock(), None]
            throttle = host._throttle[id(self)]
            # pylint: enable=protected-access

            if not throttle[0].acquire(False):
                # Another call is in flight; report the throttled stand-in.
                return throttled_value()

            # Check if method is never called or no_throttle is given
            force = kwargs.pop("no_throttle", False) or not throttle[1]

            try:
                if force or utcnow() - throttle[1] > self.min_time:
                    result = method(*args, **kwargs)
                    throttle[1] = utcnow()
                    return result  # type: ignore[no-any-return]

                return throttled_value()
            finally:
                throttle[0].release()

        return wrapper
import logging
from typing import IO, Any, Literal, Mapping, Optional
# Type aliases used in function signatures.
EscapeCodes = Mapping[str, str]
LogColors = Mapping[str, str]
SecondaryLogColors = Mapping[str, LogColors]

# The default colors to use for the debug levels
# (values are color names resolved via parse_colors()/escape_codes).
default_log_colors = {
    "DEBUG": "light_blue",
    "INFO": "light_green",
    "WARNING": "light_yellow",
    "ERROR": "red",
    "CRITICAL": "purple",
}
def esc(*codes: int) -> str:
    """Returns escape codes from format codes

    :param codes: numeric ANSI SGR codes.
    :return: the combined escape sequence, e.g. ``esc(1, 31)`` -> ``"\\033[1;31m"``.
    """
    joined = ";".join(map(str, codes))
    return f"\033[{joined}m"
# Base text-style sequences; extended below with fore/background colors.
escape_codes = {
    "reset": esc(0),
    "bold": esc(1),
    "thin": esc(2),
}

# SGR codes for foreground colors (30-37 normal, 90-97 "light"/bright).
escape_codes_foreground = {
    "black": 30,
    "red": 31,
    "green": 32,
    "yellow": 33,
    "blue": 34,
    "purple": 35,
    "cyan": 36,
    "white": 37,
    "light_black": 90,
    "light_red": 91,
    "light_green": 92,
    "light_yellow": 93,
    "light_blue": 94,
    "light_purple": 95,
    "light_cyan": 96,
    "light_white": 97,
}

# SGR codes for background colors (40-47 normal, 100-107 "light"/bright).
escape_codes_background = {
    "black": 40,
    "red": 41,
    "green": 42,
    "yellow": 43,
    "blue": 44,
    "purple": 45,
    "cyan": 46,
    "white": 47,
    "light_black": 100,
    "light_red": 101,
    "light_green": 102,
    "light_yellow": 103,
    "light_blue": 104,
    "light_purple": 105,
    "light_cyan": 106,
    "light_white": 107,
    # Bold background colors don't exist,
    # but we used to provide these names.
    "bold_black": 100,
    "bold_red": 101,
    "bold_green": 102,
    "bold_yellow": 103,
    "bold_blue": 104,
    "bold_purple": 105,
    "bold_cyan": 106,
    "bold_white": 107,
}

# Foreground without prefix
for name, code in escape_codes_foreground.items():
    # ``"%s" % name`` is just ``name``; kept for symmetry with the variants.
    escape_codes["%s" % name] = esc(code)
    escape_codes["bold_%s" % name] = esc(1, code)
    escape_codes["thin_%s" % name] = esc(2, code)

# Foreground with fg_ prefix
for name, code in escape_codes_foreground.items():
    escape_codes["fg_%s" % name] = esc(code)
    escape_codes["fg_bold_%s" % name] = esc(1, code)
    escape_codes["fg_thin_%s" % name] = esc(2, code)

# Background with bg_ prefix
for name, code in escape_codes_background.items():
    escape_codes["bg_%s" % name] = esc(code)

# 256 colour support
for code in range(256):
    escape_codes["fg_%d" % code] = esc(38, 5, code)
    escape_codes["bg_%d" % code] = esc(48, 5, code)
def parse_colors(string: str) -> str:
    """Return escape codes from a color sequence string.

    :param string: comma-separated color names, e.g. ``"bold_red,bg_white"``;
        empty segments are ignored.
    :return: the concatenated escape sequences.
    """
    names = [name for name in string.split(",") if name]
    return "".join(escape_codes[name] for name in names)
class ColoredRecord:
    """Wraps a LogRecord, adding escape codes to the internal dict.

    The internal dict is used when formatting the message (by the PercentStyle,
    StrFormatStyle, and StringTemplateStyle classes).
    """

    def __init__(self, record: logging.LogRecord, escapes: EscapeCodes) -> None:
        # Copy the record's attributes, then overlay the escape codes so the
        # format string can reference both sets of names.
        combined = dict(record.__dict__)
        combined.update(escapes)
        self.__dict__.update(combined)
import logging
import time
from hashlib import md5
from RoundBox.conf.project_settings import settings
from RoundBox.core.cache import cache
class RequireDebugFalse(logging.Filter):
    """Logging filter that passes records only when ``settings.DEBUG`` is off."""

    def filter(self, record):
        # Parameter renamed from ``records`` to ``record`` for consistency
        # with RequireDebugTrue and logging.Filter.filter(); the logging
        # framework passes the single LogRecord positionally.
        return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
    """Logging filter that passes records only when ``settings.DEBUG`` is on."""

    def filter(self, record):
        # The DEBUG flag itself decides; logging keeps the record when truthy.
        return settings.DEBUG
class RateLimiterFilter(logging.Filter):
    """Throttle identical log messages using the configured cache backend."""

    def filter(self, record):
        """Allow at most one identical message per rate window.

        :param record: candidate LogRecord.
        :return: True when the record should be emitted (first in the window),
            False when it is suppressed as a duplicate.
        """
        # Rate is specified as 1 messages logged per N seconds. (aka cache timeout)
        rate = getattr(settings, 'RATE_LIMITER_FILTER_RATE', 10)
        prefix = getattr(settings, 'RATE_LIMITER_FILTER_PREFIX', 'ratelimiterfilter')
        subject = record.getMessage()
        # Key on the md5 of the rendered message; a companion ":count" key
        # counts how many duplicates arrived within the window.
        cache_key = "%s:%s" % (prefix, md5(subject.encode('utf-8')).hexdigest())
        cache_count_key = "%s:count" % cache_key

        result = cache.get_many([cache_key, cache_count_key])
        value = result.get(cache_key)
        cntr = result.get(cache_count_key)
        if not cntr:
            cntr = 1
            # The counter outlives the window slightly so the next emission
            # can report how many duplicates were suppressed.
            cache.set(cache_count_key, cntr, rate + 60)

        if value:
            # Duplicate within the window: count it and drop the record.
            cache.incr(cache_count_key)
            return False

        # First occurrence: prefix the message with the occurrence count.
        record.msg = "[%sx] %s" % (cntr, record.msg)
        cache.set(cache_key, time.time(), rate)
        return True
class PasswordMaskingFilter(logging.Filter):
    """Filter and mask the password values from dictionary that are listed in log"""

    def filter(self, record) -> bool:
        """The call signature matches string interpolation: args can be a tuple or a lone dict

        :param record: LogRecord whose ``args`` are sanitized in place.
        :return: always True — records are rewritten, never dropped.
        """
        if isinstance(record.args, dict):
            record.args = self.sanitize_dict(record.args)
        else:
            record.args = tuple(self.sanitize_dict(i) for i in record.args)
        return True

    @staticmethod
    def sanitize_dict(d) -> dict:
        """Return ``d`` with password values masked.

        :param d: any value; only dicts are inspected, anything else is
            returned unchanged.
        :return: ``d`` itself when nothing needs masking, otherwise a masked
            shallow copy (the caller's dict is never mutated).
        """
        if not isinstance(d, dict):
            return d
        # Single scan for offending keys (the old code scanned once with
        # any() and then looped over every item a second time).
        password_keys = [k for k in d if 'password' in k]
        if not password_keys:
            return d
        d = d.copy()  # Ensure that we won't clobber anything critical
        for k in password_keys:
            d[k] = '*** PASSWORD ***'
        return d
import logging
import os
from pprint import pformat
from typing import IO, Any, Literal, Mapping, Optional
from RoundBox.core.cliparser.color import color_style
from RoundBox.utils.log.filters import PasswordMaskingFilter
from . import themes
from .color import (
ColoredRecord,
EscapeCodes,
LogColors,
SecondaryLogColors,
default_log_colors,
escape_codes,
parse_colors,
)
# The default format to use for each style
default_formats = {
"%": "%(log_color)s%(levelname)s:%(name)s:%(message)s",
"{": "{log_color}{levelname}:{name}:{message}",
"$": "${log_color}${levelname}:${name}:${message}",
}
class ServerFormatter(logging.Formatter):
    """Formatter for server-style log lines with a ``{server_time}`` field."""

    default_time_format = '%d/%b/%Y %H:%M:%S'

    def __init__(self, *args, **kwargs):
        self.style = color_style()
        super().__init__(*args, **kwargs)

    def format(self, record):
        """Populate ``record.server_time`` on demand, then format normally.

        The redundant save/restore of ``record.msg`` present in the old code
        was removed: nothing between the two assignments ever modified it.
        """
        if self.uses_server_time() and not hasattr(record, 'server_time'):
            record.server_time = self.formatTime(record, self.datefmt)
        return super().format(record)

    def uses_server_time(self):
        """Return True when the format string references ``{server_time}``."""
        return self._fmt.find('{server_time}') >= 0
class PrivacyFormatter(logging.Formatter):
    """Formatter that appends a pretty-printed, password-masked request dict."""

    def format(self, record):
        formatted = super().format(record)
        if hasattr(record, 'request'):
            # Mask any password values before the request dict hits the log.
            safe_request = PasswordMaskingFilter.sanitize_dict(record.request)
            pretty = pformat(safe_request, indent=4).replace('\n', '\n\t')
            formatted += '\n\t' + pretty
        return formatted
class ColoredFormatter(logging.Formatter):
    """Special custom formatter for colorizing log messages!"""

    def __init__(
        self,
        fmt: Optional[str] = None,
        datefmt: Optional[str] = None,
        style: Literal["%", "{", "$"] = "%",
        log_colors: Optional[LogColors] = None,
        reset: bool = True,
        secondary_log_colors: Optional[SecondaryLogColors] = None,
        validate: bool = True,
        stream: Optional[IO] = None,
        no_color: bool = False,
        force_color: bool = False,
        icons=None,
        icon_style='rounded',
    ) -> None:
        """Set the format and colors the ColoredFormatter will use.

        The ``fmt``, ``datefmt`` and ``style`` args are passed on to the
        ``logging.Formatter`` constructor.

        The ``secondary_log_colors`` argument can be used to create additional
        ``log_color`` attributes. Each key in the dictionary will set
        ``{key}_log_color``, using the value to select from a different
        ``log_colors`` set.

        :param fmt: The format string to use.
        :param datefmt: A format string for the date.
        :param style: The format style to use. (*No meaning prior to Python 3.2.*)
        :param log_colors: A mapping of log level names to color names.
        :param reset: Implicitly append a color reset to all records unless False.
        :param secondary_log_colors: Map secondary ``log_color`` attributes. (*New in version 2.6.*)
        :param validate: Validate the format string.
        :param stream: The stream formatted messages will be printed to. Used to toggle colour
            on non-TTY outputs. Optional.
        :param no_color: Disable color output.
        :param force_color: Enable color output. Takes precedence over `no_color`.
        :param icons: dict of level:value for icons
        :param icon_style: name of the icon theme looked up in ``themes.icons``
            when ``icons`` is not supplied.
        """
        # Select a default format if `fmt` is not provided.
        fmt = default_formats[style] if fmt is None else fmt
        super().__init__(fmt, datefmt, style, validate)
        self.log_colors = log_colors if log_colors is not None else default_log_colors
        self.secondary_log_colors = (
            secondary_log_colors if secondary_log_colors is not None else {}
        )
        self.reset = reset
        self.stream = stream
        self.no_color = no_color
        self.force_color = force_color
        self.icon_style = icon_style
        self.theme_icons = icons if icons else themes.icons.get(self.icon_style)
        self.fmt = fmt
        # Bug fix: this used to be ``self.fmt = style``, which immediately
        # clobbered the stored format string with the style character.
        self.style = style

    @property
    def is_tty(self):
        """Return truthy when ``self.stream`` exists and reports a TTY."""
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty()

    def formatMessage(self, record: logging.LogRecord) -> str:
        """Format a message from a record object.

        :param record: the record to render.
        :return: the formatted message with escape codes applied.
        """
        levelname = record.levelname  # len7 limit
        if levelname == 'CRITICAL':
            # Rewrite CRITICAL to the 7-char-friendly FATAL alias.
            levelname = record.levelname = 'FATAL'
        record.icon = self.theme_icons.get(levelname, '')
        escapes = self._escape_code_map(levelname)
        wrapper = ColoredRecord(record, escapes)
        message = super().formatMessage(wrapper)  # type: ignore
        message = self._append_reset(message, escapes)
        return message

    def _escape_code_map(self, item: str) -> EscapeCodes:
        """Build a map of keys to escape codes for use in message formatting.

        If _blank_escape_codes() returns True, all values will be an empty string.

        :param item: the (possibly rewritten) level name.
        :return: mapping of placeholder names to escape sequences.
        """
        codes = {**escape_codes}
        codes.setdefault("log_color", self._get_escape_code(self.log_colors, item))
        for name, colors in self.secondary_log_colors.items():
            codes.setdefault("%s_log_color" % name, self._get_escape_code(colors, item))
        if self._blank_escape_codes():
            codes = {key: "" for key in codes.keys()}
        return codes

    def _blank_escape_codes(self):
        """Return True if we should be prevented from printing escape codes."""
        # Explicit force wins over everything, including NO_COLOR.
        if self.force_color or "FORCE_COLOR" in os.environ:
            return False

        if self.no_color or "NO_COLOR" in os.environ:
            return True

        # A configured stream that is not a TTY gets plain text.
        if self.stream is not None and not self.stream.isatty():
            return True

        return False

    @staticmethod
    def _get_escape_code(log_colors: LogColors, item: str) -> str:
        """Extract a color sequence from a mapping, and return escape codes.

        :param log_colors: mapping of level name to color sequence string.
        :param item: level name to look up ("" result when unmapped).
        :return: the parsed escape sequence.
        """
        return parse_colors(log_colors.get(item, ""))

    def _append_reset(self, message: str, escapes: EscapeCodes) -> str:
        """Add a reset code to the end of the message, if it's not already there.

        :param message: the rendered message.
        :param escapes: escape-code map containing the ``reset`` sequence.
        :return: message ending with a reset when ``self.reset`` is set.
        """
        reset_escape_code = escapes["reset"]

        if self.reset and not message.endswith(reset_escape_code):
            message += reset_escape_code

        return message
class LevelFormatter:
    """An extension of ColoredFormatter that uses per-level format strings."""

    def __init__(self, fmt: Mapping[str, str], **kwargs: Any) -> None:
        """Build one ColoredFormatter per log level.

        :param fmt: mapping of level names (e.g. ``"WARNING"``) to format
            strings; every other keyword argument is forwarded unchanged to
            each ``ColoredFormatter``.

        Example:

            formatter = colorlog.LevelFormatter(
                fmt={
                    "DEBUG": "%(log_color)s%(message)s (%(module)s:%(lineno)d)",
                    "INFO": "%(log_color)s%(message)s",
                    "WARNING": "%(log_color)sWRN: %(message)s (%(module)s:%(lineno)d)",
                    "ERROR": "%(log_color)sERR: %(message)s (%(module)s:%(lineno)d)",
                    "CRITICAL": "%(log_color)sCRT: %(message)s (%(module)s:%(lineno)d)",
                }
            )
        """
        self.formatters = {}
        for level, level_fmt in fmt.items():
            self.formatters[level] = ColoredFormatter(fmt=level_fmt, **kwargs)

    def format(self, record: logging.LogRecord) -> str:
        """Format ``record`` with the formatter registered for its level."""
        return self.formatters[record.levelname].format(record)
# Multi-consumer multi-producer dispatching mechanism
# Originally based on pydispatch (BSD) https://pypi.org/project/PyDispatcher/2.0.1/
# See license.txt for original license.
# Modified for Growatt Monitor purpose
import logging
import threading
import weakref
from RoundBox.utils.inspect import func_accepts_kwargs
logger = logging.getLogger('RoundBox.dispatch')
def _make_id(target):
if hasattr(target, '__func__'):
return id(target.__self__), id(target.__func__)
return id(target)
# Identity key of the "any sender" wildcard (sender=None).
NONE_ID = _make_id(None)

# A marker for caching
NO_RECEIVERS = object()
class Signal:
"""
Base class for all signals
Internal attributes:
receivers
{ receiverkey (id) : weakref(receiver) }
"""
    def __init__(self, use_caching=False):
        """
        Create a new signal.

        :param use_caching: when True, cache the receiver list per sender
            (the cache is invalidated by connect()/disconnect()).
        """
        self.receivers = []
        # Guards mutation of self.receivers across threads.
        self.lock = threading.Lock()
        self.use_caching = use_caching
        # For convenience, we create empty caches even if they are not used.
        # A note about caching: if use_caching is defined, then for each
        # distinct sender we cache the receivers that sender has in
        # 'sender_receivers_cache'. The cache is cleaned when .connect() or
        # .disconnect() is called and populated on send().
        self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
        # Set when a weakly-referenced receiver dies; triggers lazy cleanup.
        self._dead_receivers = False
    def connect(self, receiver, sender=None, weak=True, dispatch_uid=None):
        """
        Connect receiver to sender for signal.

        Arguments:

            receiver
                A function or an instance method which is to receive signals.
                Receivers must be hashable objects.

                If weak is True, then receiver must be weak referenceable.

                Receivers must be able to accept keyword arguments.

                If a receiver is connected with a dispatch_uid argument, it
                will not be added if another receiver was already connected
                with that dispatch_uid.

            sender
                The sender to which the receiver should respond. Must either be
                a Python object, or None to receive events from any sender.

            weak
                Whether to use weak references to the receiver. By default, the
                module will attempt to use weak references to the receiver
                objects. If this parameter is false, then strong references will
                be used.

            dispatch_uid
                An identifier used to uniquely identify a particular instance of
                a receiver. This will usually be a string, though it may be
                anything hashable.
        """
        from RoundBox.conf.project_settings import settings

        # If DEBUG is on, check that we got a good receiver
        if settings.configured and settings.DEBUG:
            if not callable(receiver):
                raise TypeError('Signal receivers must be callable.')
            # Check for **kwargs
            if not func_accepts_kwargs(receiver):
                raise ValueError("Signal receivers must accept keyword arguments (**kwargs).")

        # The lookup key de-duplicates (receiver, sender) pairs; dispatch_uid
        # replaces the receiver identity when supplied.
        if dispatch_uid:
            lookup_key = (dispatch_uid, _make_id(sender))
        else:
            lookup_key = (_make_id(receiver), _make_id(sender))

        if weak:
            ref = weakref.ref
            receiver_object = receiver
            # Check for bound methods
            if hasattr(receiver, '__self__') and hasattr(receiver, '__func__'):
                # Bound methods need WeakMethod; a plain ref would die at once.
                ref = weakref.WeakMethod
                receiver_object = receiver.__self__
            receiver = ref(receiver)
            # Mark dead receivers for lazy cleanup when the referent is collected.
            weakref.finalize(receiver_object, self._remove_receiver)

        with self.lock:
            self._clear_dead_receivers()
            if not any(r_key == lookup_key for r_key, _ in self.receivers):
                self.receivers.append((lookup_key, receiver))
            # Any connect invalidates the per-sender receiver cache.
            self.sender_receivers_cache.clear()
def disconnect(self, receiver=None, sender=None, dispatch_uid=None):
"""
Disconnect receiver from sender for signal.
If weak references are used, disconnect need not be called. The receiver
will be removed from dispatch automatically.
Arguments:
receiver
The registered receiver to disconnect. May be none if
dispatch_uid is specified.
sender
The registered sender to disconnect
dispatch_uid
the unique identifier of the receiver to disconnect
"""
if dispatch_uid:
lookup_key = (dispatch_uid, _make_id(sender))
else:
lookup_key = (_make_id(receiver), _make_id(sender))
disconnected = False
with self.lock:
self._clear_dead_receivers()
for index in range(len(self.receivers)):
(r_key, _) = self.receivers[index]
if r_key == lookup_key:
disconnected = True
del self.receivers[index]
break
self.sender_receivers_cache.clear()
return disconnected
def has_listeners(self, sender=None):
return bool(self._live_receivers(sender))
def send(self, sender, **named):
"""
Send signal from sender to all connected receivers.
If any receiver raises an error, the error propagates back through send,
terminating the dispatch loop. So it's possible that all receivers
won't be called if an error is raised.
Arguments:
sender
The sender of the signal. Either a specific object or None.
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
return [
(receiver, receiver(signal=self, sender=sender, **named))
for receiver in self._live_receivers(sender)
]
def send_robust(self, sender, **named):
"""
Send signal from sender to all connected receivers catching errors.
Arguments:
sender
The sender of the signal. Can be any Python object (normally one
registered with a connect if you actually want something to
occur).
named
Named arguments which will be passed to receivers.
Return a list of tuple pairs [(receiver, response), ... ].
If any receiver raises an error (specifically any subclass of
Exception), return the error instance as the result for that receiver.
"""
if not self.receivers or self.sender_receivers_cache.get(sender) is NO_RECEIVERS:
return []
# Call each receiver with whatever arguments it can accept.
# Return a list of tuple pairs [(receiver, response), ... ].
responses = []
for receiver in self._live_receivers(sender):
try:
response = receiver(signal=self, sender=sender, **named)
except Exception as err:
logger.error(
'Error calling %s in Signal.send_robust() (%s)',
receiver.__qualname__,
err,
exc_info=err,
)
responses.append((receiver, err))
else:
responses.append((receiver, response))
return responses
def _clear_dead_receivers(self):
# Note: caller is assumed to hold self.lock.
if self._dead_receivers:
self._dead_receivers = False
self.receivers = [
r
for r in self.receivers
if not (isinstance(r[1], weakref.ReferenceType) and r[1]() is None)
]
def _live_receivers(self, sender):
"""
Filter sequence of receivers to get resolved, live receivers.
This checks for weak references and resolves them, then returning only
live receivers.
"""
receivers = None
if self.use_caching and not self._dead_receivers:
receivers = self.sender_receivers_cache.get(sender)
# We could end up here with NO_RECEIVERS even if we do check this case in
# .send() prior to calling _live_receivers() due to concurrent .send() call.
if receivers is NO_RECEIVERS:
return []
if receivers is None:
with self.lock:
self._clear_dead_receivers()
senderkey = _make_id(sender)
receivers = []
for (receiverkey, r_senderkey), receiver in self.receivers:
if r_senderkey == NONE_ID or r_senderkey == senderkey:
receivers.append(receiver)
if self.use_caching:
if not receivers:
self.sender_receivers_cache[sender] = NO_RECEIVERS
else:
# Note, we must cache the weakref versions.
self.sender_receivers_cache[sender] = receivers
non_weak_receivers = []
for receiver in receivers:
if isinstance(receiver, weakref.ReferenceType):
# Dereference the weak reference.
receiver = receiver()
if receiver is not None:
non_weak_receivers.append(receiver)
else:
non_weak_receivers.append(receiver)
return non_weak_receivers
def _remove_receiver(self, receiver=None):
# Mark that the self.receivers list has dead weakrefs. If so, we will
# clean those up in connect, disconnect and _live_receivers while
# holding self.lock. Note that doing the cleanup here isn't a good
# idea, _remove_receiver() will be called as side effect of garbage
# collection, and so the call can happen while we are already holding
# self.lock.
self._dead_receivers = True
def receiver(signal, **kwargs):
    """
    A decorator for connecting receivers to signals. Used by passing in the
    signal (or list of signals) and keyword arguments to connect::

        @receiver(post_save, sender=MyModel)
        def signal_receiver(sender, **kwargs):
            ...

        @receiver([post_save, post_delete], sender=MyModel)
        def signals_receiver(sender, **kwargs):
            ...
    """
    def _decorator(func):
        # Normalize to a sequence so a single signal and a list of signals
        # are handled by the same loop.
        signals = signal if isinstance(signal, (list, tuple)) else [signal]
        for sig in signals:
            sig.connect(func, **kwargs)
        return func
    return _decorator
class Calculator:
    """Calculator with 8-decimal-place rounding and an undoable history.

    All results are rounded to 8 decimal places and are computed from the
    running total, i.e. the ``total`` of the most recent entry stored in the
    calculator's :class:`Memory`.

    Provides:
        I. Calculations: addition, subtraction, multiplication, division
           and taking the n-th root of a number.
        II. Memory management: reset memory, undo the last operation stored
            in :class:`Memory`, and print the calculator history.
    """

    # All results are rounded to this many decimal places.
    PRECISION = 8

    def __init__(self):
        self.memory = Memory()

    def _calculate(self, operation, operand, compute, error_message):
        """Apply *compute* to (current total, operand) and record the result.

        Args:
            operation: operator text stored in the history (e.g. "+").
            operand: right-hand operand of the operation.
            compute: callable taking (base, operand) and returning the raw
                (unrounded) result.
            error_message: message printed when the operand is invalid.

        Returns:
            The rounded result, or None if the operand was invalid.
        """
        base = self.memory.history[-1].total
        try:
            total = round(compute(base, operand), self.PRECISION)
        except (TypeError, ValueError, ZeroDivisionError):
            # Narrowed from a bare "except:": bad operand types, domain
            # errors and division by zero are the expected failure modes;
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            print(error_message)
            return None
        self.memory.memory_add_item(base, operation, operand, total)
        return total

    def add(self, operand):
        """Add *operand* to the running total and return the result."""
        return self._calculate(
            "+",
            operand,
            lambda base, x: base + x,
            f"Provided operand '{operand}' is not valid in add method",
        )

    def substract(self, operand):
        """Subtract *operand* from the running total and return the result.

        Note: the method name keeps its historical misspelling for
        backward compatibility with existing callers.
        """
        return self._calculate(
            "-",
            operand,
            lambda base, x: base - x,
            f"Provided operand '{operand}' is not valid in substract method",
        )

    def multiply(self, operand):
        """Multiply the running total by *operand* and return the result."""
        return self._calculate(
            "*",
            operand,
            lambda base, x: base * x,
            f"Provided operand '{operand}' is not valid in multiply method",
        )

    def divide(self, operand):
        """Divide the running total by *operand* and return the result.

        Division by zero prints an error message and returns None.
        """
        return self._calculate(
            "/",
            operand,
            lambda base, x: base / x,
            f"Provided operand '{operand}' is not valid in divide method",
        )

    def root(self, power):
        """Take the *power*-th root of the running total and return it.

        A zero power (or a root that yields a complex result) prints an
        error message and returns None.
        """
        return self._calculate(
            "** 1 /",
            power,
            lambda base, p: base ** (1 / p),
            f"Provided power of root '{power}' is not valid in root method",
        )

    def reset_memory(self):
        """Discard the current history and start from a fresh Memory."""
        del self.memory
        self.memory = Memory()

    def undo(self):
        """Remove the last item in Memory.history (delegates to Memory)."""
        self.memory.memory_remove_last()

    def print_history(self):
        """Print every operation stored in Memory.history."""
        for item in self.memory.history:
            print(f"{item.base} {item.operation} {item.operand} = {item.total}")
class Memory:
    """History storage for the Calculator module.

    The history always starts with (and retains) a sentinel entry whose
    total is 0, so the latest total is always available.
    """

    class History_item:
        """One recorded operation: ``base operation operand = total``."""

        def __init__(self, base, operation, operand, total):
            self.base = base
            self.operation = operation
            self.operand = operand
            self.total = total

    def __init__(self):
        # Seed the history with the initial zero entry.
        self.history = [self.History_item(0, "Initial value", 0, 0)]

    def memory_add_item(self, base, operation, operand, total):
        """Append a new History_item entry to the history."""
        self.history.append(self.History_item(base, operation, operand, total))

    def memory_remove_last(self):
        """Drop the most recent entry, never removing the initial one."""
        if len(self.history) > 1:
            del self.history[-1]
# Rounders
The `rounders` package extends the functionality provided by Python's
built-in [`round`](https://docs.python.org/3/library/functions.html#round)
function. It aims to provide a more complete and consistent collection of
decimal rounding functionality than is provided by the Python core and standard
library. Specifically, it provides:
* Drop-in replacements for `round` that use rounding modes other than
round-ties-to-even (for example, the commonly needed round-ties-to-away).
* Functionality for rounding to a given number of significant figures,
rather than to a set number of places after (or before) the decimal point.
## General-purpose rounding functions
There are four general-purpose rounding functions.
* The `round` function has the same signature as the built-in `round`, but also allows a
rounding mode to be specified. Like `round`, it supports rounding to the nearest
integer in the direction of the given rounding mode, and rounding to a given number of
places while preserving the type of the input.
```python
>>> from rounders import round, TIES_TO_AWAY, TO_MINUS
>>> round(2.5) # The default rounding mode is TIES_TO_EVEN
2
>>> round(2.5, mode=TIES_TO_AWAY) # round halfway cases away from zero
3
>>> round(2.97, 1, mode=TO_MINUS) # round towards negative infinity (like floor)
2.9
>>> round(Decimal(-1628), -2, mode=TO_MINUS) # Decimal and Fraction types supported
Decimal('-1.7E+3')
```
* The `round_to_figures` function rounds to a given number of significant figures,
rather than to a given number of places before or after the decimal point.
```python
>>> from rounders import round_to_figures, TO_AWAY
>>> round_to_figures(1.234567, 3)
1.23
>>> round_to_figures(1234567., 3)
1230000.0
>>> round_to_figures(0.0001234567, 3)
0.000123
>>> round_to_figures(0.0001234567, 3, mode=TO_AWAY) # round away from zero
0.000124
```
* The `round_to_int` and `round_to_places` functions provide the two pieces of
functionality that `round` combines: `round_to_int` rounds to a
nearby integer using the given rounding mode, while `round_to_places` always
expects an `ndigits` argument and rounds to the given number of places. The `round`
function is currently a simple wrapper around `round_to_int` and `round_to_places`.
```python
>>> from rounders import round_to_int, round_to_places, TO_PLUS
>>> round_to_int(3.1415, mode=TO_PLUS)
4
>>> round_to_places(3.1415, 2, mode=TO_PLUS)
3.15
```
There are currently thirteen different rounding modes provided, listed
[below](#rounding-modes).
## Functions providing alternative rounding modes
There are thirteen functions that act as drop-in replacements for `round`, but that
use a different rounding mode. For example, if you always want to round ties away
from zero instead of to the nearest even number, you can do this:
```python
>>> from rounders import round_ties_to_away as round
>>> round(4.5)
5
>>> round(1.25, 1)
1.3
```
Or if you want a version of `math.ceil` that accepts a number of places after the point,
you can do:
```python
>>> from rounders import ceil
>>> ceil(1.78)
2
>>> ceil(1.782, 2)
1.79
>>> ceil(-1.782, 2)
-1.78
```
The complete list of functions is [below](#rounding-modes)
## Rounding modes and mode-specific rounding functions
These are the currently supported rounding modes, along with their corresponding
mode-specific rounding functions. The functions `trunc`, `floor` and `ceil` are
aliases for `round_to_zero`, `round_to_minus` and `round_to_plus`, respectively.
### To-nearest rounding modes
There are six to-nearest rounding modes: these all round to the closest target value
(e.g., to the closest integer in the case of `round_to_int`), and differ only in their
handling of ties.
| Rounding mode | Function | Description |
|-----------------|-----------------------|----------------------------------------|
| `TIES_TO_EVEN` | `round_ties_to_even` | Ties rounded to the nearest even value |
| `TIES_TO_ODD` | `round_ties_to_odd` | Ties rounded to the nearest odd value |
| `TIES_TO_AWAY` | `round_ties_to_away` | Ties rounded away from zero |
| `TIES_TO_ZERO` | `round_ties_to_zero` | Ties rounded towards zero |
| `TIES_TO_MINUS` | `round_ties_to_minus` | Ties rounded towards negative infinity |
| `TIES_TO_PLUS` | `round_ties_to_plus` | Ties rounded towards positive infinity |
### Directed rounding modes
There are six matching directed rounding modes: for these, all values between any two
representable output values will be rounded in the same direction.
| Rounding mode | Function | Description |
|---------------|-----------------------|---------------------------------|
| `TO_EVEN` | `round_to_even` | Round to the nearest even value |
| `TO_ODD` | `round_to_odd` | Round to the nearest odd value |
| `TO_AWAY` | `round_to_away` | Round away from zero |
| `TO_ZERO` | `round_to_zero` | Round towards zero |
| `TO_MINUS` | `round_to_minus` | Round towards negative infinity |
| `TO_PLUS` | `round_to_plus` | Round towards positive infinity |
### Miscellaneous rounding modes
There's one miscellaneous rounding mode `TO_ZERO_05_AWAY`, with corresponding function
`round_to_zero_05_away`.
| Rounding mode | Function | Description |
|-------------------|-------------------------|-------------------|
| `TO_ZERO_05_AWAY` | `round_to_zero_05_away` | See below |
This rounding mode matches the behaviour of `TO_ZERO`, _except_ in the case where
rounding towards zero would produce a final significant digit of `0` or `5`. In that
case, it matches the behaviour of `TO_AWAY` instead. Note that in the case where the
value is already rounded to the required number of digits, neither `TO_ZERO` nor
`TO_AWAY` would change its value, and similarly `TO_ZERO_05_AWAY` does not change
the value in this case.
```python
>>> from rounders import round_to_zero_05_away
>>> round_to_zero_05_away(1.234, 1) # behaves like `TO_ZERO`
1.2
>>> round_to_zero_05_away(-1.294, 1) # also behaves like `TO_ZERO`
-1.2
>>> round_to_zero_05_away(1.534, 1) # `TO_ZERO` would give 1.5, so round away
1.6
>>> round_to_zero_05_away(-2.088, 1) # `TO_ZERO` would give -2.0, so round away
-2.1
>>> round_to_zero_05_away(3.5, 1) # `TO_ZERO` wouldn't change the value; leave as-is
3.5
```
## Notes on rounding modes
Some notes on particular rounding modes:
* `TIES_TO_EVEN` goes by a [variety of
names](https://en.wikipedia.org/wiki/Rounding#Rounding_half_to_even), including
"Banker's rounding", "statisticians' rounding", and "Dutch rounding". It matches
Python's default rounding mode and the IEEE 754 default rounding mode,
`roundTiesToEven`. Many other languages also use this rounding mode by default.
* `TIES_TO_AWAY` appears to be the rounding mode most commonly taught in schools, and
the mode that users often mistakenly expect `round` to use.
* `TIES_TO_PLUS` matches the rounding mode used by JavaScript's `Math.round`, and also
appears to be commonly taught. (See [ECMA-262, 13th
edn.](https://262.ecma-international.org/13.0/), §21.3.2.28.)
* `TIES_TO_ZERO` is used in IEEE 754's "Augmented arithmetic operations".
* `TO_ZERO` matches the behaviour of `math.trunc`
* `TO_PLUS` matches the behaviour of `math.ceil`
* `TO_MINUS` matches the behaviour of `math.floor`
* `TO_ODD` is interesting as a form of "round for reround", providing a way to avoid the
phenomenon of [double
rounding](https://en.wikipedia.org/wiki/Rounding#Double_rounding). Suppose we're
given a real number `x` and a number of places `p`. Let `y` be the result of rounding
`x` to `p + 2` places using the `TO_ODD` rounding mode. Then `y` can act as a proxy
for `x` when rounding to `p` places, in the sense that `y` and `x` will round the
same way under any of the rounding modes defined in this module. (The binary analog
of `TO_ODD` is a little more useful here - it works in the same way, but requires
only two extra bits for the intermediate value instead of two extra digits.)
* `TO_ZERO_05_AWAY` also provides a form of "round for reround", but is more efficient
in that it only requires one extra decimal digit instead of two. Given a value `x`
and a number of places `p`, if `y = round(x, p + 1, mode=TO_ZERO_05_AWAY)`, then
`round(x, p, mode=mode) == round(y, p, mode=mode)` for any of the thirteen rounding
modes defined in this package.
```python
>>> from rounders import *
>>> import random
>>> x = random.uniform(-1.0, 1.0)
>>> y = round(x, 5, mode=TO_ZERO_05_AWAY)
>>> round(x, 4, mode=TO_ZERO) == round(y, 4, mode=TO_ZERO)
True
>>> round(x, 4, mode=TIES_TO_ODD) == round(y, 4, mode=TIES_TO_ODD)
True
>>> round(x, 4, mode=TO_ZERO_05_AWAY) == round(y, 4, mode=TO_ZERO_05_AWAY)
True
```
On relationships between the rounding modes in this package and rounding modes
elsewhere:
* IEEE 754 defines five "rounding-direction" attributes: `roundTiesToEven`,
`roundTiesToAway`, `roundTowardPositive`, `roundTowardNegative` and `roundTowardZero`.
These match `TIES_TO_EVEN`, `TIES_TO_AWAY`, `TO_PLUS`, `TO_MINUS` and `TO_ZERO`,
respectively. The "Augmented arithmetic operations" section of IEEE 754-2019 also
defines an attribute `roundTiesToZero`, corresponding to `TIES_TO_ZERO` in this
module.
| IEEE 754 rounding direction | `rounders` rounding mode |
|-----------------------------|--------------------------|
| `roundTiesToEven` | `TIES_TO_EVEN` |
| `roundTiesToAway` | `TIES_TO_AWAY` |
| `roundTiesToZero` | `TIES_TO_ZERO` |
| `roundTowardPositive` | `TO_PLUS` |
| `roundTowardNegative` | `TO_MINUS` |
| `roundTowardZero` | `TO_ZERO` |
* As of Python 3.11, Python's
[`decimal`](https://docs.python.org/3/library/decimal.html) module defines eight
rounding options, corresponding to the rounding modes in this module as follows:
| `decimal` rounding option | `rounders` rounding mode |
|---------------------------|--------------------------|
| `ROUND_CEILING` | `TO_PLUS` |
| `ROUND_DOWN` | `TO_ZERO` |
| `ROUND_FLOOR` | `TO_MINUS` |
| `ROUND_HALF_DOWN` | `TIES_TO_ZERO` |
| `ROUND_HALF_EVEN` | `TIES_TO_EVEN` |
| `ROUND_HALF_UP` | `TIES_TO_AWAY` |
| `ROUND_UP` | `TO_AWAY` |
| `ROUND_05UP` | `TO_ZERO_05_AWAY` |
## Supported numeric types
Out of the box, `rounders` supports Python's built-in numeric types: `int`, `float`,
`decimal.Decimal` and `fractions.Fraction`. Under the hood, it uses
[`functools.singledispatch`](https://docs.python.org/3/library/functools.html#functools.singledispatch)
for all type-specific operations. This should allow easy extension to new numeric
types in the future. The extension mechanism has not yet stabilised.
## Future directions
Major goals for future releases:
- Add formatting support, including the ability to specify rounding direction in a
format specification.
- Finalise and document mechanisms for adding support for custom types.
- Improve performance of `round`, especially for the `float` type, with the aid of
a C extension if necessary.
- Better document the pitfalls of `round` applied to binary floats (especially for
directed rounding modes, where `round` is not idempotent).
| /rounders-0.1.0.tar.gz/rounders-0.1.0/README.md | 0.943932 | 0.963575 | README.md | pypi |
import os
import re
from collections import defaultdict
from typing import Dict, Generator, List, Optional, TextIO, Tuple, Union
class INI:
    """
    Class for parsing INI files.

    Current Restrictions:
    - key/value pairs must be separated by =
    - keys may not begin or end with whitespace
    - values will have beginning or ending whitespace stripped when returned.
    - Comments will only be ignored if they are on one line, but not
      if they are on the same line as a key/value pair, where they will be treated as part of the value

    Implementation notes:
    - Validation of key/value pairs occurs when data is used, not when the file is read.
    - When replacing keys with duplicates, all old keys will be removed from all sections (in the
      case of duplicate sections), and the new elements will be inserted in a single block at the
      location of the first old key.
    - Lists returned by the `[]` operator should not be modified, as the underlying data will not change.
    """

    class _Section:
        """One section of the file, stored as its raw lines (header included)."""

        def __init__(self, ini: "INI", values: List[str]):
            # Raw text lines of this section; kept verbatim so the file can
            # be round-tripped with comments and formatting preserved.
            self.values = values
            self.ini = ini

        def __contains__(self, key: str) -> bool:
            # EAFP: pull the first match from the generator; StopIteration
            # means no line in this section defines the key.
            try:
                next(self._find_values(key.strip()))
                return True
            except StopIteration:
                return False

        def __iter__(self):
            # Yield each key, in file order (duplicates yielded each time).
            for _, key, _ in self.items():
                yield key

        def items(self):
            """Yield (line_index, key, value) for every key/value line."""
            for index, elem in enumerate(self.values):
                # Ignore empty lines and comments
                if not elem.strip() or elem.strip()[0] in ("#", "["):
                    continue
                if "=" not in elem:
                    raise RuntimeError(f"Unrecognized line {elem}")
                # Split on the first '=' only, so values may contain '='.
                elem_key, value = elem.split("=", 1)
                yield index, elem_key.strip(), value.strip()

        def _find_values(self, key: str) -> Generator[Tuple[int, str], None, None]:
            # Yield (line_index, value) for every line defining *key*.
            for index, elem_key, value in self.items():
                if elem_key == key:
                    yield index, value

        def __getitem__(self, key: str) -> List[str]:
            key = key.strip()
            return [value for index, value in self._find_values(key)]

        def __delitem__(self, key: str):
            values = self._find_values(key)
            # Delete from the end so earlier indices stay valid.
            for index, _ in reversed(list(values)):
                del self.values[index]

        def __setitem__(self, key: str, values: Union[str, List[str]]):
            oldvalues = list(self._find_values(key))
            if isinstance(values, str):
                values = [values]
            if oldvalues:
                # Replace in place: remove all old occurrences, then insert
                # the new entries as a block where the first old one was.
                start_index = oldvalues[0][0]
                for index, _ in reversed(oldvalues):
                    del self.values[index]
                # Insert in reverse so the final block is in given order.
                for new_value in reversed(values):
                    self.values.insert(start_index, self.format(key, new_value))
            else:
                self.values.extend([self.format(key, value) for value in values])

        def format(self, key: str, value: str) -> str:
            """
            Formats key and value into an entry.

            E.g.
                'key = value\n'
            """
            key = key.strip()
            value = value.strip()
            return f"{key} = {value}" + self.ini.linesep

    def __init__(self, file: Optional[TextIO] = None, *, linesep: str = os.linesep):
        # Map from section name (None = the implicit default/prelude section)
        # to all _Section objects with that name, in file order.
        self.section_map: Dict[Optional[str], List[INI._Section]] = defaultdict(list)
        # All sections in file order, used to reproduce the file in dump().
        self.sections: List[INI._Section] = []
        self.linesep = linesep
        if file is not None:
            # _add_section raises EOFError when the file is exhausted.
            try:
                while True:
                    self._add_section(file)
            except EOFError:
                pass

    def _add_section(self, file: TextIO):
        """Read one section (up to the next header or EOF) from *file*."""
        section_name = None
        values: List[str] = []

        def add():
            # Register the accumulated lines as a new section.
            section = INI._Section(self, values)
            self.section_map[section_name].append(section)
            self.sections.append(section)

        while True:
            # Remember the position so a following header can be un-read.
            previous_position = file.tell()
            line = file.readline()
            if not line:
                # EOF: finish the current section and stop the caller's loop.
                add()
                raise EOFError
            if line.strip().startswith("[") and not values:
                # Header of this section (first line seen).
                match = re.match(r"\s*\[(.*)\]", line)
                if not match:
                    raise RuntimeError(
                        f"Line {line} should contain a section but it does not"
                    )
                section_name = match.groups()[0]
            if line.strip().startswith("[") and values:
                # Rewind to previous position so that the header
                # can be read by the next section
                file.seek(previous_position)
                break
            values.append(line)
        add()

    def __setitem__(
        self, section_key: Tuple[Optional[str], str], value: Union[str, List[str]]
    ):
        """Set ``ini[section, key] = value`` (value may be a list)."""
        section, key = section_key
        found = False
        if section in self.section_map:
            # Add key to an existing section
            for tmp_section in self.section_map[section]:
                if key in tmp_section:
                    # If the key previously existed, the new value will be in the first location of that key
                    # Once the key has been found, remove matching entries from all other sections
                    if found:
                        del tmp_section[key]
                    else:
                        tmp_section[key] = value
                        found = True
            if not found:
                # If the key did not previously exist, it will be appended to the end of the last section
                self.section_map[section][-1][key] = value
        else:
            # Add new section and insert key
            new_section = INI._Section(self, [f"[{section}]" + self.linesep])
            new_section[key] = value
            self.section_map[section].append(new_section)
            self.sections.append(new_section)

    # Note: this always yields the default section (None)
    def __iter__(self):
        yield from self.section_map

    def keys(self, section: Optional[str]) -> Generator[str, None, None]:
        """Yields the keys for the given section"""
        for tmp_section in self.section_map[section]:
            yield from tmp_section

    def __getitem__(
        self, section_key: Tuple[Optional[str], str]
    ) -> Union[None, str, List[str]]:
        """Return the value(s) for ``ini[section, key]``.

        Returns a single string when the key occurs once, a list when it
        occurs multiple times (across duplicate sections), or None when
        absent.
        """
        section, key = section_key
        results: List[str] = []
        for tmp_section in self.section_map[section]:
            if key in tmp_section:
                results.extend(tmp_section[key])
        if len(results) == 1:
            return results[0]
        if results:
            return results
        return None

    def dump(self) -> str:
        """Serialize the file back to a string, preserving formatting."""
        values = []
        for section in self.sections:
            values.extend(section.values)
        return "".join(
            [
                # Make sure each line ends with a line separator
                # E.g. in case original file did not end with one
                (value if value.endswith(self.linesep) else value + self.linesep)
                for value in values
            ]
        )
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        self.mean = sum(self.data) / len(self.data)
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction: divide by n - 1 for a sample, n for a
        # population. Requires at least two data points in sample mode.
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        self.stdev = math.sqrt(sum((d - mean) ** 2 for d in self.data) / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2 * math.pi))) * math.exp(
            -0.5 * ((x - self.mean) / self.stdev) ** 2
        )

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        mu = self.mean
        sigma = self.stdev

        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: this previously re-labelled axes[0], leaving the bottom
        # subplot's y-axis unlabelled.
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances add for independent Gaussians, so the combined stdev is
        # the root of the sum of squares.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
[](https://github.com/rouskinlab/rouskinhf/actions/workflows/CI.yml)
[](https://github.com/rouskinlab/rouskinhf/actions/workflows/release.yml)


# Download your RNA data from HuggingFace with rouskinhf!
A repo to manipulate the data for our RNA structure prediction model. This repo allows you to:
- pull datasets from the Rouskinlab's HuggingFace
- create datasets from local files and push them to HuggingFace, from the formats:
- `.fasta`
- `.ct`
- `.json` (DREEM output format)
- `.json` (Rouskinlab's huggingface format)
## Important notes
- Sequences with bases different than `A`, `C`, `G`, `T`, `U`, `N`, `a`, `c`, `g`, `t`, `u`, `n` are not supported. The data will be filtered out.
## Dependencies
- [RNAstructure](https://rna.urmc.rochester.edu/RNAstructure.html) (also available on [Rouskinlab GitHub](https://github.com/rouskinlab/RNAstructure)).
## Push a new release to Pypi
1. Edit version to `vx.y.z` in `pyproject.toml`. Then run in a terminal `git add . && git commit -m 'vx.y.z' && git push`.
2. Create and push a git tag `vx.y.z` by running in a terminal `git tag 'vx.y.z' && git push --tag`.
3. Create a release for the tag `vx.y.z` on Github Release.
4. Make sure that the Github Action `Publish distributions 📦 to PyPI` passed on Github Actions.
## Installation
### Get a HuggingFace token
Go to [HuggingFace](https://huggingface.co/) and create an account. Then go to your profile and copy your token ([huggingface.co/settings/tokens](https://huggingface.co/settings/tokens)).
### Create an environment file
Open a terminal and type:
```bash
nano env
```
Copy paste the following content, and change the values to your own:
```bash
export HUGGINGFACE_TOKEN="your token here" # you must change this to your HuggingFace token
export DATA_FOLDER="data/datafolders" # where the datafolder are stored by default, change it if you want to store it somewhere else
export DATA_FOLDER_TESTING="data/input_files_for_testing" # Don't touch this
export RNASTRUCTURE_PATH="/Users/ymdt/src/RNAstructure/exe" # Change this to the path of your RNAstructure executable
export RNASTRUCTURE_TEMP_FOLDER="temp" # You can change this to the path of your RNAstructure temp folder
```
Then save the file and exit nano.
### Source the environment
```bash
source env
```
### Install the package with pip
```bash
pip install rouskinhf
```
## Tutorials
### Authentify your machine to HuggingFace
See the [tutorial](https://github.com/rouskinlab/rouskinhf/blob/main/tutorials/huggingface.ipynb).
### Download a datafolder from HuggingFace
See the [tutorial](https://github.com/rouskinlab/rouskinhf/blob/main/tutorials/use_for_models.ipynb).
### Create a datafolder from local files and push it to HuggingFace
See the [tutorial](https://github.com/rouskinlab/rouskinhf/blob/main/tutorials/create_push_pull.ipynb).
## About
### Sourcing the environment and keeping your environment variable secret
The variables defined in the `env` file are required by `rouskinhf`. Make sure that before you use `rouskinhf`, you run in a terminal:
```bash
source env
```
or, in a Jupyter notebook:
```python
!pip install python-dotenv
%load_ext dotenv
%dotenv env
```
or, in a python script or Jupyter notebook:
```python
from rouskinhf import setup_env
setup_env(
HUGGINGFACE_TOKEN="your token here",
DATA_FOLDER="data/datafolders",
...
)
```
The point of using environment variables is to ensure the privacy of your huggingface token. Make sure to add your `env` file to your `.gitignore`, so your HuggingFace token doesn't get pushed to any public repository.
### Import data with ``import_dataset``
This repo provides a function ``import_dataset``, which allows you to pull a dataset from HuggingFace and store it locally. If the data is already stored locally, it will be loaded from the local folder. The type of data available is the DMS signal and the structure, under the shape of paired bases tuples. The function has the following signature:
```python
def import_dataset(name:str, data:str, force_download:bool=False)->np.ndarray:
"""Finds the dataset with the given name for the given type of data.
Parameters
----------
name : str
Name of the dataset to find.
data : str
Name of the type of data to find the dataset for (structure or DMS).
force_download : bool
Whether to force download the dataset from HuggingFace Hub. Defaults to False.
Returns
-------
ndarray
The dataset with the given name for the given type of data.
Example
-------
>>> import_dataset(name='for_testing', data='structure').keys()
dict_keys(['references', 'sequences', 'structure'])
>>> import_dataset(name='for_testing', data='DMS').keys()
dict_keys(['references', 'sequences', 'DMS'])
>>> import_dataset(name='for_testing', data='structure', force_download=True).keys()
dict_keys(['references', 'sequences', 'structure'])
>>> import_dataset(name='for_testing', data='DMS', force_download=True).keys()
dict_keys(['references', 'sequences', 'DMS'])
```
### FYI, the datafolder object
The datafolder object is a wrapper around your local folder and HuggingFace API, to keep a consistent datastructure across your datasets. It contains multiple methods to create datasets from various input formats, store the data and metadata in a systematic way, and push / pull from HuggingFace.
On HuggingFace, the datafolder stores the data under the following structure:
```bash
HUGGINGFACE DATAFOLDER
- [datafolder name]
- source
- whichever file(s) you used to create the dataset (fasta, set of CTs, etc.).
- data.json # the data under a human readable format.
- info.json # the metadata of the dataset. This file indicates how we got the DMS signal and the structures (directly from the source or from a prediction).
- README.md # the metadata of the dataset in a human readable format.
```
Locally, we have the same structure with the addition of .npy files which contain the data in a machine readable format. Each .npy file contains a numpy array of the data, and the name of the file is the name of the corresponding key in the data.json file. The source file won’t be downloaded by default. Hence, the local structure is:
```bash
LOCAL DATAFOLDER
- [datafolder name]
...
- README.md # the metadata of the dataset in a human readable format
- references.npy
- sequences.npy
- base_pairs.npy
- dms.npy
```
| /rouskinhf-0.2.6.tar.gz/rouskinhf-0.2.6/README.md | 0.740456 | 0.936168 | README.md | pypi |
from __future__ import annotations
from typing import Any, Sequence
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score
class ClusteringHelper:
    """
    A helper class to perform clustering of items
    based on a pre-computed distance matrix.

    The clustering is the agglomerative clustering
    algorithm implemented in Scikit-learn.

    :param distances: a square, symmetric matrix of pre-computed pairwise distances
    """
    def __init__(self, distances: np.ndarray) -> None:
        self._distances = distances
        # The fitted scikit-learn model; set by the clustering methods below
        self._model = None
        # Silhouette score for each cluster count tried by `optimize`
        self.optimization_scores: Sequence[float] = []
    @property
    def labels(self) -> np.ndarray:
        """Return the cluster labels"""
        if not self._model:
            raise ValueError("Labels are not defined yet. Run clustering first")
        return self._model.labels_
    def fixed_clustering(self, n_clusters: int, **kwargs: Any) -> np.ndarray:
        """
        Make a fixed number of clusters.
        Additional arguments to the clustering algorithm
        can be passed in as key-word arguments

        :param n_clusters: the desired number of clusters
        :return: the cluster index for each observation
        """
        # NOTE(review): this mutates the caller's kwargs dict in place
        if "linkage" not in kwargs:
            kwargs["linkage"] = "single"
        # NOTE(review): `affinity` was renamed `metric` in scikit-learn 1.2 and
        # removed in 1.4 - confirm against the pinned scikit-learn version
        kwargs["affinity"] = "precomputed"
        kwargs["n_clusters"] = n_clusters
        model = AgglomerativeClustering(**kwargs)
        model.fit(self._distances)
        self._model = model
        return model.labels_
    def linkage_matrix(self, **kwargs: Any) -> np.ndarray:
        """
        Compute the linkage matrix.
        Additional arguments to the clustering algorithm
        can be passed in as key-word arguments

        :return: the linkage matrix
        """
        if "linkage" not in kwargs:
            kwargs["linkage"] = "single"
        kwargs["affinity"] = "precomputed"
        # A zero distance threshold with no fixed cluster count makes the
        # model build the full merge tree
        kwargs["n_clusters"] = None
        kwargs["distance_threshold"] = 0.0
        model = AgglomerativeClustering(**kwargs)
        model.fit(self._distances)
        self._model = model
        # NOTE(review): scipy-style linkage matrices carry observation counts
        # in the last column; zeros are used here - confirm downstream usage
        counts = np.zeros(len(model.distances_))
        matrix = np.column_stack([model.children_, model.distances_, counts])
        return matrix.astype(float)
    def optimize(self, max_clusters: int = 5, **kwargs: Any) -> np.ndarray:
        """
        Optimize the number of clusters based on the Silhouette metric.
        Additional arguments to the clustering algorithm
        can be passed in as key-word arguments

        :param max_clusters: the maximum number of clusters to consider
        :return: the cluster index for each observation
        """
        max_score = None
        best_size = None
        self.optimization_scores = []
        # Silhouette needs at least 2 clusters and fewer clusters than samples
        for n_clusters in range(2, min(max_clusters + 1, len(self._distances))):
            clusters = self.fixed_clustering(n_clusters, **kwargs)
            score = silhouette_score(self._distances, clusters, metric="precomputed")
            self.optimization_scores.append(score)
            if best_size is None or score > max_score:
                max_score = score
                best_size = n_clusters
        # Fall back to the maximum size if no candidate could be evaluated
        if best_size is None:
            best_size = max_clusters
        return self.fixed_clustering(best_size, **kwargs)
    @staticmethod
    def cluster(distances: np.ndarray, n_clusters: int, **kwargs: Any) -> np.ndarray:
        """
        Cluster items based on a pre-computed distance matrix using a hierarchical clustering.

        If `n_clusters` is less than 2, the cluster count is optimized instead,
        up to `max_clusters` (popped from the keyword arguments, default 5).

        :param distances: the distance matrix
        :param n_clusters: the desired number of clusters
        :return: the cluster index for each observation
        """
        helper = ClusteringHelper(distances)
        max_clusters = kwargs.pop("max_clusters", 5)
        if n_clusters < 2:
            return helper.optimize(max_clusters=max_clusters, **kwargs)
        return helper.fixed_clustering(n_clusters, **kwargs)
from __future__ import annotations
from typing import List
import random
from enum import Enum
from operator import itemgetter
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from apted import Config as BaseAptedConfig
from scipy.spatial.distance import jaccard as jaccard_dist
from route_distances.utils.type_utils import StrDict, Callable
class TreeContent(str, Enum):
    """Possibilities for distance calculations on reaction trees"""
    # Compare only the molecule nodes of the routes
    MOLECULES = "molecules"
    # Compare only the reaction nodes of the routes
    REACTIONS = "reactions"
    # Compare both molecule and reaction nodes
    BOTH = "both"
class AptedConfig(BaseAptedConfig):
    """
    Configuration for the APTED tree-edit-distance computation.

    Defines how the substitution (rename) cost between two nodes is
    computed, and in which order the children of a node are enumerated.

    :param randomize: if True, the children will be shuffled
    :param sort_children: if True, the children will be sorted
    :param dist_func: the distance function used for renaming nodes, Jaccard by default
    """
    def __init__(
        self,
        randomize: bool = False,
        sort_children: bool = False,
        dist_func: Callable[[np.ndarray, np.ndarray], float] = None,
    ) -> None:
        super().__init__()
        self._randomize = randomize
        self._sort_children = sort_children
        self._dist_func = dist_func or jaccard_dist
    def rename(self, node1: StrDict, node2: StrDict) -> float:
        # Nodes of different types never match: maximum substitution cost
        if node1["type"] != node2["type"]:
            return 1
        # Otherwise the cost is the fingerprint distance between the nodes
        return self._dist_func(node1["fingerprint"], node2["fingerprint"])
    def children(self, node: StrDict) -> List[StrDict]:
        child_nodes = node["children"]
        if self._sort_children:
            return sorted(child_nodes, key=itemgetter("sort_key"))
        if self._randomize:
            # Shuffle a copy so the wrapped tree is left untouched
            shuffled = list(child_nodes)
            random.shuffle(shuffled)
            return shuffled
        return child_nodes
class StandardFingerprintFactory:
    """
    Calculate Morgan fingerprint for molecules, and difference fingerprints for reactions

    :param radius: the radius of the fingerprint
    :param nbits: the fingerprint lengths
    """
    def __init__(self, radius: int = 2, nbits: int = 2048) -> None:
        # Stored as a tuple so it can be splatted into the RDKit call
        self._fp_params = (radius, nbits)
    def __call__(self, tree: StrDict, parent: StrDict = None) -> None:
        """
        Attach fingerprints (and sort keys) in place to the given tree.

        :param tree: the (sub-)tree to process
        :param parent: the parent node, required for reaction nodes
        :raises ValueError: if `tree` is a reaction node and no parent is given
        """
        if tree["type"] == "reaction":
            if parent is None:
                raise ValueError(
                    "Must specify parent when making Morgan fingerprints for reaction nodes"
                )
            self._add_rxn_fingerprint(tree, parent)
        else:
            self._add_mol_fingerprints(tree)
    def _add_mol_fingerprints(self, tree: StrDict) -> None:
        # Compute the Morgan fingerprint only if not already cached on the node
        if "fingerprint" not in tree:
            mol = Chem.MolFromSmiles(tree["smiles"])
            rd_fp = AllChem.GetMorganFingerprintAsBitVect(mol, *self._fp_params)
            tree["fingerprint"] = np.zeros((1,), dtype=np.int8)
            DataStructs.ConvertToNumpyArray(rd_fp, tree["fingerprint"])
        # String of fingerprint bits used for deterministic child ordering
        tree["sort_key"] = "".join(f"{digit}" for digit in tree["fingerprint"])
        if "children" not in tree:
            tree["children"] = []
        # Recurse into grandchildren: children are reaction nodes,
        # grandchildren are molecule nodes again
        for child in tree["children"]:
            for grandchild in child["children"]:
                self._add_mol_fingerprints(grandchild)
    def _add_rxn_fingerprint(self, node: StrDict, parent: StrDict) -> None:
        if "fingerprint" not in node:
            # Difference fingerprint: the product (parent) minus all reactants
            node["fingerprint"] = parent["fingerprint"].copy()
            for reactant in node["children"]:
                node["fingerprint"] -= reactant["fingerprint"]
        node["sort_key"] = "".join(f"{digit}" for digit in node["fingerprint"])
        # Recurse into grandchildren, which are reaction nodes again
        for child in node["children"]:
            for grandchild in child.get("children", []):
                self._add_rxn_fingerprint(grandchild, child)
from __future__ import annotations
import itertools
import math
from copy import deepcopy
from typing import List, Union, Iterable, Tuple, Callable, Optional
from logging import getLogger
import numpy as np
from apted import APTED as Apted
from route_distances.ted.utils import (
TreeContent,
AptedConfig,
StandardFingerprintFactory,
)
from route_distances.validation import validate_dict
from route_distances.utils.type_utils import StrDict
# Type alias for an iterator over computed distances
_FloatIterator = Iterable[float]
class ReactionTreeWrapper:
    """
    Wrapper for a reaction tree that can calculate distances between
    trees.

    :param reaction_tree: the reaction tree to wrap
    :param content: the content of the route to consider in the distance calculation
    :param exhaustive_limit: if the number of possible ordered trees are below this limit create them all
    :param fp_factory: the factory of the fingerprint, Morgan fingerprint for molecules and reactions by default
    :param dist_func: the distance function to use when renaming nodes
    """
    # Pre-computed permutations of child indices for nodes with 1-7 children.
    # NOTE(review): a node with more than 7 children would raise a KeyError in
    # `_inspect_tree` - presumably routes never branch that widely; confirm.
    _index_permutations = {
        n: list(itertools.permutations(range(n), n)) for n in range(1, 8)
    }
    def __init__(
        self,
        reaction_tree: StrDict,
        content: Union[str, TreeContent] = TreeContent.MOLECULES,
        exhaustive_limit: int = 20,
        fp_factory: Callable[[StrDict, Optional[StrDict]], None] = None,
        dist_func: Callable[[np.ndarray, np.ndarray], float] = None,
    ) -> None:
        validate_dict(reaction_tree)
        # A tree without children is a single molecule node
        single_node_tree = not bool(reaction_tree.get("children", []))
        if single_node_tree and content == TreeContent.REACTIONS:
            raise ValueError(
                "Cannot create wrapping with content = reactions for a tree without reactions"
            )
        self._logger = getLogger("route_distances")
        # Will convert string input automatically
        self._content = TreeContent(content)
        self._base_tree = deepcopy(reaction_tree)
        self._fp_factory = fp_factory or StandardFingerprintFactory()
        # Attach fingerprints to the molecule levels of the tree...
        self._add_fingerprints(self._base_tree)
        # ...and, when reactions are compared as well, to the reaction levels
        if self._content != TreeContent.MOLECULES and not single_node_tree:
            self._add_fingerprints(self._base_tree["children"][0], self._base_tree)
        # Prune the tree so that it only contains the node type(s) compared
        if self._content == TreeContent.MOLECULES:
            self._base_tree = self._remove_children_nodes(self._base_tree)
        elif not single_node_tree and self._content == TreeContent.REACTIONS:
            self._base_tree = self._remove_children_nodes(
                self._base_tree["children"][0]
            )
        self._trees = []
        self._tree_count, self._node_index_list = self._inspect_tree()
        # Only enumerate all ordered trees when their count is small enough
        self._enumeration = self._tree_count <= exhaustive_limit
        if self._enumeration:
            self._create_all_trees()
        else:
            self._trees.append(self._base_tree)
        self._dist_func = dist_func
    @property
    def info(self) -> StrDict:
        """Return a dictionary with internal information about the wrapper"""
        return {
            "content": self._content,
            "tree count": self._tree_count,
            "enumeration": self._enumeration,
        }
    @property
    def first_tree(self) -> StrDict:
        """Return the first created ordered tree"""
        return self._trees[0]
    @property
    def trees(self) -> List[StrDict]:
        """Return a list of all created ordered trees"""
        return self._trees
    def distance_iter(
        self, other: "ReactionTreeWrapper", exhaustive_limit: int = 20
    ) -> _FloatIterator:
        """
        Iterate over all distances computed between this and another tree

        There are three possible enumeration of distances possible dependent
        on the number of possible ordered trees for the two routes that are compared

        * If the product of the number of possible ordered trees for both routes are
          below `exhaustive_limit` compute the distance between all pair of trees
        * If both self and other has been fully enumerated (i.e. all ordered trees has been created)
          compute the distances between all trees of the route with the most ordered trees and
          the first tree of the other route
        * Compute `exhaustive_limit` number of distances by shuffling the child order for
          each of the routes.

        The rules are applied top-to-bottom.

        :param other: another tree to calculate distance to
        :param exhaustive_limit: used to determine what type of enumeration to do
        :yield: the next computed distance between self and other
        """
        if self._tree_count * other.info["tree count"] < exhaustive_limit:
            yield from self._distance_iter_exhaustive(other)
        elif self._enumeration or other.info["enumeration"]:
            yield from self._distance_iter_semi_exhaustive(other)
        else:
            yield from self._distance_iter_random(other, exhaustive_limit)
    def distance_to(
        self, other: "ReactionTreeWrapper", exhaustive_limit: int = 20
    ) -> float:
        """
        Calculate the minimum distance from this route to another route

        Enumerate the distances using `distance_iter`.

        :param other: another tree to calculate distance to
        :param exhaustive_limit: used to determine what type of enumeration to do
        :return: the minimum distance
        """
        # Sentinel larger than any realistic tree-edit distance
        min_dist = 1e6
        min_iter = -1
        for iteration, distance in enumerate(
            self.distance_iter(other, exhaustive_limit)
        ):
            if distance < min_dist:
                min_iter = iteration
                min_dist = distance
        self._logger.debug(f"Found minimum after {min_iter} iterations")
        return min_dist
    def distance_to_with_sorting(self, other: "ReactionTreeWrapper") -> float:
        """
        Compute the distance to another tree, by simpling sorting the children
        of both trees. This is not guaranteed to return the minimum distance.

        :param other: another tree to calculate distance to
        :return: the distance
        """
        config = AptedConfig(sort_children=True, dist_func=self._dist_func)
        return Apted(self.first_tree, other.first_tree, config).compute_edit_distance()
    def _add_fingerprints(self, tree: StrDict, parent: StrDict = None) -> None:
        """Attach fingerprints and sort keys in place, every other tree level"""
        if "fingerprint" not in tree:
            try:
                self._fp_factory(tree, parent)
            except ValueError:
                pass
        # The factory may have been unable to fingerprint this node -
        # fall back to an empty fingerprint
        if "fingerprint" not in tree:
            tree["fingerprint"] = []
        tree["sort_key"] = "".join(f"{digit}" for digit in tree["fingerprint"])
        if "children" not in tree:
            tree["children"] = []
        # Recurse into grandchildren: they hold the same node type as `tree`
        for child in tree["children"]:
            for grandchild in child["children"]:
                self._add_fingerprints(grandchild, child)
    def _create_all_trees(self) -> None:
        """Enumerate every ordered tree obtainable by permuting child order"""
        self._trees = []
        # Iterate over all possible combinations of child order
        for order_list in itertools.product(*self._node_index_list):
            self._trees.append(
                self._create_tree_recursively(self._base_tree, list(order_list))
            )
    def _create_tree_recursively(
        self,
        node: StrDict,
        order_list: List[List[int]],
    ) -> StrDict:
        """Rebuild the tree with children re-ordered according to `order_list`"""
        new_tree = self._make_base_copy(node)
        children = node.get("children", [])
        if children:
            # Consume the next permutation; one entry per node with children
            child_order = order_list.pop(0)
            assert len(child_order) == len(children)
            new_children = [
                self._create_tree_recursively(child, order_list) for child in children
            ]
            new_tree["children"] = [new_children[idx] for idx in child_order]
        return new_tree
    def _distance_iter_exhaustive(self, other: "ReactionTreeWrapper") -> _FloatIterator:
        """Yield the distance for every pair of ordered trees"""
        self._logger.debug(
            f"APTED: Exhaustive search. {len(self.trees)} {len(other.trees)}"
        )
        config = AptedConfig(randomize=False, dist_func=self._dist_func)
        for tree1, tree2 in itertools.product(self.trees, other.trees):
            yield Apted(tree1, tree2, config).compute_edit_distance()
    def _distance_iter_random(
        self, other: "ReactionTreeWrapper", ntimes: int
    ) -> _FloatIterator:
        """Yield one deterministic distance, then `ntimes` with shuffled children"""
        self._logger.debug(
            f"APTED: Heuristic search. {len(self.trees)} {len(other.trees)}"
        )
        config = AptedConfig(randomize=False, dist_func=self._dist_func)
        yield Apted(self.first_tree, other.first_tree, config).compute_edit_distance()
        config = AptedConfig(randomize=True, dist_func=self._dist_func)
        for _ in range(ntimes):
            yield Apted(
                self.first_tree, other.first_tree, config
            ).compute_edit_distance()
    def _distance_iter_semi_exhaustive(
        self, other: "ReactionTreeWrapper"
    ) -> _FloatIterator:
        """Yield distances between all trees of the larger enumeration and
        the first tree of the smaller one"""
        self._logger.debug(
            f"APTED: Semi-exhaustive search. {len(self.trees)} {len(other.trees)}"
        )
        # Iterate over the route with the fewest ordered trees
        if len(self.trees) < len(other.trees):
            first_wrapper = self
            second_wrapper = other
        else:
            first_wrapper = other
            second_wrapper = self
        config = AptedConfig(randomize=False, dist_func=self._dist_func)
        for tree1 in first_wrapper.trees:
            yield Apted(
                tree1, second_wrapper.first_tree, config
            ).compute_edit_distance()
    def _inspect_tree(self) -> Tuple[int, List[List[int]]]:
        """
        Find the number of children for each node in the tree, which
        will be used to compute the number of possible combinations of child orders

        Also accumulate the possible child orders for the nodes.
        """
        def _recurse_tree(node):
            children = node.get("children", [])
            nchildren = len(children)
            # n! orderings of this node's children
            permutations.append(math.factorial(nchildren))
            if nchildren > 0:
                node_index_list.append(list(self._index_permutations[nchildren]))
            for child in children:
                _recurse_tree(child)
        permutations: List[int] = []
        node_index_list: List[List[int]] = []
        _recurse_tree(self._base_tree)
        if not permutations:
            return 0, []
        # Total ordered-tree count is the product over all nodes
        return int(np.prod(permutations)), node_index_list
    @staticmethod
    def _make_base_copy(node: StrDict) -> StrDict:
        """Return a shallow copy of the node's payload with no children attached"""
        return {
            "type": node["type"],
            "smiles": node.get("smiles", ""),
            "metadata": node.get("metadata"),
            "fingerprint": node["fingerprint"],
            "sort_key": node["sort_key"],
            "children": [],
        }
    @staticmethod
    def _remove_children_nodes(tree: StrDict) -> StrDict:
        """Collapse the tree so that grandchildren (same node type) become children"""
        new_tree = ReactionTreeWrapper._make_base_copy(tree)
        if tree.get("children"):
            new_tree["children"] = []
            for child in tree["children"]:
                new_tree["children"].extend(
                    [
                        ReactionTreeWrapper._remove_children_nodes(grandchild)
                        for grandchild in child.get("children", [])
                    ]
                )
        return new_tree
from typing import Dict, Any, Set, List, Tuple
import numpy as np
from route_distances.utils.type_utils import StrDict
def calc_depth(tree_dict: StrDict, depth: int = 0) -> int:
    """
    Calculate the depth of a route, recursively

    :param tree_dict: the route
    :param depth: the current depth, don't specify for route
    :return: the maximum depth of the tree
    """
    child_nodes = tree_dict.get("children", [])
    # A leaf node terminates the recursion at the current depth
    if not child_nodes:
        return depth
    return max(calc_depth(node, depth + 1) for node in child_nodes)
def calc_llr(tree_dict: StrDict) -> int:
    """
    Calculate the longest linear route for a synthetic route

    :param tree_dict: the route
    :return: the length of the longest linear sequence
    """
    # Each reaction step spans two tree levels (molecule + reaction node)
    depth = calc_depth(tree_dict)
    return depth // 2
def extract_leaves(
    tree_dict: StrDict,
) -> Set[str]:
    """
    Extract a set with the SMILES of all the leaf nodes, i.e.
    starting material

    :param tree_dict: the route
    :return: a set of SMILE strings
    """
    collected: Set[str] = set()
    # Depth-first traversal with an explicit stack; order does not matter
    # because the result is a set
    stack = [tree_dict]
    while stack:
        node = stack.pop()
        child_nodes = node.get("children", [])
        if child_nodes:
            stack.extend(child_nodes)
        else:
            collected.add(node["smiles"])
    return collected
def is_solved(route: StrDict) -> bool:
    """
    Find if a route is solved, i.e. if all starting material
    is in stock.

    To be accurate, each molecule node need to have an extra
    boolean property called `in_stock`; a missing property is
    treated as in stock.

    :param route: the route to analyze
    :return: True if all leaf molecules are in stock
    """
    def _all_leaves_in_stock(tree_dict: StrDict) -> bool:
        # A leaf (no children) is solved when its `in_stock` flag is set
        children = tree_dict.get("children", [])
        if not children:
            return bool(tree_dict.get("in_stock", True))
        return all(_all_leaves_in_stock(child) for child in children)

    # The previous implementation signalled "not in stock" by raising an
    # exception; a plain boolean recursion expresses the same check directly.
    return _all_leaves_in_stock(route)
def route_score(
    tree_dict: StrDict,
    mol_costs: Dict[bool, float] = None,
    average_yield=0.8,
    reaction_cost=1.0,
) -> float:
    """
    Calculate the score of route using the method from
    (Badowski et al. Chem Sci. 2019, 10, 4640).

    The reaction cost is constant and the yield is an average yield.
    The starting materials are assigned a cost based on whether they are in
    stock or not. By default starting material in stock is assigned a
    cost of 1 and starting material not in stock is assigned a cost of 10.

    To be accurate, each molecule node need to have an extra
    boolean property called `in_stock`.

    :param tree_dict: the route to analyze
    :param mol_costs: the starting material cost
    :param average_yield: the average yield, defaults to 0.8
    :param reaction_cost: the reaction cost, defaults to 1.0
    :return: the computed cost
    """
    mol_cost = mol_costs or {True: 1, False: 10}
    reactions = tree_dict.get("children", [])
    # A leaf molecule contributes its starting-material cost
    if not reactions:
        return mol_cost[tree_dict.get("in_stock", True)]
    # Bug fix: the recursive call previously dropped the custom parameters,
    # so non-default `mol_costs`, `average_yield` and `reaction_cost` were
    # only applied at the top level. They are now propagated down the tree.
    child_sum = sum(
        1
        / average_yield
        * route_score(child, mol_cost, average_yield, reaction_cost)
        for child in reactions[0]["children"]
    )
    return reaction_cost + child_sum
def route_scorer(routes: List[StrDict]) -> Tuple[List[StrDict], List[float]]:
    """
    Score and sort a list of routes by increasing cost.

    :param routes: the routes to score
    :return: the sorted routes and their costs
    """
    costs = np.asarray([route_score(route) for route in routes])
    # Indices that would sort the routes from cheapest to most expensive
    order = np.argsort(costs)
    ranked_routes = [routes[index] for index in order]
    return ranked_routes, costs[order].tolist()
def route_ranks(scores: List[float]) -> List[int]:
    """
    Compute the rank of route scores. Rank starts at 1

    :param scores: the route scores, assumed sorted
    :return: a list of ranks for each route
    """
    ranks = [1]
    for previous, current in zip(scores, scores[1:]):
        # Scores closer than 1e-8 are considered ties and share the rank
        tied = abs(current - previous) < 1e-8
        ranks.append(ranks[-1] if tied else ranks[-1] + 1)
    return ranks
import argparse
import torch
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.loggers import TensorBoardLogger, CSVLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import route_distances.lstm.defaults as defaults
from route_distances.lstm.data import TreeDataModule
from route_distances.lstm.models import RouteDistanceModel
from route_distances.lstm.utils import accumulate_stats
def _get_args():
    """
    Parse the command-line arguments of the training tool.

    :return: the parsed arguments namespace
    """
    parser = argparse.ArgumentParser(
        "Tool to train an LSTM-based model for route distances"
    )
    parser.add_argument("--trees", required=True)
    parser.add_argument("--epochs", type=int, default=defaults.EPOCHS)
    parser.add_argument("--batch_size", type=int, default=defaults.BATCH_SIZE)
    parser.add_argument("--lr", type=float, default=defaults.LEARNING_RATE)
    parser.add_argument("--weight_decay", type=float, default=defaults.WEIGHT_DECAY)
    parser.add_argument("--dropout", type=float, default=defaults.DROPOUT_PROB)
    parser.add_argument("--fp_size", type=int, default=defaults.FP_SIZE)
    parser.add_argument("--lstm_size", type=int, default=defaults.LSTM_SIZE)
    parser.add_argument("--split_part", type=float, default=defaults.SPLIT_PART)
    # Bug fix: the seed was parsed as float, but `TreeDataModule` declares
    # `split_seed: int` and RNG seeding requires an integer
    parser.add_argument("--split_seed", type=int, default=defaults.SPLIT_SEED)
    return parser.parse_args()
def main(seed=None) -> None:
    """
    Entry-point for CLI tool.

    Parses the command-line arguments, assembles the data module and the
    model, trains the model and finally evaluates it on the test split.

    :param seed: if given, seed all RNGs and train deterministically
    """
    args = _get_args()
    print(str(args).replace("Namespace", "Arguments used = "))
    if seed is not None:
        seed_everything(seed)
    data = TreeDataModule(
        args.trees,
        batch_size=args.batch_size,
        split_part=args.split_part,
        split_seed=args.split_seed,
    )
    kwargs = {
        "fp_size": args.fp_size,
        "lstm_size": args.lstm_size,
        "dropout_prob": args.dropout,
        "learning_rate": args.lr,
        "weight_decay": args.weight_decay,
    }
    model = RouteDistanceModel(**kwargs)
    # Train on a single GPU when CUDA is available, otherwise on CPU
    gpus = int(torch.cuda.is_available())
    # Fix: logger names were needless f-strings without placeholders
    tb_logger = TensorBoardLogger("tb_logs", name="route-dist")
    csv_logger = CSVLogger("csv_logs", name="route-dist")
    checkpoint = ModelCheckpoint(monitor="val_monitor", save_last=True)
    trainer = Trainer(
        gpus=gpus,
        logger=[tb_logger, csv_logger],
        callbacks=[checkpoint],
        max_epochs=args.epochs,
        deterministic=seed is not None,
    )
    trainer.fit(model, datamodule=data)
    ret = trainer.test(datamodule=data)
    print("=== Test results === ")
    accum = accumulate_stats(ret)
    for key, value in accum.items():
        print(f"{key}: {value:0.4f}")
# Run the CLI entry-point when executed as a script
if __name__ == "__main__":
    main()
from __future__ import annotations
import argparse
import warnings
import time
import math
from typing import List
import pandas as pd
from tqdm import tqdm
import route_distances.lstm.defaults as defaults
from route_distances.route_distances import route_distances_calculator
from route_distances.clustering import ClusteringHelper
from route_distances.utils.type_utils import RouteDistancesCalculator
def _get_args() -> argparse.Namespace:
    """Build the command-line parser of the tool and parse the arguments."""
    parser = argparse.ArgumentParser(
        "Tool to calculate pairwise distances for AiZynthFinder output"
    )
    parser.add_argument("--files", nargs="+", required=True)
    # Model hyper-parameters for the LSTM-based calculator
    for size_flag, size_default in (
        ("--fp_size", defaults.FP_SIZE),
        ("--lstm_size", defaults.LSTM_SIZE),
    ):
        parser.add_argument(size_flag, type=int, default=size_default)
    parser.add_argument("--model", required=True)
    parser.add_argument("--only_clustering", action="store_true", default=False)
    # Optional clustering controls; None disables/uses defaults downstream
    for optional_flag in ("--nclusters", "--min_density"):
        parser.add_argument(optional_flag, type=int, default=None)
    parser.add_argument("--output", default="finder_output_dist.hdf5")
    return parser.parse_args()
def _merge_inputs(filenames: List[str]) -> pd.DataFrame:
    """
    Read the "table" key of each HDF5 file and concatenate the tables
    into a single dataframe.

    :param filenames: the paths of the HDF5 files to merge
    :return: the concatenated dataframe, or None if no files were given
    """
    frames = []
    for filename in filenames:
        temp_data = pd.read_hdf(filename, "table")
        assert isinstance(temp_data, pd.DataFrame)
        frames.append(temp_data)
    if not frames:
        return None
    # A single input is returned as-is, matching the previous behaviour
    if len(frames) == 1:
        return frames[0]
    # Perf fix: concatenate once instead of repeatedly inside the loop,
    # which copied the accumulated data for every file
    return pd.concat(frames)
def _calc_distances(row: pd.Series, calculator: RouteDistancesCalculator) -> pd.Series:
if len(row.trees) == 1:
return pd.Series({"distance_matrix": [[0.0]], "distances_time": 0})
time0 = time.perf_counter_ns()
distances = calculator(row.trees)
dict_ = {
"distance_matrix": distances.tolist(),
"distances_time": (time.perf_counter_ns() - time0) * 1e-9,
}
return pd.Series(dict_)
def _do_clustering(
row: pd.Series, nclusters: int, min_density: int = None
) -> pd.Series:
if row.distance_matrix == [[0.0]] or len(row.trees) < 3:
return pd.Series({"cluster_labels": [], "cluster_time": 0})
if min_density is None:
max_clusters = min(len(row.trees), 10)
else:
max_clusters = int(math.ceil(len(row.trees) / min_density))
time0 = time.perf_counter_ns()
labels = ClusteringHelper.cluster(
row.distance_matrix, nclusters, max_clusters=max_clusters
).tolist()
cluster_time = (time.perf_counter_ns() - time0) * 1e-9
return pd.Series({"cluster_labels": labels, "cluster_time": cluster_time})
def main() -> None:
    """Entry-point for CLI tool"""
    args = _get_args()
    # Register the pandas integration so `progress_apply` shows progress bars
    tqdm.pandas()
    data = _merge_inputs(args.files)
    # Select the distance calculator: none (clustering only), tree-edit
    # distance, or the LSTM-based model loaded from the given path
    if args.only_clustering:
        calculator = None
    elif args.model == "ted":
        calculator = route_distances_calculator("ted", content="both")
    else:
        calculator = route_distances_calculator(
            "lstm",
            model_path=args.model,
            fp_size=args.fp_size,
            lstm_size=args.lstm_size,
        )
    if not args.only_clustering:
        dist_data = data.progress_apply(_calc_distances, axis=1, calculator=calculator)
        data = data.assign(
            distance_matrix=dist_data.distance_matrix,
            distances_time=dist_data.distances_time,
        )
    # Clustering is only performed when a cluster count was requested
    if args.nclusters is not None:
        cluster_data = data.progress_apply(
            _do_clustering,
            axis=1,
            nclusters=args.nclusters,
            min_density=args.min_density,
        )
        data = data.assign(
            cluster_labels=cluster_data.cluster_labels,
            cluster_time=cluster_data.cluster_time,
        )
    with warnings.catch_warnings():  # This will suppress a PerformanceWarning
        warnings.simplefilter("ignore")
        data.to_hdf(args.output, "table")
# Run the CLI entry-point when executed as a script
if __name__ == "__main__":
    main()
import argparse
import pickle
import pandas as pd
import numpy as np
from tqdm import tqdm
import route_distances.lstm.defaults as defaults
from route_distances.lstm.features import preprocess_reaction_tree
def _get_args():
    """Build the command-line parser of the tool and parse the arguments."""
    arg_parser = argparse.ArgumentParser(
        "Tool to prepare output from AiZynthFinder for model training"
    )
    arg_parser.add_argument("--files", nargs="+", required=True)
    arg_parser.add_argument("--fp_size", type=int, default=defaults.FP_SIZE)
    # When set, only one of each symmetric pair (i, j)/(j, i) is kept
    arg_parser.add_argument("--use_reduced", action="store_true", default=False)
    arg_parser.add_argument("--output", required=True)
    return arg_parser.parse_args()
def _similarity(idx1, idx2, labels):
if len(labels) == 0 or (labels[idx1] == labels[idx2]):
return 1, 1
return -1, 0
def main():
    """Entry-point for CLI tool"""
    args = _get_args()
    # Running offset so that tree indices in `pairs` stay globally unique
    # across targets and files
    offset = 0
    tree_list = []
    pairs = []
    for filename in tqdm(args.files, desc="# of files processed: "):
        data = pd.read_hdf(filename, "table")
        for trees, distances, labels in zip(
            tqdm(data.trees.values, leave=False, desc="# of targets processed"),
            data.distance_matrix.values,
            data.cluster_labels.values,
        ):
            np_distances = np.asarray(distances)
            for i, tree1 in enumerate(trees):
                tree_list.append(preprocess_reaction_tree(tree1, args.fp_size))
                for j, _ in enumerate(trees):
                    # With --use_reduced, skip the symmetric duplicate (j, i)
                    if j < i and args.use_reduced:
                        continue
                    loss_target, pair_similarity = _similarity(i, j, labels)
                    pairs.append(
                        (
                            i + offset,
                            j + offset,
                            np_distances[i, j],
                            pair_similarity,
                            loss_target,
                        )
                    )
            offset += len(trees)
    print(f"Preprocessed {len(tree_list)} trees in {len(pairs)} pairs")
    # Persist the preprocessed trees and pair records for model training
    with open(args.output, "wb") as fileobj:
        pickle.dump(
            {
                "trees": tree_list,
                "pairs": pairs,
            },
            fileobj,
        )
# Run the CLI entry-point when executed as a script
if __name__ == "__main__":
    main()
import pickle
import random
import multiprocessing
from typing import List, Tuple, Set, Union
from pytorch_lightning import LightningDataModule
from torch.utils.data import Dataset, DataLoader
import route_distances.lstm.defaults as defaults
from route_distances.lstm.utils import collate_batch
_PairType = Tuple[Union[int, float], ...]
class InMemoryTreeDataset(Dataset):
    """
    An in-memory dataset of preprocessed trees together with the pairwise
    records (two tree indices followed by distance targets) between them.
    """

    def __init__(self, pairs, trees):
        self.trees = trees
        self.pairs = pairs

    def __len__(self):
        return len(self.pairs)

    def __getitem__(self, item):
        # A pair record is (index1, index2, ted, ...); extra fields are ignored
        record = self.pairs[item]
        return {
            "tree1": self.trees[record[0]],
            "tree2": self.trees[record[1]],
            "ted": record[2],
        }
class TreeDataModule(LightningDataModule):
    """
    PyTorch Lightning datamodule for loading and collecting data for model training.

    The pickle file pointed to by `pickle_path` is expected to hold a
    dictionary with two keys: "pairs" (the pairwise records) and "trees"
    (the preprocessed trees the pairs refer to).

    :param pickle_path: path to the pickled data
    :param batch_size: batch size used by all dataloaders
    :param split_part: fraction of pairs assigned to each of the validation
        and test sets
    :param split_seed: seed controlling the random segment shuffling
    :param shuffle: if True, shuffle the training data every epoch
    """

    def __init__(
        self,
        pickle_path: str,
        batch_size: int = defaults.BATCH_SIZE,
        split_part: float = defaults.SPLIT_PART,
        split_seed: int = defaults.SPLIT_SEED,
        shuffle: bool = True,
    ) -> None:
        super().__init__()
        self._pickle_path = pickle_path
        self._batch_size = batch_size
        self._split_part = split_part
        self._split_seed = split_seed
        self._shuffle = shuffle
        self._num_workers = multiprocessing.cpu_count()
        self._all_pairs = []
        self._all_trees = []
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None

    def setup(self, stage: str = None) -> None:
        """Load the pickled pairs/trees and create the three datasets."""
        with open(self._pickle_path, "rb") as file_handle:
            content = pickle.load(file_handle)
        self._all_pairs = content["pairs"]
        self._all_trees = content["trees"]
        self._split_data()

    def train_dataloader(self) -> DataLoader:
        return self._create_dataloader(self.train_dataset, shuffle=self._shuffle)

    def val_dataloader(self) -> DataLoader:
        return self._create_dataloader(self.val_dataset)

    def test_dataloader(self) -> DataLoader:
        return self._create_dataloader(self.test_dataset)

    def _create_dataloader(self, dataset, shuffle: bool = False) -> DataLoader:
        # All three dataloaders share batch size, collation and worker count
        return DataLoader(
            dataset,
            batch_size=self._batch_size,
            collate_fn=collate_batch,
            shuffle=shuffle,
            num_workers=self._num_workers,
        )

    def _make_dataset(
        self,
        segments: List[List[_PairType]],
        indices: Set[int],
        sample_size: int,
    ) -> InMemoryTreeDataset:
        # Draw whole segments in random order, removing each drawn segment
        # from `indices` in place so subsequent calls cannot re-use it
        candidate_ids = list(indices)
        random.shuffle(candidate_ids)
        collected = []
        for segment_id in candidate_ids:
            collected.extend(segments[segment_id])
            indices.discard(segment_id)
            if len(collected) >= sample_size:
                break
        return InMemoryTreeDataset(collected, self._all_trees)

    def _make_segments(self) -> List[List[_PairType]]:
        # A segment boundary is the first self-pair (idx1 == idx2) involving
        # a tree index not encountered before, i.e. the start of the pairs
        # for a new set of target molecules
        segments = []
        current = []
        seen = set()
        for pair in self._all_pairs:
            idx1, idx2 = pair[0], pair[1]
            if idx1 == idx2 and idx2 not in seen and current:
                segments.append(current)
                current = []
            current.append(pair)
            seen.update((idx1, idx2))
        segments.append(current)
        return segments

    def _split_data(self) -> None:
        """
        Split the data into training, validation and test set using a
        segmented approach. First the pairs are split into non-overlapping
        segments, corresponding to different sets of target molecules.
        Second a dataset is built up by adding all pairs from a segment until
        a sufficiently large dataset has been created.
        """
        random.seed(self._split_seed)
        n_pairs = len(self._all_pairs)
        val_len = round(n_pairs * self._split_part)
        train_len = n_pairs - 2 * val_len
        pair_segments = self._make_segments()
        if len(pair_segments) < 3:
            raise ValueError(
                f"Could only make {len(pair_segments)} segments from the pairs. Unable to split the dataset"
            )
        indices = set(range(len(pair_segments)))
        self.train_dataset = self._make_dataset(pair_segments, indices, train_len)
        self.val_dataset = self._make_dataset(pair_segments, indices, val_len)
        self.test_dataset = self._make_dataset(pair_segments, indices, val_len)
        print("=== Data split ===")
        for label, dataset in (
            ("Training", self.train_dataset),
            ("Validation", self.val_dataset),
            ("Test", self.test_dataset),
        ):
            percentage = len(dataset) / n_pairs * 100
            print(f"{label} dataset: {len(dataset)} ({percentage:.2f}%)")
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from treelstm import calculate_evaluation_orders
import route_distances.lstm.defaults as defaults
from route_distances.lstm.utils import (
add_node_index,
gather_adjacency_list,
gather_node_attributes,
)
from route_distances.validation import validate_dict
from route_distances.utils.type_utils import StrDict
def add_fingerprints(
    tree: StrDict,
    radius: int = 2,
    nbits: int = defaults.FP_SIZE,
) -> None:
    """
    Add Morgan fingerprints to the input tree, recursively.

    Each node is given a "fingerprint" key holding the fingerprint of the
    node's SMILES as a numpy int8 array of length `nbits`.

    :param tree: the input tree (modified in place)
    :param radius: the radius of the Morgan calculation
    :param nbits: the length of the bitvector
    """
    mol = Chem.MolFromSmiles(tree["smiles"])
    rd_fp = AllChem.GetMorganFingerprintAsBitVect(
        mol, radius=radius, nBits=nbits, useFeatures=False, useChirality=True
    )
    # Size the destination with the fingerprint length. The previous code
    # allocated with `radius`, which only worked because RDKit's
    # ConvertToNumpyArray resizes the destination to len(rd_fp) anyway.
    np_fp = np.empty(nbits, np.int8)
    DataStructs.ConvertToNumpyArray(rd_fp, np_fp)
    tree["fingerprint"] = np_fp
    for child in tree.get("children", []):
        add_fingerprints(child, radius, nbits)
def remove_reactions(tree: StrDict) -> StrDict:
    """
    Build a copy of the input tree with the reaction nodes stripped out.

    Each molecule node's first child is taken to be a reaction node, and the
    reaction node's own children (the precursor molecules) become the direct
    children in the output.

    Does not modify the original tree.

    :param tree: the input tree
    :return: a new tree containing only molecule nodes
    """
    stripped: StrDict = {"smiles": tree["smiles"]}
    children = tree.get("children")
    if children:
        reaction_node = children[0]
        stripped["children"] = [
            remove_reactions(mol_node) for mol_node in reaction_node["children"]
        ]
    return stripped
def preprocess_reaction_tree(
    tree: StrDict, nfeatures: int = defaults.FP_SIZE
) -> StrDict:
    """
    Preprocess a reaction tree as produced by AiZynthFinder.

    The tree is validated, its reaction nodes are removed, fingerprints and
    node indices are added, and the node features plus evaluation orders for
    the tree LSTM are extracted.

    :param tree: the input tree
    :param nfeatures: the number of features, i.e. fingerprint length
    :return: a tree that could be fed to the LSTM-based model
    """
    validate_dict(tree)
    mol_tree = remove_reactions(tree)
    add_fingerprints(mol_tree, nbits=nfeatures)
    add_node_index(mol_tree)

    features = np.asarray(gather_node_attributes(mol_tree, "fingerprint"))
    adjacency_list = gather_adjacency_list(mol_tree)
    if adjacency_list:
        node_order, edge_order = calculate_evaluation_orders(
            adjacency_list, len(features)
        )
    else:
        # A single-node tree has no edges: only the lone node is evaluated
        node_order = np.asarray([0])
        edge_order = np.asarray([])

    return {
        "features": features,
        "node_order": node_order,
        "adjacency_list": np.array(adjacency_list),
        "edge_order": edge_order,
        "num_nodes": len(features),
        "num_trees": 1,
    }
from typing import Dict, List, Any
from collections import defaultdict
import torch
from route_distances.utils.type_utils import StrDict
def accumulate_stats(stats: List[Dict[str, float]]) -> Dict[str, float]:
    """
    Sum up a list of per-batch statistics into per-key totals.

    :param stats: the list of statistics dictionaries
    :return: a mapping from each key to the sum of its values
    """
    totals: StrDict = defaultdict(float)
    for record in stats:
        for name, value in record.items():
            totals[name] += value
    return totals
def add_node_index(node: StrDict, n: int = 0) -> int:
    """
    Assign a pre-order "index" to this node and all of its descendants.

    :param node: the root of the (sub)tree to index (modified in place)
    :param n: the index to assign to `node`
    :return: the largest index assigned within this subtree
    """
    node["index"] = n
    last_index = n
    for child in node.get("children", []):
        last_index = add_node_index(child, last_index + 1)
    return last_index
def collate_batch(batch: List[StrDict]) -> StrDict:
    """
    Collate a batch of tree-pair data.

    The first trees of all pairs are collated together, then the second
    trees of all pairs; the TED targets are converted to a float tensor.

    The output dictionary has the following keys:
        - tree1: the collated first tree for all pairs
        - tree2: the collated second tree for all pairs
        - ted: the TED for each pair of trees

    :param batch: the list of tree data
    :return: the collated batch
    """
    return {
        "tree1": collate_trees([sample["tree1"] for sample in batch]),
        "tree2": collate_trees([sample["tree2"] for sample in batch]),
        "ted": torch.tensor(
            [sample["ted"] for sample in batch], dtype=torch.float32
        ),
    }
def collate_trees(trees: List[StrDict]) -> StrDict:
    """
    Collate a list of trees by stacking the feature vectors, node orders and
    edge orders, and concatenating the adjacency lists with per-tree offsets.

    This is a modified version from the treelstm package that also converts
    all matrices to pytorch tensors.

    The output dictionary has the following keys:
        - features: the stacked node features
        - node_order: the stacked node orders
        - edge_order: the stacked edge orders
        - adjacency_list: the stacked and offset-adjusted adjacency list
        - tree_sizes: the number of nodes in each tree

    :param trees: the trees to collate
    :return: the collated tree data
    """
    tree_sizes = [tree["num_nodes"] for tree in trees]

    adjacency_parts = []
    offset = 0
    for size, tree in zip(tree_sizes, trees):
        # Shift node indices so they refer to rows of the stacked features
        adjacency_parts.append(
            torch.tensor(tree["adjacency_list"], dtype=torch.int64) + offset
        )
        offset += size

    return {
        "features": torch.cat(
            [torch.tensor(tree["features"], dtype=torch.float32) for tree in trees]
        ),
        "node_order": torch.cat(
            [torch.tensor(tree["node_order"], dtype=torch.int64) for tree in trees]
        ),
        "edge_order": torch.cat(
            [torch.tensor(tree["edge_order"], dtype=torch.int64) for tree in trees]
        ),
        "adjacency_list": torch.cat(adjacency_parts),
        "tree_sizes": tree_sizes,
    }
def gather_adjacency_list(node: StrDict) -> List[List[int]]:
    """
    Create the adjacency list of a tree by depth-first traversal.

    :param node: the current node in the tree
    :return: a list of [parent index, child index] edges
    """
    edges: List[List[int]] = []
    for child in node.get("children", []):
        edges.append([node["index"], child["index"]])
        edges.extend(gather_adjacency_list(child))
    return edges
def gather_node_attributes(node: StrDict, key: str) -> List[Any]:
    """
    Collect one attribute from every node by pre-order traversal.

    :param node: the current node in the tree
    :param key: the name of the attribute to extract
    :return: the list of attributes gathered
    """
    gathered = [node[key]]
    for child in node.get("children", []):
        gathered += gather_node_attributes(child, key)
    return gathered
from typing import List, Tuple
import torch
import pytorch_lightning as lightning
from treelstm import TreeLSTM as TreeLSTMBase
from torchmetrics import MeanAbsoluteError, R2Score
import route_distances.lstm.defaults as defaults
from route_distances.lstm.utils import accumulate_stats
from route_distances.utils.type_utils import StrDict
class _TreeLstmWithPreCompression(torch.nn.Module):
    """
    A tree LSTM preceded by a small feed-forward network that compresses
    the fingerprint features down to the LSTM size.

    :param fp_size: the length of the fingerprint vector
    :param lstm_size: the size of the LSTM cell
    :param dropout_prob: the dropout probability
    """

    def __init__(self, fp_size: int, lstm_size: int, dropout_prob: float) -> None:
        super().__init__()
        # Submodule names and registration order are kept so state dicts of
        # previously trained models remain loadable
        self._compression = torch.nn.Sequential(
            torch.nn.Linear(fp_size, lstm_size),
            torch.nn.ReLU(),
            torch.nn.Dropout(p=dropout_prob),
            torch.nn.Linear(lstm_size, lstm_size),
            torch.nn.ReLU(),
        )
        self._tree_lstm = TreeLSTMBase(lstm_size, lstm_size)

    def forward(self, tree_batch: StrDict) -> torch.Tensor:
        """
        Forward pass

        :param tree_batch: collated trees from the `route_distances.utils.collate_trees` function.
        :return: the LSTM representation of the first (top) node of each tree
        """
        compressed = self._compression(tree_batch["features"])
        lstm_output, _ = self._tree_lstm(
            compressed,
            tree_batch["node_order"],
            tree_batch["adjacency_list"],
            tree_batch["edge_order"],
        )
        # Keep only the representation of the top node of each tree
        per_tree = torch.split(lstm_output, tree_batch["tree_sizes"], dim=0)
        return torch.stack([chunk[0, :] for chunk in per_tree])
class RouteDistanceModel(lightning.LightningModule):
    """
    Model for computing the distances between two synthesis routes

    Each route is encoded with a tree LSTM; training minimizes the MSE
    between the Euclidean distance of two encodings and the tree edit
    distance (TED) target of the pair.

    :param fp_size: the length of the fingerprint vector
    :param lstm_size: the size of the LSTM cell
    :param dropout_prob: the dropout probability
    :param learning_rate: the initial learning rate of the optimizer
    :param weight_decay: weight decay factor of the optimizer
    """

    def __init__(
        self,
        fp_size: int = defaults.FP_SIZE,
        lstm_size: int = defaults.LSTM_SIZE,
        dropout_prob: float = defaults.DROPOUT_PROB,
        learning_rate: float = defaults.LEARNING_RATE,
        weight_decay: float = defaults.WEIGHT_DECAY,
    ) -> None:
        super().__init__()
        # Record the constructor arguments so Lightning checkpoints them and
        # can restore the model with the same hyperparameters
        self.save_hyperparameters()
        self._tree_lstm = _TreeLstmWithPreCompression(fp_size, lstm_size, dropout_prob)
        # Euclidean distance between the two encodings of a pair
        self._pdist = torch.nn.PairwiseDistance(p=2)
        self._loss_func = torch.nn.MSELoss()
        self._mae = MeanAbsoluteError()
        self._r2 = R2Score()
        self._lr = learning_rate
        self._weight_decay = weight_decay

    def forward(self, tree_data: StrDict) -> torch.Tensor:
        """
        Calculate the pairwise distances between the input trees

        :param tree_data: collated trees from the `route_distances.utils.collate_trees` function.
        :return: the distances in condensed form
        """
        lstm_enc = self._tree_lstm(tree_data)
        # torch.pdist returns the upper triangle of the distance matrix as a
        # flat vector ("condensed" form, analogous to scipy's pdist)
        return torch.pdist(lstm_enc)

    def training_step(self, batch: StrDict, _) -> torch.Tensor:
        """
        One step in the training loop

        :param batch: collated pair data from the `route_distances.utils.collate_batch` function
        :param _: ignored
        :return: the loss tensor
        """
        pred = self._calculate_distance(batch)
        # MSE between the predicted encoding distance and the TED target
        loss = self._loss_func(pred, batch["ted"])
        self.log("train_mae_step", self._mae(pred, batch["ted"]), prog_bar=True)
        self.log("train_loss_step", loss.item())
        return loss

    def validation_step(self, batch: StrDict, _) -> StrDict:
        """
        One step in the validation loop

        :param batch: collated pair data from the `route_distances.utils.collate_batch` function
        :param _: ignored
        :return: the validation metrics
        """
        return self._val_and_test_step(batch, "val")

    def validation_epoch_end(self, outputs: List[StrDict]) -> None:
        """Log the average validation metrics"""
        self._log_average_metrics(outputs)

    def test_step(self, batch: StrDict, _) -> StrDict:
        """
        One step in the test loop

        :param batch: collated pair data from the `route_distances.utils.collate_batch` function
        :param _: ignored
        :return: the test metrics
        """
        return self._val_and_test_step(batch, "test")

    def test_epoch_end(self, outputs: List[StrDict]) -> None:
        """Log the average test metrics"""
        self._log_average_metrics(outputs)

    def configure_optimizers(
        self,
    ) -> Tuple[List[torch.optim.Adam], List[StrDict]]:
        """Setup the Adam optimiser and scheduler"""
        optim = torch.optim.Adam(
            self.parameters(), lr=self._lr, weight_decay=self._weight_decay
        )
        # Reduce the learning rate when the logged "val_loss" stops improving
        scheduler = {
            "scheduler": torch.optim.lr_scheduler.ReduceLROnPlateau(optim),
            "monitor": "val_loss",
        }
        return [optim], [scheduler]

    def _calculate_distance(self, batch: StrDict) -> torch.Tensor:
        # Encode both trees of each pair and return the per-pair distance
        lstm_out1 = self._tree_lstm(batch["tree1"])
        lstm_out2 = self._tree_lstm(batch["tree2"])
        return self._pdist(lstm_out1, lstm_out2)

    def _log_average_metrics(self, outputs: List[StrDict]) -> None:
        accum = accumulate_stats(outputs)
        for key, value in accum.items():
            # Simple mean over batches; NOTE(review): a smaller final batch is
            # weighted equally with full batches — confirm this is intended
            self.log(key, value / len(outputs))

    def _val_and_test_step(self, batch: StrDict, prefix: str) -> StrDict:
        # NOTE(review): Lightning normally switches to eval mode for the
        # validation/test loops itself; this explicit call looks defensive
        self.eval()
        pred = self._calculate_distance(batch)
        loss = self._loss_func(pred, batch["ted"])
        mae = self._mae(pred, batch["ted"])
        # Keys are prefixed ("val"/"test") so the same helper serves both loops
        return {
            f"{prefix}_loss": loss.item(),
            f"{prefix}_mae": mae,
            f"{prefix}_monitor": mae,
            f"{prefix}_r2": self._r2(pred, batch["ted"]),
        }
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.