code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from __future__ import annotations
from typing import List
from dataclasses import dataclass
from util import F
# pub enum Expr<F> {
# Const(F),
# Sum(Vec<Expr<F>>),
# Mul(Vec<Expr<F>>),
# Neg(Box<Expr<F>>),
# Pow(Box<Expr<F>>, u32),
# Query(Queriable<F>),
# Halo2Expr(Expression<F>),
# }
@dataclass
class Expr:
    """Base class for constraint expressions (mirrors Rust's `Expr<F>` enum).

    Arithmetic operators build an expression tree out of `Sum`, `Mul`, `Neg`
    and `Pow` nodes; non-Expr operands (int, F) are converted via `to_expr`.
    """

    def __neg__(self: Expr) -> Neg:
        return Neg(self)

    def __add__(self: Expr, rhs: ToExpr) -> Sum:
        return Sum([self, to_expr(rhs)])

    def __radd__(self: Expr, lhs: ToExpr) -> Sum:
        # Fixed: the original called Expr.__add__(lhs, self), leaving `lhs`
        # unconverted, so e.g. `1 + expr` produced a Sum containing a raw int
        # (which has no __json__ and breaks serialization).
        return Sum([to_expr(lhs), self])

    def __sub__(self: Expr, rhs: ToExpr) -> Sum:
        return Sum([self, Neg(to_expr(rhs))])

    def __rsub__(self: Expr, lhs: ToExpr) -> Sum:
        # Fixed like __radd__: convert `lhs` before building the node.
        return Sum([to_expr(lhs), Neg(self)])

    def __mul__(self: Expr, rhs: ToExpr) -> Mul:
        return Mul([self, to_expr(rhs)])

    def __rmul__(self: Expr, lhs: ToExpr) -> Mul:
        # Fixed like __radd__: convert `lhs` before building the node.
        return Mul([to_expr(lhs), self])

    def __pow__(self: Expr, rhs: int) -> Pow:
        return Pow(self, rhs)
@dataclass
class Const(Expr):
    """Constant leaf node wrapping a single field element."""

    value: F

    def __str__(self: Const) -> str:
        return str(self.value)

    def __json__(self):
        # Serialize as the Rust enum variant `Const`.
        return {"Const": self.value}
@dataclass
class Sum(Expr):
    """N-ary sum node; `Neg` children are rendered as subtraction."""

    exprs: List[Expr]

    def __str__(self: Sum) -> str:
        result = "("
        for i, expr in enumerate(self.exprs):
            if type(expr) is Neg:
                # Render negated terms with a minus sign.
                # NOTE(review): str(Neg) itself also prints "(-...)", so the
                # sign appears twice; kept as-is to preserve output format.
                if i == 0:
                    result += "-"
                else:
                    result += " - "
            else:
                if i > 0:
                    result += " + "
            result += str(expr)
        result += ")"
        return result

    def __json__(self):
        return {"Sum": [expr.__json__() for expr in self.exprs]}

    def __add__(self: Sum, rhs: ToExpr) -> Sum:
        # Flatten: append to the existing Sum instead of nesting Sum nodes.
        return Sum(self.exprs + [to_expr(rhs)])

    def __radd__(self: Sum, lhs: ToExpr) -> Sum:
        # Fixed: the original called Sum.__add__(lhs, self), which dereferenced
        # `lhs.exprs` and crashed for int/F/non-Sum operands.
        return Sum([to_expr(lhs)] + self.exprs)

    def __sub__(self: Sum, rhs: ToExpr) -> Sum:
        return Sum(self.exprs + [Neg(to_expr(rhs))])

    def __rsub__(self: Sum, lhs: ToExpr) -> Sum:
        # Fixed like __radd__: lhs - self == lhs + (-self).
        return Sum([to_expr(lhs), Neg(self)])
@dataclass
class Mul(Expr):
    """N-ary product node."""

    exprs: List[Expr]

    def __str__(self: Mul) -> str:
        return "*".join([str(expr) for expr in self.exprs])

    def __json__(self):
        return {"Mul": [expr.__json__() for expr in self.exprs]}

    def __mul__(self: Mul, rhs: ToExpr) -> Mul:
        # Flatten: append to the existing Mul instead of nesting Mul nodes.
        return Mul(self.exprs + [to_expr(rhs)])

    def __rmul__(self: Mul, lhs: ToExpr) -> Mul:
        # Fixed: the original called Mul.__mul__(lhs, self), which dereferenced
        # `lhs.exprs` and crashed for int/F/non-Mul operands.
        return Mul([to_expr(lhs)] + self.exprs)
@dataclass
class Neg(Expr):
    """Unary negation node."""

    expr: Expr

    def __str__(self: Neg) -> str:
        return "(-" + str(self.expr) + ")"

    def __json__(self):
        return {"Neg": self.expr.__json__()}

    def __neg__(self: Neg) -> Expr:
        # Double negation cancels: -(-e) is just e.
        return self.expr
@dataclass
class Pow(Expr):
    """Exponentiation node: `expr` raised to the non-negative power `pow`."""

    expr: Expr
    pow: int

    def __str__(self: Pow) -> str:
        return str(self.expr) + "^" + str(self.pow)

    def __json__(self):
        # Serialized as a [base, exponent] pair, matching the Rust variant.
        return {"Pow": [self.expr.__json__(), self.pow]}
# Anything convertible to an Expr: an existing node, a Python int, or a field element.
ToExpr = Expr | int | F
def to_expr(v: ToExpr) -> Expr:
    # Normalize `v` into an Expr node.
    if isinstance(v, Expr):
        return v
    elif isinstance(v, int):
        if v >= 0:
            return Const(F(v))
        else:
            # Negative ints become Neg(Const(|v|)) — presumably because F(-v)
            # would wrap modulo the field order; TODO confirm against util.F.
            return Neg(Const(F(-v)))
    elif isinstance(v, F):
        return Const(v)
    else:
        raise TypeError(
            f"Type {type(v)} is not ToExpr (one of Expr, int, F, or Constraint)."
        ) | /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/expr.py | 0.832305 | 0.329216 | expr.py | pypi |
# Part 3: Fibonacci Example
# Chapter 3: Witness
Now, we will generate multiple witnesses to test the soundness of our circuit constraints. Note that we only intend to accept the following set of values for signals "a", "b", and "c". "Soundness" in this context refers to a faulty witness being successfully verified against the constraints (a false positive), so any set of witness assignments that differs from the table below but still passes the constraints incurs a "soundness" error.
| Step Type | Step Instance Index || Signals ||| Setups ||
| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
||| a | b | c | constraint 1 | constraint 2 | constraint 3 |
| fibo step | 0 | 1 | 1 | 2 | a + b == c | b == a.next | c == b.next |
| fibo step | 1 | 1 | 2 | 3 | a + b == c | b == a.next | c == b.next |
| fibo step | 2 | 2 | 3 | 5 | a + b == c | b == a.next | c == b.next |
| fibo step | 3 | 3 | 5 | 8 | a + b == c | b == a.next | c == b.next |
| ... | ... || ... ||| ... ||
## Setup
We setup the same circuit and witness in Part 1 which were successfully verified:
```
from __future__ import annotations
from typing import Tuple
from dsl import Circuit, StepType
from cb import eq
from util import F
class FiboStep(StepType):
def setup(self):
self.c = self.internal("c")
self.constr(eq(self.circuit.a + self.circuit.b, self.c))
self.transition(eq(self.circuit.b, self.circuit.a.next()))
self.transition(eq(self.c, self.circuit.b.next()))
def wg(self, args):
a_value, b_value = args
self.assign(self.circuit.a, F(a_value))
self.assign(self.circuit.b, F(b_value))
self.assign(self.c, F(a_value + b_value))
class Fibonacci(Circuit):
def setup(self):
self.a = self.forward("a")
self.b = self.forward("b")
self.fibo_step = self.step_type(FiboStep(self, "fibo_step"))
def trace(self, args):
self.add(self.fibo_step, (1, 1))
a = 1
b = 2
for i in range(1, 4):
self.add(self.fibo_step, (a, b))
prev_a = a
a = b
b += prev_a
fibo = Fibonacci()
fibo_witness = fibo.gen_witness(None)
fibo.halo2_mock_prover(fibo_witness)
```
Now we swap the first step instance from `(1, 1, 2)` to `(0, 2, 2)`. We use the `evil_witness_test` function to swap step index 0 assignment index 0 to `F(0)` and step index 0 assignment index 1 to `F(2)`.
```
evil_witness = fibo_witness.evil_witness_test(step_instance_indices=[0, 0], assignment_indices=[0, 1], rhs=[F(0), F(2)])
```
Print the `evil_witness` to confirm that the swap was successful:
```
print(evil_witness)
```
Now, generate and verify the proof with `evil_witness`:
```
fibo.halo2_mock_prover(evil_witness)
```
Surprisingly, `evil_witness` generated a proof that passed verification. This constitutes a soundness error, because the first step instance isn't `(1, 1, 2)` as we initially specified, so why can the witness still pass the constraints?
The answer is simple, because in the first step instance, we never constrained the values of "a" and "b" to 1 and 1 in `setup` of `FiboStep`. We also didn't constrain the first step instance to be `FiboStep`.
You might be wondering: in `trace`, didn't we set "a" and "b" to `(1, 1)` and added `FiboStep` as the first step instance? In fact, `trace` and `wg` are really helper functions for the prover to easily generate a witness, whose data can be tampered with as shown in `evil_witness_test`. The only conditions enforced are defined in circuit and step type `setup`. Therefore, to fix the soundness error, we need to add more constraints, in Chapter 4.
| /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/tutorial_pt3_ch3.ipynb | 0.735452 | 0.965674 | tutorial_pt3_ch3.ipynb | pypi |
# Part 1: Introduction to Chiquito and PyChiquito
Chiquito is a high-level structured language for implementing zero knowledge proof applications, currently being implemented in the DSL Working Group of PSE, Ethereum Foundation. It is a step-based zkDSL (zero knowledge domain specific language) that provides better syntax and abstraction for features like constraint building and column placement when writing PLONKish circuits, which support custom gates and lookup arguments. Chiquito has a Halo2 backend, which is a low level zkDSL that writes circuits using the PLONKish arithmetization. Chiquito is working on supporting additional backends.
Chiquito comes in two flavors: ChiquitoCore (Rust) and PyChiquito (Python). This tutorial focuses on PyChiquito.
[PyChiquito](https://github.com/qwang98/PyChiquito) is the Python front end of ChiquitoCore. It exposes Python functions that user writes circuits with. [ChiquitoCore](https://github.com/privacy-scaling-explorations/chiquito) is called by the Python functions behind the scenes and converts what the user writes into a Halo2 circuit.
The key advantages of Chiquito/PyChiquito include:
- Abstraction and simplification on the readability and learnability of Halo2 circuits.
- Composability with other Halo2 circuits.
- Modularity of using multiple frontends (Python and Rust) and backends (Halo2 and beyond).
- Smooth user experience with a dynamically typed language (Python).
For more on why you need Chiquito/PyChiquito, refer to [What is Chiquito?](https://hackmd.io/h6innd6RTwex4aBExignhw), [Chiquito README](https://github.com/privacy-scaling-explorations/chiquito#readme), and the [Appendix](https://github.com/privacy-scaling-explorations/chiquito/blob/main/Appendix.md/#design-principles) on its design principles. For more on PLONKish concepts and Halo2 circuits, refer to the [Halo2 book](https://zcash.github.io/halo2/index.html).
| /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/tutorial_pt1.ipynb | 0.804636 | 0.972545 | tutorial_pt1.ipynb | pypi |
from __future__ import annotations
from typing import Tuple
from dsl import Circuit, StepType
from cb import eq
from query import Queriable
from util import F
class Fibonacci(Circuit):
    """A Fibonacci circuit: 10 FiboStep rows followed by one FiboLastStep row."""

    def setup(self: Fibonacci):
        """Declare circuit-level forward signals and register the step types."""
        self.a: Queriable = self.forward("a")
        self.b: Queriable = self.forward("b")
        self.fibo_step = self.step_type(FiboStep(self, "fibo_step"))
        self.fibo_last_step = self.step_type(FiboLastStep(self, "fibo_last_step"))
        # Pin the trace shape: which step types come first/last and how many
        # step instances the circuit has in total.
        self.pragma_first_step(self.fibo_step)
        self.pragma_last_step(self.fibo_last_step)
        self.pragma_num_steps(11)
        # self.pragma_disable_q_enable()
        # self.expose(self.b, First())
        # self.expose(self.a, Last())
        # self.expose(self.a, Step(1))

    def trace(self: Fibonacci, args: Any):
        """Instantiate the step instances and feed each its witness inputs."""
        self.add(self.fibo_step, (1, 1))
        a, b = 1, 2
        for _ in range(1, 10):
            self.add(self.fibo_step, (a, b))
            a, b = b, a + b
        self.add(self.fibo_last_step, (a, b))
class FiboStep(StepType):
    """One Fibonacci row: constrains a + b == c and chains (b, c) into the
    next row's (a, b)."""

    def setup(self: FiboStep):
        self.c = self.internal("c")
        self.constr(eq(self.circuit.a + self.circuit.b, self.c))
        # Transition constraints tie this row to the next step instance.
        self.transition(eq(self.circuit.b, self.circuit.a.next()))
        self.transition(eq(self.c, self.circuit.b.next()))

    def wg(self: FiboStep, args: Tuple[int, int]):
        """Assign witness values for one row from the (a, b) input pair."""
        a_value, b_value = args
        for signal, value in (
            (self.circuit.a, a_value),
            (self.circuit.b, b_value),
            (self.c, a_value + b_value),
        ):
            self.assign(signal, F(value))
class FiboLastStep(StepType):
    """Final Fibonacci row: checks a + b == c but has no transition
    constraints (there is no next row to chain into)."""

    def setup(self: FiboLastStep):
        self.c = self.internal("c")
        self.constr(eq(self.circuit.a + self.circuit.b, self.c))

    def wg(self: FiboLastStep, values: Tuple[int, int]):
        # Fixed: the parameter was declared `values=Tuple[int, int]`, which made
        # the typing object the *default value* instead of a type annotation
        # (calling wg with no argument would try to unpack `Tuple[int, int]`).
        a_value, b_value = values
        self.assign(self.circuit.a, F(a_value))
        self.assign(self.circuit.b, F(b_value))
        self.assign(self.c, F(a_value + b_value))
# Build the circuit, generate a witness (no external input is needed since the
# seed values are hardcoded in trace), and verify with the Halo2 mock prover.
fibo = Fibonacci()
fibo_witness = fibo.gen_witness(None)
fibo.halo2_mock_prover(fibo_witness) | /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/fibonacci.py | 0.723114 | 0.236373 | fibonacci.py | pypi
# Part 3: Fibonacci Example
# Chapter 2: StepType and Circuit
In this Chapter, we code out the concepts learned in Chapter 1 in PyChiquito, but before that, let's import the dependencies first.
## Imports
These imports are for the typing hints included in this example. They are not required, because Python is a dynamically typed interpreted language and typings aren't enforced.
```
from __future__ import annotations
from typing import Tuple
```
The following imports are required, including:
- `Circuit` and `StepType`, the most important data types, from the domain specific language (dsl).
- Equal constraint `eq` from the constraint builder (cb).
- Field element `F` from utils.
```
from dsl import Circuit, StepType
from cb import eq
from util import F
```
## StepType
Before putting everything together into a circuit, we need to define the step types first. Remember that the Fibonacci circuit is composed of one single step type, defined as 3 signals "a", "b", and "c", plus three constraints `a + b == c`, `b == a.next`, and `c == b.next`, where "next" means the same signal in the next step instance:
| Step Type | Step Instance Index || Signals ||| Setups ||
| :-: | :-: | :-: | :-: | :-: | :-: | :-: | :-: |
||| a | b | c | constraint 1 | constraint 2 | constraint 3 |
| fibo step | 0 | 1 | 1 | 2 | a + b == c | b == a.next | c == b.next |
| fibo step | 1 | 1 | 2 | 3 | a + b == c | b == a.next | c == b.next |
| fibo step | 2 | 2 | 3 | 5 | a + b == c | b == a.next | c == b.next |
| fibo step | 3 | 3 | 5 | 8 | a + b == c | b == a.next | c == b.next |
| ... | ... || ... ||| ... ||
PyChiquito provides a `StepType` parent class that we can customarily inherit. For each `StepType`, we need to define two functions:
- `setup`, which defines constraints using signals
- `wg`, which defines witness assignment for the step type
## Signal Types
Now, a bit more on the signals. In Chiquito, there are signals that we can only query for the current step instance, which we call "internal signals". There are also signals that we can query for non-current step instances, such as the next step instance, which we call "forward signals". In the example above, "a" and "b" were both queried at the next step instance as `a.next` and `b.next` respectively, and therefore are "forward signals". "c" is only ever queried at the current step instance, and therefore is called "internal signal". In Chiquito, querying to a non-current step instance is also referred to as "rotation", which is a positive or negative number relative to the current step instance. We can call `next` on a forward signal, implying a rotation of `+1`. There are additional Chiquito signal types, such as "shared signal" and "fixed signal", which allows for arbitrary positive or negative rotation. However, in this Fibonacci example, we will only use forward signals "a" and "b" as well as internal signal "c".
## FiboStep Setup
We now define the only step type, `FiboStep`:
```
class FiboStep(StepType):
def setup(self):
self.c = self.internal("c")
self.constr(eq(self.circuit.a + self.circuit.b, self.c))
self.transition(eq(self.circuit.b, self.circuit.a.next()))
self.transition(eq(self.c, self.circuit.b.next()))
def wg(self):
# TODO
```
Here, "c" is defined using `self.internal` as an internal signal that's only queried within a `FiboStep` instance. We didn't define "a" and "b", as they are forward signals which Chiquito defines on the circuit-level. More on that later.
Next, we define constraints among signals, both forward and internal. There are two types of constraints in PyChiquito:
- `constr` stands for constraints among signals that are queried within a step type instance, i.e. internal signals.
- `transition` stands for constraints involving circuit-level signals, i.e. forward signals and etc.
In the code snippet above, forward signals "a" and "b" are expressed as `self.circuit.a` and `self.circuit.b`, whereas internal signal "c" is expressed as `self.c`, because "a" and "b" are at the circuit-level. `self.circuit.a.next()` queries the value of circuit-level signal "a" at the next step instance. `eq` is a constraint builder that enforces equality between the two arguments passed in. It builds the three constraints of `FiboStep`: `a + b == c`, `b == a.next`, and `c == b.next`.
## FiboStep Witness Generation
```
class FiboStep(StepType):
def setup(self: FiboStep):
# ...
def wg(self: FiboStep, args: Tuple[int, int]):
a_value, b_value = args
self.assign(self.circuit.a, F(a_value))
self.assign(self.circuit.b, F(b_value))
self.assign(self.c, F(a_value + b_value))
```
In the example above, `wg` (witness generation) defines witness value assignments at the step type level. Here, the `args` we pass in is a tuple of values for signals "a" and "b". We assign them to forward signals "a" and "b" and then their sum to internal signal "c".
Note that in `self.assign`, `a_value` and `b_value` are both wrapped in `F`, which converts them from int to field elements. All witness assignments in PyChiquito are field elements.
Putting everything for `FiboStep` together, we have:
```
class FiboStep(StepType):
def setup(self):
self.c = self.internal("c")
self.constr(eq(self.circuit.a + self.circuit.b, self.c))
self.transition(eq(self.circuit.b, self.circuit.a.next()))
self.transition(eq(self.c, self.circuit.b.next()))
def wg(self, args):
a_value, b_value = args
self.assign(self.circuit.a, F(a_value))
self.assign(self.circuit.b, F(b_value))
self.assign(self.c, F(a_value + b_value))
```
## Circuit
Now that we finished constructing the only step type `FiboStep`, we can build a `Circuit` object in PyChiquito. PyChiquito provides a `Circuit` parent class that we can customarily inherit. For each `Circuit`, we need to define two functions:
- `setup`, which configures the circuit with signals and step types.
- `trace`, which instantiates step types and defines the trace of assigning witness values.
## Circuit Setup
We first define the circuit `setup`:
```
class Fibonacci(Circuit):
def setup(self):
self.a = self.forward("a")
self.b = self.forward("b")
self.fibo_step = self.step_type(FiboStep(self, "fibo_step"))
def trace(self):
# TODO
```
Remember that previously we already added internal signal "c" to `FiboStep`. Now we add two forward signals "a" and "b" to the circuit-level. We append these signals to the circuit by defining them as `self.a` and `self.b`. Forward signals are created using `self.forward`.
Next, we append the only step type to the circuit by defining it as `self.fibo_step`. `step_type` function only has one argument, which is the `FiboStep` object created using its class constructor.
## Circuit Trace
Now we instantiate step types and assign witness values using `trace`:
```
class Fibonacci(Circuit):
def setup(self):
# ...
def trace(self, args):
self.add(self.fibo_step, (1, 1))
a = 1
b = 2
for i in range(1, 4):
self.add(self.fibo_step, (a, b))
prev_a = a
a = b
b += prev_a
```
`trace` takes two arguments, the `Fibonacci` circuit itself and the witness value assignment arguments `args`. We call `self.add` to instantiate `fibo_step` we defined and pass in the witness values for "a" and "b". Note that we only hardcoded witness values for the first step instance as `(1, 1)`, because all other witness values can be calculated given the nature of Fibonacci.
Note that `self.add` creates step instance by calling `wg` associated with the step type. Therefore, the second input of `self.add`, e.g. `(a, b)` in `self.add(self.fibo_step, (a, b))`, needs to match `args` in `FiboStep` `wg`, i.e. tuple of `a_value, b_value`.
We didn't pass in witness values for "c", because they are calculated in `FiboStep` `wg`.
Note that we need to pass in witness value assignments in a single argument `args` and therefore we use a tuple in this case. `args` can really be any data type as long as it's one single argument.
After creating the first `FiboStep` instance, we loop over `FiboStep` instantiation 3 more times, each time calculating and passing in a different tuple of assignments. Voila, here's our Fibonacci circuit with 4 `FiboStep` instances:
```
class FiboStep(StepType):
def setup(self):
self.c = self.internal("c")
self.constr(eq(self.circuit.a + self.circuit.b, self.c))
self.transition(eq(self.circuit.b, self.circuit.a.next()))
self.transition(eq(self.c, self.circuit.b.next()))
def wg(self, args):
a_value, b_value = args
self.assign(self.circuit.a, F(a_value))
self.assign(self.circuit.b, F(b_value))
self.assign(self.c, F(a_value + b_value))
class Fibonacci(Circuit):
def setup(self):
self.a = self.forward("a")
self.b = self.forward("b")
self.fibo_step = self.step_type(FiboStep(self, "fibo_step"))
def trace(self, args):
self.add(self.fibo_step, (1, 1))
a = 1
b = 2
for i in range(1, 4):
self.add(self.fibo_step, (a, b))
prev_a = a
a = b
b += prev_a
```
## Putting Everything Together
Everything we went through above defines how the circuit and its step type are configured and witness values assigned to them. To instantiate the circuit, we call the class constructor:
```
fibo = Fibonacci()
```
You can also print the circuit. In the print out, you will see the single step type `FiboStep` and two forward signals "a" and "b" at the circuit-level. Within `FiboStep`, you will see one internal signal "c" and the constraints. The big random looking numbers are UUIDs that we use to uniquely identify objects in the circuit, which you don't need to worry about.
```
print(fibo)
```
After initiating the Fibonacci circuit, we can generate witness assignments for it. `gen_witness` takes one argument of external input with `Any` type. However, because the only external input, `(1, 1)`, was hardcoded in `trace`, we don't need to provide an additional one and can put `None` for this argument. In practice, one circuit can have many different sets of witness assignments, each generated by a different external input argument. This is why we expose the `gen_witness` function to you.
```
fibo_witness = fibo.gen_witness(None)
```
Again, you can print the witness assignments:
```
print(fibo_witness)
```
Finally, we can generate and verify proof with the witness using the Halo2 mock prover. The print out includes Halo2 and ChiquitoCore debug messages. `Ok(())` means that proof was correctly generated and verified for the witness and circuit. `Err()` prints out Halo2 and ChiquitoCore error messages, usually because some constraints in the circuit were not satisfied. Here, you should see the `Ok(())` print out.
```
fibo.halo2_mock_prover(fibo_witness)
```
Congratulations! Now you finished writing your first Fibonacci circuit and learned about the most essential concepts behind the step-based design of Chiquito, which simply combines step instances into a circuit! With abstraction, composability, modularity, and smooth user experience as the key tenets, writing Halo2 circuits has never been easier with PyChiquito!
Next up, in Chapter 3, you will learn about testing your circuit with multiple different witnesses.
| /rust_chiquito-0.1.0.tar.gz/rust_chiquito-0.1.0/pychiquito/tutorial_pt3_ch2.ipynb | 0.488527 | 0.991977 | tutorial_pt3_ch2.ipynb | pypi |
from __future__ import annotations
from copy import copy
from typing import Iterable, Optional, Set, Tuple, Union
import attrs
from attrs import define, frozen
import rust_circuit as rc
from rust_circuit.py_utils import assert_never
@frozen
class ScopeGlobalTraversal:
    # A scope refinement that restricts which paths may be traversed.
    traversal: rc.IterativeMatcher
@frozen
class ScopeMatcher:
    # A scope refinement that narrows which nodes are matched.
    matcher: rc.IterativeMatcher
# A scope update is either a matcher refinement or a traversal restriction.
ScopeUpdate = Union[ScopeMatcher, ScopeGlobalTraversal]
# Ordered (update, optional name) pairs; the name allows later removal by name.
NamedUpdates = Tuple[Tuple[ScopeUpdate, Optional[str]], ...]
@define
class ScopeManager:
    """
    Wrapper around a `circuit` for the purposes of doing rewrites
    Note that self.unique() is the circuit focused on, but self.circuit may be
    a parent in a larger computational graph
    """
    circuit: rc.Circuit
    # TODO: add nice stuff for working with names if that seems nice
    # Accumulated scope refinements applied on top of the base matcher.
    named_updates: NamedUpdates = ()
    # If True, `check` asserts at least one node matches the current scope.
    assert_matched_exists: bool = True
    # If True, `check` asserts exactly one node matches the current scope.
    assert_matched_unique: bool = False
    def __attrs_post_init__(self):
        self.check()
    def check(self):
        """Validate the configured match-count invariants against the circuit."""
        matched = self.circuit.get(self.matcher())
        if self.assert_matched_exists:
            assert len(matched) > 0
        if self.assert_matched_unique:
            assert len(matched) == 1
    def raw_matcher(self, *extra: rc.IterativeMatcherIn):
        # Chain all accumulated matchers (plus extras), without traversals.
        return rc.IterativeMatcher.new_chain(*self.matchers, *extra)
    def traversal(self, *extra_traversals: rc.IterativeMatcherIn):
        # Intersect all accumulated traversal restrictions (plus extras).
        return rc.IterativeMatcher.all(*self.traversals, *extra_traversals)
    def matcher(self, *extra: rc.IterativeMatcherIn, extra_traversals: Iterable[rc.IterativeMatcherIn] = []):
        # Full scope matcher: the chained matchers restricted by the traversals.
        return self.raw_matcher(*extra) & self.traversal(*extra_traversals)
    @property
    def c(self):
        # Short alias for the wrapped circuit.
        return self.circuit
    @property
    def matchers(self):
        # Yield the base matcher, then each ScopeMatcher update's matcher.
        yield rc.restrict(rc.IterativeMatcher(True), term_if_matches=True)
        for g_m, _ in self.named_updates:
            if isinstance(g_m, ScopeMatcher):
                yield g_m.matcher
    @property
    def traversals(self):
        # Yield each ScopeGlobalTraversal update's traversal.
        for g_m, _ in self.named_updates:
            if isinstance(g_m, ScopeGlobalTraversal):
                yield g_m.traversal
    def sub_get_new_updates(self, sub_update: ScopeUpdate, name: Optional[str] = None) -> NamedUpdates:
        """get new named_updates
        sub_item is either node getter for chain or term_early_at item"""
        return self.named_updates + ((sub_update, name),)
    def sub_update(self, sub_update: ScopeUpdate, name: Optional[str] = None) -> ScopeManager:
        """evolve new instance with new named_updates (see sub_u)"""
        return attrs.evolve(self, named_updates=self.sub_get_new_updates(sub_update, name=name))
    def sub_update_(self, sub_update: ScopeUpdate, name: Optional[str] = None):
        """mutably set to new named_updates (see sub_u)"""
        new_named_updates = self.sub_get_new_updates(sub_update, name=name)
        attrs.evolve(self, named_updates=new_named_updates) # run check
        self.named_updates = new_named_updates
    def sub_matcher(self, matcher: rc.IterativeMatcherIn, name: Optional[str] = None):
        """wrap sub"""
        return self.sub_update(ScopeMatcher(rc.IterativeMatcher(matcher)), name=name)
    def sub_matcher_(self, matcher: rc.IterativeMatcherIn, name: Optional[str] = None):
        """wrap sub_"""
        return self.sub_update_(ScopeMatcher(rc.IterativeMatcher(matcher)), name=name)
    def sub_traversal(self, traversal: rc.IterativeMatcherIn, name: Optional[str] = None):
        """wrap sub"""
        return self.sub_update(ScopeGlobalTraversal(rc.IterativeMatcher(traversal)), name=name)
    def sub_traversal_(self, traversal: rc.IterativeMatcherIn, name: Optional[str] = None):
        """wrap sub_"""
        return self.sub_update_(ScopeGlobalTraversal(rc.IterativeMatcher(traversal)), name=name)
    def remove_by_name(self, name: str):
        # Return named_updates with every update labelled `name` removed.
        return tuple((i, s) for i, s in self.named_updates if s != name)
    def pop_sub_by_name_(self, name: str):
        # Mutably remove all updates labelled `name`.
        self.named_updates = self.remove_by_name(name)
    def pop_sub_by_name(self, name: str) -> ScopeManager:
        # Evolve a copy with all updates labelled `name` removed.
        return attrs.evolve(self, named_updates=self.remove_by_name(name))
    def pop_sub_(self):
        # Mutably drop the most recent update.
        self.named_updates = self.named_updates[:-1]
    def pop_sub(self) -> ScopeManager:
        # Evolve a copy with the most recent update dropped.
        return attrs.evolve(self, named_updates=self.named_updates[:-1])
    def clear_sub_(self):
        # Mutably drop all updates.
        self.named_updates = ()
    def clear_sub(self) -> ScopeManager:
        # Evolve a copy with all updates dropped.
        return attrs.evolve(self, named_updates=())
    def clone(self):
        # Shallow copy; the underlying circuit object is shared.
        return copy(self)
    def get_bound_updater(
        self,
        updater: Union[rc.Updater, rc.BoundUpdater, rc.TransformIn],
        apply_global: bool = False,
    ):
        # Normalize `updater` into a BoundUpdater scoped either to the whole
        # circuit (apply_global=True) or to the currently matched nodes.
        if not isinstance(updater, (rc.Updater, rc.BoundUpdater)):
            updater = rc.Updater(updater)
        if not isinstance(updater, rc.BoundUpdater):
            if apply_global:
                return updater.bind(rc.IterativeMatcher.term(match_next=True))
            else:
                return updater.bind(self.matcher())
        if isinstance(updater, rc.BoundUpdater):
            # Already bound: re-bind so its matcher is additionally restricted
            # by this scope's matcher.
            return self.matcher(updater.matcher).updater(updater.updater.transform)
        else:
            assert_never(updater)
    def update(
        self,
        updater: Union[rc.Updater, rc.BoundUpdater, rc.TransformIn],
        apply_global: bool = False,
    ) -> ScopeManager:
        """evolve new instance with updated circuit (see get_bound_updater)"""
        return attrs.evolve(self, circuit=self.get_bound_updater(updater, apply_global=apply_global)(self.circuit))
    def update_(
        self,
        updater: Union[rc.Updater, rc.BoundUpdater, rc.TransformIn],
        apply_global: bool = False,
    ) -> None:
        """mutably set to new updated circuit (see get_bound_updater)"""
        self.circuit = self.get_bound_updater(updater, apply_global=apply_global)(self.circuit)
    def print_path(
        self,
        printer: rc.PrintOptionsBase = rc.PrintOptions(bijection=False, colorer=lambda _: 3),
        path_from: rc.IterativeMatcherIn = rc.IterativeMatcher.term(match_next=True),
    ):
        """Prints the path to the currently examined nodes.
        Use the colorer of the print passed as argument to color matched nodes"""
        matched = self.matched_circuits()
        # Terminate traversal off any path leading to a matched node, and color
        # only the matched nodes themselves. The two branches differ only in
        # that PrintOptions additionally accepts bijection=False.
        path_printer = (
            printer.evolve(
                traversal=self.traversal(rc.new_traversal(term_early_at=~rc.Matcher.match_any_found(matched))),
                colorer=lambda x: (printer.colorer(x) if x in matched else None), # type: ignore
                bijection=False,
            )
            if isinstance(printer, rc.PrintOptions)
            else printer.evolve(
                traversal=self.traversal(rc.new_traversal(term_early_at=~rc.Matcher.match_any_found(matched))),
                colorer=lambda x: (printer.colorer(x) if x in matched else None), # type: ignore
            )
        )
        print()
        path_printer.print(rc.Getter().get_unique(self.circuit, path_from)) # type: ignore
    def print(self, printer: rc.PrintOptionsBase = rc.PrintOptions(bijection=False)):
        # TODO: support HTML printer
        # Print each matched node, restricting traversal to the current scope.
        printer = (
            printer.evolve(traversal=self.traversal(printer.traversal), bijection=False)
            if isinstance(printer, rc.PrintOptions)
            else printer.evolve(traversal=self.traversal(printer.traversal))
        )
        for c in self.matched_circuits():
            print()
            printer.print(c)
    def unique(self) -> rc.Circuit:
        # The single circuit this scope focuses on (raises if not unique).
        return self.matcher().get_unique(self.circuit)
    def matched_circuits(self) -> Set[rc.Circuit]:
        # All circuits matched by the current scope.
        return self.matcher().get(self.circuit) | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/scope_manager.py | 0.816955 | 0.451508 | scope_manager.py | pypi
from time import perf_counter
from typing import Generic, Iterable, List, Mapping, NoReturn, Optional, Tuple, TypeVar, Union
import torch
from ._rust import TorchAxisIndex
def assert_never(x: NoReturn) -> NoReturn:
    """Fail loudly on a value that should be unreachable (exhaustiveness check)."""
    raise AssertionError(f"Invalid value: {x!r}")
def check_cast(cls, c, exception=TypeError):
    """Return `c` unchanged if it is an instance of `cls`; otherwise raise
    `exception` (TypeError by default)."""
    if not isinstance(c, cls):
        raise exception(f"Requested type {repr(cls)} does not match the type of the input, {type(c)}.")
    return c
class timed:
    """Context manager that times its body.

    The elapsed seconds are stored in `self.time` and a human-readable summary
    in `self.readout`. The summary (plus `extra_print`, if any) is printed on
    exit when `min_to_print` is None or the elapsed time exceeds it.
    """

    def __init__(self, name, min_to_print=None, extra_print=None):
        self.name = name
        self.min_to_print = min_to_print
        self.extra_print = extra_print

    def __enter__(self):
        self.start = perf_counter()
        return self

    def __exit__(self, type, value, traceback):
        self.time = perf_counter() - self.start
        self.readout = f"{self.name} took {self.time:.4f}"
        should_print = self.min_to_print is None or self.min_to_print < self.time
        if should_print:
            suffix = "" if self.extra_print is None else self.extra_print
            print(self.readout + suffix)
TorchIndex = Union[Tuple[TorchAxisIndex, ...], TorchAxisIndex]
class Indexer:
    """Helper for defining slices more easily which always returns tuples
    (instead of sometimes returning just TorchAxisIndex)."""

    def __getitem__(self, idx: TorchIndex) -> Tuple[TorchAxisIndex, ...]:
        # Wrap a bare axis index in a 1-tuple; pass tuples through unchanged.
        return idx if isinstance(idx, tuple) else (idx,)
class Slicer:
    """Helper for defining slices more easily which always returns slices"""

    def __getitem__(self, idx: slice) -> slice:
        # Guard against accidental non-slice subscripts such as s[1] or s[1, 2].
        assert isinstance(idx, slice)
        return idx
I = Indexer()
S = Slicer()
KT = TypeVar("KT")
VT = TypeVar("VT")
# Dict is an invariant type by default. We declare FrozenDict to be covariant using Generic.
class FrozenDict(dict, Generic[KT, VT]):
    """A dict intended to be treated as immutable, with hashing support.

    The hash and the sorted item tuple are computed lazily and memoized;
    call `clear_cache` if the contents are ever mutated.
    """

    # TODO: maybe this should actually use frozendict...
    _cached_hash: Optional[int] = None
    _cached_tuple: Optional[Tuple] = None

    def _to_tuple(self):
        # Sort items by hash so logically-equal dicts yield the same tuple.
        if self._cached_tuple is None:
            self._cached_tuple = tuple(sorted(self.items(), key=hash))
        return self._cached_tuple

    def __hash__(self):
        if self._cached_hash is None:
            self._cached_hash = hash(("tao's FrozenDict", self._to_tuple()))
        return self._cached_hash

    def __eq__(self, other):
        # Possible hash collisions when sorting the tuple in _to_tuple make the
        # implementation of FrozenDict.__eq__ complicated"
        # TODO use frozendict
        if not isinstance(other, FrozenDict):
            return False
        return dict(self) == dict(other)

    def clear_cache(self):
        # Drop both memoized values so they are recomputed on next use.
        self._cached_hash = None
        self._cached_tuple = None

    def __repr__(self):
        return f"{self.__class__.__name__}({super().__repr__()})"
def get_slice_list(n: int) -> List[TorchAxisIndex]:
    """Return a list of `n` full slices (equivalent to indexing `[:, :, ...]`)."""
    return [slice(None) for _ in range(n)]
def make_index_at(index: TorchIndex, at: int):
    # Build a full index tuple that applies `index` at axis `at`, with all
    # earlier axes taking full slices.
    return make_index_at_many({at: index})
def to_axis_index(idx: TorchIndex) -> TorchAxisIndex:
    """Unwrap a length-1 tuple index to its single element; pass others through."""
    if not isinstance(idx, tuple):
        return idx
    assert len(idx) == 1
    return idx[0]
def make_index_at_many(at_idx: Mapping[int, TorchIndex]) -> Tuple[TorchAxisIndex, ...]:
    # Build an index tuple long enough to cover the largest target axis,
    # defaulting every axis to a full slice, then place each given index.
    out = get_slice_list(max(at_idx.keys(), default=-1) + 1)
    for at, idx in at_idx.items():
        out[at] = to_axis_index(idx)
    return tuple(out) | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/py_utils.py | 0.834306 | 0.272153 | py_utils.py | pypi
from typing import Dict, Literal, Optional
import einops
import jax.numpy as jnp
import numpy as np
import torch
from interp.model.blocks import BatchNormFixed, LayerNorm
from interp.model.gpt_model import Gpt
from interp.model.gpt_modules import Attention, GptBlock, PosEncType, id_norm
from rust_circuit import module_library as mod_l
from . import _rust as rc
def from_converted(arr: jnp.ndarray, name: Optional[str] = None):
return rc.Array(torch.from_numpy(np.array(arr)), name=name)
def from_converted_tup(arr: jnp.ndarray, s: str):
return (s, from_converted(arr))
def get_norm(norm_b):
if not isinstance(norm_b, (LayerNorm, BatchNormFixed)):
raise NotImplementedError("unsupported norm type, currently only supports ln/bn fixed", type(norm_b))
if norm_b.epsilon != 1e-5:
raise NotImplementedError("TODO: support variable epsilon")
nt = "ln" if isinstance(norm_b, LayerNorm) else "bn"
out = [
from_converted_tup(norm_b.variables["params"]["bias"], s=f"{nt}.w.bias"),
from_converted_tup(norm_b.variables["params"]["scale"], s=f"{nt}.w.scale"),
]
if nt == "bn":
out.extend(
[
from_converted_tup(norm_b.variables["params"]["mean"], s=f"{nt}.mean"),
from_converted_tup(norm_b.variables["params"]["var"], s=f"{nt}.var"),
]
)
return out, nt
def get_attention(attention_b: Attention, use_pos: bool):
if attention_b.softmax_type != "softmax":
raise NotImplementedError
q_w, k_w, v_w = attention_b.get_qkv_mats()
o_w = einops.rearrange(
attention_b.project_output.get_weights(),
"hidden_out (num_heads head_size) -> num_heads hidden_out head_size",
num_heads=attention_b.num_heads,
)
out = [
from_converted_tup(q_w, "a.w.q"),
from_converted_tup(k_w, "a.w.k"),
from_converted_tup(v_w, "a.w.v"),
from_converted_tup(o_w, "a.w.o"),
]
if attention_b.bias:
q_bias, k_bias, v_bias = einops.rearrange(
attention_b.attn_weights.get_bias(),
"(k num_heads head_size) -> k num_heads head_size",
k=3,
num_heads=q_w.shape[0],
)
o_bias = attention_b.project_output.get_bias()
out.extend(
[
from_converted_tup(q_bias * (2 if use_pos else 1), "a.w.q_bias"),
from_converted_tup(k_bias * (2 if use_pos else 1), "a.w.k_bias"),
from_converted_tup(v_bias, "a.w.v_bias"),
from_converted_tup(o_bias, "a.w.o_bias"),
]
)
return out
def get_mlp(block_b: GptBlock):
proj_in = block_b.linear1.get_weights()
in_bias = block_b.linear1.get_bias()
proj_out = block_b.linear2.get_weights()
if block_b.mlp_act_type == "bilinear":
# only half of params are used in this case
proj_out = einops.rearrange(proj_out, "hidden_out (a mlp_proj) -> a hidden_out mlp_proj", a=2)[0]
out = [
from_converted_tup(proj_in, "m.w.proj_in"),
from_converted_tup(in_bias, "m.w.in_bias"),
from_converted_tup(proj_out, "m.w.proj_out"),
]
if block_b.linear2.use_bias:
out.append(from_converted_tup(block_b.linear2.get_bias(), "m.w.out_bias"))
return out
def get_block(block_b: GptBlock, pos_enc_type: PosEncType):
all_inputs: dict[str, rc.Circuit] = {}
use_norm = block_b.norm1 is not id_norm
attn_pos = pos_enc_type == "shortformer"
norm_type = None
if use_norm:
norm_circs_attn, norm_type = get_norm(block_b.norm1)
all_inputs.update(mod_l.apply_prefix(dict(norm_circs_attn), "a"))
all_inputs.update(get_attention(block_b.attention, use_pos=attn_pos))
if block_b.use_mlp:
if use_norm:
mlp_norm_circs, norm_type_m = get_norm(block_b.norm2)
assert norm_type_m == norm_type
all_inputs.update(mod_l.apply_prefix(dict(mlp_norm_circs), "m"))
all_inputs.update(get_mlp(block_b))
return all_inputs
def get_model(model_b: Gpt):
all_inputs: Dict[str, rc.Circuit] = {}
for block_i in range(model_b.num_layers):
block_b = model_b.blocks[block_i]
all_inputs.update(
{mod_l.add_number(s, block_i): c for s, c in get_block(block_b, pos_enc_type=model_b.pos_enc_type).items()}
)
if model_b.norm_type != "none" and model_b.use_norm_output:
norm, _ = get_norm(model_b.norm_output)
all_inputs.update(mod_l.apply_prefix(dict(norm), "final"))
unembed_name = "t.w.unembed"
if model_b.classifier:
unembedding = from_converted(model_b.embedding.linear_out.get_weights(), name=unembed_name)
else:
unembedding = from_converted(model_b.embedding.token_unembedding.embedding, name=unembed_name)
all_inputs[unembedding.name] = unembedding
if model_b.classifier:
output_bias = from_converted(model_b.embedding.linear_out.get_bias(), name="t.w.unembed_bias")
all_inputs[output_bias.name] = output_bias
return all_inputs
def get_model_info(model_b: Gpt, model_class: str = "GPTBeginEndToks"):
norm_type: Optional[Literal["ln", "bn"]]
if model_b.norm_type == "none":
norm_type = None
elif model_b.norm_type == "layer_norm":
norm_type = "ln"
elif model_b.norm_type == "batch_norm_fixed":
norm_type = "bn"
else:
raise NotImplementedError("unsupported norm type, currently only supports ln/bn fixed", model_b.norm_type)
return mod_l.TransformerInfo(
mod_l.TransformerParams(
mod_l.TransformerBlockParams(
norm_type=norm_type,
attn_bias=model_b.attn_bias,
attn_pos=model_b.pos_enc_type == "shortformer",
use_mlp=model_b.use_mlp,
mlp_act_type=model_b.mlp_act_type,
mlp_output_bias=model_b.mlp_bias,
),
num_layers=model_b.num_layers,
use_norm_output=model_b.use_norm_output,
output_bias=model_b.classifier,
),
model_class=model_class,
pos_enc_type=model_b.pos_enc_type,
causal_mask=model_b.causal_mask,
)
def get_bound_model(model_b: Gpt, model_class: str = "GPTBeginEndToks"):
all_inputs = mod_l.rename_circs_to_keys(get_model(model_b), "_arr")
tok_embeds = from_converted(model_b.embedding.token_embedding.embedding, name="t.w.tok_embeds")
pos_embeds = from_converted(model_b.embedding.position_embedding.embedding, name="t.w.pos_embeds")
info = get_model_info(model_b, model_class=model_class)
model_ret = info.params.get()
circ = rc.module_new_bind(model_ret.body, *list(all_inputs.items()), name="t.bind_w")
return circ, (tok_embeds, pos_embeds), info, model_ret | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/jax_to_module.py | 0.710226 | 0.376451 | jax_to_module.py | pypi |
from __future__ import annotations
import itertools
from typing import Callable, Iterable, List, Optional, Sequence, Tuple, TypeVar, cast
from rust_circuit import optional as op
from ._rust import (
Add,
Circuit,
Cumulant,
Einsum,
Module,
ModuleArgSpec,
ModuleSpec,
Scalar,
Symbol,
extract_rewrite_raw,
kappa_term,
)
# source: https://stackoverflow.com/questions/19368375/set-partitions-in-python
def partition(collection: List[T]) -> Iterable[List[List[T]]]:
if len(collection) == 0:
yield []
return
if len(collection) == 1:
yield [collection]
return
first = collection[0]
for smaller in partition(collection[1:]):
# insert `first` in each of the subpartition's subsets
for n, subset in enumerate(smaller):
yield smaller[:n] + [[first] + subset] + smaller[n + 1 :]
# put `first` in its own subset
yield [[first]] + smaller
def cum_name_to_eps_name(cum_name: str):
"""Replace the "k" at the beginning by an "eps" """
return "eps" + cum_name[1:] if cum_name[0] == "k" else cum_name + "_eps"
def kappa_hat(cumulant: Cumulant):
"""this is an mathematical object we define for eps attrib/decomposition"""
assert cumulant.num_children > 0
if cumulant.num_children == 1:
return Add.minus(cumulant.children[0], cumulant, name=f"{cum_name_to_eps_name(cumulant.name)}")
return cumulant
def eps_term(args: List[List[Tuple[int, Circuit]]]):
new_out, _ = kappa_term(args, on_sub_cumulant_fn=kappa_hat)
# Note: autonaming + rename in kappa_hat will produce a nice name
return new_out
def get_multiplier_default(p: List[List[Tuple[int, Circuit]]]) -> float:
return (-1) ** sum(len(b) > 1 for b in p)
T = TypeVar("T")
def split_in_buckets(seq: Sequence[T], bucket_sizes: Sequence[int]) -> List[Sequence[T]]:
res: List[Sequence[T]] = []
runnning_count = 0
for bsize in bucket_sizes:
res.append(seq[runnning_count : runnning_count + bsize])
runnning_count += bsize
return res
def eps_attrib_module(
node_ranks: Sequence[int],
non_centered: bool = False,
name: str = "epsilon",
extra_filter: Callable[[List[List[Tuple[int, Circuit]]]], bool] = lambda _: True,
get_multiplier: Callable[[List[List[Tuple[int, Circuit]]]], float] = get_multiplier_default,
symbolic_size_start: int = 50,
) -> ModuleSpec:
"""returns epsilion(*circuits)
non_centered means we don't subtract out the cumulant
"""
shapes = split_in_buckets([None] * 1000, node_ranks)
circuits: List[Circuit] = list[Circuit](
[Symbol.new_with_random_uuid(tuple(shape), f"input_{i}") for i, shape in enumerate(shapes)]
)
input_specs = [(c, ModuleArgSpec(cast(Symbol, c))) for c in circuits]
out: List[Einsum] = []
if len(circuits) == 0:
eps: Circuit = Scalar(1.0, name=name)
return extract_rewrite_raw(eps, input_specs, prefix_to_strip=None, module_name=name).spec
if non_centered and len(circuits) == 1:
eps = circuits[0]
return extract_rewrite_raw(eps, input_specs, prefix_to_strip=None, module_name=name).spec
for p in partition(list(enumerate(circuits))):
if (non_centered and len(p) == 1) or not extra_filter(p):
continue
new_out = eps_term(p)
assert new_out.shape == tuple(itertools.chain.from_iterable(c.shape for c in circuits))
out.append(Einsum.scalar_mul(new_out, get_multiplier(p)))
eps = Add(*out, name=f"{name}_sum")
return extract_rewrite_raw(eps, input_specs, prefix_to_strip=None, module_name=name).spec
def eps_attrib(
cumulant: Cumulant,
non_centered: bool = False,
name: Optional[str] = None,
extra_filter: Callable[[List[List[Tuple[int, Circuit]]]], bool] = lambda _: True,
get_multiplier: Callable[[List[List[Tuple[int, Circuit]]]], float] = get_multiplier_default,
):
"""returns epsilion(*cumulant.circuits)
non_centered means we don't subtract out the cumulant
"""
name_ = op.unwrap_or(name, cum_name_to_eps_name(cumulant.name))
cum_circuits = cumulant.children
return Module.new_flat(
eps_attrib_module(
[c.rank for c in cum_circuits],
non_centered=non_centered,
extra_filter=extra_filter,
get_multiplier=get_multiplier,
),
*cum_circuits,
name=name_,
) | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/cum_algo.py | 0.916232 | 0.429549 | cum_algo.py | pypi |
import functools
import hashlib
import weakref
from typing import *
if TYPE_CHECKING:
from interp.circuit.circuit import Circuit, Shape
import torch
import rust_circuit.optional as op
from rust_circuit.py_utils import assert_never
from . import _rust as rc
from ._rust import Add as rAdd
from ._rust import Array as rArray
from ._rust import Circuit as rCircuit
from ._rust import Concat as rConcat
from ._rust import Cumulant as rCumulant
from ._rust import DiscreteVar as rDiscreteVar
from ._rust import Einsum as rEinsum
from ._rust import GeneralFunction as rGeneralFunction
from ._rust import GeneralFunctionShapeInfo, GeneralFunctionSimpleSpec
from ._rust import Index as rIndex
from ._rust import Rearrange as rRearrange
from ._rust import Scalar as rScalar
from ._rust import Scatter as rScatter
from ._rust import Schedule as rSchedule
from ._rust import StoredCumulantVar as rStoredCumulantVar
from ._rust import Tag as rTag
from ._rust import TorchDeviceDtypeOp, scatter_to_concat
from .general_function_spec_base import GeneralFunctionSpecBase
MYPY = False
class CantConvertCircuitError(Exception):
def __init__(self):
try:
import hypothesis
except:
super().__init__()
return
if hypothesis.currently_in_test_context():
hypothesis.assume(False)
super().__init__()
class FromPyGeneralFunctionSpec(GeneralFunctionSpecBase):
name_val: str
function_val: Callable # Callable[[*torch.Tensor], torch.Tensor]
get_shape_val: Callable # Callable[[*Shape], Optional[Sequence[int]]]
num_non_batchable_output_dims: int
input_batchability: Tuple[bool, ...]
def __init__(
self,
name: str,
function: Callable,
get_shape: Callable,
num_non_batchable_output_dims: int,
input_batchability: Tuple[bool, ...],
) -> None:
self.name_val = name
if not MYPY: # this works on newer versions of MYPY - likely a bug
self.function_val = function
self.get_shape_val = get_shape
self.num_non_batchable_output_dims = num_non_batchable_output_dims
self.input_batchability = input_batchability
@property
def name(self) -> str:
return self.name_val
def compute_hash_bytes(self) -> bytes:
from interp.circuit.eq_by_big_hash import hash_add, hash_add_by_id
m = hashlib.blake2b(str(self.__class__).encode())
hash_add(m, self.name)
hash_add_by_id(m, self.function_val)
hash_add_by_id(m, self.get_shape_val)
hash_add(m, self.num_non_batchable_output_dims)
hash_add(m, self.input_batchability)
return m.digest()
def function(self, *tensors: torch.Tensor) -> torch.Tensor:
return self.function_val(*tensors)
def get_shape_info(self, *shapes: "Shape") -> GeneralFunctionShapeInfo:
return GeneralFunctionShapeInfo(
self.get_shape_val(*shapes), self.num_non_batchable_output_dims, self.input_batchability
)
def py_to_rust(py: "Circuit", device_dtype_op=TorchDeviceDtypeOp()) -> rCircuit:
from interp.circuit.circuit import Circuit
from interp.circuit.circuit_compiler.util import TorchAxisIndex
from interp.circuit.circuit_utils import device_eq
from interp.circuit.computational_node import (
Add,
Concat,
Einsum,
GeneralFunction,
Index,
UnaryRearrange,
WildFunction,
)
from interp.circuit.constant import ArrayConstant, FloatConstant, One, Zero
from interp.circuit.cumulant import Cumulant
from interp.circuit.var import AutoTag, DiscreteVar, StoredCumulantVar
@functools.cache
def recurse(py: Circuit) -> rCircuit:
if isinstance(py, (Zero, One, FloatConstant)):
return rScalar(py.value, py.shape, py.name)
elif isinstance(py, ArrayConstant):
tensor = py.value
if tensor.shape != py.shape:
tensor = torch.broadcast_to(py.value, py.shape)
if (
device_dtype_op.dtype is not None
and (torch_dtype := getattr(torch, device_dtype_op.dtype)) != tensor.dtype
):
tensor = tensor.to(dtype=torch_dtype)
if not device_eq(tensor, device_dtype_op.device):
tensor = tensor.to(device=device_dtype_op.device)
return rArray(tensor, py.name)
elif isinstance(py, Add):
return rAdd(*[recurse(x) for x in py.to_unweighted().items.keys()], name=py.name)
elif isinstance(py, Einsum):
return rEinsum(*[(recurse(operand), ints) for operand, ints in py.args], out_axes=py.out_axes, name=py.name)
elif isinstance(py, UnaryRearrange):
spec = py.get_spec()
rust_child = recurse(py.node)
return rRearrange(rust_child, spec.to_rust(), py.name)
elif isinstance(py, Index):
index = py.index
new_idx: List[TorchAxisIndex] = []
# convert slices from python approach
for i, s in zip(index, py.node.shape):
if (not MYPY) and isinstance(i, slice):
fix = lambda item: op.map(item, lambda x: max(min(x, s), -s))
i = slice(fix(i.start), fix(i.stop))
elif (not MYPY) and isinstance(i, torch.Tensor) and i.ndim == 0:
i = int(i.item())
new_idx.append(i)
index = tuple(new_idx)
if device_dtype_op.device is not None:
index = tuple(
[
x.to(device=device_dtype_op.device)
if isinstance(x, torch.Tensor) and not device_eq(x, device_dtype_op.device)
else x
for x in index
]
)
return rIndex(recurse(py.node), index, py.name)
elif isinstance(py, Concat):
return rConcat(*[recurse(x) for x in py.circuits], axis=py.axis, name=py.name)
elif isinstance(py, AutoTag):
return rTag(recurse(py.node), py.uuid, py.name)
elif isinstance(py, GeneralFunction):
recursed = recurse(py.node)
if py.rust_spec is not None:
return rGeneralFunction(recursed, spec=py.rust_spec, name=py.name)
name = py.function.__name__.removesuffix("_fn") if hasattr(py.function, "__name__") else "unk_py_fn"
result = rGeneralFunction.new_by_name_op(recursed, spec_name=name, name=py.name)
if result is not None:
return result
else:
num_non_batchable = len(py.normalized_non_batch_dims())
gspec = FromPyGeneralFunctionSpec(
name,
py.function,
py_generalfunction_get_shape(num_non_batchable),
num_non_batchable,
(py.allows_batching,),
)
return rGeneralFunction(recursed, spec=gspec, name=py.name)
elif isinstance(py, WildFunction):
# TODO: not well tested!!!
rec_nodes = [recurse(n) for n in py.nodes]
gspec = FromPyGeneralFunctionSpec(
py.function.get_name()
if hasattr(py.function, "get_name")
else py.function.__name__.removesuffix("_fn"),
py.function,
py.get_wild_function_shape_getter(),
py.num_non_batchable_output_dims,
tuple(py.input_batchability),
)
return rGeneralFunction(*rec_nodes, spec=gspec, name=py.name)
elif isinstance(py, Cumulant):
return rCumulant(*[recurse(x) for x in py.circuits], name=py.name)
elif isinstance(py, DiscreteVar):
return rDiscreteVar(recurse(py.values), recurse(py.probs_and_group), py.name)
elif isinstance(py, StoredCumulantVar):
return rStoredCumulantVar.new_mv(
recurse(py.mean),
recurse(py.cov),
{k: recurse(v) for k, v in py.higher_cumulants.items()},
py.uuid,
py.name,
)
else:
raise NotImplementedError(f"py_to_rust for {py.__class__.__name__} unimplemented")
return recurse(py)
def rust_to_py(rust: rCircuit):
from interp.circuit.circuit import Circuit
from interp.circuit.computational_node import Add, Concat, Einsum, GeneralFunction, Index, UnaryRearrange
from interp.circuit.constant import ArrayConstant, FloatConstant
from interp.circuit.cumulant import Cumulant
from interp.circuit.var import AutoTag, DiscreteVar, StoredCumulantVar
@functools.cache
def recurse(rust: rCircuit):
result = recurse_raw(rust)
assert result.shape == rust.shape, (result.shape, rust.shape, rust, result)
assert result.ndim == len(rust.shape)
return result
def recurse_raw(rust: rCircuit) -> Circuit:
if isinstance(rust, rScalar):
return FloatConstant(rust.value, rust.shape, rust.name)
elif isinstance(rust, rArray):
return ArrayConstant(rust.value, rust.value.shape, name=rust.name)
elif isinstance(rust, rAdd):
return Add.from_unweighted_list([recurse(x) for x in rust.children], rust.name)
elif isinstance(rust, rEinsum):
return Einsum.from_axes_tuples(
*[(recurse(operand), ints) for operand, ints in rust.args], out_axes=rust.out_axes, name=rust.name
)
elif isinstance(rust, rRearrange):
return UnaryRearrange.from_spec(
recurse(rust.node),
rust.spec.to_py_rearrange_spec(rust.node.shape),
rust.name,
)
elif isinstance(rust, rIndex):
return Index(recurse(rust.node), tuple(rust.idx), name=rust.name)
elif isinstance(rust, rGeneralFunction):
spec = rust.spec
if not (
isinstance(spec, (GeneralFunctionSimpleSpec, FromPyGeneralFunctionSpec))
and (rust.children[0].shape == rust.shape)
and (len(rust.children) == 1)
):
raise CantConvertCircuitError()
if isinstance(spec, GeneralFunctionSimpleSpec):
allows_batching = True
function = spec.get_function()
elif isinstance(spec, FromPyGeneralFunctionSpec):
allows_batching = spec.input_batchability[0]
function = spec.function_val
else:
assert_never(spec)
return GeneralFunction(
recurse(rust.children[0]),
function,
None,
name=rust.name,
allows_batching=allows_batching, # this is always batchable
non_batch_dims=tuple(range(-spec.num_non_batchable_output_dims, 0)),
rust_spec=spec,
)
elif isinstance(rust, rConcat):
return Concat(tuple([recurse(x) for x in rust.children]), rust.axis, rust.name)
elif isinstance(rust, rScatter):
return recurse(scatter_to_concat(rust))
elif isinstance(rust, rTag):
return AutoTag(recurse(rust.node), rust.uuid, rust.name)
elif isinstance(rust, rDiscreteVar):
return DiscreteVar(recurse(rust.values), recurse(rust.probs_and_group), rust.name)
elif isinstance(rust, rStoredCumulantVar):
highers = {k: recurse(v) for k, v in rust.cumulants.items() if k != 1 and k != 2}
return StoredCumulantVar(
recurse(rust.cumulants[1]), recurse(rust.cumulants[2]), highers, rust.uuid, rust.name
)
elif isinstance(rust, rCumulant):
return Cumulant(tuple([recurse(x) for x in rust.children]), rust.name)
elif isinstance(rust, rc.Module):
raise CantConvertCircuitError()
else:
raise NotImplementedError(rust)
return recurse(rust)
def py_generalfunction_get_shape(num_non_batchable: int):
def check(shape: "Shape"):
assert len(shape) >= num_non_batchable
return shape
return check
# this takes python circuit, rust version takes rust circuit
def schedule_replace_circuits(schedule: rSchedule, map: Dict["Circuit", torch.Tensor]):
new_dict = {py_to_rust(k).hash: v for k, v in map.items()}
result = schedule.replace_tensors(new_dict)
return result
def rust_get_f64_evaluator():
from interp.circuit.circuit import MemoizedFn
from interp.circuit.circuit_utils import evaluate_fn
return lambda circuits: [MemoizedFn(evaluate_fn(dtype=torch.float64))(c) for c in circuits]
def evaluate_py(pycirc):
return py_to_rust(pycirc).evaluate()
def eval_opt_f64_py(pycirc):
return rc.optimize_and_evaluate(
py_to_rust(pycirc, TorchDeviceDtypeOp(dtype="float64")), rc.OptimizationSettings()
).to(dtype=torch.float64)
def cached_circuit_by_hash(fn):
"""Caches a function Circuit->Any, only storing hash of input"""
cachy: Dict[Tuple, Any] = {}
@functools.wraps(fn)
def wrapped_fn(circuit: rCircuit, *other_args):
nonlocal cachy
key = tuple([circuit.hash, *other_args])
if key in cachy:
return cachy[key]
result = fn(circuit, *other_args)
cachy[key] = result
return result
return wrapped_fn
def cached_circuit_by_hash_weak(fn):
"""Caches a function Circuit->Any, only storing hash of input"""
cachy: Dict[Tuple, weakref.ref[Any]] = {}
@functools.wraps(fn)
def wrapped_fn(circuit: rCircuit, *other_args):
nonlocal cachy
key = tuple([circuit.hash, *other_args])
if key in cachy and (cached_result := cachy[key]()) is not None:
return cached_result
result = fn(circuit, *other_args)
cachy[key] = weakref.ref(result)
return result
return wrapped_fn | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/interop_rust.py | 0.793866 | 0.238317 | interop_rust.py | pypi |
import jax
import matplotlib.pyplot as plt
import numpy as np
import torch
import interp.tools.optional as op
from interp.model.gpt_model import Gpt
from interp.model.model_loading import load_model_mem_cached
from interp.tools.log import Idxs, KeyIdxs, LoggerCache, LogInfo, construct_mut_log_cache
from .setup import ParenDataset, ParenTokenizer
jax.config.update("jax_platform_name", "cpu")
#%% [markdown]
"""This notebook produces plots for the causal scrubbing paren balancer writeup."""
# %%
# define model
MODEL_ID = "jun9_paren_balancer"
ds = ParenDataset.load(MODEL_ID)
ds = ds[:1000]
# %%
def bce_with_logits_loss(logits, labels):
targets = torch.tensor(labels, dtype=torch.float, device="cuda:0")
logit_diff = logits[:, 1] - logits[:, 0]
correct = (logit_diff > 0) == targets
return torch.nn.BCEWithLogitsLoss(reduction="none")(logit_diff, targets), correct
def get_bound_jax_model(model_id: str):
jax_model, jax_params, _ = load_model_mem_cached(model_id)
return jax_model.bind(jax_params)
def run_dataset_on_jax_model(dataset: ParenDataset, model_id: str, log_info=None):
mask = (dataset.tokens_flat.value == ParenTokenizer.PAD_TOKEN).cpu().numpy()
config = Gpt.CallConfig(pos_mask=mask)
log = construct_mut_log_cache(log_info, None)
jax_model = get_bound_jax_model(model_id)
out = jax_model(dataset.tokens_flat.value.cpu().numpy(), log=log, config=config)[:, 0, :]
probs = jax.nn.softmax(out)
return out, probs, op.map(log, lambda log: log.cache)
# %%
a2_by_head = KeyIdxs("blocks.attention.out_by_head", idxs=Idxs.single(2))
norm_in = KeyIdxs("final_out.norm.inp")
logger = LoggerCache.from_key_idxs([a2_by_head, norm_in])
out, probs, cache = run_dataset_on_jax_model(ds, MODEL_ID, log_info=LogInfo(logger))
cache = op.unwrap(cache)
h20_out = cache.get(a2_by_head)[:, 0, 0, :]
h21_out = cache.get(a2_by_head)[:, 1, 0, :]
all_terms = cache.get(norm_in)[:, 0, :]
# %%
def attribution_score(head_term, full_sum):
assert head_term.ndim == 2 and full_sum.ndim == 2 # [batch, hiddendim]
rem = full_sum - head_term # []
possible_sums = head_term[:, None, :] + rem[None, :, :] # [batch.head, batch.remainders, hidden]
logits = np.array(get_bound_jax_model(MODEL_ID).out(possible_sums))
logit_diffs = logits[..., 1] - logits[..., 0]
return logit_diffs.mean(1) - logit_diffs.mean()
h20_attr = attribution_score(h20_out, all_terms)
h21_attr = attribution_score(h21_out, all_terms)
# %%
# set some globals that later cells can change
# thanks late binding closures!
alpha = 0.5
size = 25
def mk_scatter(filter, label=None, color=None, ax=None):
ax = plt.gca() if ax is None else ax
ax.scatter(h20_attr[filter], h21_attr[filter], label=label, color=color, s=size, alpha=alpha)
is_balanced = np.array(ds.is_balanced.value, dtype="bool")
count_test = ds.count_test.bool()
horizon_test = ds.horizon_test.bool()
ele_and_open = count_test & horizon_test
# %%
mk_scatter(is_balanced, "balanced", "#1b9e77")
mk_scatter(count_test & ~horizon_test, "just horizon failure", "#e7298a")
mk_scatter(~count_test & horizon_test, "just count ${}^($ failure", "#d95f02")
mk_scatter(~count_test & ~horizon_test, "both failures", "#7570b3")
plt.xlabel("Logit difference from 2.0")
plt.ylabel("Logit difference from 2.1")
plt.legend()
plt.gcf().set_size_inches(5, 5)
plt.show()
# %%
ele_and_open = count_test & ds.starts_with_open
mk_scatter(~ele_and_open & ~horizon_test, "both failures", "#7570b3")
mk_scatter(~ele_and_open & horizon_test, "just count${}^($ failure", "#d95f02")
mk_scatter(ele_and_open & ~horizon_test, "just horizon failure", "#e7298a")
mk_scatter(is_balanced, "balanced", "#1b9e77")
plt.xlabel("Logit difference from 2.0")
plt.ylabel("Logit difference from 2.1")
plt.legend()
plt.gcf().set_size_inches(5, 5)
plt.show()
# %%
fig, axs = plt.subplots(1, 3, sharey=True, figsize=(10, 3))
axs[0].set_title("Passes horizon test?")
pass_c = "#377eb8"
fail_c = "#e41a1c"
alpha = 0.3
size = 8
mk_scatter(horizon_test, color=pass_c, ax=axs[0])
mk_scatter(~horizon_test, color=fail_c, ax=axs[0])
axs[1].set_title("Passes count test?")
mk_scatter(~count_test, color=fail_c, ax=axs[1])
mk_scatter(count_test, color=pass_c, ax=axs[1])
axs[2].set_title("Passes count${}^($ test?")
mk_scatter(~ele_and_open, "fail", fail_c, ax=axs[2])
mk_scatter(ele_and_open, "pass", pass_c, ax=axs[2])
axs[2].legend()
axs[0].set_xlabel("Logit difference from 2.0")
axs[1].set_xlabel("Logit difference from 2.0")
axs[2].set_xlabel("Logit difference from 2.0")
axs[0].set_ylabel("Logit difference from 2.1")
plt.show()
# %% | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/demos/paren_balancer/writeup_attribution_plots.py | 0.768125 | 0.459682 | writeup_attribution_plots.py | pypi |
# %% [markdown]
"""
# Day 4b: Paren Balancer Causal Scrubbing
To start, please read this [less wrong post](https://www.lesswrong.com/s/h95ayYYwMebGEYN5y/p/kjudfaQazMmC74SbF).
We will replicate the experiments today.
<!-- toc -->
"""
import uuid
from typing import Optional, Tuple
import numpy as np
import torch
from torch.nn.functional import binary_cross_entropy_with_logits
import rust_circuit as rc
import rust_circuit.optional as op
from rust_circuit.algebric_rewrite import residual_rewrite, split_to_concat
from rust_circuit.causal_scrubbing.experiment import (
Experiment,
ExperimentCheck,
ExperimentEvalSettings,
ScrubbedExperiment,
)
from rust_circuit.causal_scrubbing.hypothesis import (
Correspondence,
ExactSampler,
FuncSampler,
InterpNode,
UncondSampler,
chain_excluding,
corr_root_matcher,
)
from rust_circuit.demos.notebook_testing import NotebookInTesting
from rust_circuit.model_rewrites import To, configure_transformer
from rust_circuit.module_library import load_model_id
from rust_circuit.py_utils import I
from rust_circuit.ui.ui import circuit_graph_ui, ui_default_hidden_matcher
from . import test_paren_balancer_exercises as tests
from .setup import ParenDataset, ParenTokenizer, get_h00_open_vector
# Matcher selecting which circuit nodes the interactive graph UI hides by default.
ui_hidden_matcher = ui_default_hidden_matcher
# %%
# define model
# True when this file is executed directly (as a script / notebook) rather than imported.
MAIN = __name__ == "__main__"
# Fixed sequence length every paren string is padded/truncated to.
SEQ_LEN = 42
# Number of examples sampled per scrubbing experiment.
NUM_EXAMPLES = 4000
MODEL_ID = "jun9_paren_balancer"
# Toggles for the heavier parts of the notebook: circuit visualization,
# actually evaluating experiments, and running the slow experiments.
PRINT_CIRCUITS = True
ACTUALLY_RUN = True
SLOW_EXPERIMENTS = True
# Forwarded to Experiment(check=...); True enables the default consistency checks.
DEFAULT_CHECKS: ExperimentCheck = True
EVAL_DEVICE = "cuda:0"
# If you have less memory, you will want to reduce this and also add a batch_size
MAX_MEMORY = 30_000_000_000
# Forwarded to ExperimentEvalSettings(batch_size=...); None means no explicit batching.
BATCH_SIZE: Optional[int] = None
# Under the notebook test harness: run on CPU and skip slow experiments,
# but still execute the cells as if run as a script.
if NotebookInTesting.currently_in_notebook_test:
    EVAL_DEVICE = "cpu"
    SLOW_EXPERIMENTS = False
    MAIN = True  # a bit of a lie but we want to actually run things
# %% [markdown]
"""
## Setup
No exercises here! It may be helpful to read over the code, however.
### Circuit loading
If any of these operations confuse you, try printing out the circuit before and after!
Step 1: Initial loading
"""
# Load the pretrained paren-balancer transformer. circ_dict maps names to circuits;
# "t.bind_w" is the transformer with its weights bound. model_info carries the
# model's hyperparameters (e.g. num_layers, pos_enc_type) used below.
circ_dict, _, model_info = load_model_id(MODEL_ID)
circuit = circ_dict["t.bind_w"]
#%% [markdown]
"""
Step 2: We bind the model to an input by attaching a placeholder symbol named "tokens" to the model. We then specify that the attention mask, which prevents attending to padding, depends on this tokens array.
We use one hot tokens as this makes defining the attention mask a simple indexing operation.
The symbol has a random fixed uuid (fixed as this gives consistency when comparing in tests).
"""
# Placeholder symbol for the one-hot token input. The uuid is fixed (not random)
# so circuits compare equal across runs, which the tests rely on.
toks_uuid = uuid.UUID("ce34280e-169f-40bd-b78e-8adeb4274aba")
tokens_arr = rc.Symbol((SEQ_LEN, ParenTokenizer.vocab_size), uuid=toks_uuid, name="tokens")
# Token embeddings: one-hot tokens contracted against the embedding table.
tok_embeds = rc.Einsum.from_fancy_string(
    "seqlen vocab_size, vocab_size hidden -> seqlen hidden", tokens_arr, circ_dict["t.w.tok_embeds"], name="tok_embeds"
)
# Attention mask = 1 - is_padding, so padding positions cannot be attended to.
# Indexing the one-hot tokens at PAD_TOKEN yields the is-padding indicator.
attn_mask = rc.Add.minus(
    rc.Scalar(1),
    rc.Index(tokens_arr, I[:, ParenTokenizer.PAD_TOKEN]),
    name="pos_mask",
)
# Attach token embeddings, positional embeddings, and the mask to the model.
circuit = model_info.bind_to_input(circuit, tok_embeds, circ_dict["t.w.pos_embeds"], attn_mask)
# [markdown]
"""
Step 3: rewrite the circuit into a more convenient structure to work with, using `configure_transformer`.
This flattens out the residual stream (as opposed to the original nested layer structure), pushes down the weight bindings, and separates out each attention layer into a sum of heads.
"""
# Restructure the transformer: flatten the residual stream, push the weight
# bindings down, and split each attention layer into a sum over individual heads.
circuit = circuit.update(
    "t.bind_w",
    lambda c: configure_transformer(
        c,
        To.ATTN_HEAD_MLP_NORM,
        split_by_head_config="full",  # one separate term per attention head
        use_pull_up_head_split=True,
        use_flatten_res=True,
        flatten_components=True,
    ),
)
# [markdown]
"""
Some additional misc rewrites.
We substitute the inputs to be duplicated everywhere they appear in the model instead of being in one outer module bind.
We also index as we only care about the classification at position 0, and use `rc.conform_all_modules` to replace any remaining symbolic shapes with their numeric values.
"""
# Substitute module inputs in place wherever they appear, keep only the
# classification logits at sequence position 0, and replace any remaining
# symbolic shapes with concrete sizes.
circuit = circuit.cast_module().substitute()
circuit = rc.Index(circuit, I[0]).rename("logits_pos0")
circuit = rc.conform_all_modules(circuit)
# %% [markdown]
"""
Finally, some custom renames that make the circuit more intuitive.
"""
# Cosmetic renames to give nodes intuitive names.
# NOTE(review): both updates below match "t.call"; presumably after the first
# rename the matcher hits the remaining (inner) "t.call" node — confirm that
# update() only renames a still-matching node on the second pass.
circuit = circuit.update("t.call", lambda c: c.rename("logits"))
circuit = circuit.update("t.call", lambda c: c.rename("logits_with_bias"))
# Tag per-layer attention/MLP nodes with ".inner" so the bare names can be reused below.
circuit = circuit.update(rc.Regex(r"[am]\d(.h\d)?$"), lambda c: c.rename(c.name + ".inner"))
circuit = circuit.update("t.inp_tok_pos", lambda c: c.rename("embeds"))
circuit = circuit.update("t.a.mask", lambda c: c.rename("padding_mask"))
for l in range(model_info.params.num_layers):
    circuit = circuit.update(f"b{l}.m", lambda c: c.rename(f"m{l}"))
    circuit = circuit.update(f"b{l}.a.h0", lambda c: c.rename(f"a{l}.h0"))
    circuit = circuit.update(f"b{l}.a.h1", lambda c: c.rename(f"a{l}.h1"))
    # Each block's output becomes the next layer's input ("final" after the last
    # block). NOTE: `next` shadows the builtin of the same name (local to this loop).
    next = "final" if l == model_info.params.num_layers - 1 else f"a{l+1}"
    circuit = circuit.update(f"b{l}", lambda c: c.rename(f"{next}.input"))
# HTML printer that stops descending once it reaches embeds/mask/final-norm/head nodes.
printer = rc.PrintHtmlOptions(
    shape_only_when_necessary=False,
    traversal=rc.restrict(
        rc.IterativeMatcher("embeds", "padding_mask", "final.norm", rc.Regex(r"^[am]\d(.h\d)?$")), term_if_matches=True
    ),
)
# %%
# Visualize the rewritten circuit: static HTML print plus the interactive graph UI.
if PRINT_CIRCUITS:
    printer.print(circuit)
    circuit_graph_ui(circuit, default_hidden=ui_hidden_matcher.get(circuit))
# %% [markdown]
"""
## dataset and experiment code
We have a custom dataset class that precomputes some features of paren sequences, and handles pretty printing / etc.
"""
# Dataset of paren sequences with precomputed features (balance, count test, horizon test).
ds = ParenDataset.load()
def bce_with_logits_loss(logits: torch.Tensor, labels: torch.Tensor):
    """Per-example binary cross-entropy loss for the balance classifier.

    logits: [batch, 2]; column 0 is the "unbalanced" logit, column 1 the
        "balanced" logit.
    labels: [batch]; truthy iff the sequence is balanced.

    Returns (loss, correct): the elementwise BCE losses and a boolean mask of
    correctly classified examples.
    """
    # Reduce the two-class logits to a single balanced-minus-unbalanced score.
    balanced_minus_unbalanced = logits[..., 1] - logits[..., 0]
    float_labels = labels.to(device=logits.device, dtype=logits.dtype)
    per_example_loss = binary_cross_entropy_with_logits(
        balanced_minus_unbalanced, float_labels, reduction="none"
    )
    # Predicting "balanced" iff the score is positive; compare against the labels.
    predictions_match = balanced_minus_unbalanced.gt(0) == float_labels
    return per_example_loss, predictions_match
def paren_experiment(
    circuit: rc.Circuit,
    dataset: ParenDataset,
    corr: Correspondence,
    checks: ExperimentCheck = DEFAULT_CHECKS,
    random_seed=1,
    actually_run=ACTUALLY_RUN,
    num_examples=NUM_EXAMPLES,
    batch_size=BATCH_SIZE,
    **kwargs,
) -> Tuple[ScrubbedExperiment, Optional[float]]:
    """
    Run a causal-scrubbing experiment on `circuit` with correspondence `corr`.

    Scrubs `num_examples` examples; when `actually_run` is True, also evaluates
    the scrubbed circuit and prints a loss/accuracy breakdown on several subsets
    (balanced, unbalanced, count-test failures, horizon-test failures).

    Returns (scrubbed experiment, overall mean BCE loss). The loss is None when
    `actually_run` is False (scrub only, no evaluation).
    """
    ex = Experiment(
        circuit,
        dataset,
        corr,
        random_seed=random_seed,
        check=checks,
        **kwargs,
    )
    # treeify only matters when we actually evaluate the scrubbed circuit
    scrubbed = ex.scrub(num_examples, treeify=actually_run)
    overall_loss: Optional[float] = None
    if actually_run:
        logits = scrubbed.evaluate(
            ExperimentEvalSettings(
                optim_settings=rc.OptimizationSettings(
                    max_memory=MAX_MEMORY,
                ),
                device_dtype=rc.TorchDeviceDtypeOp(device=EVAL_DEVICE),
                optimize=True,
                batch_size=batch_size,
            ),
        )
        ref_ds = ParenDataset.unwrap(scrubbed.ref_ds)
        labels = ref_ds.is_balanced.value

        def loss_str(mask):
            # Summarize mean loss, standard error, and accuracy on the masked subset.
            loss, correct = bce_with_logits_loss(logits[mask], labels[mask])
            loss = loss.cpu()
            std_err = loss.std() / len(loss) ** 0.5
            return f"{loss.mean():.3f} SE={std_err:.3f} acc={correct.float().mean():.1%} "

        print(f" overall: {loss_str(slice(None))}")
        print(f" on bal: {loss_str(labels.to(dtype=torch.bool))}")
        print(f" on unbal: {loss_str(~labels.to(dtype=torch.bool))}")
        print(f" on count failures: {loss_str(~ref_ds.count_test.to(dtype=torch.bool))}")  # type: ignore
        print(f" on horizon failures: {loss_str(~ref_ds.horizon_test.to(dtype=torch.bool))}")  # type: ignore
        overall_loss = bce_with_logits_loss(logits, labels)[0].mean().item()
    return scrubbed, overall_loss
def check_loss(loss: Optional[float], target: float, std_err: float):
    """
    Assert that `loss` is within 4 standard errors of `target`.

    Raises AssertionError if `loss` is None or |loss - target| >= 4 * std_err.
    Emits a warning (without aborting) when the error exceeds 2 * std_err.
    """
    import warnings  # local import: keeps this fix self-contained

    # we usually set tol to be 4 * SE
    assert loss is not None
    err = abs(loss - target)
    # BUG FIX: the original message interpolated `loss` twice instead of `target`.
    assert err < 4 * std_err, f"err too large! loss ({loss:.2f}) != target ({target:.2f}) ± 4*SE ({std_err:.2f})"
    if err > 2 * std_err:
        # BUG FIX: the original `raise Warning(...)` used a plain string (the {...}
        # placeholders were never interpolated) and aborted the run for a merely
        # "kinda large" error; warn instead so execution continues.
        warnings.warn(f"Err is kinda large! loss ({loss:.2f}) != target ({target:.2f}) ± 2*SE ({std_err:.2f})")
# %% [markdown]
"""
Helpful tidbit on tests:
Most of the tests in this file raise assertion errors that contain extra data, for instance the objects from the comparison that failed. It can be convenient to catch this data to debug. For instance:
```
def check_eq(a, b):
assert a == b, ("not equal!", a, b)
try:
check_eq(0, 1)
except AssertionError as e:
a, b = e.args[0][1], e.args[0][2]
print(a, b)
```
"""
# %% [markdown]
"""
## Experiment 0
To start with let's measure two baselines:
- running the model normally
- interchanging the logits randomly
Make causal scrubbing experiments that implement both of these. In each case there should be a single interp node named "logits".
The tests do explicitly check that the interp nodes in the correspondence are named correctly, in order to facilitate more helpful feedback.
"""
if "SOLUTION":
corr0a = Correspondence()
corr0a.add(InterpNode(cond_sampler=ExactSampler(), name="logits"), corr_root_matcher)
else:
corr0a = Correspondence()
if MAIN:
tests.t_ex0a_corr(corr0a)
print("\nEx0a: Exact sampling")
ex0a, loss0a = paren_experiment(circuit, ds, corr0a)
check_loss(loss0a, 0, 0.01)
if "SOLUTION":
corr0b = Correspondence()
corr0b.add(InterpNode(cond_sampler=UncondSampler(), name="logits"), corr_root_matcher)
else:
corr0b = Correspondence()
if MAIN:
tests.t_ex0b_corr(corr0b)
print("\nEx0b: Interchanging logits")
ex0b, loss0b = paren_experiment(circuit, ds, corr0b)
check_loss(loss0b, 4.30, 0.12)
# %% [markdown]
"""
## Experiment 1
Now, let's construct a basic experiment to determine the role that different heads play.
We'll start by testing the following claimed hypothesis:
- Heads 1.0 and 2.0 compute the count test, and check that there are equal numbers of open and close parentheses
- Head 2.1 computes the horizon test.
### Matchers
"""
if "SOLUTION":
# There are several ways to define this matcher, eg:
# m_10 = rc.IterativeMatcher("final.input").chain(rc.restrict("a1.h0", end_depth=2))
# or defining all_components = {"a0.h0", "a0.h1", "m0", ..., "m2"} and then
# m_10 = chain_excluding(corr_root_matcher, "a1.h0", all_components - "a1.h0")
# It's just down to personal preference / what is clearer in the circumstance.
m_10 = chain_excluding(corr_root_matcher, "a1.h0", {"m2", "m1", "a2.h0", "a2.h1"})
m_20 = chain_excluding(corr_root_matcher, "a2.h0", "m2")
m_21 = chain_excluding(corr_root_matcher, "a2.h1", "m2")
else:
"""
Define the following matchers. You only want to match _direct_ paths, that is paths through the
residual stream and not through direct paths. This can be accomplished by calling `rc.restrict` or
the chain_excluding utilty included in causal scrubbing code.
"""
m_10 = rc.IterativeMatcher()
m_20 = rc.IterativeMatcher()
m_21 = rc.IterativeMatcher()
if MAIN:
tests.t_m_10(m_10)
tests.t_m_20(m_20)
tests.t_m_21(m_21)
# %% [markdown]
"""
### Cond Samplers
"""
def passes_count(d: ParenDataset) -> torch.Tensor:
    """
    Returns a bool tensor of shape [len(d)]
    Result is true when the corresponding datum has equal numbers of open and close parens
    """
    # Exercise stub: not used in the solution, as it is implemented in ParenDataset
    # (see ParenDataset.count_test, used by `count_cond` below).
    raise NotImplementedError
def passes_horizon(d: ParenDataset) -> torch.Tensor:
    """
    Returns a bool tensor of shape [len(d)]
    Result is true when the corresponding datum passes the right to left horizon test as described in the [writeup](https://www.lesswrong.com/s/h95ayYYwMebGEYN5y/p/kjudfaQazMmC74SbF#Algorithm).
    """
    # Exercise stub: not used in the solution, as it is implemented in ParenDataset
    # (see ParenDataset.horizon_test, used by `horizon_cond` below).
    raise NotImplementedError
if "SOLUTION":
count_cond = FuncSampler(lambda d: ParenDataset.unwrap(d).count_test)
horizon_cond = FuncSampler(lambda d: ParenDataset.unwrap(d).horizon_test)
else:
"""
Define the following cond samplers using FuncSamplers.
Write your own functions for this based on `ParenDataset.tokens_flat`.
(Yes, there are predefined properties on ParenDataset. You can use them if you are short on time and want to skip this test. They also use caching to make things faster, so if speed gets to be annoyingly slow for later experiments you could switch over).
"""
count_cond = FuncSampler(lambda d: passes_count(ParenDataset.unwrap(d)))
horizon_cond = FuncSampler(lambda d: passes_horizon(ParenDataset.unwrap(d)))
if MAIN:
tests.t_count_cond(count_cond)
tests.t_horizon_cond(horizon_cond)
# %% [markdown]
"""
This first correspondence should have 4 nodes:
- The root node, named "logits", with an ExactSampler (any sampler that agrees on the labels will be equivalent,
but an exact sampler is somewhat more computationally efficient).
- Three nodes for the three heads of interest, named "10", "20", and "21". The first two should use the cond sampler provided (`count_cond` for now), the third should use the `horizon_cond` sampler.
The exact interp node names are checked by the test, which allows it to give more meaningful feedback.
"""
def make_ex1_corr(cs_for_h10_and_h20) -> Correspondence:
    """
    Build the experiment-1 correspondence: a root "logits" node plus one node per
    head of interest. `cs_for_h10_and_h20` is the cond sampler used for heads 1.0
    and 2.0 (e.g. `count_cond` or `count_open_cond`); head 2.1 always uses
    `horizon_cond`.
    """
    corr = Correspondence()
    # Root is exactly sampled, so only the three head nodes below actually scrub.
    i_logits = InterpNode(name="logits", cond_sampler=ExactSampler())
    corr.add(i_logits, corr_root_matcher)
    corr.add(i_logits.make_descendant(cs_for_h10_and_h20, "10"), m_10)
    corr.add(i_logits.make_descendant(cs_for_h10_and_h20, "20"), m_20)
    corr.add(i_logits.make_descendant(horizon_cond, "21"), m_21)
    return corr
# %%
if MAIN:
print("\nEx1a: Just the count cond")
tests.t_ex1_corr(make_ex1_corr, count_cond)
ex1a, loss1a = paren_experiment(circuit, ds, make_ex1_corr(count_cond))
check_loss(loss1a, 0.52, 0.04)
if PRINT_CIRCUITS:
ex1a.print()
# %% [markdown]
"""
As discussed in the writeup, we can more accurately capture the equivalence classes of 1.0 and 2.0's output by including whether the first parenthesis is open or closed.
This is a natural feature for these heads to use: a sequence will always be unbalanced if it starts with a close parenthesis, and as these heads depend strongly on the residual stream at position 1 anyhow (as we will show in experiment 2) the information is readily accessible.
(reminder: position 0 is always the [START] token, so position 1 is the first parentheses. All sequences in our dataset have >= 2 parentheses in them, so you can assume position 1 is either an open or close paren.)
Define some new cond samplers that incorporate this feature.
"""
def passes_starts_open(d: ParenDataset) -> torch.Tensor:
    """
    Returns a bool tensor of shape [len(d)].
    Result is true when the corresponding datum starts with '('.
    """
    # Exercise stub: the solution uses ParenDataset.starts_with_open instead.
    raise NotImplementedError
def passes_count_open(d: ParenDataset) -> torch.Tensor:
    """
    Returns a bool tensor of shape [len(d)].
    Result is true when the corresponding datum starts with '(' and there are equal numbers of open and close parens in the entire sequence.
    """
    # Exercise stub: the solution combines ParenDataset.count_test and
    # ParenDataset.starts_with_open instead.
    raise NotImplementedError
if "SOLUTION":
start_open_cond = FuncSampler(lambda d: ParenDataset.unwrap(d).starts_with_open)
count_open_cond = FuncSampler(lambda d: ParenDataset.unwrap(d).count_test & ParenDataset.unwrap(d).starts_with_open)
else:
"""
Two more samplers! the first that checks that the first paren is an open parentheses,
the next tests the input passes count test AND the first paren is open.
We don't use the pure start_open test yet, but we will soon and it's nice to define it here.
"""
start_open_cond = FuncSampler(lambda d: passes_starts_open(ParenDataset.unwrap(d)))
count_open_cond = FuncSampler(lambda d: passes_count_open(ParenDataset.unwrap(d)))
if MAIN:
tests.t_start_open_cond(start_open_cond)
tests.t_count_open_cond(count_open_cond)
# %%
if MAIN:
print("\nEx1b: Without a0")
tests.t_ex1_corr(make_ex1_corr, count_open_cond)
ex1b, loss1b = paren_experiment(circuit, ds, make_ex1_corr(count_open_cond))
check_loss(loss1b, 0.30, 0.04)
if PRINT_CIRCUITS:
ex1b.print()
# %% [markdown]
"""
Bonus: Can you improve on the loss by specifying other direct paths, or choosing better features to ensure
agreement along?
"""
# %%
if "SOLUTION":
if MAIN:
print("\nEx1 bonus 1: With a0")
corr = make_ex1_corr(count_open_cond)
i_00 = corr.get_by_name("logits").make_descendant(count_open_cond, "00")
m_00 = chain_excluding(corr_root_matcher, "a0.h0", {"m2", "m1", "m0", "a2.h0", "a2.h1", "a1.h0", "a1.h1"})
corr.add(i_00, m_00)
ex1_bonus1 = paren_experiment(circuit, ds, corr)
# %%
# We only test up until this point in CircleCI so that it's fast
if "SKIP":
NotebookInTesting.exit_if_in_testing()
# %% [markdown]
"""
## Experiment 2! Diving into heads 1.0 and 2.0
We are going split up experiment 2 into four parts:
- Part 1: 1.0 and 2.0 only depend on their input at position 1
- Part 2 (ex2a in writeup): 1.0 and 2.0 only depend on:
- the output of 0.0 (which computes $p$, the proportion of open parentheses) and
- the embeds (which encode if the first paren is open)
- Part 3: Projecting the output of 0.0 onto a single direction
- Part 4 (ex2b in writeup): Estimate the output of 0.0 with a function $\phi(p)$
## Part 1: Splitting up the input to 1.0 and 2.0 by sequence position
One of the claims we'd like to test is that only the input at position 1 (the first paren position) matters for both heads 1.0 and 2.0.
Currently, however, there is no node of our circuit corresponding to "the input at position 1". Let's change that!
Write a `separate_pos1` function that will transform a circuit `node` into:
```
'node_concat' Concat
'node.pos_0' Index [0:1, :]
'node'
'node.pos_1' Index [1:2, :]
'node'
'node.pos_2_41' Index [2:42, :]
'node'
```
This can be accomplished by calling `split_to_concat()` (from algebraic_rewrites.py) and `.rename`-ing the result.
Then split the input node, but only along paths that are reached through head 2.0 and 1.0. (We don't want to split the input to 2.1 in particular, as we'll split that differently later.)
If you are struggling to get the exact correct circuit, you are free to import it from the solution file and print it out. You can also try `print(rc.diff_circuits(your_circuit, our_circuit))` though
Yes, the tests are very strict about naming things exactly correctly.
This is partially because it is convenient for tests, but also because names are really important!
Good names reduce confusion about what that random node of the circuit actually means.
Mis-naming nodes is also a frequent cause of bugs, e.g. a matcher that traverses a path that it wasn't supposted to.
"""
# %%
def separate_pos1(c: rc.Circuit) -> rc.Circuit:
    """
    Rewrite `c` as a concat of three slices of axis 0 (sequence): position 0, position 1,
    and positions 2..41. This makes position 1 (the first paren) an individually
    matchable node. The result is named `{c.name}_concat`.
    """
    return split_to_concat(
        c, 0, [0, 1, torch.arange(2, 42)], partitioning_idx_names=["pos0", "pos1", "pos2_42"], use_dot=True
    ).rename(f"{c.name}_concat")
ex2_part1_circuit = circuit
if "SOLUTION":
ex2_part1_circuit = ex2_part1_circuit.update(rc.IterativeMatcher("a2.h0").chain("a2.input"), separate_pos1)
ex2_part1_circuit = ex2_part1_circuit.update(rc.IterativeMatcher("a1.h0").chain("a1.input"), separate_pos1)
if MAIN and PRINT_CIRCUITS:
subcirc = ex2_part1_circuit.get_unique(rc.IterativeMatcher("a2.h0").chain("a2.input_concat"))
printer.print(subcirc)
circuit_graph_ui(subcirc, default_hidden=ui_hidden_matcher.get(subcirc))
if MAIN:
tests.t_ex2_part1_circuit(ex2_part1_circuit)
# %% [markdown]
"""
Now we can test the claim that both 1.0 and 2.0 only cares about positon 1!
We'll need new matchers, which just matches the pos_1 input.
"""
if "SOLUTION":
m_10_p1 = m_10.chain("a1.input.at_pos1")
m_20_p1 = m_20.chain("a2.input.at_pos1")
else:
m_10_p1 = rc.IterativeMatcher()
m_20_p1 = rc.IterativeMatcher()
if MAIN:
tests.t_m_10_p1(m_10_p1)
tests.t_m_20_p1(m_20_p1)
# %% [markdown]
"""
Then create a correspondence that extends the one returned by `make_ex1_corr(count_open_cond)` so that both 1.0 and 2.0 only use information from position 1. `Correspondence.get_by_name` is useful here.
Have your new nodes be named "10_p1" and "20_p1".
"""
def make_ex2_part1_corr() -> Correspondence:
    """
    Extend the ex1 correspondence (with count_open_cond) so that heads 1.0 and 2.0
    additionally only depend on their input at position 1 (nodes "10_p1", "20_p1").
    """
    corr = make_ex1_corr(count_open_cond)
    # Each p1 node hangs off its corresponding head node and uses the same condition.
    i_10_p1 = corr.get_by_name("10").make_descendant(name="10_p1", cond_sampler=count_open_cond)
    corr.add(i_10_p1, m_10_p1)
    i_20_p1 = corr.get_by_name("20").make_descendant(name="20_p1", cond_sampler=count_open_cond)
    corr.add(i_20_p1, m_20_p1)
    return corr
if MAIN:
tests.t_make_ex2_part1_corr(make_ex2_part1_corr())
print("\nEx 2 part 1: 1.0/2.0 depend on position 1 input")
ex2_p1, loss2_p1 = paren_experiment(ex2_part1_circuit, ds, make_ex2_part1_corr())
# %% [markdown]
"""
### Part 2
We now construct experiment 2a from the writeup. We will be strict about where 1.0 and 2.0 learn the features they depend on. We claim that the 'count test' is determined by head 0.0 checking the exact proportion of open parens in the sequence and outputting this into the residual stream at position 1.
We thus need to also split up the output of attention head 0.0, so we can specify it only cares about the output of this head at position 1. Again, let's only split it for the branch of the circuit we are working with: copies of 0.0 that are upstream of either `m_10_p1` or `m_20_p1`.
"""
ex2_part2_circuit = ex2_part1_circuit
if "SOLUTION":
ex2_part2_circuit = ex2_part2_circuit.update((m_10_p1 | m_20_p1).chain("a0.h0"), separate_pos1)
if MAIN and PRINT_CIRCUITS:
printer.print(ex2_part2_circuit.get_unique(m_10_p1))
if MAIN:
tests.t_ex2_part2_circuit(ex2_part2_circuit)
# %% [markdown]
"""
First, make a new cond sampler that samples an input that agrees on what is called $p_1^($ in the writeup. This can be done with a FuncSampler based on a function with the following equivalence classes:
- one class for _all_ inputs that start with a close parenthesis
- one class for every value of $p$ (proportion of open parentheses in the entire sequence)
Note the actual values returned aren't important, just the equivalence classes.
"""
def p1_if_starts_open(d: ParenDataset):
    """Returns a tensor of size [len(ds)]. The value represents p_1 if the sequence starts open, and is constant otherwise"""
    # All sequences starting with ')' collapse into a single equivalence class
    # (sentinel -1.0); otherwise the class is the proportion of open parens over the
    # whole sequence. Only the equivalence classes matter, not the values themselves.
    return torch.where(
        d.starts_with_open,
        d.p_open_after[:, 1],
        torch.tensor(-1.0, dtype=torch.float32),
    )
p1_open_cond = FuncSampler(lambda d: p1_if_starts_open(ParenDataset.unwrap(d)))
# %% [markdown]
"""And some matchers"""
if "SOLUTION":
m_10_p1_h00 = m_10_p1.chain("a0.h0.at_pos1")
m_20_p1_h00 = m_20_p1.chain("a0.h0.at_pos1")
else:
m_10_p1_h00 = rc.IterativeMatcher()
m_20_p1_h00 = rc.IterativeMatcher()
# %% [markdown]
"""
Now make the correspondence!
You should add 4 nodes to the correspondence from part 1:
- "10_p1_00"
- "20_p1_00"
- "10_p1_emb"
- "20_p1_emb"
"""
def make_ex2_part2_corr() -> Correspondence:
    """
    Extend the part-1 correspondence: heads 1.0/2.0 at position 1 depend only on
    head 0.0's output (agreeing on p_1^( via `p1_open_cond`) and on the embeddings
    (agreeing on whether the first paren is open via `start_open_cond`).
    Adds nodes "10_p1_00", "20_p1_00", "10_p1_emb", "20_p1_emb".
    """
    corr = make_ex2_part1_corr()
    i_10_p1 = corr.get_by_name("10_p1")
    i_20_p1 = corr.get_by_name("20_p1")
    # indirect paths to 0.0
    i_10_p1_00 = i_10_p1.make_descendant(name="10_p1_00", cond_sampler=p1_open_cond)
    i_20_p1_00 = i_20_p1.make_descendant(name="20_p1_00", cond_sampler=p1_open_cond)
    corr.add(i_10_p1_00, m_10_p1_h00)
    corr.add(i_20_p1_00, m_20_p1_h00)
    # embedding paths, excluding anything routed through 0.0
    i_10_p1_emb = i_10_p1.make_descendant(name="10_p1_emb", cond_sampler=start_open_cond)
    i_20_p1_emb = i_20_p1.make_descendant(name="20_p1_emb", cond_sampler=start_open_cond)
    corr.add(i_10_p1_emb, chain_excluding(m_10_p1, "embeds", "a0.h0"))
    corr.add(i_20_p1_emb, chain_excluding(m_20_p1, "embeds", "a0.h0"))
    return corr
if MAIN:
tests.t_ex2_part2_corr(make_ex2_part2_corr())
print("\nEx 2 part 2 (2a in writeup): 1.0/2.0 depend on position 0.0 and emb")
ex2a, loss2a = paren_experiment(ex2_part2_circuit, ds, make_ex2_part2_corr())
check_loss(loss2a, 0.55, 0.04)
# %% [markdown]
"""
### Part 3: Projecting 0.0 onto a single direction
#### Circuit rewrite
Another claim we would like to test is that only the output of 0.0 written in a particular direction is important.
To do this we will rewrite the output of 0.0 as the sum of two terms: the (projection)[https://en.wikipedia.org/wiki/Vector_projection] and rejection (aka the perpendicular component) along this direction.
"""
# %%
h00_open_vector = get_h00_open_vector(MODEL_ID)
def project_into_direction(c: rc.Circuit, v: torch.Tensor = h00_open_vector) -> rc.Circuit:
    """
    Return a circuit that computes `c`: [seq_len, 56] projected the direction of vector `v`: [56].
    Call the resulting circuit `{c.name}_projected`.
    """
    # Normalize v so the projection is (c · v̂) v̂: the einsum contracts the embedding
    # axis with one copy of v̂ and scales the second copy by the resulting coefficient.
    v_dir_array = rc.Array(v / torch.linalg.norm(v), "dir")
    return rc.Einsum.from_einsum_string(
        "s e, e, f -> s f", c.rename(c.name + "_orig"), v_dir_array, v_dir_array, name=f"{c.name}_projected"
    )
if MAIN:
tests.t_project_into_direction(project_into_direction)
def get_ex2_part3_circuit(c: rc.Circuit, project_fn=project_into_direction):
    """
    Uses `residual_rewrite` to write head 0.0 at position 1 (when reached by either `m_10_p1_h00` or `m_20_p1_h00`), as a sum of the projection and the rejection along h00_open_vector. The head retains its same name, with children named `{head.name}_projected` and `{head.name}_projected_residual`.
    """
    # residual_rewrite returns (rewritten circuit, residual term); we keep only the former.
    split = lambda h00: residual_rewrite(h00, project_fn(h00), "projected")[0]
    return c.update((m_10_p1_h00 | m_20_p1_h00).chain("a0.h0"), split)
ex2_part3_circuit = get_ex2_part3_circuit(ex2_part2_circuit)
if MAIN and PRINT_CIRCUITS:
proj_printer = printer.evolve(
traversal=rc.new_traversal(term_early_at={"a0.h0.at_pos0", "a0.h0.at_pos2_42", "a0.h0_orig"})
)
subcirc = ex2_part3_circuit.get_unique(m_10_p1.chain("a0.h0_concat"))
proj_printer.print(subcirc)
if MAIN:
tests.t_ex2_part3_circuit(get_ex2_part3_circuit)
# %% [markdown]
"""
Now make the correspondence. Be sure to avoid the residual node!
This correspondence requires adding two new nodes:
- "10_p1_00_projected"
- "20_p1_00_projected"
"""
def make_ex2_part3_corr() -> Correspondence:
    """
    Extend the part-2 correspondence: the 0.0 dependence is routed through the
    projected component only (nodes "10_p1_00_projected", "20_p1_00_projected"),
    excluding the residual/rejection branch.
    """
    corr = make_ex2_part2_corr()
    corr.add(
        corr.get_by_name("10_p1_00").make_descendant(p1_open_cond, "10_p1_00_projected"),
        chain_excluding(m_10_p1_h00, "a0.h0_projected", "a0.h0_projected_residual"),
    )
    corr.add(
        corr.get_by_name("20_p1_00").make_descendant(p1_open_cond, "20_p1_00_projected"),
        chain_excluding(m_20_p1_h00, "a0.h0_projected", "a0.h0_projected_residual"),
    )
    return corr
if MAIN:
tests.t_ex2_part3_corr(make_ex2_part3_corr())
print("\nEx 2 part 3: Projecting h00 into one direction")
ex2_p3, loss2_p3 = paren_experiment(ex2_part3_circuit, ds, make_ex2_part3_corr())
# %% [markdown]
"""
### Part 4: The $\phi$ function
"""
# %%
def compute_phi_circuit(tokens: rc.Circuit):
    """
    tokens: [seq_len, vocab_size] array of one hot tokens representing a sequence of parens
    (see ParenTokenizer for the one_hot ordering)
    Returns a circuit that computes phi: tokens -> R^56
        phi = h00_open_vector(2p - 1)
    where p = proportion of parens in `tokens` that are open.
    Returns a circuit with name 'a0.h0_phi'.
    """
    # Count open/close parens by summing the corresponding one-hot columns.
    num_opens = rc.Index(tokens, [slice(None), ParenTokenizer.OPEN_TOKEN], name="is_open").sum(
        axis=-1, name="num_opens"
    )
    num_closes = rc.Index(tokens, [slice(None), ParenTokenizer.CLOSE_TOKEN], name="is_close").sum(
        axis=-1, name="num_closes"
    )
    num_parens = rc.Add(num_opens, num_closes, name="num_parens")
    # p = opens / (opens + closes), expressed as a multiply by the reciprocal.
    p_open = num_opens.mul(rc.reciprocal(num_parens), name="p_open")
    # scale p into (2p - 1), the coefficient of the open direction
    p_open_mul_factor = p_open.mul_scalar(2).sub(rc.Scalar(1), name="p_open_mul_factor")
    # FIX: renamed local `dir` -> `open_dir` to avoid shadowing the `dir` builtin.
    open_dir = rc.Array(h00_open_vector, "open direction")
    return open_dir.mul(p_open_mul_factor, name="a0.h0_phi")
if MAIN:
tests.t_compute_phi_circuit(compute_phi_circuit)
# %%
def get_ex2_part4_circuit(orig_circuit: rc.Circuit = ex2_part2_circuit, compute_phi_circuit_fn=compute_phi_circuit):
    """
    Split the output of head 0.0 at position 1, when reached through the appropriate paths, into a phi estimate
    and the residual of this estimate.
    The resulting subcircuit should have name 'a0.h0' with children 'a0.h0_phi' and 'a0.h0_phi_residual'.
    """
    # phi is computed from the tokens feeding the head; residual_rewrite returns
    # (rewritten circuit, residual term) and we keep only the former.
    split_by_phi = lambda c: residual_rewrite(c, compute_phi_circuit_fn(c.get_unique("tokens")), "phi")[0]
    return orig_circuit.update((m_10_p1_h00 | m_20_p1_h00).chain("a0.h0"), split_by_phi)
ex2_part4_circuit = get_ex2_part4_circuit()
if MAIN and PRINT_CIRCUITS:
proj_printer = printer.evolve(
traversal=rc.new_traversal(term_early_at={"a0.h0.at_pos0", "a0.h0.at_pos2_42", "a0.h0_orig"})
)
subcirc = ex2_part4_circuit.get_unique(m_10_p1.chain("a0.h0_concat"))
proj_printer.print(subcirc)
if MAIN:
tests.t_ex2_part4_circuit(get_ex2_part4_circuit)
# %% [markdown]
"""
And now make the correspondence -- it should be very similar to the one from part 3. Build on top of the one from part **2**, with new node names "10_p1_00_phi" and "20_p1_00_phi".
"""
def make_ex2_part4_corr() -> Correspondence:
    """
    Like part 3, but routed through the phi-estimate branch instead of the projection
    (nodes "10_p1_00_phi", "20_p1_00_phi"), excluding the phi residual. Built on the
    part-2 correspondence, matching the part-4 circuit rewrite.
    """
    corr = make_ex2_part2_corr()
    corr.add(
        corr.get_by_name("10_p1_00").make_descendant(p1_open_cond, "10_p1_00_phi"),
        chain_excluding(m_10_p1_h00, "a0.h0_phi", "a0.h0_phi_residual"),
    )
    corr.add(
        corr.get_by_name("20_p1_00").make_descendant(p1_open_cond, "20_p1_00_phi"),
        chain_excluding(m_20_p1_h00, "a0.h0_phi", "a0.h0_phi_residual"),
    )
    return corr
if MAIN:
tests.t_ex2_part4_corr(make_ex2_part4_corr())
print("Ex2 part 4 (2b in writeup): replace a0 by phi(p)")
ex2b, loss2b = paren_experiment(ex2_part4_circuit, ds, make_ex2_part4_corr())
check_loss(loss2b, 0.53, 0.04)
# %% [markdown]
"""
Congratulations! This is the end of the main part of today's content. Below is some additional content that covers experiments 3 and 4 from the writeup, although with less detailed testing and instructions.
"""
# %%
if "SKIP":
######### Bonus experiments! ########
# some bonus tests that don't appear in the writeup
# these test the direct dependances of how information flows from a0 -> 2.0
# note we don't have a dependance on the tokens here for the 'starts open'
# which is probably a shortcoming of these hypotheses
    def make_ex2_explicit_paths_corr(from_10=True, to_m0_m1_a0=True, all_to_a0=True) -> Correspondence:
        """
        Bonus correspondence tracing explicit a0/m0/m1 paths into heads 2.0 (and
        optionally 1.0) at position 1.

        from_10: also add the analogous nodes for head 1.0.
        to_m0_m1_a0: split each head's p1 dependence into direct a0.h0, m0, and m1 paths.
        all_to_a0: further bottom out each of those paths at a0.h0 (via p1_open_cond).
        """
        corr = make_ex1_corr(count_open_cond)
        i_20 = corr.get_by_name("20")
        i_20_p1 = i_20.make_descendant(name="20_p1", cond_sampler=count_open_cond)
        corr.add(i_20_p1, m_20_p1)
        if to_m0_m1_a0:
            i_20_p1_a0 = i_20_p1.make_descendant(name="20_p1_a0", cond_sampler=count_open_cond)
            i_20_p1_m0 = i_20_p1.make_descendant(name="20_p1_m0", cond_sampler=count_open_cond)
            i_20_p1_m1 = i_20_p1.make_descendant(name="20_p1_m1", cond_sampler=count_open_cond)
            m_20_p1_m0 = chain_excluding(m_20_p1, "m0", "m1")
            m_20_p1_m1 = m_20_p1.chain("m1")
            corr.add(i_20_p1_a0, chain_excluding(m_20_p1, "a0.h0", {"m0", "a1.h0", "a1.h1", "m1"}))
            corr.add(i_20_p1_m0, m_20_p1_m0)
            corr.add(i_20_p1_m1, m_20_p1_m1)
            if all_to_a0:
                # m0 depends on a0
                i_20_p1_m0_a0 = i_20_p1_m0.make_descendant(name="20_p1_m0_00", cond_sampler=p1_open_cond)
                # m1 depends on a0, m0
                i_20_p1_m1_a0 = i_20_p1_m1.make_descendant(name="20_p1_m1_00", cond_sampler=p1_open_cond)
                i_20_p1_m1_m0 = i_20_p1_m1.make_descendant(name="20_p1_m1_m0", cond_sampler=p1_open_cond)
                # m1.m0 depends on a0
                i_20_p1_m1_m0_a0 = i_20_p1_m1_m0.make_descendant(name="20_p1_m1_m0_00", cond_sampler=p1_open_cond)
                corr.add(i_20_p1_m0_a0, m_20_p1_m0.chain("a0.h0"))
                corr.add(i_20_p1_m1_a0, chain_excluding(m_20_p1_m1, "a0.h0", "m0"))
                corr.add(i_20_p1_m1_m0, m_20_p1_m1.chain("m0"))
                corr.add(i_20_p1_m1_m0_a0, m_20_p1_m1.chain("m0").chain("a0.h0"))
        if from_10:
            # Same structure for head 1.0 (which sits one layer lower, so no m1 path).
            i_10 = corr.get_by_name("10")
            i_10_p1 = i_10.make_descendant(name="10_p1", cond_sampler=count_open_cond)
            corr.add(i_10_p1, m_10_p1)
            if to_m0_m1_a0:
                i_10_p1_a0 = i_10_p1.make_descendant(name="10_p1_a0", cond_sampler=count_open_cond)
                i_10_p1_m0 = i_10_p1.make_descendant(name="10_p1_m0", cond_sampler=count_open_cond)
                corr.add(i_10_p1_a0, chain_excluding(m_10_p1, "a0.h0", {"m0"}))
                m10_p1_m0 = m_10_p1.chain("m0")
                corr.add(i_10_p1_m0, m10_p1_m0)
                if all_to_a0:
                    i_10_p1_m0_a0 = i_10_p1_m0.make_descendant(name="10_p1_m0_00", cond_sampler=p1_open_cond)
                    corr.add(i_10_p1_m0_a0, m10_p1_m0.chain("a0.h0"))
        return corr
#%%
if MAIN:
print("Ex 2_bonus_a: 2.0 depends on only pos1")
ex2_bonus_a = paren_experiment(ex2_part2_circuit, ds, make_ex2_explicit_paths_corr(False, False, False))
print("Ex 2_bonus_b: 2.0 depends on only pos1, which depends on a0 + m0 + m1")
ex2_bonus_b = paren_experiment(ex2_part2_circuit, ds, make_ex2_explicit_paths_corr(False, True, False))
print("Ex 2_bonus_c: 2.0 depends on only pos1, which depends on a0 + m0 + m1, which bottom out at a0")
ex2_bonus_c = paren_experiment(ex2_part2_circuit, ds, make_ex2_explicit_paths_corr(False, True, True))
if PRINT_CIRCUITS:
ex2_part2_circuit.print(printer)
# %%
"""
# Experiment 3
"""
def separate_all_seqpos(c: rc.Circuit) -> rc.Circuit:
    """
    Separate c into all possible sequence positions.
    c is renamed to `{c.name}_concat`, with children `{c.name}.at_pos{i}`
    """
    # One singleton partition per position, 0..41 (42 = full sequence length here).
    return split_to_concat(c, 0, range(42), partitioning_idx_names=[f"pos{i}" for i in range(42)], use_dot=True).rename(
        f"{c.name}_concat"
    )
if MAIN:
tests.t_separate_all_seqpos(separate_all_seqpos)
ex3_circuit = circuit
if "SOLUTION":
ex3_circuit = ex3_circuit.update(rc.IterativeMatcher("a2.h1").chain("a2.input"), separate_all_seqpos)
if MAIN and PRINT_CIRCUITS:
printer.print(ex3_circuit.get_unique(rc.IterativeMatcher("a2.input_concat")))
if MAIN:
tests.t_ex3_circuit(ex3_circuit)
# %% [markdown]
"""
When adjusted = True, use the `ds.adjusted_p_open_after` attribute instead of `ds.p_open_after` to compute the horizon test.
One possible gotcha in this section is late-binding-closures messing with the values of i. I think if you follow the outline you should be fine, but if you get strange bugs it's one possibility.
"""
def to_horizon_vals(d: ParenDataset, i: int, adjusted: bool = False) -> torch.Tensor:
    """
    Returns a value for the horizon_i test dividing up the input datums into 5 equivalence classes.
    The actual numerical return values don't have inherent meaning, but are defined as follows:
        0 on padding,
        positive on plausibly-balanced positions,
        negative on unbalance-evidence positions,
        1 / -1 on END_TOKENS,
        2 / -2 on non-end tokens

    When `adjusted` is True the test uses `d.adjusted_p_open_after` instead of
    `d.p_open_after`.
    """
    ps: torch.Tensor = d.adjusted_p_open_after if adjusted else d.p_open_after
    toks = d.tokens_flat.value
    assert toks.shape == ps.shape
    # np.select picks the FIRST matching condition, so order matters:
    # padding, then END-token cases (classified by the paren just before the end),
    # then the p > 0.5 / p <= 0.5 split for ordinary positions.
    conds = [
        toks[:, i] == ParenTokenizer.PAD_TOKEN,
        (toks[:, i] == ParenTokenizer.END_TOKEN) & (toks[:, i - 1] == ParenTokenizer.OPEN_TOKEN),
        (toks[:, i] == ParenTokenizer.END_TOKEN) & (toks[:, i - 1] == ParenTokenizer.CLOSE_TOKEN),
        ps[:, i] > 0.5,
        ps[:, i] <= 0.5,
    ]
    vals = np.select(conds, [0, -1, 1, -2, 2], default=np.nan)
    # Every position should match some condition; NaNs indicate a bug, so dump
    # some offending examples before failing.
    if np.any(np.isnan(vals)):
        print(i)
        print(d[np.isnan(vals)][:10])
        print(ps[np.isnan(vals)][:10])
        raise AssertionError
    return torch.tensor(vals)
if MAIN:
tests.t_to_horizon_vals(to_horizon_vals)
def get_horizon_cond(i: int, adjusted: bool) -> FuncSampler:
    """Func sampler for horizon_i: samples inputs agreeing on `to_horizon_vals` at position i."""
    # `i` and `adjusted` are bound as arguments here, so the lambda closure is safe
    # from the late-binding pitfall mentioned in the markdown above.
    return FuncSampler(lambda d: to_horizon_vals(ParenDataset.unwrap(d), i, adjusted))
def get_horizon_all_cond(adjusted: bool) -> FuncSampler:
    """Func sampler for horizon_all: true iff every position passes its horizon_i test."""

    def to_horizon_all_vals(d: ParenDataset):
        # A position "passes" when its horizon value is non-negative (padding counts as passing).
        stacked = torch.stack([to_horizon_vals(d, i, adjusted) for i in range(0, 42)], dim=-1)
        return (stacked >= 0).all(dim=-1)

    return FuncSampler(lambda d: to_horizon_all_vals(ParenDataset.unwrap(d)))
if MAIN:
tests.t_get_horizon_all_cond(get_horizon_all_cond)
def make_ex3_corr(adjusted: bool = False, corr=None) -> Correspondence:
    """
    `adjusted`: uses `adjusted_p_open_after` based conditions if True, `p_open_after` otherwise.
    `corr`: The starting corr. Uses experiment 1b corr by default.
    Makes the following modifications:
    - Changes the cond sampler on node `21` to be the horizon_all cond sampler.
    - Adds one node for each sequence position, called `21_p{i}` with horizon_i cond sampler.
    - Also adds a node `pos_mask`, ensuring the pos_mask of head 2.1 is sampled from an input with the same input length.
    """
    corr = op.unwrap_or(corr, make_ex1_corr(count_open_cond))
    i_21 = corr.get_by_name("21")
    i_21.cond_sampler = get_horizon_all_cond(adjusted)
    # One child per sequence position; get_horizon_cond binds `i` at call time,
    # so there is no late-binding-closure issue in this loop.
    for i in range(0, 42):
        i_pos_i = i_21.make_descendant(name=f"21_p{i}", cond_sampler=get_horizon_cond(i, adjusted))
        corr.add(i_pos_i, m_21.chain(f"a2.input.at_pos{i}"))
    # The position mask must come from an input of the same length, but not via a2.input.
    i_pos_mask = i_21.make_descendant(
        name="pos_mask", cond_sampler=FuncSampler(lambda d: ParenDataset.unwrap(d).input_lengths)
    )
    m_pos_mask = chain_excluding(m_21, "pos_mask", "a2.input")
    corr.add(i_pos_mask, m_pos_mask)
    return corr
# %% [markdown]
"""
Note, we have a mini-replication crisis and with the current code can't replicate the exact numbers from the writeup. The handling of the position mask is somewhat different, although much more sane imo. I haven't had time to diagnose the exact cause of the difference.
In any case, expect your loss to be closer to ~1.23 for ex3a and ~1.17 for ex3b
"""
if MAIN:
print("splitting up 2.1 input by seqpos")
tests.t_make_ex3_corr(make_ex3_corr)
print("\nEx3a: first with real open proprotion")
ex3a, loss3a = paren_experiment(ex3_circuit, ds, make_ex3_corr(adjusted=False))
check_loss(loss3a, 1.124, 0.1) # sad sad sad
print("\nEx3b: now with adjusted open proportion")
ex3b, loss3b = paren_experiment(ex3_circuit, ds, make_ex3_corr(adjusted=True))
check_loss(loss3b, 1.140, 0.1) # sad sad sad
if PRINT_CIRCUITS:
ex3b.print()
# %%
if "SKIP":
    def make_ex3c_corr(adjusted: bool):
        """
        Extend ex3: each per-position node 21_p{i} additionally pins its direct
        inputs a0.h0, m0, and m1 with the same horizon_i condition.
        """
        corr = make_ex3_corr(adjusted)
        for i in range(42):
            i_pi = corr.get_by_name(f"21_p{i}")
            m_pi = corr[i_pi]
            corr.add(
                i_pi.make_descendant(get_horizon_cond(i, adjusted), f"21_p{i}_a0"),
                chain_excluding(m_pi, "a0.h0", {"m0", "m1"}),
            )
            corr.add(
                i_pi.make_descendant(get_horizon_cond(i, adjusted), f"21_p{i}_m0"), chain_excluding(m_pi, "m0", "m1")
            )
            corr.add(i_pi.make_descendant(get_horizon_cond(i, adjusted), f"21_p{i}_m1"), m_pi.chain("m1"))
        return corr
if MAIN and SLOW_EXPERIMENTS:
print("Ex3c: Extending each seqpos to {a0.0, m0, m1}")
ex3c = paren_experiment(ex3_circuit, ds, make_ex3c_corr(True))
    def make_ex3d_corr(adjusted: bool):
        """
        Like make_ex3c_corr, but the a0.h0/m0/m1 children use ExactSampler instead of
        the horizon_i condition (exact agreement on those direct inputs).
        """
        corr = make_ex3_corr(adjusted)
        for i in range(42):
            i_pi = corr.get_by_name(f"21_p{i}")
            m_pi = corr[i_pi]
            corr.add(i_pi.make_descendant(ExactSampler(), f"21_p{i}_a0"), chain_excluding(m_pi, "a0.h0", {"m0", "m1"}))
            corr.add(i_pi.make_descendant(ExactSampler(), f"21_p{i}_m0"), chain_excluding(m_pi, "m0", "m1"))
            corr.add(i_pi.make_descendant(ExactSampler(), f"21_p{i}_m1"), m_pi.chain("m1"))
        return corr
if MAIN and SLOW_EXPERIMENTS:
print("Ex3d: Extending each seqpos to a0.0+m0+m1")
ex3d = paren_experiment(ex3_circuit, ds, make_ex3d_corr(True))
# %% [markdown]
"""
Now, combine experiments 2 (phi rewrite) and 3 (with adj. proportion)! Expected loss is ~1.64
"""
ex4_circuit = circuit
if "SOLUTION":
ex4_circuit = ex2_part4_circuit.update(rc.IterativeMatcher("a2.h1").chain("a2.input"), separate_all_seqpos)
ex4_corr = make_ex3_corr(adjusted=True, corr=make_ex2_part4_corr())
else:
ex4_corr = Correspondence()
if MAIN and PRINT_CIRCUITS:
printer.print(ex4_circuit)
if MAIN:
print("\nEx4: Ex2b (1.0 and 2.0 phi rewrite) + Ex3b (2.1 split by seqpos with p_adj)")
ex4, loss4 = paren_experiment(ex4_circuit, ds, ex4_corr)
check_loss(loss4, 1.7, 0.1) # sad sad sad
# %% | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/demos/paren_balancer/causal_scrubbing_experiments.py | 0.401688 | 0.570989 | causal_scrubbing_experiments.py | pypi |
import os
from typing import Callable, Optional, Tuple
import jax
import numpy as np
import seaborn as sns
import torch
import torch as t
from adversarial.simple_task.dataset_utils import load_dataset
from adversarial.simple_task.model_architectures.simple_transformer import SimpleTokenizer
from interp.circuit import computational_node
from interp.circuit.algebric_rewrite import MUL_REST, rearrange_muls, residual_rewrite
from interp.circuit.circuit import Circuit
from interp.circuit.circuit_model_rewrites import basic_cum_expand_run
from interp.circuit.circuit_utils import cast_circuit
from interp.circuit.computational_node import Add, Einsum, GeneralFunction, Index
from interp.circuit.constant import ArrayConstant, One, Zero
from interp.circuit.cum_algo import cumulant_function_derivative_estim
from interp.circuit.cumulant import Cumulant
from interp.circuit.function_rewrites import get_relu_fake_derivatives
from interp.circuit.get_update_node import FunctionIterativeNodeMatcher as F
from interp.circuit.get_update_node import NameMatcher as NM
from interp.circuit.get_update_node import NodeUpdater as NU
from interp.circuit.get_update_node import Replace
from interp.circuit.print_circuit import PrintCircuit
from interp.circuit.projects.estim_helper import *
from interp.circuit.projects.interp_utils import *
from interp.circuit.projects.punct.utils import standard_pdi
from interp.circuit.scope_manager import ScopeManager
from interp.circuit.scope_rewrites import basic_factor_distribute
from interp.circuit.var import DiscreteVar
from interp.tools.indexer import TORCH_INDEXER as I
sns.set_theme()
# Force JAX into CPU-friendly settings: the "platform" allocator avoids eager
# memory preallocation, and the XLA flag exposes 8 virtual host devices.
os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"  # Use 8 CPU devices
os.environ["RR_CIRCUITS_REPR_NAME"] = "true"
RRFS_DIR = os.path.expanduser("~/rrfs")
RRFS_INTERP_MODELS_DIR = f"{RRFS_DIR}/interpretability_models_jax/"
# Prefer a local model cache if one exists; otherwise fall back to the shared rrfs copy.
os.environ["INTERPRETABILITY_MODELS_DIR"] = os.environ.get(
    "INTERPRETABILITY_MODELS_DIR",
    os.path.expanduser("~/interp_models_jax/")
    if os.path.exists(os.path.expanduser("~/interp_models_jax/"))
    else RRFS_INTERP_MODELS_DIR,
)
jax.config.update("jax_platform_name", "cpu")
# %%
task = "balanced_parens"
tokenizer = SimpleTokenizer.task_tokenizer(task)
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class ParenBalanceDataSet:
    """Tokenized paren-string dataset with balance labels.

    Attributes:
        vocab_size: size of the tokenizer vocabulary.
        seqs: (n, seq_len) integer array of token ids.
        is_balanced: (n,) array of boolean labels.
        strs: the original input strings.
    """

    def __init__(self, data_list, tokenizer):
        self.vocab_size = len(tokenizer.i_to_t)
        strs = [pair[0] for pair in data_list]
        labels = [pair[1] for pair in data_list]
        self.seqs = np.array(tokenizer.tokenize(strs))
        self.is_balanced = np.array(labels)
        self.strs = strs

    def get_one_hots(self):
        """Return a (n, seq_len, vocab_size) one-hot LongTensor of the sequences."""
        seq_tensor = torch.LongTensor(self.seqs)
        return torch.nn.functional.one_hot(seq_tensor, self.vocab_size)
def get_random_paren_list(
    name: str, selector: Callable[[str, bool], bool] = lambda _1, _2: True
) -> Tuple[list[Tuple[str, bool]], list[Tuple[str, bool]]]:
    """Load the train/dev splits of dataset `name` for the current task.

    Only (string, is_balanced) pairs accepted by `selector` are kept.

    Fix: the dataset is loaded once and both splits are filtered from the same
    result, instead of calling `load_dataset` once per split.
    """
    data = load_dataset(task, name)
    ds_train = [(s, r) for s, r in data["train"] if selector(s, r)]
    ds_dev = [(s, r) for s, r in data["dev"] if selector(s, r)]
    return ds_train, ds_dev
def all_quite_close(x, y):
    """True iff every element of x and y differs by less than 1e-4."""
    diff = (x.detach().clone().cpu() - y.detach().clone().cpu()).abs()
    return diff.max().item() < 1e-4
def inverse_cumsum(a: torch.Tensor, axis=-1) -> torch.Tensor:
    """Suffix (reverse) cumulative sum along `axis`."""
    reversed_a = torch.flip(a, dims=[axis])
    return torch.flip(reversed_a.cumsum(dim=axis), dims=[axis])
def count_open_propotions(toks: torch.Tensor) -> torch.Tensor:
    """Per-position suffix proportion of "(" among all parens.

    `toks` is a one-hot tensor whose channel 3 marks "(" and channel 4 marks ")".
    Scalar (0-dim) input yields a single 0.0.
    """
    device = toks.device
    if toks.dim() == 0:
        return torch.FloatTensor([0.0]).to(device)
    opens = inverse_cumsum(toks[..., 3])
    closes = inverse_cumsum(toks[..., 4])
    # clamp the denominator at 1 to avoid division by zero where no parens remain
    totals = torch.maximum(opens + closes, torch.FloatTensor([1.0]).to(device))
    return opens / totals
def feature_to_overall_elevation_wrong(x: torch.Tensor, bal_value: int = 0, unbal_value: int = 1) -> torch.Tensor:
    """Affinely map feature channel 1 of `x` from [0, 1] onto [bal_value, unbal_value]."""
    span = unbal_value - bal_value
    return span * x[..., 1] + bal_value
def feature_to_neg_elevation(x: torch.Tensor, bal_value: int = 0, unbal_value: int = 1) -> torch.Tensor:
    """Affinely map the last-dim maximum of `x` from [0, 1] onto [bal_value, unbal_value]."""
    peak = torch.max(x, dim=-1).values
    return (unbal_value - bal_value) * peak + bal_value
def is_unbalanced(toks: torch.Tensor, func=torch.sigmoid) -> torch.Tensor:
    """
    Take a shape
        (b *) seq_length * vocab_size
    tensor (batch is optional) and calculate whether it is unbalanced (~1.0) or not (~0.0).

    Note: the returned value is ~1 when either elevation test FAILS (matching
    the 1.0 = fail convention documented on total_elev / min_elev below);
    the indicator features are scaled to large logits and squashed by `func`.
    """
    if len(list(toks.shape)) == 0:
        return torch.zeros(1, 1, 1)
    p = count_open_propotions(toks)
    # total elevation test: open-paren proportion deviates from 0.5 either way
    x = feature_to_overall_elevation_wrong(torch.where(torch.abs(p - 0.5) > 0.01, 1.0, 0.0), -10, 20)
    # minimum elevation test: some suffix has strictly more "(" than ")"
    y = feature_to_neg_elevation(torch.where(p - 0.5 > 0.01, 1.0, 0.0), -10, 20)
    return func(x + y)
def total_elev(toks: torch.Tensor) -> torch.Tensor:
    """
    Same as is_unbalanced, except measures whether something fails the total elevation test (1.0) or not (0.0)
    """
    if len(list(toks.shape)) == 0:
        return torch.zeros(1, 1, 1)
    p = count_open_propotions(toks)
    # deviation of the open-paren proportion from 0.5, scaled to ±10 logits
    x = feature_to_overall_elevation_wrong(torch.where(torch.abs(p - 0.5) > 0.01, 1.0, 0.0), -10, 10)
    return torch.sigmoid(x)
def min_elev(toks: torch.Tensor, func=torch.sigmoid) -> torch.Tensor:
    """
    Same as is_unbalanced, except measures whether something fails the minimum elevation test (1.0) or not (0.0)
    """
    if len(list(toks.shape)) == 0:
        return torch.zeros(1, 1, 1)
    p = count_open_propotions(toks)
    # fail where some suffix has strictly more "(" than ")", scaled to ±10 logits
    y = feature_to_neg_elevation(torch.where(p - 0.5 > 0.01, 1.0, 0.0), -10, 10)
    return func(y)
def get_independent_dataset(
    tokenizer=tokenizer,
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """
    Return `INDEPENDENT_MIX`: a dataset of
    length 40 parenthesis strings where failing the total elevation test is independent of failing the minimum elevation test. Note that three times as many data points are unbalanced strings compared to balanced strings
    (the second element of the returned tuple is always None).
    """
    def string_is_good(s: str):
        # only keep length-40 strings that start with "("
        return len(s) == 40 and s[0] == "("
    random_ds_name = "random_choice_len_40_extra_yeses_16"
    balanced_ds_name = "random_choice_len_40_balanced_advexes"
    unbalanced_selector = lambda s, r: not r and string_is_good(s)
    balanced_selector = lambda s, r: r and string_is_good(s)
    ds_unbalanced_train, _ = get_random_paren_list(random_ds_name, unbalanced_selector)
    ds_balanced_train, _ = get_random_paren_list(balanced_ds_name, balanced_selector)
    big_ds = ParenBalanceDataSet(ds_unbalanced_train, tokenizer)
    m = big_ds.get_one_hots().cpu()
    # classify each unbalanced string by which elevation test(s) it fails
    total_elev_failures = []
    min_elev_failures = []
    both_failures = []
    for i in range(m.shape[0]):
        if not all_quite_close(total_elev(m[i : i + 1]), min_elev(m[i : i + 1])):
            if all_quite_close(total_elev(m[i : i + 1]), torch.zeros(1)):
                min_elev_failures.append(m[i : i + 1])
            else:
                total_elev_failures.append(m[i : i + 1])
        else:
            # NOTE(review): agreement is treated as failing both tests — assumes
            # every string in this (unbalanced) dataset fails at least one; confirm
            both_failures.append(m[i : i + 1])
    total_fail_tens = torch.concat(total_elev_failures)
    min_fail_tens = torch.concat(min_elev_failures)
    both_fail_tens = torch.concat(both_failures)
    number_of_balance_to_load = max(len(min_elev_failures), len(total_elev_failures))
    balanced_sample_ds = ParenBalanceDataSet(ds_balanced_train[:number_of_balance_to_load], tokenizer)
    # take equal-sized slices of each category so the two failure modes are independent
    portion_sizes = min(len(min_elev_failures), len(total_elev_failures), len(both_failures))
    return (
        torch.cat(
            (
                both_fail_tens[:portion_sizes],
                total_fail_tens[:portion_sizes],
                min_fail_tens[:portion_sizes],
                balanced_sample_ds.get_one_hots()[:portion_sizes],
            ),
            0,
        ),
        None,
    )
def get_sub_circuit(circuit: Circuit, child_name: str):
    """
    Get a circuit leaf in the computational graph of `circuit`
    that has name `child_name`
    """
    assert isinstance(circuit, Circuit)
    assert isinstance(child_name, str)
    matcher = F(NM(child_name)).g()
    return matcher.get_unique_c(circuit)
def get_truth_for_cumulant(model: Circuit, function, name: str = "true") -> Circuit:
    """
    Wrap `function` applied to the model's token input as a named circuit node.

    Returns an Index (named `name`) into a GeneralFunction that broadcasts
    `function(tokens)` over the token shape; the [0, 0] Index recovers the
    per-sample scalar.
    """
    tokens = get_sub_circuit(model, "tokens")
    return Index(
        GeneralFunction(
            tokens,
            # broadcast the per-sample value over the last two (non-batch) dims so
            # the [0, 0] Index below picks it back out
            # NOTE(review): assumes `function(x)` yields one value per batch row — confirm
            lambda x: torch.broadcast_to(function(x)[:, None, None], x.shape),
            non_batch_dims=(-1, -2),
            name=f"{name}_function",
        ),
        I[0, 0],
        name=name,
    )
def replace_tokens(data: torch.Tensor, *circuits: Circuit):
    """
    Replace the "tokens" node inside every given circuit with a DiscreteVar over
    `data`; returns the updated circuits followed by the new variable node.
    """
    new_tokens = ArrayConstant.from_converted(data, dtype=torch.float, name="tokens", device=DEVICE)
    new_node = DiscreteVar(new_tokens, name="tokens_var")
    updated = [NU(Replace(new_node), F(NM("tokens")))(c) for c in circuits]
    return (*updated, new_node)
do_cumulant_evaluations = True  # toggle for the cumulant estimations below
# val_ds is always None here (see get_independent_dataset's return value)
sample_ds, val_ds = get_independent_dataset(tokenizer=tokenizer)
#%%
def make_handcrafted_transformer():
    """
    Build a tiny hand-weighted, one-layer, one-head transformer that classifies
    length-40 paren strings as balanced/unbalanced.

    `tokens_var` is 42 * 5 in shape
    """
    # Embedding
    # We encode ( (3) as e_1, and ) (4) as e_2 in the 2D residual stream
    # also, encode the end of sequence token as 1 (this is needed for the total elevation test)
    m = torch.zeros(5, 2)
    m[3][0] = 1.0
    m[4][1] = 1.0
    m[0][0] = 1.0
    fake_tokens_var = Zero(shape=(42, 5), name="tokens")
    embed_matrix = ArrayConstant(m, name="w.embed")
    embedded = Einsum.from_einsum_str("lo,ox->lx", fake_tokens_var, embed_matrix, name="embed")
    # QKV weights
    # we make queries for both e_1 and e_2 be 10.0 (queries and keys are 1D)
    # on the other hand the value vectors are +1 for open brackets and -1 for close brackets
    qw = ArrayConstant(torch.ones(2, 1) * 10, name="q.w")
    kw = One(shape=(2, 1), name="k.w")
    m2 = t.zeros(2, 1)
    m2[0][0] = 1.0
    m2[1][0] = -1.0
    vw = ArrayConstant(m2, name="v.w")
    # QKV calculations
    # note we also use masking, so everything only pays attention to things before it
    q = Einsum.from_einsum_str("io,li->lo", qw, embedded, name="q")
    k = Einsum.from_einsum_str("io,li->lo", kw, embedded, name="k")
    v = Einsum.from_einsum_str("io,li->lo", vw, embedded, name="v")
    qk = Einsum.from_einsum_str("lo,Lo->lL", q, k, name="qk")
    # additive causal mask: -1e6 above the diagonal, 0 on/below it
    upper_triangular_tensor = t.ones(42, 42).float() * (-1e6)
    for i in range(42):
        for j in range(42):
            if i <= j:
                upper_triangular_tensor[i][j] = 0
    upper_triangular = ArrayConstant(upper_triangular_tensor, name="upper_triangular")
    scores = Add.from_unweighted_list([qk, upper_triangular], name="masked")
    probs = computational_node.softmax(scores, name="probs")
    values = Einsum.from_einsum_str("lL,Lo->lo", probs, v, name="head.out")
    # this setup means (as you can verify!) if any of the last 39 positions are positive, the elevation test has failed. Additionally if the last position (which attends to all the sequence) is not <= 0, the string is unbalanced,
    # So we need to reverse this position's sign, then take a ReLU and if ANYWHERE is positive then the string is unbalanced; this is pretty much an MLP!
    m3 = t.eye(42).float()
    m3[0, 0] = -1.0  # NOTE(review): only index 0 is negated (original comment read "I thonk..."); confirm this matches the sign-flip described above
    m3[:, 41] = 0
    m3[41, :] = 0
    m1w = ArrayConstant(m3, name="m1.w")
    m1ed = Einsum.from_einsum_str("lL,lo->L", m1w, values, name="m1.out")
    relued = computational_node.relu(m1ed, name="m1.act")
    # then add up all the entries
    m2w = ArrayConstant(t.ones(42, 42), name="m2.w")
    m2ed = Einsum.from_einsum_str("lL,l->L", m2w, relued, name="m2.out")
    # taking sigmoid and the convention that 0.5 rounds down to 0, this model achieves perfect performance on length 40 sequences
    logits = Index(m2ed, I[0], name="logits")
    return logits
circuit = make_handcrafted_transformer()
truth = get_truth_for_cumulant(circuit, is_unbalanced)
# move both circuits to the target device before wiring in the sampled tokens
circuit = cast_circuit(circuit, device=DEVICE)
truth = cast_circuit(truth, device=DEVICE)
circuit, truth, new_node = replace_tokens(sample_ds, circuit, truth)
# %%
# some tools for printing circuits
d1 = PrintCircuit(
    print_html=True, colorize=lambda _: True, max_depth=1, print_shape=True, copy_button=True
)  # depth 1 printing, but collapsibles mean things can be expanded
print("View the collapsible tree for the circuit:")
d1(circuit)
# %%
# Now let's define the covariance we want to explain
# (why is the covariance between the true answer and the difference between the two output logits high?)
cumulant_circuit = Cumulant((circuit, truth))
scopes = [ScopeManager(cumulant_circuit)]  # all scopes considered, as a list because checking how things change
d1(scopes[-1])
#%% [markdown]
# We now need to "push down" the cumulant. This is equivalent to noticing that the output logits $L$ are equal to (an entry in) $Mx$, where $M$ is the final matrix multiply.
# So we have $K(Mx, T) = MK(x, T)$, as $M$ is constant. So "pushing down" the cumulant means factoring out the $M$ term:
# %%
scopes.append(
    scopes[-1].u(
        NU(
            lambda x: basic_cum_expand_run(  # expand cumulant
                Cumulant.unwrap(x),
                cum_expand_through_factored_add_concat=True,
                until_func=NM("true"),  # do not expand through "true"
                child_suffix="logits",  # ... instead expand through "logits"
            ),
            F(NM(scopes[-1].unique().name)),  # the name of the cumulant that we want to expand
        )
    )
)
d1(scopes[-1])
#%%
print("Now push down the index in a similar way:")
scopes.append(scopes[-1].u(NU(lambda x: standard_pdi(x), F(NM(scopes[-1].unique().name)))))
# TODO(review): leftover author note ("upto here, maybe delete ya extra type safety") — consider resolving
#%%
print("Now let's see what the covariance is!")
estim = EstimHelper(Zero(), use_new_estim=True, device=DEVICE)
list(estim.estimate(scopes[-1].unique()).items())[0][1].item()
#%%
print("Now let's expand the derivative non-linearity by estimating with the first order term")
def relu_rewrite(
    scope: ScopeManager, cumulant_name: str, relu_name: str, highest_deriv: int = 6, std_deviation: float = 0.1
):
    """
    Expand a ReLU non-linearity (GeneralFunction) `relu_name`, which has a parent (Cumulant) `cumulant_name`, into `highest_deriv` number of terms. Approximates ReLUs with the derivative of a normal PDF with std `std_deviation`
    """
    def run_expand_deriv_relu(x: Circuit, relu_name: str):
        """
        Function that updates ReLU nodes
        """
        # find the pre-activation circuit feeding the named ReLU inside `x`
        pre_act = GeneralFunction.unwrap(F(NM(relu_name)).g().get_unique_c(x))
        estim = cumulant_function_derivative_estim(
            Cumulant.unwrap(x),
            pre_act,
            highest_deriv=highest_deriv,
            get_derivative=get_relu_fake_derivatives(pre_act, fake_dirac_std=std_deviation),
        )
        # split the cumulant into the derivative estimate plus a residual term
        out, _, _ = residual_rewrite(x, estim, running_name="d_estim")
        return out
    return scope.u(NU(lambda x: run_expand_deriv_relu(x, relu_name), F(NM(cumulant_name))), no_eq_besides_name=False)
scopes.append(relu_rewrite(scopes[-1], cumulant_name="I k2 m1.act, true I", relu_name="m1.act"))
d1(scopes[-1])
#%%
scopes.append(basic_factor_distribute(scopes[-1], modify_inplace=False))
#%% The tree looks quite different now!
# Note the Jacobians here (which are large and mean we won't be able to evaluate many of these cumulants) are actually diagonal, as ReLU acts elementwise. See batched_deriv for a more efficient use (which we don't need here as we only need lower order terms)
scopes.append(
scopes[-1].sub_get(F(NM("I k2 m1.act, true I_d_estim_out")))
) # get derivative terms (we're now approximating the original cumulant)
#%%
scopes.append(
scopes[-1].u(
NU(
lambda x: basic_cum_expand_run( # expand cumulant
Cumulant.unwrap(x),
cum_expand_through_factored_add_concat=True,
until_func=NM("true"),
child_suffix="m1.out",
),
F(NM("I k2 m1.out, true I")),
),
no_eq_besides_name=False, # now needed...
)
)
#%%
scopes.append(basic_factor_distribute(scopes[-1], modify_inplace=False))
#%%
scopes.append(
scopes[-1].sub_get(F(NM("I k2 m1.act, true I_d_estim_item_1_out")))
) # now it is a scalar we get the first derivative
#%%
print("Let's check the cumulant between this approximation and the truth:")
print(list(estim.estimate(scopes[-1].unique()).items())[0][1].item())
print(
"Note that this is less than half of the total effect size. It turns out it is possible to explain behaviour in this case though"
)
#%%
scopes.append(
scopes[-1].u(
NU(
lambda x: basic_cum_expand_run( # expand cumulant
Cumulant.unwrap(x),
cum_expand_through_factored_add_concat=True,
until_func=NM("true"),
child_suffix="head.out",
),
F(NM("I k2 head.out, true I")),
),
no_eq_besides_name=False, # now needed...
)
)
#%%
scopes.append(basic_factor_distribute(scopes[-1], modify_inplace=False))
#%%
circuits_to_estimate = Add.unwrap(scopes[-1].unique()).items.keys()
out = estim.estimate(*circuits_to_estimate)
for k, v in out.items():
print(k.name, v.item())
print("This is not surprising since recall that the attention pattern was constant")
#%%
scopes.append(scopes[-1].sub_get(F(NM("I k2 true, v k1 probs I perm_out"))))
#%%
# Fix typos in the user-facing message ("rearrage" -> "rearrange", "corellatios" -> "correlations").
print("Finally we can rearrange the Einsums to get a product of the correlations with the input, and another matrix:")
final_circuit = rearrange_muls(Einsum.unwrap(scopes[-1].unique()), (0, MUL_REST))
d1(final_circuit)
#%%
q = list(estim.estimate(get_sub_circuit(final_circuit, "I k2 tokens_var, true I")).items())[0][1][:, 3:]
def plot_matrix(m):
    """Heatmap of a tensor; 1-D inputs are promoted to a single-row 2-D matrix."""
    if len(m.shape) < 2:
        # Fix: unsqueeze is not in-place — the previous code discarded its
        # result, so 1-D inputs were never actually promoted to 2-D.
        m = m.unsqueeze(0)
    sns.heatmap(m.detach().cpu())
plot_matrix(q)
print("This basically shows us correlations between the input and the truth, so doesn't give much signal")
# %%
q2 = list(
estim.estimate(
get_sub_circuit(final_circuit, "m1.w m1.act_deriv_d_estim_expectation d_estim_recip_1! m2.w_idxed")
).items()
)[0][
1
] # [:, 3:]
plot_matrix(q2.unsqueeze(0))
# Fix typos in the user-facing message ("timesed" -> "multiplied", "corellation" -> "correlation").
print(
    "The high value at 0 is meaningless since it is multiplied by 0 in the dataset correlation term. The rest of the plot shows that the final position accounts for most of the variance (the total elevation test) and the cyclical bands show the minimum elevation test occurs from right to left as the first minimum elevation failure will always be at an odd time"
)
# %%
from __future__ import annotations
import pytest
from rust_circuit import Index, Matcher, Parser, PrintOptions, default_index_traversal, push_down_index, restrict
from rust_circuit.py_utils import I
# %%
# }}}
# setup {{{
# %% [markdown]
# ## Setup
# %%
circuit = Parser(tensors_as_random=True, on_repeat_check_info_same=False, allow_hash_with_random=True)(
"""
0 'log_probs' [57, 50259] GeneralFunction log_softmax
1 'logits' [57, 50259] Einsum ab,cb->ac
2 'final.n' [57, 256] Add
3 'final.n.w.bias' [256] Array 151e0290b45764bf65fe8e61
4 'final.n.y_out' [57, 256] Einsum ab,a->ab
5 'final.n.y_scale' [57, 256] Einsum ab,b->ab
6 'final.n.z_mean' [57, 256] Einsum ab,bc->ac
7 'final.inp' [57, 256] Add
8 'tok_embeds' [57, 256] Array f738ed95c0f778275c1ecc91
9 'a0' [57, 256] Einsum abc,bcd->ad
10 'a0.comb_v' [57, 8, 32] Einsum abc,acd->bad
11 'a0.probs' [8, 57, 57] GeneralFunction softmax
12 'a0.scores' [8, 57, 57] Add
13 'a0.scores_mul_mask' [8, 57, 57] Einsum abc,bc->abc
14 'a0.scores_not_masked' [8, 57, 57] Einsum abc,adc,->abd
15 'a0.f_q' [8, 57, 32] Add
16 'a0.q' [8, 57, 32] Einsum abc,dc->adb
17 'a0.w.q' [8, 32, 256] Array ef8f32f62ccaa15af90ed096
18 'a0.n' [57, 256] Add
19 'a0.n.w.bias' [256] Array 925f1bb4ca52e7573018ba34
20 'a0.n.y_out' [57, 256] Einsum ab,a->ab
21 'a0.n.y_scale' [57, 256] Einsum ab,b->ab
22 'a0.n.z_mean' [57, 256] Einsum ab,bc->ac
8 tok_embeds
23 'a0.n.c.sub_mean' [256, 256] Array 4d11427c8d9c09763ca3c7de
24 'a0.n.w.scale' [256] Array 03798fe8f36bb9e6271f8bcd
25 'a0.n.full_mul' [57] GeneralFunction rsqrt
26 'a0.n.c.var_p_eps' [57] Add
27 'a0.n.var' [57] Einsum ab,ab,->a
22 a0.n.z_mean
22 a0.n.z_mean
28 'a0.n.c.recip_h_size' [] Scalar 0.00390625
29 'a0.n.eps' [] Scalar 0.00001
30 'a0.w.pos_q' [8, 57, 32] Einsum abc,dc->adb
17 a0.w.q
31 'w.pos_embeds' [57, 256] Array 47eda059261d25d4d7283df3
32 'a0.f_k' [8, 57, 32] Add
33 'a0.k' [8, 57, 32] Einsum abc,dc->adb
34 'a0.w.k' [8, 32, 256] Array 3eae23cb6134604792918d08
18 a0.n
35 'a0.w.pos_k' [8, 57, 32] Einsum abc,dc->adb
34 a0.w.k
31 w.pos_embeds
36 'a0.c.div_head_size' [] Scalar 0.17677669529663687
37 'a0.c.score_mask' [57, 57] Array 4d251ae1987ead35d704cd1b
38 'a0.c.score_neg_inf_bias' [57, 57] Array 3346b3e97cda641e9cbec006
39 'a0.v' [8, 57, 32] Einsum abc,dc->adb
40 'a0.w.v' [8, 32, 256] Array 9c803a30d26e5d50b28376dd
18 a0.n
41 'a0.w.out' [8, 32, 256] Array a737a4b485f2e1cc3db22333
42 'm0' [57, 256] Einsum ab,cb->ac
43 'm0.act' [57, 1024] GeneralFunction gelu
44 'm0.add0' [57, 1024] Add
45 'm0.before_product0' [57, 1024] Einsum ab,cb->ac
46 'm0.n' [57, 256] Add
47 'm0.n.w.bias' [256] Array 9ae61c8299e708d1b3c12c75
48 'm0.n.y_out' [57, 256] Einsum ab,a->ab
49 'm0.n.y_scale' [57, 256] Einsum ab,b->ab
50 'm0.n.z_mean' [57, 256] Einsum ab,bc->ac
51 'm0.inp' [57, 256] Add
8 tok_embeds
9 a0
52 'm0.n.c.sub_mean' [256, 256] Array 4d11427c8d9c09763ca3c7de
53 'm0.n.w.scale' [256] Array c0b8ebacfbd81ed1c905e5d1
54 'm0.n.full_mul' [57] GeneralFunction rsqrt
55 'm0.n.c.var_p_eps' [57] Add
56 'm0.n.var' [57] Einsum ab,ab,->a
50 m0.n.z_mean
50 m0.n.z_mean
57 'm0.n.c.recip_h_size' [] Scalar 0.00390625
58 'm0.n.eps' [] Scalar 0.00001
59 'm0.w.w0' [1024, 256] Array dc59d55cbe737267d1acbbf9
60 'm0.w.b0' [1024] Array 7dbf23f05f2ff50ab6ae3d30
61 'm0.w.w1' [256, 1024] Array 5002910549bc78a47941545c
62 'm0.w.b1' [256] Array 67e1c57f81b33d2e6f6cfbe3
63 'a1' [57, 256] Einsum abc,bcd->ad
64 'a1.comb_v' [57, 8, 32] Einsum abc,acd->bad
65 'a1.probs' [8, 57, 57] GeneralFunction softmax
66 'a1.scores' [8, 57, 57] Add
67 'a1.scores_mul_mask' [8, 57, 57] Einsum abc,bc->abc
68 'a1.scores_not_masked' [8, 57, 57] Einsum abc,adc,->abd
69 'a1.f_q' [8, 57, 32] Add
70 'a1.q' [8, 57, 32] Einsum abc,dc->adb
71 'a1.w.q' [8, 32, 256] Array 91124557e0f6f11619a770d7
72 'a1.n' [57, 256] Add
73 'a1.n.w.bias' [256] Array 9c4b4d2ec8664cb710486d99
74 'a1.n.y_out' [57, 256] Einsum ab,a->ab
75 'a1.n.y_scale' [57, 256] Einsum ab,b->ab
76 'a1.n.z_mean' [57, 256] Einsum ab,bc->ac
77 'a1.inp' [57, 256] Add
8 tok_embeds
9 a0
42 m0
62 m0.w.b1
78 'a1.n.c.sub_mean' [256, 256] Array 4d11427c8d9c09763ca3c7de
79 'a1.n.w.scale' [256] Array 0fcb1f000bb89dda2e86c04f
80 'a1.n.full_mul' [57] GeneralFunction rsqrt
81 'a1.n.c.var_p_eps' [57] Add
82 'a1.n.var' [57] Einsum ab,ab,->a
76 a1.n.z_mean
76 a1.n.z_mean
83 'a1.n.c.recip_h_size' [] Scalar 0.00390625
84 'a1.n.eps' [] Scalar 0.00001
85 'a1.w.pos_q' [8, 57, 32] Einsum abc,dc->adb
71 a1.w.q
31 w.pos_embeds
86 'a1.f_k' [8, 57, 32] Add
87 'a1.k' [8, 57, 32] Einsum abc,dc->adb
88 'a1.w.k' [8, 32, 256] Array 15f4e9cd023913a9a854a514
72 a1.n
89 'a1.w.pos_k' [8, 57, 32] Einsum abc,dc->adb
88 a1.w.k
31 w.pos_embeds
90 'a1.c.div_head_size' [] Scalar 0.17677669529663687
91 'a1.c.score_mask' [57, 57] Array 4d251ae1987ead35d704cd1b
92 'a1.c.score_neg_inf_bias' [57, 57] Array 3346b3e97cda641e9cbec006
93 'a1.v' [8, 57, 32] Einsum abc,dc->adb
94 'a1.w.v' [8, 32, 256] Array 5aaef038d2194d529aea8758
72 a1.n
95 'a1.w.out' [8, 32, 256] Array 4bfcebee9a186581d8de84cc
96 'm1' [57, 256] Einsum ab,cb->ac
97 'm1.act' [57, 1024] GeneralFunction gelu
98 'm1.add0' [57, 1024] Add
99 'm1.before_product0' [57, 1024] Einsum ab,cb->ac
100 'm1.n' [57, 256] Add
101 'm1.n.w.bias' [256] Array c8ee8c692edc6d92e7c2c219
102 'm1.n.y_out' [57, 256] Einsum ab,a->ab
103 'm1.n.y_scale' [57, 256] Einsum ab,b->ab
104 'm1.n.z_mean' [57, 256] Einsum ab,bc->ac
105 'm1.inp' [57, 256] Add
8 tok_embeds
9 a0
42 m0
62 m0.w.b1
63 a1
106 'm1.n.c.sub_mean' [256, 256] Array 4d11427c8d9c09763ca3c7de
107 'm1.n.w.scale' [256] Array 617a80d2cb0cb45318ef9885
108 'm1.n.full_mul' [57] GeneralFunction rsqrt
109 'm1.n.c.var_p_eps' [57] Add
110 'm1.n.var' [57] Einsum ab,ab,->a
104 m1.n.z_mean
104 m1.n.z_mean
111 'm1.n.c.recip_h_size' [] Scalar 0.00390625
112 'm1.n.eps' [] Scalar 0.00001
113 'm1.w.w0' [1024, 256] Array c46ef0b6b06fa0a25a9e8f74
114 'm1.w.b0' [1024] Array 1eeee3393a8ef176b116cec0
115 'm1.w.w1' [256, 1024] Array 040df31a221c659c1da228b7
116 'm1.w.b1' [256] Array 7010bd45175fc579786a025a
117 'final.n.c.sub_mean' [256, 256] Array 4d11427c8d9c09763ca3c7de
118 'final.n.w.scale' [256] Array 3900ff7658d3f069ae29926e
119 'final.n.full_mul' [57] GeneralFunction rsqrt
120 'final.n.c.var_p_eps' [57] Add
121 'final.n.var' [57] Einsum ab,ab,->a
6 final.n.z_mean
6 final.n.z_mean
122 'final.n.c.recip_h_size' [] Scalar 0.00390625
123 'final.n.eps' [] Scalar 0.00001
124 'w.unembed' [50259, 256] Array 460a894d3c021fb94cabdfb1
"""
)
# %%
# }}}
# %%
# Push a single-row index down, stopping the traversal at final.inp
pushed = push_down_index(
    Index(circuit, [17]), restrict(default_index_traversal(), term_early_at="final.inp"), suffix="_idx"
)
pushed.print(PrintOptions(shape_only_when_necessary=False))
# %%
# Same, but with a slice of rows instead of a single row
pushed_many = push_down_index(
    Index(circuit, I[2:7]), restrict(default_index_traversal(), term_early_at="final.inp"), suffix="_idx"
)
pushed_many.print(PrintOptions(shape_only_when_necessary=False))
# %%
# A second index over the already-pushed circuit, pushed without restriction
pushed_sub = push_down_index(Index(pushed_many, I[1:3]), default_index_traversal(), suffix="_IDX")
pushed_sub.print(PrintOptions(shape_only_when_necessary=False))
# %%
pushed_far = push_down_index(Index(circuit, I[2:7]), suffix="_idx")  # push down all the way!
pushed_far.print(PrintOptions(shape_only_when_necessary=False))
# the fully-pushed circuit replaces m1/a1 with their indexed variants
_ = Matcher("m1_idx").get_unique(pushed_far)
with pytest.raises(RuntimeError):
    Matcher("m1").get_unique(pushed_far)  # this doesn't exist
_ = Matcher("a1_idx").get_unique(pushed_far)
with pytest.raises(RuntimeError):
    Matcher("a1").get_unique(pushed_far)  # this doesn't exist
# but both of these will exist due to sequence position translation from a1
_ = Matcher("m0_idx").get_unique(pushed_far)
_ = Matcher("m0").get_unique(pushed_far)
_ = Matcher("a0_idx").get_unique(pushed_far)
_ = Matcher("a0").get_unique(pushed_far)
from rust_circuit.causal_scrubbing.hypothesis import (
PNP,
ExactSampler,
FuncSampler,
InterpNode,
MaybePoolAnnotation,
PoolAnnotation,
UncondSampler,
UncondTogetherSampler,
)
from rust_circuit.causal_scrubbing.testing_utils import IntDataset
class FirstSampler(FuncSampler):
    # Samples conditioned on the first column of the IntDataset.
    # Deliberately a single shared class-level function object: the demo below
    # notes lambdas are only equal by identity, so every FirstSampler must reuse
    # this exact object for two instances to compare equal by attrs.
    func = lambda x: IntDataset.unwrap(x).xs.value[:, 0]
    def __init__(self, pool_annot: MaybePoolAnnotation = PNP):
        super().__init__(FirstSampler.func, pool_annot)
# 1000 rows of 2 ints — presumably labeled by their first element via the lambda; confirm IntDataset.of_shape semantics
dataset = IntDataset.of_shape((1000, 2), lambda x: x[0])
# root node's sampler doesn't have pools enabled
inode = InterpNode(ExactSampler(), "A")
# this sampler has a pool of size 4
first_sampler = FirstSampler(pool_annot=PoolAnnotation(4))
# 40 nodes, each with the same sampler and with the same parent node.
for j in range(40):
    inode.make_descendant(first_sampler, f"B{j}")
sampled_inputs = inode.sample(None, dataset, dataset)
# only one sampler with pools
assert len(sampled_inputs.sampler_pools) == 1
first_sampler_pools = sampled_inputs.sampler_pools[(first_sampler, dataset)]
# only one parent ds this sampler was called on
assert len(first_sampler_pools) == 1
first_sampler_pool_for_parent_ds = list(first_sampler_pools.values())[0]
# pool has size 4 — the 40 children share these 4 samples rather than getting 40
assert len(first_sampler_pool_for_parent_ds) == 4
# so overall we have 5 different sampled datasets: one for the root, 4 for the rest
assert len(set(sampled_inputs.datasets.values())) == 5
inode.print(sampled_inputs=sampled_inputs)
# %%
"""
Why use pools?
This tames the explosion of the treeified model, while only failing to scrub high-order
correlations (that probably you didn't care about anyway). So, this is recommended if your
treeified model is too big (e.g for deep networks with non-trivial samplers).
"""
# %%
"""
In fact, there are additional cases where we could use samples from a pool. The first is:
given a different (by id) sampler, which is the same by attrs eq. This is for convenience, so you
don't have to keep track of all your CondSamplers. As long as you use the same PoolAnnotation
whenever you create CondSamplers, any two that are identical will share a pool (keep in mind
that lambdas are not equal, though, for when you are writing FuncSamplers).
"""
shared_pool = PoolAnnotation(1)
inode = InterpNode(ExactSampler(), "A")
b0 = inode.make_descendant(FirstSampler(pool_annot=shared_pool), "B0")
b1 = inode.make_descendant(FirstSampler(pool_annot=shared_pool), "B1")
assert b0.cond_sampler == b1.cond_sampler
sampled_inputs = inode.sample(None, dataset, dataset)
assert len(sampled_inputs.sampler_pools) == 1
assert len(set(sampled_inputs.datasets.values())) == 2 # one from the root, one from the other two nodes
inode.print(sampled_inputs=sampled_inputs)
# %%
# Meanwhile, samplers with different pool annotations are not equal, and don't share a pool:
inode = InterpNode(ExactSampler(), "A")
b0 = inode.make_descendant(FirstSampler(pool_annot=PoolAnnotation(1)), "B0")
b1 = inode.make_descendant(FirstSampler(pool_annot=PoolAnnotation(1)), "B1")
assert b0.cond_sampler != b1.cond_sampler
sampled_inputs = inode.sample(None, dataset, dataset)
assert len(sampled_inputs.sampler_pools) == 2
assert len(set(sampled_inputs.datasets.values())) == 3 # one from each node
inode.print(sampled_inputs=sampled_inputs)
# %%
# And just for completeness, here's what we get with no pools enabled:
first_sampler = FirstSampler()
inode = InterpNode(ExactSampler(), "A")
b0 = inode.make_descendant(first_sampler, "B0")
b1 = inode.make_descendant(first_sampler, "B1")
sampled_inputs = inode.sample(None, dataset, dataset)
assert len(sampled_inputs.sampler_pools) == 0
assert len(set(sampled_inputs.datasets.values())) == 3 # one from each node
inode.print(sampled_inputs=sampled_inputs)
# %%
"""
The other case where we could reuse samples is: if our sampler is called on a different reference,
which is nonetheless "the same, according to this sampler." Suppose you wrote a sampler which only
cared about the first element in a tuple, and the ref was (1, 2) so the sampler sampled (1, 5). If
the sampler then sees (1, 3), (1, 5) is just as likely to be sampled as it was the first time!
Without this, we might find we rarely make use of our pools, as it might be unlikely for a
sampler to see the same dataset in the course of a causal scrubbing run.
By default, datasets are only equivalent to themselves, i.e. if you extend CondSampler without
overriding ds_eq_class this sort of reuse won't happen. ExactSampler keeps this behavior.
Meanwhile, FuncSampler puts all datasets with the same value of its func in one equivalence class;
and UncondSampler puts all datasets into a single equivalence class.
"""
first_sampler = FirstSampler(pool_annot=PoolAnnotation(1))
inode = InterpNode(ExactSampler(), "A")
# B and C will separately sample data with the same first element
inode_b = inode.make_descendant(FirstSampler(), "B")
inode_c = inode.make_descendant(FirstSampler(), "C")
# So Db and Dc will see different parent ds but with the same first element:
inode_b.make_descendant(first_sampler, "Db")
inode_c.make_descendant(first_sampler, "Dc")
sampled_inputs = inode.sample(None, dataset, dataset)
# only one sampler with pools
assert len(sampled_inputs.sampler_pools) == 1
first_sampler_pools = sampled_inputs.sampler_pools[(first_sampler, dataset)]
# one equivalence class of ds this sampler was called on
eq_class = first_sampler.ds_eq_class(sampled_inputs.datasets[inode_b])
assert eq_class == first_sampler.ds_eq_class(sampled_inputs.datasets[inode_c])
assert len(first_sampler_pools) == 1
first_sampler_pool_for_parent_ds = first_sampler_pools[eq_class]
# pool has size 1
assert len(first_sampler_pool_for_parent_ds) == 1
# so overall we have 4 different sampled datasets: 3 from A B and C, 1 from the pool.
assert len(set(sampled_inputs.datasets.values())) == 4
inode.print(sampled_inputs=sampled_inputs)
# %%
# # Meanwhile if the datasets are "not the same", they do not share a pool:
inode = InterpNode(ExactSampler(), "A")
# B and C will sample totally random data:
inode_b = inode.make_descendant(UncondSampler(), "B")
inode_c = inode.make_descendant(UncondSampler(), "C")
# So Db and Dc will see different parent ds:
inode_b.make_descendant(first_sampler, "Db")
inode_c.make_descendant(first_sampler, "Dc")
sampled_inputs = inode.sample(None, dataset, dataset)
# only one sampler with pools
assert len(sampled_inputs.sampler_pools) == 1
first_sampler_pools = sampled_inputs.sampler_pools[(first_sampler, dataset)]
# two parent ds this sampler was called on
eq_class_b = first_sampler.ds_eq_class(sampled_inputs.datasets[inode_b])
eq_class_c = first_sampler.ds_eq_class(sampled_inputs.datasets[inode_c])
assert eq_class_b != eq_class_c
assert len(first_sampler_pools) == 2
first_sampler_pool_for_b_parent_ds = first_sampler_pools[eq_class_b]
first_sampler_pool_for_c_parent_ds = first_sampler_pools[eq_class_c]
# each pool has size 1
assert len(first_sampler_pool_for_b_parent_ds) == 1
assert len(first_sampler_pool_for_c_parent_ds) == 1
# so overall we have 5 different sampled datasets: 1 for each node
assert len(set(sampled_inputs.datasets.values())) == 5
inode.print(sampled_inputs=sampled_inputs)
# %%
"""
To sum up, we have:
- for each CondSampler hash and source ds:
- for each ds eq class, according to the sampler:
- as many samples as the pool size in the sampler's PoolAnnotation specified (in practice
we sample these lazily, so there's no overhead to using pools when e.g. you only sample once).
"""
# %%
"""
Why use pools? pt 2
Additionally, pools can be used semantically; in particular, a pool of size 1 enforces
things are sampled together.
(And, of course, pools can be used for other_inputs_samplers.)
"""
inode = InterpNode(ExactSampler(), "A")
inode_b = inode.make_descendant(FirstSampler(), "B", other_inputs_sampler=UncondTogetherSampler())
inode_c = inode.make_descendant(FirstSampler(), "C", other_inputs_sampler=UncondTogetherSampler())
# This sampler is always the same by default; but you can pass different uuids to it if desired
assert inode_b.other_inputs_sampler == inode_c.other_inputs_sampler
inode_b.make_descendant(FirstSampler(), "Db")
inode_c.make_descendant(FirstSampler(), "Dc")
sampled_inputs = inode.sample(None, dataset, dataset)
# only one sampler with pools
assert len(sampled_inputs.sampler_pools) == 1
uncond_tog_sampler = UncondTogetherSampler()
uncond_sampler_pools = sampled_inputs.sampler_pools[(uncond_tog_sampler, dataset)]
# according to this sampler, all datasets are in the same equivalence class
eq_class_b = uncond_tog_sampler.ds_eq_class(sampled_inputs.datasets[inode_b])
assert eq_class_b == uncond_tog_sampler.ds_eq_class(sampled_inputs.datasets[inode_c])
assert len(uncond_sampler_pools) == 1
uncond_sampler_pool_for_parent_ds = uncond_sampler_pools[eq_class_b]
# pool has size 1
assert len(uncond_sampler_pool_for_parent_ds) == 1
# so overall we have 2 other_inputs_ds sampled: one from the root, one from the pool
assert len(set(sampled_inputs.other_inputs_datasets.values())) == 2
inode.print(sampled_inputs=sampled_inputs)
# %% | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/demos/causal_scrubbing/pool.py | 0.674265 | 0.6437 | pool.py | pypi |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional, Tuple, Literal, Callable
from abc import ABC, abstractmethod
from dataclasses import dataclass
class Expr(ABC):
    """Abstract base for parsed expressions; subclasses serialize themselves via `render`."""

    @abstractmethod
    def render(self, out: list[str]):
        ...

    def __str__(self):
        pieces: list[str] = []
        self.render(pieces)
        return "".join(pieces)

    def __repr__(self):
        return f"Expr.parse({str(self)!r})"

    @staticmethod
    def parse(s: str) -> "Expr":
        """Parse `s` as a complete expression; raise ValueError on any leftover input."""
        expr, cursor = parse_expr(StrIdx(s, 0))
        if expr is None or not cursor.done():
            raise ValueError(f"Invalid expression: {s!r}")
        return expr
@dataclass(repr=False)
class IdentExpr(Expr):
    """An identifier, optionally subscripted: `name` or `name[e1, e2, ...]`."""

    name: str
    idx: list[Expr]

    def render(self, out: list[str]):
        out.append(self.name)
        if not self.idx:
            return
        out.append("[")
        for position, sub in enumerate(self.idx):
            if position:
                out.append(", ")
            sub.render(out)
        out.append("]")
@dataclass(repr=False)
class IntExpr(Expr):
    """A non-negative integer literal."""

    value: int

    def render(self, out: list[str]):
        out.append(f"{self.value}")
if TYPE_CHECKING:
    # "=" marks a gather-style formula; "<-" marks a scatter-style assignment.
    EquationKind = Literal["=", "<-"]
@dataclass(repr=False)
class Equation:
    """A single equation `lhs <kind> rhs`, where kind is "=" or "<-"."""

    kind: EquationKind
    lhs: Expr
    rhs: Expr

    def render(self, out: list[str]):
        self.lhs.render(out)
        out.append(f" {self.kind} ")
        self.rhs.render(out)

    def __str__(self):
        pieces: list[str] = []
        self.render(pieces)
        return "".join(pieces)

    def __repr__(self):
        return f"Equation.parse({str(self)!r})"

    @staticmethod
    def parse(s: str) -> "Equation":
        """Parse `s` as a complete equation; raise ValueError on any leftover input."""
        equation, cursor = parse_equation(StrIdx(s, 0))
        if equation is None or not cursor.done():
            raise ValueError(f"Invalid equation: {s!r}")
        return equation
@dataclass
class StrIdx:
    """An immutable cursor into `content` at position `idx`; all motion returns a new cursor."""

    content: str
    idx: int

    def advance(self, n: int = 1) -> "StrIdx":
        """Cursor moved forward by `n` characters."""
        return StrIdx(self.content, self.idx + n)

    def peek(self, n: int = 1) -> Optional[str]:
        """The next `n` characters without moving, or None if fewer remain."""
        end = self.idx + n
        if end > len(self.content):
            return None
        return self.content[self.idx : end]

    def next(self) -> Tuple[Optional[str], "StrIdx"]:
        """Consume one character: (char, advanced cursor), or (None, self) at end."""
        ch = self.peek()
        return (None, self) if ch is None else (ch, self.advance())

    def skip_whitespace(self) -> "StrIdx":
        """Skip spaces, tabs, newlines, and `#`-to-end-of-line comments."""
        cur = self
        while True:
            ch = cur.peek()
            if ch in (" ", "\t", "\n"):
                cur = cur.advance()
            elif ch == "#":
                while cur.peek() not in ("\n", None):
                    cur = cur.advance()
            else:
                return cur

    def done(self) -> bool:
        """True iff only whitespace/comments remain."""
        return self.skip_whitespace().peek() is None
def parse_ident(s: StrIdx) -> Tuple[Optional[str], StrIdx]:
    """Parse an identifier ([A-Za-z_][A-Za-z0-9_]*) after optional whitespace.

    Returns (ident, cursor-after-ident), or (None, original cursor) on failure.
    """
    start = s
    cur = s.skip_whitespace()
    first = cur.peek()
    # An identifier may not start with a digit (or anything non-alphanumeric).
    if first is None or not (first.isalpha() or first == "_"):
        return None, start
    chars: list[str] = []
    while True:
        ch = cur.peek()
        if ch is None or not (ch.isalnum() or ch == "_"):
            break
        chars.append(ch)
        cur = cur.advance()
    return "".join(chars), cur
def is_ident(s: str) -> bool:
    """True iff the whole of `s` is exactly one identifier token."""
    parsed = parse_ident(StrIdx(s, 0))[0]
    if parsed is None:
        return False
    return parsed == s
def parse_int(s: StrIdx) -> Tuple[Optional[int], StrIdx]:
    """Parse a run of digits after optional whitespace.

    Returns (value, cursor-after-digits), or (None, original cursor) on failure.
    """
    start = s
    cur = s.skip_whitespace()
    digits: list[str] = []
    while (ch := cur.peek()) is not None and ch.isdigit():
        digits.append(ch)
        cur = cur.advance()
    if not digits:
        return None, start
    return int("".join(digits)), cur
def parse_expr(s: StrIdx) -> Tuple[Optional[Expr], StrIdx]:
    """Parse one expression: an identifier (optionally `[...]`-subscripted) or an int literal.

    Returns (expr, cursor-after-expr) on success, or (None, the ORIGINAL cursor)
    on failure, so callers can backtrack.
    """
    orig = s
    s = s.skip_whitespace()
    ident, s = parse_ident(s)
    if ident is not None:
        idx: list[Expr] = []
        s = s.skip_whitespace()
        if s.peek() != "[":
            # Bare identifier with no subscript list.
            return IdentExpr(ident, idx), s
        s = s.advance()
        while True:
            s = s.skip_whitespace()
            if s.peek() == "]":
                s = s.advance()
                break
            e, s = parse_expr(s)
            if e is None:
                return None, orig
            idx.append(e)
            s = s.skip_whitespace()
            if s.peek() == ",":
                # Separator consumed; a trailing comma before "]" is accepted.
                s = s.advance()
            elif s.peek() != "]":
                return None, orig
        return IdentExpr(ident, idx), s
    value, s = parse_int(s)
    if value is not None:
        return IntExpr(value), s
    return None, orig
def parse_equation(s: StrIdx) -> Tuple[Optional[Equation], StrIdx]:
    """Parse `lhs = rhs` or `lhs <- rhs`; (None, original cursor) on failure."""
    start = s
    lhs, s = parse_expr(s)
    if lhs is None:
        return None, start
    s = s.skip_whitespace()
    kind: EquationKind
    if s.peek() == "=":
        kind = "="
    elif s.peek(2) == "<-":
        kind = "<-"
    else:
        return None, start
    rhs, s = parse_expr(s.advance(len(kind)))
    if rhs is None:
        return None, start
    return Equation(kind, lhs, rhs), s
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Optional, Union, TypeVar, Sequence, Literal
from dataclasses import dataclass
import rust_circuit.index_util.internal.parse as parse
@dataclass(frozen=True, eq=True)
class OutAxisIdx:
    """Index expression referring to output axis number `axis`."""

    axis: int
@dataclass(frozen=True, eq=True)
class IntLitIdx:
    """Index expression that is a fixed integer literal."""

    value: int
@dataclass(frozen=True, eq=True)
class PosDataIdx:
    """Placeholder index expression: "index here with the position-data array"."""

    pass
if TYPE_CHECKING:
    T = TypeVar("T")

def is_unique(lst: list[T]) -> bool:
    """True iff `lst` contains no duplicate (hashable) elements."""
    seen = set()
    for item in lst:
        if item in seen:
            return False
        seen.add(item)
    return True
# Reserved name for the output array in gather formulas.
OUT_NAME: str = "out"
if TYPE_CHECKING:
    # An index expression: an output-axis reference, an int literal, or the
    # position-data placeholder.
    IdxExpr = Union[OutAxisIdx, IntLitIdx, PosDataIdx]
@dataclass
class IdxRenderer:
    """Renders index expressions back to source text.

    axis_names: printable name for each output axis.
    pos_name / pos_indices: how the position-data array is displayed when a
    PosDataIdx placeholder is encountered.
    """

    axis_names: list[str]
    pos_name: str
    pos_indices: list[IdxExpr]

    def render_indices(self, indices: list[IdxExpr], out: list[str]) -> None:
        """Append `[i0, i1, ...]` to `out`; appends nothing for an empty list."""
        if not indices:
            return
        out.append("[")
        for position, idx in enumerate(indices):
            if position:
                out.append(", ")
            self.render_idx(idx, out)
        out.append("]")

    def render_idx(self, idx: IdxExpr, out: list[str]) -> None:
        """Append the textual form of a single index expression to `out`."""
        if isinstance(idx, OutAxisIdx):
            out.append(self.axis_names[idx.axis])
        elif isinstance(idx, IntLitIdx):
            out.append(str(idx.value))
        elif isinstance(idx, PosDataIdx):
            out.append(self.pos_name)
            self.render_indices(self.pos_indices, out)
        else:
            assert False
@dataclass(repr=False)
class GatherSpec:
    """Resolved form of a gather formula `out[axes...] = src[... pos[...] ...]`.

    out_rank: number of output axes.
    src_indices: how each axis of `src` is indexed; exactly one PosDataIdx.
    pos_indices: how the `pos` index array is indexed; no PosDataIdx allowed.
    axis_names / src_name / pos_name: identifiers used when rendering.
    """

    out_rank: int
    src_indices: list[IdxExpr]
    pos_indices: list[IdxExpr]
    axis_names: list[str]
    src_name: str
    pos_name: str

    def __post_init__(self) -> None:
        # Structural invariants; a violation indicates a bug in resolve() or a
        # hand-built spec.
        assert sum(1 for i in self.src_indices if isinstance(i, PosDataIdx)) == 1
        assert all(isinstance(i, OutAxisIdx) or isinstance(i, IntLitIdx) for i in self.pos_indices)
        assert all(i.axis < self.out_rank for i in self.src_indices + self.pos_indices if isinstance(i, OutAxisIdx))
        assert all(parse.is_ident(n) for n in self.axis_names)
        assert len(self.axis_names) == self.out_rank
        assert parse.is_ident(self.src_name)
        assert parse.is_ident(self.pos_name)
        assert is_unique(self.axis_names + [self.src_name, self.pos_name, OUT_NAME])

    def render(self, out: list[str]) -> None:
        """Append the formula's textual form to `out`."""
        renderer = IdxRenderer(axis_names=self.axis_names, pos_name=self.pos_name, pos_indices=self.pos_indices)
        out.append(f"{OUT_NAME}")
        renderer.render_indices([OutAxisIdx(i) for i in range(self.out_rank)], out)
        out.append(" = ")
        out.append(self.src_name)
        renderer.render_indices(self.src_indices, out)

    def __str__(self) -> str:
        out: list[str] = []
        self.render(out)
        return "".join(out)

    def __repr__(self) -> str:
        # Evaluable repr: round-trips through GatherSpec.parse.
        return f"GatherSpec.parse({str(self)!r}, {[self.src_name, self.pos_name]!r})"

    @staticmethod
    def resolve(equation: parse.Equation, arr_names: list[str]) -> "GatherSpec":
        """Resolve a parsed `=` equation into a GatherSpec.

        arr_names names the two input arrays (src and pos, in either order).
        Raises ValueError on malformed formulas.
        """
        if equation.kind != "=":
            raise ValueError(f"Gather formula must be written with '=' operator; got: {equation.kind!r}")
        if len(arr_names) != 2:
            raise ValueError(f"Expected exactly two input array names for gather; got: {arr_names!r}")
        lhs = equation.lhs
        if not (isinstance(lhs, parse.IdentExpr) and lhs.name == OUT_NAME):
            raise ValueError(f"Invalid left hand side; expected {OUT_NAME!r} array, got: {lhs}")
        if not is_unique([OUT_NAME] + arr_names):
            raise ValueError("Array names must be unique")
        # Output axes are declared by the lhs subscript; each must be a fresh name.
        forbidden_axis_names = set([OUT_NAME] + arr_names)
        axis_names = []
        for i, e in enumerate(lhs.idx):
            if not (isinstance(e, parse.IdentExpr) and e.idx == []):
                raise ValueError(f"Invalid index name: {e}")
            if e.name in forbidden_axis_names:
                raise ValueError(f"Index name conflicts with a name already in scope: {e.name!r}")
            forbidden_axis_names.add(e.name)
            axis_names.append(e.name)
        axis_name_indices = {n: i for i, n in enumerate(axis_names)}
        out_rank = len(axis_names)
        rhs = equation.rhs
        if not isinstance(rhs, parse.IdentExpr):
            raise ValueError(f"Invalid right hand side; expected array access expression, got: {rhs}")
        # Whichever of the two arrays is accessed on the rhs is src; the other is pos.
        src_name = rhs.name
        if src_name not in arr_names:
            raise ValueError(f"Invalid 'src' array name; expected one of {arr_names!r}, got: {src_name!r}")
        pos_name = [n for n in arr_names if n != src_name][0]

        def resolve_direct_index(e: parse.Expr) -> Union[OutAxisIdx, IntLitIdx]:
            # A "direct" index is either a declared axis name or an int literal.
            if isinstance(e, parse.IdentExpr) and e.idx == []:
                axis_idx = axis_name_indices.get(e.name)
                if axis_idx is None:
                    raise ValueError(f"Index name not in scope; expected one of {axis_names!r}, got: {e.name!r}")
                return OutAxisIdx(axis_idx)
            elif isinstance(e, parse.IntExpr):
                return IntLitIdx(e.value)
            else:
                raise ValueError(f"Invalid index expression: {e}")

        src_indices: list[IdxExpr] = []
        pos_indices: Optional[list[IdxExpr]] = None
        for i, e in enumerate(rhs.idx):
            if isinstance(e, parse.IdentExpr) and e.name == pos_name:
                # The pos array appears exactly once, as a nested subscript of src.
                src_indices.append(PosDataIdx())
                if pos_indices is not None:
                    raise ValueError(f"Index array {pos_name!r} appears more than once")
                pos_indices = [resolve_direct_index(e2) for e2 in e.idx]
            else:
                src_indices.append(resolve_direct_index(e))
        if pos_indices is None:
            raise ValueError(f"Index array {pos_name!r} must be used")
        return GatherSpec(
            out_rank=out_rank,
            axis_names=axis_names,
            src_name=src_name,
            src_indices=src_indices,
            pos_name=pos_name,
            pos_indices=pos_indices,
        )

    @staticmethod
    def parse(s: str, arr_names: Sequence[str]) -> "GatherSpec":
        """Parse a gather formula string and resolve it against `arr_names`."""
        return GatherSpec.resolve(parse.Equation.parse(s), list(arr_names))
if TYPE_CHECKING:
    # Optional reduction applied when multiple scattered values collide.
    ScatterReduce = Optional[Literal["add", "multiply"]]
@dataclass
class ScatterSpec:
    """Resolved form of a scatter formula `dst[... pos[...] ...] <- src[...]`.

    dst_indices: how each axis of `dst` is indexed; exactly one PosDataIdx.
    src_indices / pos_indices: direct (axis-name or int-literal) indices only.
    axis_names: index variables in order of first appearance.
    reduce: optional collision reduction mode ("add"/"multiply"), or None.
    """

    dst_indices: list[IdxExpr]
    src_indices: list[IdxExpr]
    pos_indices: list[IdxExpr]
    axis_names: list[str]
    dst_name: str
    src_name: str
    pos_name: str
    reduce: ScatterReduce

    def __post_init__(self) -> None:
        # Structural invariants; a violation indicates a bug in resolve() or a
        # hand-built spec.
        assert sum(1 for i in self.dst_indices if isinstance(i, PosDataIdx)) == 1
        assert all(
            isinstance(i, OutAxisIdx) or isinstance(i, IntLitIdx)
            for indices in [self.src_indices, self.pos_indices]
            for i in indices
        )
        assert all(
            i.axis < len(self.axis_names)
            for indices in [self.dst_indices, self.src_indices, self.pos_indices]
            for i in indices
            if isinstance(i, OutAxisIdx)
        )
        assert all(parse.is_ident(n) for n in self.axis_names)
        assert parse.is_ident(self.dst_name)
        assert parse.is_ident(self.src_name)
        assert parse.is_ident(self.pos_name)
        assert is_unique(self.axis_names + [self.dst_name, self.src_name, self.pos_name])

    def render(self, out: list[str]) -> None:
        """Append the formula's textual form to `out` (reduce mode is not rendered)."""
        renderer = IdxRenderer(axis_names=self.axis_names, pos_name=self.pos_name, pos_indices=self.pos_indices)
        out.append(self.dst_name)
        renderer.render_indices(self.dst_indices, out)
        out.append(" <- ")
        out.append(self.src_name)
        renderer.render_indices(self.src_indices, out)

    def __str__(self) -> str:
        out: list[str] = []
        self.render(out)
        return "".join(out)

    def __repr__(self) -> str:
        # Fixed to mirror GatherSpec.__repr__: previously the formula was
        # interpolated unquoted ("ScatterSpec(dst[...] <- ..., [...])"), which
        # was not an evaluable expression and silently dropped `reduce`.
        return (
            f"ScatterSpec.parse({str(self)!r}, "
            f"{[self.dst_name, self.src_name, self.pos_name]!r}, reduce={self.reduce!r})"
        )

    @staticmethod
    def resolve(equation: parse.Equation, arr_names: list[str], reduce: ScatterReduce) -> "ScatterSpec":
        """Resolve a parsed `<-` equation into a ScatterSpec.

        arr_names names the three input arrays (dst, src, and pos, in any order).
        Raises ValueError on malformed formulas.
        """
        if equation.kind != "<-":
            raise ValueError(f"Scatter formula must be written with '<-' operator; got: {equation.kind!r}")
        if len(arr_names) != 3:
            raise ValueError(f"Expected exactly three input array names for scatter; got: {arr_names!r}")
        if not is_unique(arr_names):
            raise ValueError(f"Input array names must be unique; got: {arr_names!r}")
        lhs = equation.lhs
        if not isinstance(lhs, parse.IdentExpr):
            raise ValueError(f"Invalid left hand side; expected array access expression, got: {lhs}")
        dst_name = lhs.name
        if dst_name not in arr_names:
            raise ValueError(f"Invalid 'dst' array name; expected one of {arr_names!r}, got: {dst_name!r}")
        rhs = equation.rhs
        if not isinstance(rhs, parse.IdentExpr):
            raise ValueError(f"Invalid right hand side; expected array access expression, got: {rhs}")
        src_name = rhs.name
        if src_name not in arr_names:
            raise ValueError(f"Invalid 'src' array name; expected one of {arr_names!r}, got: {src_name!r}")
        # The remaining array name is the scatter index (pos) array.
        pos_name = next(n for n in arr_names if n not in [dst_name, src_name])
        axis_names: list[str] = []
        axis_ids: dict[str, int] = {}

        def resolve_direct_index(err_ctx: str, e: parse.Expr) -> Union[OutAxisIdx, IntLitIdx]:
            # Unlike gather, index variables are not pre-declared: they are
            # registered on first use, in order of appearance.
            if isinstance(e, parse.IdentExpr) and e.idx == []:
                if e.name == pos_name:
                    raise ValueError(f"Scatter index array {pos_name!r} cannot appear in indices for {err_ctx}")
                axis_idx = axis_ids.get(e.name)
                if axis_idx is None:
                    axis_idx = len(axis_names)
                    axis_names.append(e.name)
                    axis_ids[e.name] = axis_idx
                return OutAxisIdx(axis_idx)
            elif isinstance(e, parse.IntExpr):
                return IntLitIdx(e.value)
            else:
                raise ValueError(f"Invalid index expression in {err_ctx}: {e}")

        dst_indices: list[IdxExpr] = []
        pos_indices: Optional[list[IdxExpr]] = None
        for e in lhs.idx:  # (enumerate index was unused; dropped)
            if isinstance(e, parse.IdentExpr) and e.name == pos_name:
                dst_indices.append(PosDataIdx())
                if pos_indices is not None:
                    raise ValueError(f"Scatter index array {pos_name!r} must be used exactly once")
                pos_indices = [resolve_direct_index(repr(pos_name), e2) for e2 in e.idx]
            else:
                dst_indices.append(resolve_direct_index(repr(dst_name), e))
        if pos_indices is None:
            raise ValueError(f"Scatter index array {pos_name!r} must be used")
        src_indices: list[IdxExpr] = [resolve_direct_index(repr(src_name), e) for e in rhs.idx]
        # Axis-coverage checks: at most one axis may be supplied only via pos,
        # and every axis must appear somewhere on the lhs.
        all_axes = set(range(len(axis_names)))
        dst_axes = set(i.axis for i in dst_indices if isinstance(i, OutAxisIdx))
        pos_axes = set(i.axis for i in pos_indices if isinstance(i, OutAxisIdx))
        missing_dst = all_axes - dst_axes
        if len(missing_dst) > 1:
            raise ValueError(
                f"At most one index variable can be missing from indices used to directly access {dst_name!r} array; "
                + f"found {len(missing_dst)} missing indices: {[axis_names[i] for i in missing_dst]!r}"
            )
        missing_lhs = all_axes - dst_axes - pos_axes
        if len(missing_lhs) > 0:
            raise ValueError(
                f"All index variables must be used on left hand side of scatter formula; "
                + f"found {len(missing_lhs)} missing indices: {[axis_names[i] for i in missing_lhs]!r}"
            )
        return ScatterSpec(
            dst_indices=dst_indices,
            src_indices=src_indices,
            pos_indices=pos_indices,
            axis_names=axis_names,
            dst_name=dst_name,
            src_name=src_name,
            pos_name=pos_name,
            reduce=reduce,
        )

    @staticmethod
    def parse(s: str, arr_names: Sequence[str], reduce: ScatterReduce = None) -> "ScatterSpec":
        """Parse a scatter formula string and resolve it against `arr_names`."""
        return ScatterSpec.resolve(parse.Equation.parse(s), list(arr_names), reduce)
from __future__ import annotations
import functools
import uuid
from collections import Counter
from typing import Callable, Dict, Hashable, List, Optional, Protocol, Set, Tuple, Union
from warnings import warn
import attrs
import torch
from rust_circuit import (
Circuit,
GeneralFunction,
GeneralFunctionShapeInfo,
GeneralFunctionSpecBase,
IterativeMatcher,
IterativeMatcherIn,
MatcherIn,
PrintHtmlOptions,
PrintOptions,
hash_tensor,
new_traversal,
restrict,
)
from .dataset import Dataset, color_dataset
@attrs.define(eq=True, hash=True, init=False)
class PoolAnnotation:
    """
    Pool annotation for a cond sampler. See pool.py demo for more info.
    size: size of the pool, i.e. how many different datasets will be sampled. A pool size of 1 has semantic meaning
    (you want to enforce things being sampled together). A pool of size, say, 4-10 is useful for performance.
    Intermediate sizes seem confusing, it's not clear to me why you would want them.
    id: to define which annotations, and therefore which cond samplers, are sharing a pool.
    """

    size: int
    id: uuid.UUID

    def __init__(self, size: int = 4, id: Optional[uuid.UUID] = None):
        if size <= 0:
            raise ValueError(size)
        self.size = size
        # A fresh uuid by default, so distinct annotations never share a pool
        # unless the caller explicitly passes the same id.
        self.id = uuid.uuid4() if id is None else id
@attrs.define(hash=True, eq=True)
class PoolNotPresent:
    """Sentinel type marking a sampler that has no pool annotation."""

    ...
PNP = PoolNotPresent()  # shared "no pool" sentinel instance
MaybePoolAnnotation = Union[PoolAnnotation, PoolNotPresent]
# Opaque equivalence-class key a sampler assigns to a dataset (see CondSampler.ds_eq_class).
DsEqClassT = Hashable
# A pool is a fixed-size list of lazily-filled dataset slots.
Pool = List[Optional[Dataset]]
PoolPerDsEqClass = dict[DsEqClassT, Pool]
# Pools are shared per (sampler, source dataset) pair.
SharedPoolKey = Tuple["CondSampler", Dataset]
PoolsPerSampler = dict[SharedPoolKey, PoolPerDsEqClass]
@attrs.define(hash=True, eq=True)
class CondSampler(Protocol):
    """
    Responsible for sampling a new dataset given a source dataset to draw from, and a reference dataset.
    Parameters:
    'pool_annot`: See pool.py demo for more info. If PoolNotPresent (the default), no sample reuse will
    happen. Using pools is recommended if your treeified model is too big (e.g for deep networks with
    non-trivial samplers).
    """

    pool_annot: MaybePoolAnnotation = PNP

    def __call__(self, ref: Dataset, ds: Dataset, rng=None) -> Dataset:
        """Sample a dataset of len(ref) drawn from `ds`, conditioned on `ref`."""
        ...

    def ds_eq_class(self, ds: Dataset) -> Hashable:
        """
        Defines what reference datasets share a pool. By default, no sharing is allowed.
        This should return some value which will be the same for datasets that "are the same, according to this sampler."
        Note: not guaranteed to actually agree with __call__, that's up to the author.
        """
        return ds

    def sample_and_update_pool(
        self, ref: Dataset, ds: Dataset, rng: torch.Generator, pools_per_sampler: PoolsPerSampler
    ) -> Dataset:
        """
        Reuses a dataset from the pool or samples a new one and updates the pool.
        """
        if isinstance(self.pool_annot, PoolNotPresent):
            return self(ref, ds, rng)
        # Get pools for this sampler. Based on hash, which depends on its PoolAnnotation as well as other attrs.
        if (self, ds) not in pools_per_sampler:
            pools_per_sampler[(self, ds)] = PoolPerDsEqClass()
        pools = pools_per_sampler[(self, ds)]
        # Get the pool for the ref ds. Depends on what ref ds's eq class is, according to this sampler.
        ref_eq_class = self.ds_eq_class(ref)
        if ref_eq_class not in pools:
            pools[ref_eq_class] = [None for _ in range(self.pool_annot.size)]
        pool = pools[ref_eq_class]
        # Pick a ds from the pool, lazily sampling a new one if needed.
        picked = torch.randint(low=0, high=len(pool), size=(), generator=rng)
        picked_ds = pool[picked]
        if picked_ds is None:
            picked_ds = self(ref, ds, rng)
            pool[picked] = picked_ds
        return picked_ds

    def pretty_str(self, data: Optional[Dataset], datum_idx=0) -> str:
        """One-line description, with a sample datum from `data` when available."""
        # Not __str__ because it takes a dataset
        if data is None:
            extra = ""
        else:
            datum = data[datum_idx]
            extra = f"d={datum}"
            cond_extra = self.str_cond(datum)
            if cond_extra:
                extra += ", " + cond_extra
            extra = f"({extra})"
        return f"{self.class_str()}{extra}"

    def class_str(self):
        """Class name, plus a short pool-annotation tag when pooling is enabled."""
        return f"{self.__class__.__name__}" + (f"#{hash(self.pool_annot) & 1023}" if self.pool_annot != PNP else "")

    def str_cond(self, datum: Dataset) -> str:
        """Extra per-datum description for subclasses to override; empty by default."""
        return ""
@attrs.define(hash=True, eq=True, init=False)
class FuncSampler(CondSampler):
    """
    Samples each datum uniformly from the subset of ds that agrees on the value of func. That is, for x in ref_ds we sample a corresponding x' uniformly from `[y for y in ds if func(y) == func(x)]`.
    This implements the standard causal scrubbing sampling operation as described in the writeup, if func returns the feature(s) computed by the interp node.
    `func: Dataset -> Tensor` must return a 1 or 2 dimensional tensor, where the first dimension is the same length as the dataset the function is called on.
    """

    func: Callable[[Dataset], torch.Tensor] = attrs.field(
        kw_only=True
    )  # irrelevant since we are writing our own init method, but attrs complains otherwise

    def __init__(
        self,
        func: Callable[[Dataset], torch.Tensor],
        pool_annot: MaybePoolAnnotation = PNP,
    ):
        self.func = func
        self.pool_annot = pool_annot

    @staticmethod
    @functools.lru_cache(maxsize=128)
    def get_matching_idxs(
        func: Callable[[Dataset], torch.Tensor], ref: Dataset, ds: Dataset
    ) -> List[Tuple[torch.Tensor, torch.Tensor]]:
        """For each distinct value of func over ref, the (ref idxs, ds idxs) that take that value.

        Cached; requires func, ref and ds to be hashable.
        """
        r_vals = func(ref)
        d_vals = func(ds)
        # Normalize 1-d feature vectors to a (len, 1) matrix so rows compare uniformly.
        r_vals = r_vals[:, None] if r_vals.ndim == 1 else r_vals
        d_vals = d_vals[:, None] if d_vals.ndim == 1 else d_vals
        result = []
        for elem in torch.unique(r_vals, dim=0):
            matching_idxs_r = torch.nonzero(torch.all(r_vals == elem, dim=1).ravel(), as_tuple=True)[0]
            matching_idxs_d = torch.nonzero(torch.all(d_vals == elem, dim=1).ravel(), as_tuple=True)[0]
            r_count = len(matching_idxs_r)
            d_count = len(matching_idxs_d)
            assert d_count > 0, f"no matching idxs found for value {elem} of condition {func}"
            if (
                d_count < r_count
            ):  # Unfortunately happens a lot when func is injective, because r may contain duplicates but d won't
                warn(f"not enough unique dataset samples for value {elem} of condition {func}, {d_count} < {r_count}")
            result.append((matching_idxs_r, matching_idxs_d))
        return result

    def __call__(self, ref: Dataset, ds: Dataset, rng=None) -> Dataset:
        """For each ref datum, draw (with replacement) a ds datum with the same func value."""
        r_d_pairs = FuncSampler.get_matching_idxs(self.func, ref, ds)
        # -1 marks "not yet assigned"; every slot must be filled by the loop below.
        idxs = torch.full((len(ref),), -1, dtype=torch.int64, device=r_d_pairs[0][0].device)
        for matching_idxs_r, matching_idxs_d in r_d_pairs:
            idxs[matching_idxs_r] = matching_idxs_d[
                torch.multinomial(
                    torch.ones(len(matching_idxs_d), device=matching_idxs_d.device, dtype=torch.float32),
                    num_samples=len(matching_idxs_r),
                    replacement=True,
                    generator=rng,
                )
            ]
        assert (idxs != -1).all(), "this should never happen!"
        return ds[idxs]

    def ds_eq_class(self, ds):
        # Datasets are pool-equivalent iff func takes identical values on them.
        return hash_tensor(self.func(ds))

    def str_cond(self, datum: Dataset) -> str:
        out = self.func(datum)[0]
        if isinstance(out, torch.Tensor) and out.numel() == 1:
            out = out.item()  # type: ignore
        return f"f(d)={str(out)}"
@attrs.define(hash=True, eq=True)
class UncondSampler(CondSampler):
    """
    Samples randomly without conditioning on the reference dataset.
    """

    def __call__(self, ref: Dataset, ds: Dataset, rng=None) -> Dataset:
        return ds.sample(len(ref), rng)

    def ds_eq_class(self, ds):
        # All reference datasets are equivalent: any pooled samples are shared freely.
        return ()
@attrs.define(hash=True, eq=True)
class UncondTogetherSampler(UncondSampler):
    """
    Samples randomly without conditioning on the reference dataset,
    and has a pool of size 1 so a single sample will be reused.
    By default all instances of this class are equal, so all nodes with an
    UncondTogetherSampler will get the same ds, but you can pass a different
    uuid at init if you want to create groups of nodes.
    """

    # NOTE(review): attrs' `define` auto-detects this hand-written __init__ and
    # keeps it rather than generating one — confirm against the pinned attrs version.
    def __init__(self, id=uuid.UUID("566ae005-4a34-4e37-9383-731e1a722ef2")):
        # Fixed default uuid => all default instances compare equal and share one pool.
        self.pool_annot = PoolAnnotation(1, id=id)
@attrs.define(hash=True, eq=True)
class ExactSampler(CondSampler):
    """Returns the reference dataset unchanged (the identity "sampler")."""

    def __call__(self, ref: Dataset, ds: Dataset, rng=None) -> Dataset:
        return ref  # should maybe check this is valid draw from ds
@attrs.define(hash=True, eq=True)
class FixedOtherSampler(UncondSampler):
    """Always returns the fixed dataset `other` supplied at construction."""

    other: Dataset = attrs.field(
        kw_only=True
    )  # irrelevant since we are writing our own init method, but attrs complains otherwise

    def __init__(self, other: Dataset):
        super().__init__()
        self.other = other

    def __call__(self, ref: Dataset, ds: Dataset, rng=None) -> Dataset:
        # The fixed dataset must match the reference length it is substituted for.
        assert len(self.other) == len(ref), (len(self.other), len(ref))
        return self.other  # should maybe check this is a valid draw from ds
def chain_excluding(parent: IterativeMatcher, child: IterativeMatcherIn, term_early_at: MatcherIn = False):
    """Chain from `parent` down to `child`, refusing to pass through nodes matched by `term_early_at`."""
    if term_early_at is not False:
        child = restrict(child, term_early_at=term_early_at)
    return parent.chain(child)
@attrs.define(frozen=True)
class SampledInputs:
    """Holds the sampled inputs at each node of an interpretation graph."""

    datasets: dict[InterpNode, Dataset] = attrs.field(factory=dict)
    other_inputs_datasets: dict[InterpNode, Dataset] = attrs.field(factory=dict)
    sampler_pools: PoolsPerSampler = attrs.field(factory=dict)

    def __getitem__(self, node: InterpNode) -> Dataset:
        """Leaves map to their own dataset; interior nodes to their other-inputs dataset."""
        table = self.datasets if node.is_leaf() else self.other_inputs_datasets
        return table[node]

    def get(self, node: InterpNode, default: Optional[Dataset] = None) -> Optional[Dataset]:
        """Like __getitem__, but returns `default` instead of raising KeyError."""
        try:
            return self[node]
        except KeyError:
            return default
class InterpNodeGeneralFunctionSpec(GeneralFunctionSpecBase):
    """Trivial GeneralFunction spec used to render an InterpNode tree as a circuit.

    Fix: the original assigned `self.function` and `self.name` in __init__,
    shadowing the identically-named methods and leaving them unreachable dead
    code. Store under private names and expose them properly instead.
    NOTE(review): assumes the base class accepts `name` as a readable attribute
    (as the original's plain-attribute version implied) — confirm against
    GeneralFunctionSpecBase.
    """

    def __init__(self, function, name):
        self._function = function  # wrapped callable applied to the input tensors
        self._name = name

    @property
    def name(self):
        return self._name

    def function(self, *tensors):
        return self._function(*tensors)

    def get_shape_info(self, *shapes):
        # empty shape (scalar output), no batchable dims
        return GeneralFunctionShapeInfo((), 0, [False] * len(shapes))
class InterpNode:
    """
    Interpretation graph (tree!) node.
    It doesn't have to be a tree in general, but we are lazy and haven't written treeification code for it.
    cond_sampler: samples data that this node is "indifferent between", e.g.
    data agreeing on a feature with a reference datum if this node only cares about that feature
    name: should be unique within an interpretation. it's nice to be able to uniquely identify things
    by name rather than by more opaque id.
    children: interpretation graph nodes that are children of this one. this is used for recursive sampling
    when causal scrubbing, but not much else. it's also kind of nice to think of your interpretation
    as being a graph.
    other_inputs_sampler: formally, causal scrubbing hypotheses should have graph isomorphisms between
    the interpretation and the graph you are trying to explain. for convenience, you can instead specify
    how inputs into the image of this node should be sampled, if not otherwise stated. this is equivalent
    to adding children to this node to make it surjective, and using this sampler as the cond_sampler for
    all of those. not applicable for leaves. (for example, you could have your interpretation just consist
    of the circuit you think is important, and say that other inputs should be sampled randomly).
    """

    # NOTE(review): the default UncondSampler() instance is shared across calls
    # (standard mutable-default caveat); it appears stateless, but confirm.
    def __init__(self, cond_sampler: CondSampler, name: str, other_inputs_sampler: CondSampler = UncondSampler()):
        self._name = name
        self.cond_sampler = cond_sampler
        self.other_inputs_sampler = other_inputs_sampler
        self._children: Tuple["InterpNode", ...] = tuple()

    @property
    def name(self) -> str:
        return self._name

    @property
    def children(self) -> Tuple["InterpNode", ...]:
        return self._children

    def make_descendant(
        self,
        cond_sampler: CondSampler,
        name: str,
        other_inputs_sampler: CondSampler = UncondSampler(),
    ) -> "InterpNode":
        """Create a child node and add it to this node's children."""
        d = self.__class__(name=name, cond_sampler=cond_sampler, other_inputs_sampler=other_inputs_sampler)
        self._children = self.children + (d,)
        return d

    def is_leaf(self) -> bool:
        return len(self.children) == 0

    def get_tree_size(self) -> int:
        """Gets the number of nodes in the interp graph tree rooted at this node."""
        return 1 + sum([c.get_tree_size() for c in self.children])

    def get_nodes(self: "InterpNode") -> List["InterpNode"]:
        """All nodes in the subtree rooted here, in depth-first (self-first) order."""
        return [
            self,
        ] + [n for c in self.children for n in c.get_nodes()]

    def get_descendants_with_parents(self: "InterpNode") -> List[Tuple["InterpNode", "InterpNode"]]:
        """(parent, child) pairs for every edge in the subtree, depth-first."""
        return [(self, c) for c in self.children] + [
            pair for c in self.children for pair in c.get_descendants_with_parents()
        ]

    def print(
        self,
        options: Optional[Union[PrintOptions, PrintHtmlOptions]] = None,
        sampled_inputs: SampledInputs = SampledInputs(),
        print_data=True,
        color_by_data=True,
        repr=False,
        datum_idx=0,
    ):
        """
        Render the interp tree by converting it to a throwaway circuit and printing that.

        Parameters:
        print_data: if True, prints a sample datum from cond_sampler as a comment
        color_by_data: if True, colors nodes by ds
        repr: if True, returns string instead of printing it (used for testing)
        """

        def to_circuit(i: InterpNode) -> Tuple[Circuit, Dict[Circuit, "InterpNode"]]:
            # Builds a placeholder GeneralFunction per node and remembers which
            # circuit corresponds to which interp node.
            node_circuit_map = {}
            child_circuits = []
            for child in i.children:
                child_circuit, child_map = to_circuit(child)
                for k, v in child_map.items():
                    node_circuit_map[k] = v
                child_circuits.append(child_circuit)
            spec = InterpNodeGeneralFunctionSpec(lambda x: x, "")
            this_circuit = GeneralFunction(*child_circuits, spec=spec, name=i.name)
            node_circuit_map[this_circuit] = i
            return this_circuit, node_circuit_map

        c, map = to_circuit(self)
        if options is None:
            new_options: Union[PrintOptions, PrintHtmlOptions] = PrintHtmlOptions(traversal=new_traversal())
        else:
            new_options = options.evolve()
        if print_data:
            new_options.commenters += [
                lambda c: map[c].str_samplers(sampled_inputs=sampled_inputs, datum_idx=datum_idx)
            ]
            if isinstance(new_options, PrintOptions):
                # comments are incompatible with bijective (round-trippable) printing
                new_options.bijection = False
        if color_by_data:
            new_options.colorer = lambda c: color_dataset(  # type: ignore
                sampled_inputs.datasets.get(map[c]),
                html=isinstance(new_options, PrintHtmlOptions),
            )
        if repr:
            assert isinstance(new_options, PrintOptions)
            return c.repr(new_options)
        else:
            c.print(new_options)

    def _sample_into(
        self,
        rng,
        source_ds,
        parent_ds,
        into: SampledInputs,
        recursive: bool = True,
    ):
        """Sample this node's dataset (and other-inputs dataset) into `into`, recursing to children."""
        # The one place that mutating SampledInputs is allowed
        ds = self.cond_sampler.sample_and_update_pool(parent_ds, source_ds, rng, into.sampler_pools)
        if not self.is_leaf():
            into.other_inputs_datasets[self] = self.other_inputs_sampler.sample_and_update_pool(
                ds, source_ds, rng, into.sampler_pools
            )
        into.datasets[self] = ds
        if recursive:
            for child in self.children:
                child._sample_into(rng, source_ds, ds, into)

    def sample(
        self,
        rng,
        source_ds,
        parent_ds,
        recursive=True,
    ) -> SampledInputs:
        """Sample datasets for this subtree; returns a fresh SampledInputs."""
        into = SampledInputs()
        self._sample_into(rng, source_ds, parent_ds, into, recursive=recursive)
        return into

    def str_samplers(self, sampled_inputs: SampledInputs, datum_idx=0) -> str:
        """Describe this node's samplers (and their sampled data, when present)."""
        cond_sampler_str = self.cond_sampler.pretty_str(sampled_inputs.datasets.get(self), datum_idx=datum_idx)
        samplers_str = f"cond_sampler={cond_sampler_str}"
        if not self.is_leaf():
            other_sampler_str = self.other_inputs_sampler.pretty_str(
                sampled_inputs.other_inputs_datasets.get(self), datum_idx=datum_idx
            )
            samplers_str += f", other_inputs_sampler={other_sampler_str}"
        return samplers_str

    def pretty_str(self, sampled_inputs: SampledInputs) -> str:
        return f"InterpNode(name={self.name}, {self.str_samplers(sampled_inputs)}, children={[c.name for c in self.children]})"

    def __str__(self):
        return self.pretty_str(SampledInputs())

    def __repr__(self):
        # Not the best repr; but this makes errors nicer for pyo stuff (which will repr rather than str)
        return str(self)
# Matcher that matches only the root of a circuit (terminates immediately on match).
corr_root_matcher = restrict(new_traversal(), term_if_matches=True)

class IllegalCorrespondenceError(Exception):
    """Raised when a Correspondence violates a structural requirement."""

    ...

# Callers may pass either a Dataset or an explicit set of input names.
DatasetOrInputNames = Union[Dataset, Set[str]]
def get_input_names(ds: DatasetOrInputNames) -> Set[str]:
    """Normalize a Dataset-or-set-of-names argument to a set of input names."""
    if isinstance(ds, Dataset):
        return ds.input_names
    return ds
def to_inputs(matcher: IterativeMatcher, ds: DatasetOrInputNames) -> IterativeMatcher:
    """Extend `matcher` so it continues down to the named model inputs."""
    names = get_input_names(ds)
    return matcher.chain(names)
class Correspondence:
    """
    Holds correspondences between interp graph and model.
    The parts of the model are pointed at by IterativeMatchers picking out subgraphs of the model (with a single source and sink).
    In theory land we imagine that our correspondences are surjective, but in practice this is painful to implement: you either
    need drastic circuit rewrites, or very many InterpNodes and entries in your correspondence. To make life easier, we allow
    correspondences to model "branches" picked out by matchers.
    corr: mapping from interpretation to branches in the model
    i_names: index of the same interp nodes by their (unique) name
    """

    corr: Dict[InterpNode, IterativeMatcher]
    i_names: Dict[str, InterpNode]
    def __init__(self):
        # corr maps interp nodes to matchers; i_names indexes the same nodes by name.
        self.corr = {}
        self.i_names = {}
def copy(self):
"""Shallow copy of a Correspondence. The InterpNodes and matchers are attached to a new Correspondence, but
not themselves copied."""
new = Correspondence()
new.corr = self.corr.copy() # type: ignore
new.i_names = self.i_names.copy()
return new
    def __len__(self):
        """Number of (interp node, matcher) pairs."""
        return len(self.corr)

    def __getitem__(self, i: InterpNode) -> IterativeMatcher:
        """Matcher associated with interp node `i` (KeyError if absent)."""
        return self.corr[i]

    def get_by_name(self, s: str) -> InterpNode:
        """Look up an interp node by its unique name (KeyError if absent)."""
        return self.i_names[s]
def add(self, i_node: InterpNode, m_node: IterativeMatcher):
if i_node in self.corr:
raise ValueError("Node already in correspondence!", i_node, m_node)
if i_node.name in self.i_names:
raise ValueError(
"Different node with same name already in correspondence!", self.i_names[i_node.name], i_node, m_node
)
self.corr[i_node] = m_node
self.i_names[i_node.name] = i_node
def replace(self, i_node: InterpNode, m_node: IterativeMatcher):
if i_node not in self.corr:
raise ValueError("Node not found in correspondence!", i_node, m_node)
assert i_node.name in self.i_names, (
"Node in corr but its name is not in names map, should never happen!",
i_node,
m_node,
)
self.corr[i_node] = m_node
self.i_names[i_node.name] = i_node
def get_root(self) -> InterpNode:
if len(self) == 0:
raise IllegalCorrespondenceError("Empty correspondence has no root!")
i_nodes = list(self.corr.keys())
# store parent counts and check that exactly one is 0
parent_counts = {i: 0 for i in i_nodes} # this doesn't implement hash or eq but we can just id it
for i_node in i_nodes:
for child in i_node.children:
parent_counts[child] += 1
roots = [i for i, count in parent_counts.items() if count == 0]
if len(roots) > 1:
raise IllegalCorrespondenceError("Found multiple roots!", roots)
if len(roots) == 0:
raise IllegalCorrespondenceError("Found no root, not a tree! (No node is an ancestor of all other nodes)")
return roots[0]
    def in_dfs_order(self) -> List[Tuple[InterpNode, IterativeMatcher]]:
        # (node, matcher) pairs, ordered by a walk from the root; the ordering
        # itself comes from InterpNode.get_nodes.
        if len(self) == 0:
            return []
        return [(i_node, self[i_node]) for i_node in self.get_root().get_nodes()]
    def in_dfs_order_with_parents(self) -> List[Tuple[Optional[InterpNode], InterpNode, IterativeMatcher]]:
        # Like in_dfs_order, but each entry also carries the node's parent
        # (None for the root).
        if len(self) == 0:
            return []
        root = self.get_root()
        root_w_parent: List[Tuple[Optional[InterpNode], InterpNode, IterativeMatcher]] = [(None, root, self[root])]
        return root_w_parent + [
            (i_node, i_child, self[i_child]) for (i_node, i_child) in root.get_descendants_with_parents()
        ]
def check_complete(self):
for i_node in self.corr.keys():
children = (
i_node.children
) # this used to be i_node.get_nodes (all descendents) but just .children is enough
for d in children:
if d not in self.corr:
raise IllegalCorrespondenceError(
"Node is a descendent of a node in the corr but is not in the corr itself",
d,
i_node,
)
    def check_well_defined(self, circuit: Circuit, ds: Optional[DatasetOrInputNames] = None):
        """
        Checks that each interp graph node matches exactly one circuit node.
        This may fail if e.g. your circuit has some "basically the same" nodes, like one with an extra 1 dim, but generally should pass.
        Includes checking the implicit input nodes exist (by extending matchers to the inputs).
        """
        for m_node in self.corr.values():
            try:
                # get_unique presumably raises RuntimeError when the match is
                # not exactly one node — TODO confirm against rust_circuit docs.
                m_node.get_unique(circuit)
            except RuntimeError:
                raise IllegalCorrespondenceError(
                    f"matcher {m_node} failed well defined check. This check is done in dfs order so all parents must have passed."
                )
            if ds is not None:
                # Also require that each matcher, extended down to the dataset
                # inputs, still matches something in the circuit.
                m_to_inputs_matcher = to_inputs(m_node, ds)
                if not m_to_inputs_matcher.are_any_found(circuit):
                    raise IllegalCorrespondenceError(
                        f"matcher extended to inputs {m_to_inputs_matcher} failed existence check. This check is done in dfs order so all parents must have passed."
                    )
    def check_injective_on_treeified(self, circuit: Circuit):
        """
        Checks that every circuit node is matched by at most one interp graph node.
        We expect this to be true after the circuit is tree-ified but not before
        (and maybe not after we replace the treeified inputs, as some of them may
        turn out to be identical).
        """
        if len(self) == 0:
            return
        all_matched = self.get_all_matched(circuit)
        # Any circuit node matched more than once is a collision.
        multiple_matches = [(c, i) for (c, i) in Counter(all_matched).items() if i > 1]
        if len(multiple_matches) != 0:
            raise IllegalCorrespondenceError(multiple_matches)
    def check_root_is_model(self):
        # The root interp node must map to corr_root_matcher, i.e. the trivial
        # matcher for the whole model.
        root_branch = self.corr[self.get_root()]
        if root_branch != corr_root_matcher:
            raise IllegalCorrespondenceError("the root of the model branch should be empty")
    def get_all_matched(self, circuit: Circuit) -> List[Circuit]:
        """Gets all circuit nodes matched by interp graph nodes."""
        # May contain duplicates when two matchers hit the same circuit node;
        # check_injective_on_treeified relies on that to detect collisions.
        return [c for m_node in self.corr.values() for c in m_node.get(circuit)]
    def check(self, circuit: Circuit, circuit_treeified=True):
        # Runs the structural checks. Note check_well_defined is not invoked
        # here — presumably callers run it separately when a dataset is
        # available; confirm before relying on check() alone.
        self.check_complete()
        self.check_root_is_model()
        if circuit_treeified:
            self.check_injective_on_treeified(circuit)
    def sample(self, source_ds, rng, ref_ds) -> SampledInputs:
        # Delegates to the root InterpNode; ref_ds acts as the parent dataset
        # for the root.
        return self.get_root().sample(source_ds=source_ds, parent_ds=ref_ds, rng=rng)
    def __repr__(self):
        # Repr of the full matcher map; can be long — intended for debugging.
        return f"{self.__class__.__name__}(corr={repr(self.corr)})" | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/causal_scrubbing/hypothesis.py | 0.93744 | 0.400691 | hypothesis.py | pypi |
from __future__ import annotations
from typing import Dict, Iterable, Mapping, Optional, Set, Type, TypeVar, Union
import attrs
import torch
from attr import frozen
import rust_circuit as rc
from ..py_utils import FrozenDict
def all_same_len(self: Dataset, attribute, arrs: Mapping[str, rc.Array]):
    """attrs validator: every Array in *arrs* must share the same first-axis
    length (the dataset "len"). Raises ValueError on the first mismatch."""
    assert len(arrs) >= 1
    expected = None
    for arr in arrs.values():
        n = arr.shape[0]
        if expected is None:
            expected = n
        elif n != expected:
            raise ValueError(
                f"All Arrays in a Dataset must have the same `len`, found instead at least two: {expected}, {n}"
            )
def arrs_to_map(arrs: Union[Iterable[rc.Array], Mapping[str, rc.Array]]) -> FrozenDict[str, rc.Array]:
    # attrs converter: normalize either a mapping or an iterable of Arrays into
    # an immutable name -> Array map.
    if isinstance(arrs, Mapping):
        # Keys must agree with each Array's own name.
        for name, a in arrs.items():
            assert name == a.name
        return FrozenDict(arrs)
    else:
        arrs_map: Dict[str, rc.Array] = {}
        for a in arrs:
            if a.name in arrs_map:
                # Identical duplicates are tolerated; conflicting ones are not.
                if not a == arrs_map[a.name]:
                    raise ValueError(f"We found two different Arrays with the same name: {a}, {arrs_map[a.name]}")
            arrs_map[a.name] = a
        return FrozenDict(arrs_map)
def names_in_arrs(self: Dataset, attribute, input_names: Set[str]):
    # attrs validator: every declared input name must refer to an Array in
    # self.arrs (the failing name is the assertion message).
    for name in input_names:
        assert name in self.arrs, name
def frozenset_converter(xs: Set[str]) -> frozenset[str]:  # mypy has issues with just using frozenset as converter
    # Trivial wrapper so the attrs converter has an explicit, typed signature.
    return frozenset(xs)
# TypeVar so classmethods like Dataset.unwrap preserve the subclass type.
TDataset = TypeVar("TDataset", bound="Dataset")
@frozen(hash=True)
class Dataset:
    """An immutable collection of named rc.Arrays that all share the same
    first-axis length (the "row" axis).

    Arrays are reachable via ``ds.arrs[name]`` or attribute access
    (``ds.arr_name``). ``input_names`` marks which arrays are inputs to the
    circuit that should be replaced when doing causal scrubbing; it defaults
    to all of them.
    """

    # Holds arrays look-up-able by name; you can provide any iterable and it will be turned into a map.
    # In particular no two can have the same name. Also, they can be accessed as `ds.arr_name`, unless
    # you name your array something silly like __len__
    arrs: Mapping[str, rc.Array] = attrs.field(validator=all_same_len, converter=arrs_to_map)
    # Which arrays (by name) are inputs to the circuit that should be replaced when doing causal scrubbing.
    # By default this is all of them.
    input_names: Set[str] = attrs.field(
        validator=names_in_arrs, factory=set, converter=frozenset_converter, kw_only=True
    )

    def __attrs_post_init__(self):
        # Empty input_names means "all arrays". object.__setattr__ is required
        # because the class is frozen.
        if not len(self.input_names):
            object.__setattr__(self, "input_names", frozenset(self.arrs.keys()))

    def __getattr__(self, __name: str) -> rc.Array:
        """Fall back to array lookup by name for unknown attributes."""
        try:
            return self.arrs[__name]
        except KeyError:
            raise AttributeError(__name)

    def __len__(self) -> int:
        # All arrays share the same first-axis length (enforced by the
        # all_same_len validator), so read it off any one of them without
        # materializing the whole values list.
        return next(iter(self.arrs.values())).shape[0]

    def __getitem__(self, idxs: rc.TorchAxisIndex):
        """Row-index every array; an int index keeps the row axis (len-1 slice)."""
        if isinstance(idxs, int):
            idxs = slice(idxs, idxs + 1)
        return attrs.evolve(self, arrs={name: rc.Array(inp.value[idxs], name) for name, inp in self.arrs.items()})

    def __str__(self) -> str:
        # Probably you want to overwrite this when subclassing so you get pretty prints!
        return str([f"<{a.name} {a.shape}>" for a in self.arrs.values()])

    def sample(self, count, rng: Optional[torch.Generator] = None):
        """Sample `count` rows uniformly with replacement, optionally seeded by `rng`."""
        idxs = torch.multinomial(
            torch.ones(size=(len(self),), dtype=torch.float32, device=(rng.device if rng else None)),
            num_samples=count,
            replacement=True,
            generator=rng,
        )
        return self[idxs]

    @classmethod
    def unwrap(cls: Type[TDataset], d: Dataset) -> TDataset:
        """Assert that `d` is an instance of this (sub)class and return it."""
        assert isinstance(d, cls), (type(d), cls)
        return d
def color_dataset(ds, html=False):
    # Map a dataset (or None) to a stable color: an HSL string for HTML output,
    # otherwise a small integer color code.
    def color_string_from_int(i: int):
        # Spread hashes over hues in steps of 10 degrees.
        hue = (i % (255 / 10)) * 10
        return f"hsl({hue}, 90%, 60%)"
    if html:
        return "darkgrey" if ds is None else color_string_from_int(hash(ds))
    else:
        # pretty made up, would be nice if printer had a "color by feature"
        return 6 if ds is None else 2 + hash(ds) % 6 | /rust_circuit-0.4.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl/rust_circuit/causal_scrubbing/dataset.py | 0.877948 | 0.557243 | dataset.py | pypi |
# Human-readable names for the 12 standard WASM section ids (tuple index == id).
section_types = (
    'custom', #0
    'type', #1
    'import', #2
    'function', #3
    'table', #4
    'memory', #5
    'global', #6
    'export', #7
    'start', #8
    'element', #9
    'code', #10
    'data' #11
)
# Section id constants mirroring section_types above.
custom_section = 0
type_section = 1
import_section = 2
function_section = 3
table_section = 4
memory_section = 5
global_section = 6
export_section = 7
start_section = 8
element_section = 9
code_section = 10
data_section = 11
# WASM value-type codes for the integer types.
int_type = (
    127, #i32
    126, #i64
)
# external_kind
# A single-byte unsigned integer indicating the kind of definition being imported or defined:
# 0 indicating a Function import or definition
# 1 indicating a Table import or definition
# 2 indicating a Memory import or definition
# 3 indicating a Global import or definition
external_kinds = (
    'Function',
    'Table',
    'Memory',
    'Global'
)
# Whitelist of host ("env") function names a contract WASM may import;
# anything else makes check_import_section raise.
allowed_functions = [
    "__ashrti3",
    "__lshlti3",
    "__lshrti3",
    "__ashlti3",
    "__divti3",
    "__udivti3",
    "__modti3",
    "__umodti3",
    "__multi3",
    "__addtf3",
    "__subtf3",
    "__multf3",
    "__divtf3",
    "__eqtf2",
    "__netf2",
    "__getf2",
    "__gttf2",
    "__lttf2",
    "__letf2",
    "__cmptf2",
    "__unordtf2",
    "__negtf2",
    "__floatsitf",
    "__floatunsitf",
    "__floatditf",
    "__floatunditf",
    "__floattidf",
    "__floatuntidf",
    "__floatsidf",
    "__extendsftf2",
    "__extenddftf2",
    "__fixtfti",
    "__fixtfdi",
    "__fixtfsi",
    "__fixunstfti",
    "__fixunstfdi",
    "__fixunstfsi",
    "__fixsfti",
    "__fixdfti",
    "__fixunssfti",
    "__fixunsdfti",
    "__trunctfdf2",
    "__trunctfsf2",
    "is_feature_active",
    "activate_feature",
    "get_resource_limits",
    "set_resource_limits",
    "set_proposed_producers",
    "get_blockchain_parameters_packed",
    "set_blockchain_parameters_packed",
    "is_privileged",
    "set_privileged",
    "get_active_producers",
    "db_idx64_store",
    "db_idx64_remove",
    "db_idx64_update",
    "db_idx64_find_primary",
    "db_idx64_find_secondary",
    "db_idx64_lowerbound",
    "db_idx64_upperbound",
    "db_idx64_end",
    "db_idx64_next",
    "db_idx64_previous",
    "db_idx128_store",
    "db_idx128_remove",
    "db_idx128_update",
    "db_idx128_find_primary",
    "db_idx128_find_secondary",
    "db_idx128_lowerbound",
    "db_idx128_upperbound",
    "db_idx128_end",
    "db_idx128_next",
    "db_idx128_previous",
    "db_idx256_store",
    "db_idx256_remove",
    "db_idx256_update",
    "db_idx256_find_primary",
    "db_idx256_find_secondary",
    "db_idx256_lowerbound",
    "db_idx256_upperbound",
    "db_idx256_end",
    "db_idx256_next",
    "db_idx256_previous",
    "db_idx_double_store",
    "db_idx_double_remove",
    "db_idx_double_update",
    "db_idx_double_find_primary",
    "db_idx_double_find_secondary",
    "db_idx_double_lowerbound",
    "db_idx_double_upperbound",
    "db_idx_double_end",
    "db_idx_double_next",
    "db_idx_double_previous",
    "db_idx_long_double_store",
    "db_idx_long_double_remove",
    "db_idx_long_double_update",
    "db_idx_long_double_find_primary",
    "db_idx_long_double_find_secondary",
    "db_idx_long_double_lowerbound",
    "db_idx_long_double_upperbound",
    "db_idx_long_double_end",
    "db_idx_long_double_next",
    "db_idx_long_double_previous",
    "db_store_i64",
    "db_update_i64",
    "db_remove_i64",
    "db_get_i64",
    "db_next_i64",
    "db_previous_i64",
    "db_find_i64",
    "db_lowerbound_i64",
    "db_upperbound_i64",
    "db_end_i64",
    "assert_recover_key",
    "recover_key",
    "assert_sha256",
    "assert_sha1",
    "assert_sha512",
    "assert_ripemd160",
    "sha1",
    "sha256",
    "sha512",
    "ripemd160",
    "check_transaction_authorization",
    "check_permission_authorization",
    "get_permission_last_used",
    "get_account_creation_time",
    "current_time",
    "publication_time",
    "abort",
    "eosio_assert",
    "eosio_assert_message",
    "eosio_assert_code",
    "eosio_exit",
    "read_action_data",
    "action_data_size",
    "current_receiver",
    "require_recipient",
    "require_auth",
    "require_auth2",
    "has_auth",
    "is_account",
    "prints",
    "prints_l",
    "printi",
    "printui",
    "printi128",
    "printui128",
    "printsf",
    "printdf",
    "printqf",
    "printn",
    "printhex",
    "read_transaction",
    "transaction_size",
    "expiration",
    "tapos_block_prefix",
    "tapos_block_num",
    "get_action",
    "send_inline",
    "send_context_free_inline",
    "send_deferred",
    "cancel_deferred",
    "get_context_free_data",
    "memcpy",
    "memmove",
    "memcmp",
    "memset",
    "set_action_return_value",
    "get_block_num",
    "sha3",
    "blake2_f",
    "k1_recover",
    "alt_bn128_add",
    "alt_bn128_mul",
    "alt_bn128_pair",
    "mod_exp",
    "get_code_hash",
]
class WasmReader(object):
    """Minimal sequential reader over raw WASM binary bytes.

    Keeps a cursor ``idx`` into ``raw``; every read advances the cursor.
    Fix: ``read_bytes``/``read_byte`` previously returned truncated data (or
    raised a bare IndexError) when reading past the end; they now raise
    ``Exception("malformed")`` like ``spec_binary_byte`` already did, so
    corrupt/truncated files fail loudly and consistently.
    """

    def __init__(self, raw):
        self.raw = raw
        self.idx = 0

    def read_bytes(self, size):
        """Read exactly `size` bytes, raising "malformed" if too few remain."""
        if self.idx + size > len(self.raw):
            raise Exception("malformed")
        ret = self.raw[self.idx:self.idx + size]
        self.idx += size
        return ret

    def read_byte(self):
        """Read a single byte as an int, raising "malformed" at end of data."""
        if self.idx >= len(self.raw):
            raise Exception("malformed")
        ret = self.raw[self.idx]
        self.idx += 1
        return ret

    def spec_binary_byte(self):
        # Same contract as read_byte; kept for spec-named call sites.
        if len(self.raw) <= self.idx:
            raise Exception("malformed")
        ret = self.raw[self.idx]
        self.idx += 1
        return ret

    #unsigned
    def spec_binary_uN(self, N):
        """Decode an unsigned LEB128 integer of at most N bits."""
        n = self.spec_binary_byte()
        if n < 2**7 and n < 2**N:
            return n
        elif n >= 2**7 and N > 7:
            # Continuation bit set: low 7 bits here, higher bits follow.
            m = self.spec_binary_uN(N - 7)
            return (2**7) * m + (n - 2**7)
        else:
            raise Exception("malformed")

    def read_u7(self):
        return self.spec_binary_uN(7)

    def read_u32(self):
        return self.spec_binary_uN(32)

    def end(self):
        """True when the cursor has consumed all bytes."""
        return self.idx == len(self.raw)

    def remains(self):
        """The not-yet-consumed suffix (does not advance the cursor)."""
        return self.raw[self.idx:]

    def read_uint32(self):
        # 4 raw bytes, returned unparsed.
        return self.read_bytes(4)

    def read_uint64(self):
        # 8 raw bytes, returned unparsed.
        return self.read_bytes(8)
def check_import_section(wasm_file):
    # Validate that a WASM module only imports allowed 'env' functions; raises
    # Exception listing every disallowed import.
    with open(wasm_file, 'rb') as f:
        raw = f.read()
        r = WasmReader(raw)
        # 4-byte magic and 4-byte version header (values are not verified here).
        magic = r.read_bytes(4)
        version = r.read_bytes(4)
        not_allowed_functions = []
        while not r.end():
            id = r.read_byte()
            payload_len = r.read_u32()
            payload = r.read_bytes(payload_len)
            pr = WasmReader(payload)
            # NOTE(review): a count is read for every section kind, but only
            # some sections start with a count (custom sections start with a
            # name) — confirm this is safe for all inputs seen in practice.
            count = pr.read_u32()
            if id == import_section:
                for i in range(count):
                    module_len = pr.read_u32()
                    module_str = pr.read_bytes(module_len)
                    module_str = module_str.decode()
                    field_len = pr.read_u32()
                    field_str = pr.read_bytes(field_len)
                    field_str = field_str.decode()
                    kind = pr.read_u7()
                    # Only function imports (external_kind 0) are accepted.
                    assert kind == 0
                    type_index = pr.read_u32()
                    if module_str != 'env' or not field_str in allowed_functions:
                        not_allowed_functions.append(f'{module_str}.{field_str}')
        if not_allowed_functions:
            raise Exception(f'imported function(s) not allowed: {not_allowed_functions}') | /rust_contracts_builder-0.2.9-py3-none-any.whl/rust_contracts_builder/wasm_checker.py | 0.431345 | 0.150871 | wasm_checker.py | pypi |
import featuretools as ft
import json
from featuretools.primitives.utils import (
get_aggregation_primitives,
get_transform_primitives,
)
from .rust_dfs import Primitive, Feature
import numpy as np
from datetime import datetime as dt
# Normalizes semantic-tag spellings (lowercase / underscore variants) to the
# canonical names expected by the Rust side.
tag_map = {
    "numeric": "Numeric",
    "category": "Categorical",
    "categorical": "Categorical",
    "time_index": "TimeIndex",
    "timeindex": "TimeIndex",
    "foreign_key": "ForeignKey",
    "foreignkey": "ForeignKey",
    "date_of_birth": "DateOfBirth",
    "dateofbirth": "DateOfBirth",
    "index": "Index",
}
def convert_primitive(ft_primitive):
    # Convert a featuretools primitive class to a rust_dfs Primitive.
    # Returns None when serialize_primitive declines (the PrimitiveBase class).
    fp_dict = serialize_primitive(ft_primitive(), "transform")
    if fp_dict is None:
        return None
    # Flatten each input-type set to (logical_type, semantic_tag) pairs.
    input_types = []
    for y in fp_dict["input_types"]:
        input_types.append([(x["logical_type"], x["semantic_tag"]) for x in y])
    return Primitive(
        fp_dict["type"],
        fp_dict["module"],
        "transform",
        fp_dict["commutative"],
        input_types,
        (
            fp_dict["return_type"]["logical_type"],
            fp_dict["return_type"]["semantic_tag"],
        ),
    )
def convert_primitives(ft_primitives):
    """Convert many featuretools primitive classes to rust_dfs Primitives,
    dropping any for which convert_primitive returns None."""
    return [prim for prim in map(convert_primitive, ft_primitives) if prim]
def dataframe_to_features(df):
    # Create a rust_dfs Feature for each column of a woodwork-initialized
    # DataFrame, using "unknown" as the placeholder for missing type info.
    features = []
    for name, col in df.ww.schema.columns.items():
        col_dict = col_to_dict(col, "unknown")
        f = Feature(
            name,
            col_dict["logical_type"],
            col_dict["semantic_tag"],
        )
        features.append(f)
    return features
def convert_features(f_features):
    # Convert featuretools features to rust_dfs Features, ensuring every base
    # feature is converted before the features that depend on it (work-queue
    # with re-queuing).
    # NOTE(review): assumes the dependency graph is acyclic — a cycle would
    # loop forever.
    f_features = f_features.copy()
    all_features = {}
    while f_features:
        f = f_features.pop(0)
        if len(f.base_features) == 0:
            all_features[f._name] = convert_feature(f)
        elif all([x._name in all_features for x in f.base_features]):
            base_features = [all_features[x._name] for x in f.base_features]
            all_features[f._name] = convert_feature(f, base_features)
        else:
            # Missing dependencies: queue them, then retry this feature later.
            for bf in f.base_features:
                if bf._name not in all_features:
                    f_features.append(bf)
            f_features.append(f)
    return all_features
def get_primitive_return_type(primitive):
    """Return a primitive's declared return type; when it declares none, fall
    back to its first input type (unwrapping one level of list nesting)."""
    rt = primitive.return_type
    if rt is None:
        rt = primitive.input_types[0]
        if isinstance(rt, list):
            rt = rt[0]
    return rt
def convert_feature(f_feature, base_features=None):
    # Convert one featuretools feature (plus its already-converted base
    # features) to a rust_dfs Feature.
    name = f_feature._name
    primitive = type(f_feature.primitive)
    r_primitive = convert_primitive(primitive)
    # Prefer the feature's own return_type when present; otherwise derive it
    # from the primitive's declared types.
    if hasattr(f_feature, "return_type"):
        col_dict = col_to_dict(f_feature.return_type, "unknown")
    else:
        col_dict = col_to_dict(get_primitive_return_type(primitive), "unknown")
    return Feature(
        name,
        col_dict["logical_type"] or "Any",
        col_dict["semantic_tag"] or "Any",
        base_features,
        r_primitive,
    )
def col_to_dict(col, unknown_type="any"):
    # Reduce a woodwork column schema to {"logical_type", "semantic_tag"},
    # substituting unknown_type when information is missing.
    if col.logical_type:
        lt_name = type(col.logical_type).__name__
    else:
        lt_name = unknown_type
    semantic_tags = list(col.semantic_tags)
    if len(semantic_tags):
        # todo: handle multiple semantic tags
        # NOTE(review): tags not present in tag_map raise KeyError here.
        semantic_tags = tag_map[semantic_tags[0].lower()]
    else:
        semantic_tags = unknown_type
    return {
        "logical_type": lt_name,
        "semantic_tag": semantic_tags,
    }
def get_input_types(input_types):
    """Normalize a primitive's input type declaration to a list of lists of
    column dicts (a flat declaration becomes a single set)."""
    if not isinstance(input_types[0], list):
        input_types = [input_types]
    return [[col_to_dict(t) for t in type_set] for type_set in input_types]
def serialize_primitive(primitive, function_type, unknown_type="any"):
    """Build a dictionary with the data necessary to construct the given primitive.

    Returns None for the PrimitiveBase class itself (nothing concrete to build).
    Fix: serialize_all_primitives calls this with a third argument ("any"),
    which previously raised TypeError — `unknown_type` is now an accepted,
    backward-compatible parameter forwarded to col_to_dict for the return type.
    """
    args_dict = {name: val for name, val in primitive.get_arguments()}
    cls = type(primitive)
    # Exact-type check on purpose: concrete subclasses of PrimitiveBase are fine.
    if cls is ft.primitives.base.PrimitiveBase:
        return None
    return_type = get_primitive_return_type(primitive)
    return {
        "type": cls.__name__,
        "module": cls.__module__,
        "arguments": args_dict,
        "input_types": get_input_types(primitive.input_types),
        "return_type": col_to_dict(return_type, unknown_type),
        "function_type": function_type,
        "commutative": primitive.commutative,
    }
# Representative sample values per woodwork logical type, used to populate
# fake dataframes.
logical_type_mapping = {
    "Boolean": [True, False],
    "BooleanNullable": [True, False, np.nan],
    "Datetime": [dt(2020, 1, 1, 12, 0, 0), dt(2020, 6, 1, 12, 0, 0)],
    "EmailAddress": ["john.smith@example.com", "sally.jonex@example.com"],
    "LatLong": [(1, 2), (3, 4)],
    "NaturalLanguage": ["This is sentence 1", "This is sentence 2"],
    "Ordinal": [1, 2, 3],
    "URL": ["https://www.example.com", "https://www.example2.com"],
}
# Representative sample values per semantic tag, used to populate fake
# dataframes.
semantic_tag_mapping = {
    "Categorical": ["A", "B", "C"],
    "DateOfBirth": [dt(2020, 1, 1, 12, 0, 0), dt(2020, 6, 1, 12, 0, 0)],
    "ForeignKey": [1, 2],
    "Numeric": [1.2, 2.3, 3.4],
    "TimeIndex": [dt(2020, 1, 1, 12, 0, 0), dt(2020, 6, 1, 12, 0, 0)],
}
def df_to_es(df):
    # Wrap a single DataFrame in a featuretools EntitySet keyed on "idx".
    es = ft.EntitySet(id="nums")
    es.add_dataframe(df, "nums", index="idx")
    return es
def serialize_feature(f):
    """Serialize a feature into a JSON-ready dict, naming it after its
    generating primitive plus the joined names of its base features."""
    primitive_cls = type(f.primitive)
    base_names = [bf._name for bf in f.base_features]
    return {
        "name": f"{primitive_cls.__name__}_{'_'.join(base_names)}",
        "base_features": base_names,
        "generating_primitive": primitive_cls.__name__,
        "commutative": primitive_cls.commutative,
    }
def save_features(features, fname="all_features.json"):
    """Serialize *features* and write them to *fname* as JSON.

    Fix: the file handle was previously opened inline and never closed
    (``json.dump(out, open(fname, "w"))``); a context manager now guarantees
    the handle is flushed and released.
    """
    serialized = [serialize_feature(f) for f in features]
    with open(fname, "w") as fp:
        json.dump(serialized, fp)
def serialize_all_primitives():
    """Serialize every registered featuretools transform and aggregation
    primitive to primitives.json.

    NOTE(review): serialize_primitive is defined above with two parameters but
    is called here with a third argument ("any") — presumably an unknown_type
    placeholder; confirm the intended signature.
    NOTE(review): the file handle passed to json.dump is never closed.
    """
    transform_prim_dict = get_transform_primitives()
    aggregation_prim_dict = get_aggregation_primitives()
    trans_prims = list(transform_prim_dict.values())
    agg_prims = list(aggregation_prim_dict.values())
    all_prims = []
    for p in trans_prims:
        all_prims.append(serialize_primitive(p(), "transform", "any"))
    for p in agg_prims:
        all_prims.append(serialize_primitive(p(), "aggregation", "any"))
    json.dump(all_prims, open("primitives.json", "w")) | /rust_dfs-0.3.0.tar.gz/rust_dfs-0.3.0/rust_dfs/utils.py | 0.408159 | 0.379896 | utils.py | pypi |
import random
from .utils import semantic_tag_mapping, logical_type_mapping
from woodwork.logical_types import Ordinal
import pandas as pd
import polars as pl
from datetime import datetime as dt
def flatten(xss):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for xs in xss:
        flat.extend(xs)
    return flat
def randomize(v):
    """Return a shuffled copy of *v* without mutating it. The RNG is re-seeded
    with a fixed seed on every call, so the result is deterministic for a
    given input."""
    random.seed(10)
    shuffled = list(v)
    random.shuffle(shuffled)
    return shuffled
def gen_data_dict(value, nrows: int, start_idx=0, n_features=5) -> dict:
    """Build n_features fake columns named F_<start_idx + i>, each holding
    nrows entries derived from *value* (a list value is tiled then flattened
    before shuffling and truncating to nrows)."""
    values = [value] * nrows
    if isinstance(value, list):
        values = flatten(values)
    data = {}
    for offset in range(n_features):
        data[f"F_{start_idx + offset}"] = randomize(values)[:nrows]
    return data
def generate_pandas_fake_dataframe(
    n_rows=10, col_defs=[("Numeric", 2)], time_index=False
):
    """Build a woodwork-initialized pandas DataFrame of fake columns.

    col_defs is a list of (type_name, n_cols) pairs, where type_name is a
    logical type, a semantic tag, or a literal value to repeat.
    NOTE(review): the mutable default for col_defs is only iterated, never
    mutated, so it is safe here.
    """
    def gen_type_dict(lt, st, start_idx=0, n_features=5):
        # Build the per-column logical-type / semantic-tag maps for ww.init.
        lt_dict = {}
        st_dict = {}
        for n in range(n_features):
            name = f"F_{start_idx + n}"
            if lt:
                lt_dict[name] = lt
            if st:
                st_dict[name] = st
        return lt_dict, st_dict
    dataframes = [pd.DataFrame({"idx": range(n_rows)})]
    lt_dict = {}
    st_dict = {}
    starting_col = 0
    for typ, n_cols in col_defs:
        logical_type = None
        semantic_tag = None
        if typ in logical_type_mapping:
            logical_type = typ
            values = logical_type_mapping[typ]
            if logical_type == "Ordinal":
                # Ordinal needs its category order attached.
                logical_type = Ordinal(order=values)
        elif typ in semantic_tag_mapping:
            semantic_tag = typ.lower()
            values = semantic_tag_mapping[typ]
        else:
            # Not a known type name: treat typ itself as the literal cell value.
            values = typ
        df_tmp = pd.DataFrame(gen_data_dict(values, n_rows, starting_col, n_cols))
        dataframes.append(df_tmp)
        lt_dict_tmp, st_dict_tmp = gen_type_dict(
            logical_type, semantic_tag, starting_col, n_cols
        )
        lt_dict.update(lt_dict_tmp)
        st_dict.update(st_dict_tmp)
        starting_col += n_cols
    df = pd.concat(dataframes, axis=1)
    other_kwargs = {}
    if time_index:
        # Optional datetime column ending at a fixed timestamp.
        df["t_idx"] = pd.date_range(end=dt(2020, 1, 1, 12, 0, 0), periods=n_rows)
        lt_dict["t_idx"] = "Datetime"
        other_kwargs["time_index"] = "t_idx"
    df.ww.init(
        name="nums",
        index="idx",
        logical_types=lt_dict,
        semantic_tags=st_dict,
        **other_kwargs,
    )
    return df
def generate_polars_fake_dataframe(n_rows=10, col_defs=[("Numeric", 2)]):
    """Build a polars DataFrame of fake columns; col_defs works as in the
    pandas variant, but without woodwork typing or a time index.
    NOTE(review): the mutable default for col_defs is only iterated, never
    mutated, so it is safe here.
    """
    data_dict = {"idx": range(n_rows)}
    starting_col = 0
    for typ, n_cols in col_defs:
        if typ in logical_type_mapping:
            values = logical_type_mapping[typ]
        elif typ in semantic_tag_mapping:
            values = semantic_tag_mapping[typ]
        else:
            # Unknown type name: repeat typ itself as the literal cell value.
            values = typ
        ddict_tmp = gen_data_dict(values, n_rows, starting_col, n_cols)
        data_dict.update(ddict_tmp)
        starting_col += n_cols
    df = pl.DataFrame(data_dict)
    return df | /rust_dfs-0.3.0.tar.gz/rust_dfs-0.3.0/rust_dfs/generate_fake_dataframe.py | 0.406273 | 0.422445 | generate_fake_dataframe.py | pypi |
# How to compile DLL in Rust and import it in Python. Useful hints.
Here I am providing a number of examples to compile [DLL](https://en.wikipedia.org/wiki/Dynamic-link_library) in Rust. In order to check the final DLL I am using Python, although DLL standard is compatible with almost all the widely known programming languages. The examples include plain functions, input and output arrays, structures and OOP-like approach. I also attached a benchmark. In the end, I am describing a way to create a Python package powered by Rust, that is ready to distribute on [PyPI](https://pypi.org/). Source code of the project is available on https://github.com/fomalhaut88/rust-dll-example.
## How to create a DLL project in Rust
1. Create a project as a library (using the flag `--lib`): `cargo new rust-dll-example --lib`
2. Add the following section in `Cargo.toml` to specify the type of the library:
```
[lib]
crate-type = ["cdylib"]
```
3. Add `#[no_mangle]` and `extern` to the exporting functions like this:
```rust
#[no_mangle]
pub extern fn add(left: usize, right: usize) -> usize {
left + right
}
```
4. Build the project: `cargo build --release`
After that the DLL file will appear by path `./target/release/rust_dll_example.dll` if you are using Windows or something similar on different platforms (usually `.so` for Linux and `.dylib` for Mac OS X).
It is a good practice to cover your functions with standard Rust tests and benchmarks, so you can control the correctness and the performance.
### Plain functions
Here are two plain functions written in Rust:
```rust
//! An addition of two unsigned integer numbers.
#[no_mangle]
pub extern fn add(left: usize, right: usize) -> usize {
left + right
}
//! Square of a float value.
#[no_mangle]
pub extern fn sqr(x: f64) -> f64 {
x * x
}
```
There are two differences from normal functions: the keyword `extern`, so the function is linked to the DLL interface, and the attribute `no_mangle`, which is needed to disable the standard name mangling at the compilation stage (read more about it here: https://doc.rust-lang.org/reference/abi.html#the-no_mangle-attribute).
To call this functions in Python, there is a following code:
```python
import ctypes
dll = ctypes.CDLL("./target/release/rust_dll_example.dll")
# Test add
assert dll.add(2, 3) == 5
# Test sqr
dll.sqr.restype = ctypes.c_double
dll.sqr.argtypes = [ctypes.c_double]
assert dll.sqr(6.0) == 36.0
```
Notice, that for `sqr` function we had to specify input and output types explicitly before the call, so Python knows how to interpret the data. It is recommended to avoid skipping types specification as it is done for `add` function, despite of usually integer data types are set by default. Do you remember the phrase `Explicit is better than implicit.` from `The Zen of Python`? (The full text of it can always be outputed by `import this`)
### Passing arrays
There are two ways to handle arrays: by reference (normally for fixed-size arrays) and by pointer (if the size is unknown or can change from call to call), and both are supported in DLLs.
```rust
//! Sum of elements of an array given by pointer and the size.
#[no_mangle]
pub extern fn array_sum(size: usize, arr: *const f64) -> f64 {
let mut res = 0.0;
for idx in 0..size {
unsafe {
res += *arr.offset(idx as isize);
}
}
res
}
//! Fill the given array with a float value.
#[no_mangle]
pub extern fn array_set(size: usize, arr: *mut f64, val: f64) {
for idx in 0..size {
unsafe {
*arr.offset(idx as isize) = val;
}
}
}
//! Set elements of a fixed size array to zeros.
#[no_mangle]
pub extern fn array3_zero(arr: &mut [f64; 3]) {
for idx in 0..arr.len() {
arr[idx] = 0.0;
}
}
//! Concatenate two arrays.
#[no_mangle]
pub extern fn array_concat(size1: usize, arr1: *const f64,
size2: usize, arr2: *const f64) -> *const f64 {
let mut res = Vec::with_capacity(size1 + size2);
res.resize(size1 + size2, 0.0);
unsafe {
ptr::copy(arr1, res.as_mut_ptr(), size1);
ptr::copy(arr2, res.as_mut_ptr().add(size1), size2);
}
Box::new(res).as_ptr()
}
//! Return fixed size array filled with the given value.
#[no_mangle]
pub extern fn array5_fill(val: f64) -> Box<[f64; 5]> {
Box::new([val; 5])
}
```
Accessing to the elements of an array passed by pointer is an unsafe operation for Rust, so it is reflected by the `unsafe` sections. If we return the array as the result, we can do it by reference (as it is in `array_concat`) or as a boxed fixed size array.
In order by call these functions from Python, we can use following commands:
```python
# Create array
arr_type = (ctypes.c_double * 5)
arr = arr_type(*[1.0, 2.0, 3.0, 4.0, 5.0])
# Test array_sum
dll.array_sum.restype = ctypes.c_double
dll.sqr.argtypes = [ctypes.c_uint64, arr_type]
assert dll.array_sum(5, arr) == 15.0
# Test array_set
dll.array_set.argtypes = [ctypes.c_uint64, arr_type, ctypes.c_double]
dll.array_set(5, arr, ctypes.c_double(3.0))
assert list(arr) == [3.0] * 5
# Test array3_zero
dll.array3_zero(arr)
assert list(arr) == [0.0, 0.0, 0.0, 3.0, 3.0]
# Test array_concat
dll.array_concat.argtypes = [ctypes.c_uint64, ctypes.c_double * 2,
ctypes.c_uint64, ctypes.c_double * 3]
dll.array_concat.restype = ctypes.POINTER(ctypes.c_double * 5)
arr1 = (ctypes.c_double * 2)(*[1.0, 2.0])
arr2 = (ctypes.c_double * 3)(*[3.0, 4.0, 5.0])
res = dll.array_concat(2, arr1, 3, arr2)
assert list(res.contents) == [1.0, 2.0, 3.0, 4.0, 5.0]
# Test array5_fill
dll.array5_fill.argtypes = [ctypes.c_double]
dll.array5_fill.restype = ctypes.POINTER(ctypes.c_double * 5)
arr = dll.array5_fill(2.5)
assert list(arr.contents) == [2.5] * 5
```
What is interesting there? First of all, we define array data type as a product of the type of element and the size. And after that we must convert Python list to the understandable by Rust format, because Python lists are totally not the same as C-compatible arrays. Also there is no difference from Python side regarding passing by reference or by pointer.
In two last functions we return arrays as the results: as a pointer and as a fixed size array. In both cases, in Python we should set `restype` to `ctypes.POINTER`, so after that we can extract the values from `.contents` attribute.
Working with strings (or bytes) is similar, because string is represented as an array of chars (or `uint8`), so on the Rust side they should have the types `*u8` or `[u8; SIZE]`, and on the Python side it is `ctypes.c_char * SIZE`.
### C-compatible structures
We also are allowed to work with complex data types like structures as with base types, but there are some additions. Let us look at the Rust code that implements a few functions for complex numbers:
```rust
#[derive(Debug, PartialEq)]
#[repr(C)]
pub struct Complex {
pub x: f64,
pub y: f64,
}
#[no_mangle]
pub extern fn complex_len(z: Complex) -> f64 {
(z.x * z.x + z.y * z.y).sqrt()
}
#[no_mangle]
pub extern fn complex_conj(z: Complex) -> Complex {
Complex {
x: z.x,
y: -z.y,
}
}
impl Complex {
#[no_mangle]
#[export_name="complex_real"]
pub extern fn real(&self) -> f64 {
self.x
}
#[no_mangle]
#[export_name="complex_image"]
pub extern fn image(&self) -> f64 {
self.y
}
#[no_mangle]
#[export_name="complex_mul"]
pub extern fn mul(&mut self, val: f64) {
self.x *= val;
self.y *= val;
}
}
```
We can see the already familiar functions with the `extern` and `no_mangle` keywords, and passing variables is done as it would be for standard data types. I also added an OOP-like part (the `impl` section), so the functions inside can be considered methods of the structure. They also have `extern` and `no_mangle`, but there is additionally the `export_name` attribute that customizes the name of the linked function so it can be reached from outside.
Notice, that the structure has the attribute `repr(C)` that is important, otherwise external programs barely can understand the way the data is stored in a structure instance. This is because there are several standards to manage inner data using different alignment algorithms (you can read more about it in [Data structure alignment](https://en.wikipedia.org/wiki/Data_structure_alignment)).
As for Python code:
```python
# Complex struct
class Complex(ctypes.Structure):
_fields_ = [
('x', ctypes.c_double),
('y', ctypes.c_double),
]
def __repr__(self):
return f"Complex(x={self.x}, y={self.y})"
def __eq__(self, other):
return self.x == other.x and self.y == other.y
z = Complex(x=3.0, y=-4.0)
# Test complex_len
dll.complex_len.argtypes = [Complex]
dll.complex_len.restype = ctypes.c_double
assert dll.complex_len(z) == 5.0
# Test complex_conj
dll.complex_conj.argtypes = [Complex]
dll.complex_conj.restype = Complex
assert dll.complex_conj(z) == Complex(x=3.0, y=4.0)
# Test real
dll.complex_real.restype = ctypes.c_double
assert dll.complex_real(z) == 3.0
# Test image
dll.complex_image.restype = ctypes.c_double
assert dll.complex_image(z) == -4.0
# Test mul
dll.complex_mul.argtypes = [ctypes.c_void_p, ctypes.c_double]
dll.complex_mul(ctypes.byref(z), 2.0)
assert z == Complex(x=6.0, y=-8.0)
```
Fortunately, `ctypes` supports the opportunity to define structure data type in a very friendly way. So after that we can work with `Complex` as with an ordinary data type, passing it to `argtypes` and `restype` attributes. But there is a peculiarity if the structure instance is supposed to be mutable: in this case we have to add `ctypes.byref` as it is done in `complex_mul` function. The argument type is set to `ctypes.c_void_p` instead of `Complex`.
This approach is good but it does not cover all the needs. The main limitation is that only C-compatible data types are allowed in structure fields. If we are not going to share the inner data of the structure outside, there is a different approach shown in the next section.
### OOP example
Unlike in the previous example, here we can use any data types we want in our structure. But we cannot access the fields from outside, though we usually do not need to if we follow OOP, which assumes encapsulation.
```rust
struct Counter {
val: usize,
}
impl Counter {
#[no_mangle]
#[export_name="counter_new"]
pub extern fn new() -> Box<Self> {
Box::new(Self {
val: 0,
})
}
#[no_mangle]
#[export_name="counter_get"]
pub extern fn get(&self) -> usize {
self.val
}
#[no_mangle]
#[export_name="counter_increment"]
pub extern fn increment(&mut self) {
self.val += 1;
}
}
```
There is no `repr(C)` attribute and `new` returns boxed instance because we need to allocate the instance in the heap. The other methods are implemented similar to what we had for `Complex` structure.
```python
dll.counter_new.restype = ctypes.c_void_p
dll.counter_get.argtypes = [ctypes.c_void_p]
dll.counter_increment.argtypes = [ctypes.c_void_p]
class Counter:
_dll = dll
def __init__(self):
self._counter = self._dll.counter_new()
def get(self):
return self._dll.counter_get(self._counter)
def increment(self):
self._dll.counter_increment(self._counter)
# Create an instance
counter = Counter()
# Get value
assert counter.get() == 0
# Increment value
counter.increment()
# Get value
assert counter.get() == 1
```
On the Python level, we can define `Counter` as a class with the same methods, a class object keeps the Rust counter instance inside. Notice, that `self._counter` is a pointer that has the type `ctypes.c_void_p`.
## Benchmark example
Usually, developers combine Python and a low-level programming language to improve the performance. This is the most frequent reason why DLL is needed. So this article will not be complete if I do not include a real world case with a benchmark. For this purpose I implemented a sort of [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance) algorithm. I did not much care about the performance on Rust level, so it may work slower than the solutions you can find in the net, although the performance will be surely comparable.
```rust
use std::{cmp, slice};
/// Levenshtein distance algorithm
#[no_mangle]
#[export_name="levenshtein_distance"]
pub extern fn distance(n1: usize, p1: *const u8,
n2: usize, p2: *const u8) -> usize {
let s1 = unsafe { slice::from_raw_parts(p1, n1) };
let s2 = unsafe { slice::from_raw_parts(p2, n2) };
let lcs = lcs_solve(s1, s2);
cmp::max(n1, n2) - lcs
}
/// Calculate Longest Common Subsequence length
pub fn lcs_solve<T: PartialEq>(s1: &[T], s2: &[T]) -> usize {
let n1 = s1.len();
let n2 = s2.len();
let mut row = vec![0usize; n1];
for i2 in 0..n2 {
let mut prev = 0;
for i1 in 0..n1 {
let prev_new = row[i1];
if s1[i1] == s2[i2] {
row[i1] = prev + 1;
} else if i1 > 0 {
if row[i1] < row[i1 - 1] {
row[i1] = row[i1 - 1];
}
}
prev = prev_new;
}
}
row[n1 - 1]
}
```
Also I attached Rust benchmark:
```rust
#[cfg(test)]
mod tests {
use super::*;
use test::Bencher;
#[bench]
fn bench_distance(bencher: &mut Bencher) {
let s1 = "lewenstein";
let s2 = "levenshtein";
bencher.iter(|| {
distance(s1.len(), s1.as_ptr(),
s2.len(), s2.as_ptr());
});
}
}
```
Before running the benchmark, do not forget to switch to the [Nightly toolchain](https://doc.rust-lang.org/book/appendix-07-nightly-rust.html):
```
rustup default nightly
```
On Python level we have following script (file `rust_dll_example/__init__.py`):
```python
import ctypes
_dll = ctypes.CDLL("./target/release/rust_dll_example.dll")
_dll.levenshtein_distance.restype = ctypes.c_uint64
_dll.levenshtein_distance.argtypes = [
ctypes.c_uint64, ctypes.c_char_p, ctypes.c_uint64, ctypes.c_char_p
]
def levenshtein_distance(s1, s2):
b1 = s1.encode()
b2 = s2.encode()
return _dll.levenshtein_distance(len(b1), b1, len(b2), b2)
```
If you want to benchmark the result on Python level, you can always do it with the help of [timeit](https://docs.python.org/3/library/timeit.html) with the command:
```
python -m timeit -s 'from rust_dll_example import levenshtein_distance' 'levenshtein_distance("lewenstein", "levenshtein")'
```
## How to prepare a Python package
If we are going to distribute the Python library with compiled DLL inside, first, it is necessary to prepare `setup.py` having some non-standard lines. Here is an example:
```python
import os
import json
import subprocess as sp
from setuptools import find_packages, setup
def build_src():
sp.Popen(["cargo", "build", "--release"]).communicate()
def get_version():
if os.path.exists('version'):
with open('version') as f:
return f.read()
else:
out, _ = sp.Popen(["cargo", "metadata"], stdout=sp.PIPE).communicate()
metadata = json.loads(out.decode())
version = metadata['packages'][0]['version']
with open('version', 'w') as f:
f.write(version)
return version
def get_long_description():
with open('README.md') as f:
return f.read()
def get_dll_paths():
return [
'./target/release/rust_dll_example.dll',
]
# Build from source
build_src()
# Setup
setup(
name='rust-dll-example',
version=get_version(),
packages=find_packages(),
license="MIT",
description="",
long_description=get_long_description(),
long_description_content_type="text/markdown",
install_requires=[],
data_files=[('dlls', get_dll_paths()), ('', ['version'])],
)
```
Here we specified the path to DLL file as a data file for our package. Obviously, the old path `./target/release/rust_dll_example.dll` will not work. So we need to make some changes in `__init__.py` to import DLL correctly:
```python
...
_dll_path = os.path.join(sys.prefix, 'dlls', 'rust_dll_example.dll')
_dll = ctypes.CDLL(_dll_path)
...
```
Once `setup.py` is created and the path to DLL is corrected, we can build our Python package:
```
python setup.py sdist
```
After that the file `dist/rust-dll-example-0.1.1.tar.gz` will appear. It can be installed with `pip` and distributed on [PyPI](https://pypi.org/). In order to upload it on PyPI, run the following command:
```
twine upload dist/rust-dll-example-0.1.1.tar.gz
```
After that the project will be available on https://pypi.org/project/rust-dll-example/.
Notice! Since the DLL is compiled for a single platform (Windows 11 in my case), the library can only be installed successfully by users on the same platform; otherwise, the bundled DLL cannot be executed correctly. If you would like your library to be cross-platform, you need to build a DLL for each platform and implement a more sophisticated way to pick the right DLL on the Python level.
| /rust-dll-example-0.1.1.tar.gz/rust-dll-example-0.1.1/README.md | 0.751192 | 0.988177 | README.md | pypi |
from tempfile import TemporaryDirectory
from typing import Optional, Tuple, Any
from .rust_pyspec_glue import lib, ffi
class DB:
    """cffi wrapper around the Rust pyspec-glue state database.

    Holds two opaque FFI handles:

    * ``db`` -- the open database (``None`` once closed), and
    * ``tx`` -- the single in-flight mutable transaction (``None`` when
      no transaction is open).

    Byte-string arguments cross the FFI boundary as (pointer, length)
    pairs built with ``ffi.from_buffer``; results are copied out of
    Rust-owned memory with ``ffi.buffer`` before being returned.
    """

    def __init__(self, path: Optional[str]) -> None:
        """Open the database at *path*, or an in-memory one if *path* is None."""
        if path is None:
            self.db = lib.open_in_memory()
        else:
            # len(path) matches the encoded byte length only because the
            # encoding is ASCII (one byte per character); non-ASCII paths
            # raise in encode() before reaching the FFI call.
            self.db = lib.open(ffi.from_buffer(path.encode("ascii")), len(path))
        self.tx = None

    @staticmethod
    def delete(path: str) -> None:
        """Delete the on-disk database at *path*."""
        lib.delete_db(ffi.from_buffer(path.encode("ascii")), len(path))

    def __del__(self) -> None:
        # Finalizer: roll back any open transaction and drop the DB handle.
        # Calls the FFI directly (instead of rollback_mutable()/close())
        # so it never raises during interpreter teardown.
        if self.tx is not None:
            lib.rollback_mutable(self.tx)
            self.tx = None
        if self.db is not None:
            lib.drop_db(self.db)
            self.db = None

    def close(self) -> None:
        """Close the database, rolling back any transaction in progress.

        Raises:
            Exception: if the database has already been closed.
        """
        if self.tx is not None:
            self.rollback_mutable()
        if self.db is None:
            raise Exception("Already closed")
        lib.drop_db(self.db)
        self.db = None

    def begin_mutable(self) -> None:
        """Start a mutable transaction; only one may be open at a time.

        Raises:
            Exception: if a transaction is already in progress.
        """
        if self.tx is not None:
            raise Exception("Transaction already in progress")
        self.tx = lib.begin_mutable(self.db)

    def rollback_mutable(self) -> None:
        """Discard the open transaction.

        Raises:
            Exception: if no transaction is in progress.
        """
        if self.tx is None:
            raise Exception("No transaction in progress")
        lib.rollback_mutable(self.tx)
        self.tx = None

    def commit_mutable(self) -> None:
        """Commit the open transaction.

        Raises:
            Exception: if no transaction is in progress.
        """
        if self.tx is None:
            raise Exception("No transaction in progress")
        lib.commit_mutable(self.tx)
        self.tx = None

    def set_metadata(self, key: bytes, value: bytes) -> None:
        """Store *value* under *key* in the metadata table (requires an open transaction)."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        lib.set_metadata(
            self.tx, ffi.from_buffer(key), len(key), ffi.from_buffer(value), len(value)
        )

    def get_metadata(self, key: bytes) -> Optional[bytes]:
        """Return the metadata value stored under *key*, or ``None`` if absent."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        metadata = lib.get_metadata(self.tx, ffi.from_buffer(key), len(key))
        if not metadata.exists:
            return None
        else:
            # Copy out of the Rust-owned buffer so the bytes outlive it.
            return bytes(ffi.buffer(metadata.value, metadata.value_len))

    def state_root(self) -> bytes:
        """Return the 32-byte state root of the current transaction."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        return bytes(ffi.buffer(lib.state_root(self.tx), 32))

    def storage_root(self, address: bytes) -> bytes:
        """Return the 32-byte storage root for the 20-byte *address*."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        assert len(address) == 20
        return bytes(ffi.buffer(lib.storage_root(self.tx, ffi.from_buffer(address)), 32))

    def set_account(self, address: bytes, account: Optional[Any]) -> None:
        """Write an account (or delete it when *account* is ``None``).

        *account* is any object with ``nonce`` (int), ``balance`` (int,
        serialized as 32 big-endian bytes) and ``code`` (bytes) attributes.
        """
        if self.tx is None:
            raise Exception("No transaction in progress")
        assert len(address) == 20
        if account is None:
            lib.set_account_none(self.tx, ffi.from_buffer(address))
        else:
            lib.set_account_some(
                self.tx,
                ffi.from_buffer(address),
                account.nonce,
                ffi.from_buffer(account.balance.to_bytes(32, "big")),
                ffi.from_buffer(account.code),
                len(account.code),
            )

    def get_account_optional(self, address: bytes) -> Optional[Tuple[int, int, bytes]]:
        """Return ``(nonce, balance, code)`` for *address*, or ``None`` if absent."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        assert len(address) == 20
        account = lib.get_account_optional(self.tx, ffi.from_buffer(address))
        if not account.exists:
            return None
        else:
            return (
                account.nonce,
                int.from_bytes(ffi.buffer(account.balance, 32), "big"),
                bytes(ffi.buffer(account.code, account.code_len)),
            )

    def set_storage(self, address: bytes, key: bytes, value: int) -> None:
        """Write a storage slot; *value* is serialized as 32 big-endian bytes."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        assert len(address) == 20
        assert len(key) == 32
        lib.set_storage(
            self.tx,
            ffi.from_buffer(address),
            ffi.from_buffer(key),
            ffi.from_buffer(value.to_bytes(32, "big")),
        )

    def get_storage(self, address: bytes, key: bytes) -> int:
        """Read a storage slot, decoding the 32-byte big-endian value to int."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        assert len(address) == 20
        assert len(key) == 32
        return int.from_bytes(
            ffi.buffer(
                lib.get_storage(
                    self.tx, ffi.from_buffer(address), ffi.from_buffer(key)
                ),
                32,
            ),
            "big",
        )

    def destroy_storage(self, address: bytes) -> None:
        """Delete all storage for *address*.

        NOTE(review): unlike the other address-taking methods, this one
        does not assert ``len(address) == 20`` -- confirm whether the
        Rust side validates the length.
        """
        if self.tx is None:
            raise Exception("No transaction in progress")
        lib.destroy_storage(self.tx, ffi.from_buffer(address))

    def debug_dump(self) -> None:
        """Ask the Rust side to dump its state for debugging."""
        if self.tx is None:
            raise Exception("No transaction in progress")
        lib.debug_dump(self.tx)
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
from esu.firewall_template_rule import FirewallTemplateRule
class FirewallTemplate(BaseAPI):
    """
    A firewall template.

    Args:
        id (str): Firewall template identifier
        name (str): Firewall template name
        vdc (object): :class:`esu.Vdc` instance the template belongs to
        description (str): Firewall template description

    .. note:: NOTE(review): the original documentation declared this
        object read-only, yet ``create``/``save``/``destroy`` are
        implemented below -- confirm against the API which is
        authoritative.
    """
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc', allow_none=True)
        description = Field()

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a firewall template by its ID.

        Args:
            id (str): Firewall template identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.FirewallTemplate` instance
        """
        firewall = cls(token=token, id=id)
        firewall._get_object('v1/firewall', firewall.id)
        return firewall

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # NOTE(review): assumes self.vdc is set even though the Meta field
        # allows None -- verify against callers.
        self._commit_object('v1/firewall', vdc=self.vdc.id, name=self.name,
                            description=self.description)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/firewall', self.id)
        self.id = None

    def get_firewall_rules(self):
        """
        List the firewall rules available within this template.

        Returns:
            list: A list of :class:`esu.FirewallTemplateRule` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/firewall/{}/rule'.format(self.id),
                              FirewallTemplateRule, with_pages=False)
from esu.backup import Backup
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
from esu.disk import Disk
from esu.firewall_template import FirewallTemplate
from esu.image import Image
from esu.kubernetes_template import KubernetesTemplate
from esu.port import Port
from esu.storage_profile import StorageProfile
from esu.template import Template
from esu.vm import Vm
from esu.vm_metadata import VmMetadata
class Vdc(BaseAPI):
    """
    A virtual data center (VDC).

    Args:
        id (str): VDC identifier
        name (str): VDC name
        hypervisor (object): :class:`esu.Hypervisor` instance
        project (object): :class:`esu.Project` the VDC belongs to
        token (str): API access token.  Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: ``name``, ``hypervisor`` and ``project`` are required for
        creation.  ``name`` may be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        hypervisor = Field('esu.Hypervisor')
        project = Field('esu.Project')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a VDC object by its ID.

        Args:
            id (str): VDC identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.Vdc` instance
        """
        vdc = cls(token=token, id=id)
        vdc._get_object('v1/vdc', vdc.id)
        return vdc

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        self._commit_object('v1/vdc', project=self.project.id, name=self.name,
                            hypervisor=self.hypervisor.id)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/vdc', self.id)
        self.id = None

    def get_vms(self):
        """
        List the virtual servers available within this VDC.

        Returns:
            list: A list of :class:`esu.Vm` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/vm', Vm, vdc=self.id)

    def get_templates(self):
        """
        List the OS templates available for creating virtual servers in
        this VDC.

        Returns:
            list: A list of :class:`esu.Template` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/template', Template, with_pages=False,
                              vdc=self.id)

    def get_storage_profiles(self):
        """
        List the storage profiles (used when adding disks) available in
        this VDC.

        Returns:
            list: A list of :class:`esu.StorageProfile` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/storage_profile', StorageProfile,
                              vdc=self.id)

    def get_firewall_templates(self):
        """
        List the firewall templates available in this VDC.

        Returns:
            list: A list of :class:`esu.FirewallTemplate` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/firewall', FirewallTemplate, vdc=self.id)

    def get_networks(self):
        """
        List the networks that exist within this VDC.

        Returns:
            list: A list of :class:`esu.Network` objects
        """
        # NOTE(review): unlike the getters above, this one (and the three
        # below) does not guard against self.id being None -- confirm
        # whether that is intended.
        return self._get_list('v1/network', 'esu.Network', vdc=self.id)

    def get_routers(self):
        """
        List the routers available within this VDC.

        Returns:
            list: A list of :class:`esu.Router` objects
        """
        return self._get_list('v1/router', 'esu.Router', vdc=self.id)

    def get_ports(self):
        """
        List the ports that exist in this VDC.

        Returns:
            list: A list of :class:`esu.Port` objects
        """
        return self._get_list('v1/port', 'esu.Port', vdc=self.id)

    def get_disks(self):
        """
        List the disks that exist in this VDC.

        Returns:
            list: A list of :class:`esu.Disk` objects
        """
        return self._get_list('v1/disk', 'esu.Disk', vdc=self.id)

    def create_vm(self, name, template, password):  # convenience helper
        """
        Quickly create a virtual server in the default network with
        default settings.

        Args:
            name (str): Name of the new virtual server
            template (str): Name of the OS template
            password (str): Password to set on the server

        Returns:
            object: The created :class:`esu.Vm`

        Raises:
            ValueError: when no template with the given name exists
        """
        # pylint: disable=undefined-loop-variable
        for template_ in self.get_templates():
            if template_.name == template:
                break
        else:
            raise ValueError('Template not found')
        # The all-zero UUID identifies the default firewall template.
        firewall = next(f for f in self.get_firewall_templates() \
                        if f.id == '00000000-0000-0000-0000-000000000000')
        network = next(n for n in self.get_networks() if n.is_default)
        port = Port(network=network, fw_templates=[firewall])
        # NOTE(review): picks the first storage profile returned -- assumes
        # the API orders them sensibly; verify.
        storage_profile = self.get_storage_profiles()[0]
        disk = Disk(name='Системный диск', size=template_.min_hdd,
                    storage_profile=storage_profile)
        metadata = []
        # Fill template metadata fields with their defaults, injecting the
        # requested password into the password field.
        for field in template_.get_fields():
            value = field.default
            if field.system_alias == 'password':
                value = password
            metadata.append(VmMetadata(field=field, value=value))
        vm = Vm(name=name, cpu=template_.min_cpu, ram=template_.min_ram,
                vdc=self, template=template_, metadata=metadata, ports=[port],
                disks=[disk], token=self.token)
        vm.create()
        return vm

    def get_k8s_templates(self):
        """
        List the Kubernetes templates available for creating clusters in
        this VDC.

        Returns:
            list: A list of :class:`esu.KubernetesTemplate` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/kubernetes_template', KubernetesTemplate,
                              vdc=self.id)

    def get_images(self):
        """
        List the images available in this VDC.

        Returns:
            list: A list of :class:`esu.Image` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/image', Image, vdc=self.id)

    def get_backups(self):
        """
        List the backup jobs available in this VDC.

        Returns:
            list: A list of :class:`esu.Backup` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/backup', Backup, vdc=self.id)
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class Disk(BaseAPI):
    """
    A virtual disk.

    Args:
        id (str): Disk identifier
        name (str): Disk name
        size (int): Disk size in GB
        scsi (str): SCSI port the disk is attached to
        vdc (object): :class:`esu.Vdc` the disk belongs to
        vm (object): :class:`esu.Vm` the disk is attached to, or None
        storage_profile (object): :class:`esu.StorageProfile` of the disk

    .. note:: ``name``, ``size`` and ``storage_profile`` may be changed
        on an existing object.

    .. warning:: ``storage_profile`` can only be changed for disks in a
        VMware segment while the disk is attached to a virtual server.
    """
    class Meta:
        id = Field()
        name = Field()
        size = Field()
        scsi = Field()
        vdc = Field('esu.Vdc')
        vm = Field('esu.Vm', allow_none=True)
        storage_profile = Field('esu.StorageProfile')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a disk object by its ID.

        Args:
            id (str): Disk identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.Disk` instance
        """
        disk = cls(token=token, id=id)
        disk._get_object('v1/disk', disk.id)
        return disk

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # vm and vdc are optional; only send the keys that are set so the
        # API can tell an attached disk from a standalone one.
        disk = {
            'name': self.name,
            'size': self.size,
            'storage_profile': self.storage_profile.id,
        }
        if self.vm:
            disk['vm'] = self.vm.id
        if self.vdc:
            disk['vdc'] = self.vdc.id
        self._commit_object('v1/disk', **disk)

    def attach_disk(self, vm):
        """
        Attach an existing disk to a virtual server.

        Args:
            vm (object): :class:`esu.Vm` to attach the disk to

        Raises:
            ValueError: when the disk does not exist yet, or is already
                attached to a server
        """
        if not self.id:
            # Fixed the original ungrammatical message 'Disk is not exists'.
            raise ValueError('Disk does not exist')
        if self.vm is not None:
            raise ValueError('Disk must be unattached')
        self._call('POST', 'v1/disk/{}/attach'.format(self.id), vm=vm.id)
        self.vm = vm
        self._fill()

    def detach_disk(self):
        """
        Detach the disk from its virtual server.
        """
        self._call('POST', 'v1/disk/{}/detach'.format(self.id))
        self.vm = None
        self._fill()

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/disk', self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class RouterRoute(BaseAPI):
    """
    A static route configured on a router.

    Args:
        id (str): Route identifier
        router (object): :class:`esu.Router` instance the route belongs to
        destination (str): CIDR of the network the traffic is routed to
        nexthop (str): Gateway address -- a router in the source network
            that is connected both to the source network and to the
            network the traffic is routed to
        token (str): API access token.  Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: ``destination`` and ``nexthop`` are required for creation.
    """
    class Meta:
        id = Field()
        router = Field('esu.Router')
        nexthop = Field()
        destination = Field()

    @classmethod
    def get_object(cls, router, route_id, token=None):
        """
        Fetch a router route by its ID.

        Args:
            router (object): :class:`esu.Router` that owns the route
            route_id (str): Route identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.RouterRoute` instance
        """
        obj = cls(token=token, id=route_id, router=router)
        obj._get_object('v1/router/{}/route'.format(obj.router.id), obj.id)
        return obj

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        endpoint = 'v1/router/{}/route'.format(self.router.id)
        self._commit_object(endpoint, nexthop=self.nexthop,
                            destination=self.destination)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        endpoint = 'v1/router/{}/route'.format(self.router.id)
        self._destroy_object(endpoint, self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class Snapshot(BaseAPI):
    """
    A snapshot of a virtual server.

    Args:
        id (str): Snapshot identifier
        name (str): Snapshot name
        vm (object): :class:`esu.Vm` the snapshot belongs to
        description (str): Snapshot description

    .. note:: ``name`` and ``vm`` are required for creation;
        ``description`` is optional.  ``name`` and ``description`` may
        be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        vm = Field("esu.Vm")
        description = Field()

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a snapshot by its ID.

        Args:
            id (str): Snapshot identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.Snapshot` instance
        """
        snapshot = cls(token=token, id=id)
        snapshot._get_object('v2/snapshot', snapshot.id)
        return snapshot

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Description may be unset; send an empty string instead of None.
        description = self.description or ''
        self._commit_object('v2/snapshot', name=self.name,
                            description=description, vm=self.vm.id)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v2/snapshot', self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class PortForwarding(BaseAPI):
    """
    A port-forwarding object created on a floating port.

    Args:
        id (str): Port-forwarding identifier
        floating (object): :class:`esu.Port` on which the forwarding is
            created
        token (str): API access token.  Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: Managing port forwarding on a port is only possible in
        resource pools managed by OpenStack.  The ``floating`` field is
        required for creation.
    """
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc')
        floating = Field('esu.Port')

    @classmethod
    def get_object(cls, port_forwarding_id, token=None):
        """
        Fetch a port-forwarding object by its ID.

        Args:
            port_forwarding_id (str): Port-forwarding identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.PortForwarding` instance
        """
        obj = cls(token=token, id=port_forwarding_id)
        obj._get_object('v1/port_forwarding', obj.id)
        return obj

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def _commit(self):
        self._commit_object('v1/port_forwarding', floating=self.floating.id)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/port_forwarding', self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
from esu.dns_record import DnsRecord
class Dns(BaseAPI):
    """
    A DNS zone.

    Args:
        id (str): DNS zone identifier
        name (str): DNS zone name
        project (object): :class:`esu.Project` the zone belongs to
        token (str): API access token.  Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: ``name`` and ``project`` are required for creation.
        ``name`` may be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        project = Field('esu.Project')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a DNS zone by its ID.

        Args:
            id (str): DNS zone identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.Dns` instance
        """
        zone = cls(token=token, id=id)
        zone._get_object('v1/dns', zone.id)
        return zone

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        self._commit_object('v1/dns', project=self.project.id, name=self.name)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/dns', self.id)
        self.id = None

    def get_dns_records(self):
        """
        List the DNS records available within this zone.

        Returns:
            list: A list of :class:`esu.DnsRecord` objects

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        endpoint = 'v1/dns/{}/record'.format(self.id)
        return self._get_list(endpoint, DnsRecord)
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class PortForwardingRule(BaseAPI):
    """
    A single rule of a port-forwarding object.

    Args:
        id (str): Rule identifier
        port_forwarding (object): :class:`esu.PortForwarding` the rule
            belongs to
        port (object): :class:`esu.Port` of the server the traffic is
            forwarded to
        internal_port (int): Internal device port the forwarding
            originates from
        external_port (int): External device port the forwarding
            targets
        protocol (str): Protocol the forwarding applies to
        token (str): API access token.  Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: Managing port forwarding on a port is only possible in
        resource pools managed by OpenStack.  ``external_port``,
        ``internal_port`` and ``protocol`` are required for creation.
    """
    class Meta:
        port_forwarding = Field('esu.PortForwarding')
        port = Field('esu.Port')
        id = Field()
        external_port = Field()
        internal_port = Field()
        protocol = Field()

    @classmethod
    def get_object(cls, port_forwarding, rule_id, token=None):
        """
        Fetch a port-forwarding rule by its ID.

        Args:
            port_forwarding (object): :class:`esu.PortForwarding` that
                owns the rule
            rule_id (str): Rule identifier
            token (str): API access token.  Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: The :class:`esu.PortForwardingRule` instance
        """
        rule = cls(token=token, id=rule_id, port_forwarding=port_forwarding)
        endpoint = 'v1/port_forwarding/{}/rule'.format(
            rule.port_forwarding.id)
        rule._get_object(endpoint, rule.id)
        return rule

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when the object already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        endpoint = 'v1/port_forwarding/{}/rule'.format(
            self.port_forwarding.id)
        self._commit_object(endpoint, internal_port=self.internal_port,
                            external_port=self.external_port,
                            protocol=self.protocol, port=self.port.id)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when the object does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        endpoint = 'v1/port_forwarding/{}/rule'.format(
            self.port_forwarding.id)
        self._destroy_object(endpoint, self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class Kubernetes(BaseAPI):
"""
Args:
id (str): Идентификатор
name (str): Имя
node_cpu (int): CPU нод
node_ram (int): RAM нод
node_disk_size (int): Размер диска нод
node_storage_profile (object): Объект :class:`esu.StorageProfile`
nodes_count (int): Количество нод в кластере
vdc (object): Объект класса :class:`esu.Vdc`. ВЦОД, к которому
относится данный кластер
template (str): Идентификатор шаблона Kubernetes
user_public_key (string): публичный SSH ключ
floating (object): Объект класса :class:`esu.Port`.
Порт подключения кластера к внешней сети.
Если None, кластер не имеет подключения к внешней сети.
"""
class Meta:
id = Field()
name = Field()
node_cpu = Field()
node_ram = Field()
node_disk_size = Field()
node_storage_profile = Field('esu.StorageProfile')
nodes_count = Field()
template = Field('esu.KubernetesTemplate')
user_public_key = Field()
vdc = Field('esu.Vdc')
floating = Field('esu.Port', allow_none=True)
@classmethod
def get_object(cls, id, token=None):
"""
Получить объект kubernetes по его ID
Args:
id (str): Идентификатор Kubernetes
token (str): Токен для доступа к API. Если не передан, будет
использована переменная окружения **ESU_API_TOKEN**
Returns:
object: Возвращает объект кластера
kubernetes :class:`esu.Kubernetes`
"""
k8s = cls(token=token, id=id)
k8s._get_object('v1/kubernetes', k8s.id)
return k8s
def create(self):
"""
Создать объект
Raises:
ObjectAlreadyHasId: Если производится попытка создать объект,
который уже существует
"""
if self.id is not None:
raise ObjectAlreadyHasId
self._commit()
def save(self):
"""
Сохранить изменения
"""
if self.id is None:
raise ObjectHasNoId
self._commit()
return self
def _commit(self):
k8s = {
'vdc': self.vdc.id,
'template': self.template.id,
'name': self.name,
'node_cpu': self.node_cpu,
'node_ram': self.node_ram,
'node_disk_size': self.node_disk_size,
'node_storage_profile': self.node_storage_profile.id,
'nodes_count': self.nodes_count,
'user_public_key': self.user_public_key
}
floating = None
if self.floating:
# keep/change or get a new IP
floating = self.floating.id or '0.0.0.0'
k8s['floating'] = floating
self._commit_object('v1/kubernetes', **k8s)
def destroy(self):
"""
Удалить объект
Raises:
ObjectHasNoId: Когда производится попытка удалить несуществующий
объект
"""
if self.id is None:
raise ObjectHasNoId
self._destroy_object('v1/kubernetes', self.id)
self.id = None
def get_dashbord_url(self):
    """
    Return the Kubernetes Dashboard URL for this cluster.

    .. note:: The method name carries a historical typo ("dashbord")
        and is kept for backward compatibility; prefer
        :meth:`get_dashboard_url`.

    Returns:
        str: Dashboard address.
    """
    resp = self._call('GET', 'v1/kubernetes/{}/dashboard'.format(self.id))
    # The API returns a relative URI; prepend the endpoint to build an
    # absolute address.
    return '{}{}'.format(self.endpoint_url, resp['url'])

def get_dashboard_url(self):
    """
    Correctly spelled alias for :meth:`get_dashbord_url`.

    Returns:
        str: Dashboard address.
    """
    return self.get_dashbord_url()
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class RouterPortForwarding(BaseAPI):
    """
    A single port forwarding rule on a router.

    Args:
        id (str): Identifier of the port forwarding rule.
        router (object): :class:`esu.Router` instance this rule belongs to.
        protocol (str): Protocol being forwarded.
        local_ip (str): IP address of the server the rule forwards to.
        external_port_range_start (int): First port of the external range.
        external_port_range_end (int): Last port of the external range.
        internal_port (int): Router port the server is reachable through.
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: Port forwarding management on a router is available only
       for VMware resource pools.

       The fields ``protocol``, ``external_port_range_start``,
       ``external_port_range_end`` and ``local_ip`` are required on
       creation.
    """
    class Meta:
        router = Field('esu.Router')
        id = Field()
        external_port_range_end = Field()
        external_port_range_start = Field()
        internal_port = Field()
        local_ip = Field()
        protocol = Field()

    @classmethod
    def get_object(cls, router, pf_id, token=None):
        """
        Fetch a router port forwarding rule by its ID.

        Args:
            router (object): :class:`esu.Router` the rule belongs to.
            pf_id (str): Identifier of the port forwarding rule.
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: A :class:`esu.RouterPortForwarding` instance.
        """
        rule = cls(token=token, id=pf_id, router=router)
        rule._get_object(
            'v1/router/{}/port_forwarding'.format(rule.router.id), rule.id)
        return rule

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Every editable attribute of the rule goes into the payload.
        payload = {
            "internal_port": self.internal_port,
            "external_port_range_end": self.external_port_range_end,
            "external_port_range_start": self.external_port_range_start,
            "local_ip": self.local_ip,
            "protocol": self.protocol,
        }
        self._commit_object(
            'v1/router/{}/port_forwarding'.format(self.router.id),
            **payload)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object(
            'v1/router/{}/port_forwarding'.format(self.router.id), self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
from esu.lbaas_pool import LbaasPool
class Lbaas(BaseAPI):
    """
    Load balancer as a service (LBaaS).

    Args:
        id (str): Lbaas identifier.
        name (str): Lbaas name.
        vdc (object): :class:`esu.Vdc` instance; the VDC this load
            balancer belongs to.
        floating (object): :class:`esu.Port` instance; the port that
            connects the load balancer to the external network, or None
            when there is no external connectivity.
        port (object): :class:`esu.Port` instance; the network this
            load balancer is attached to.
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: The fields ``name``, ``vdc`` and ``port`` are required on
        creation.

        The field ``name`` can be changed on an existing object.
    """
    # Fix: the docstring previously documented a ``ports`` attribute,
    # while the Meta field is ``port`` (singular).
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc')
        floating = Field('esu.Port', allow_none=True)
        port = Field('esu.Port')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch an Lbaas object by its ID.

        Args:
            id (str): Lbaas identifier.
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.Lbaas` instance.
        """
        # Fix: the local was misleadingly named ``dns`` (copy-paste from
        # the DNS module).
        lbaas = cls(token=token, id=id)
        lbaas._get_object('v1/lbaas', lbaas.id)
        return lbaas

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # New ports are described by their network plus firewall
        # templates.
        port = {
            'network': self.port.network.id,
            'fw_templates': [o2.id for o2 in self.port.fw_templates or []]
        }
        floating = None
        if self.floating:
            # keep/change or get a new IP ('0.0.0.0' requests a new one)
            floating = self.floating.id or '0.0.0.0'
        self._commit_object('v1/lbaas', name=self.name, vdc=self.vdc.id,
                            port=port, floating=floating)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/lbaas', self.id)
        self.id = None

    def get_lbaas_pool(self):
        """
        List the balancer pools available within this Lbaas.

        Returns:
            list: A list of :class:`esu.LbaasPool` objects.

        Raises:
            ObjectHasNoId: when the object does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/lbaas/{}/pool'.format(self.id), LbaasPool)
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId
class Vm(BaseAPI):
    """
    Virtual server.

    Args:
        id (str): Identifier
        name (str): Name
        description (str): Description; arbitrary user-supplied text
        cpu (int): Number of CPU cores
        ram (int): Amount of RAM, in GB
        power (bool): Current power state: on (True) or off (False)
        vdc (object): :class:`esu.Vdc` instance; the VDC this virtual
                      server belongs to
        template (object): :class:`esu.Template` instance; the operating
                           system template
        metadata (list): List of :class:`esu.VmMetadata` objects; fields
                         required for creating the virtual server, such
                         as a password or a user name
        ports (list): List of :class:`esu.Port` objects; networks the
                      virtual server is connected to
        disks (list): List of :class:`esu.Disk` objects; disks attached
                      to the virtual server
        floating (object): :class:`esu.Port` instance; the port that
                           connects the virtual server to the external
                           network, or None when there is no external
                           connectivity
        token (str): API access token. Falls back to the
                     **ESU_API_TOKEN** environment variable when omitted

    .. note:: The fields ``name``, ``cpu``, ``ram``, ``template``,
              ``ports``, ``disks`` and ``vdc`` are required on creation.

              The fields ``metadata``, ``description`` and ``floating``
              are optional on creation.

              The fields ``name``, ``description``, ``cpu``, ``ram`` and
              ``floating`` can be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        description = Field()
        cpu = Field()
        ram = Field()
        power = Field()
        vdc = Field('esu.Vdc')
        template = Field('esu.Template')
        metadata = FieldList('esu.VmMetadata')
        ports = FieldList('esu.Port')
        disks = FieldList('esu.Disk')
        floating = Field('esu.Port', allow_none=True)
        hotadd_feature = Field()
        cdrom = Field('esu.Image', allow_none=True)

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a virtual server object by its ID.

        Args:
            id (str): Virtual server identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted

        Returns:
            object: A :class:`esu.Vm` virtual server instance
        """
        vm = cls(token=token, id=id)
        vm._get_object('v1/vm', vm.id)
        # Back-reference each disk to its owning VM.
        for disk in vm.disks:
            disk.vm = vm
        return vm

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet

        Returns:
            object: this instance
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()
        return self

    def _commit(self):
        # Build the create/update payload. Existing ports are referenced
        # by id; new ones by their network plus firewall templates.
        vm = {
            'vdc': self.vdc.id,
            'template': self.template.id,
            'name': self.name,
            'cpu': self.cpu,
            'ram': self.ram,
            'description': self.description or '',
            'ports': [{
                'id': o.id,
            } if o.id else {
                'network': o.network.id,
                'fw_templates': [o2.id for o2 in o.fw_templates or []]
            } for o in self.ports],
            'disks': [{
                'name': o.name,
                'size': o.size,
                'storage_profile': o.storage_profile.id
            } for o in self.disks]
        }
        if self.id is None:
            # Metadata is only sent at creation time.
            vm['metadata'] = [{
                'field': o.field.id,
                'value': o.value
            } for o in self.metadata]
        floating = None
        if self.floating:
            # keep/change or get a new IP ('0.0.0.0' requests a new one)
            floating = self.floating.id or '0.0.0.0'
        vm['floating'] = floating
        if self.hotadd_feature:
            vm['hotadd_feature'] = True
        self._commit_object('v1/vm', **vm)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/vm', self.id)
        self.id = None

    def add_disk(self, disk):
        """
        Create a new disk and attach it to the virtual server.

        Args:
            disk (object): A new :class:`esu.Disk` instance
        """
        if disk.id:
            raise ValueError('You must pass a new Disk object')
        disk.vm = self
        disk._commit()
        self.disks.append(disk)

    def attach_disk(self, disk):
        """
        Attach an existing VDC disk to the virtual server.

        Args:
            disk (object): An existing, unattached :class:`esu.Disk`
        """
        if not disk.id:
            raise ValueError('Disk is not exists')
        if disk.vm is not None:
            raise ValueError('Disk must be unattached')
        disk.vm = self
        disk.save()
        self.disks.append(disk)

    def detach_disk(self, disk):
        """
        Detach a disk from the virtual server.

        Args:
            disk (object): An attached :class:`esu.Disk` instance
        """
        self._call('POST', 'v1/disk/{}/detach'.format(disk.id))
        disk.vm = None
        self.disks = [d for d in self.disks if d.id != disk.id]

    def add_port(self, port):
        """
        Add a network connection.

        Args:
            port (object): A new :class:`esu.Port` instance
        """
        # NOTE(review): unlike Router.add_port, the raw API response
        # dict is appended here instead of an esu.Port object; _fill()
        # below presumably re-resolves it — confirm against
        # BaseAPI._fill.
        port = self._call('POST', 'v1/port', vm=self.id,
                          network=port.network.id)
        self.ports.append(port)
        self._fill()

    def remove_port(self, port):
        """
        Remove a network connection.

        Args:
            port (object): An existing :class:`esu.Port` instance
        """
        self._call('DELETE', 'v1/port/{}'.format(port.id))
        self.ports = [o for o in self.ports if o.id != port.id]

    def power_on(self):
        """
        Power the virtual server on.
        """
        self._call('POST', 'v1/vm/{}/state'.format(self.id), state='power_on')
        self.power = True

    def power_off(self):
        """
        Power the virtual server off.
        """
        self._call('POST', 'v1/vm/{}/state'.format(self.id), state='power_off')
        self.power = False

    def reboot(self):
        """
        Reboot the virtual server.
        """
        self._call('POST', 'v1/vm/{}/state'.format(self.id), state='reboot')

    def get_vnc_url(self):
        """
        Return the VNC console URL for this server.

        Returns:
            str: VNC console address
        """
        vnc = self._call('POST', 'v1/vm/{}/vnc'.format(self.id))
        uri = vnc['url']
        # The API returns a relative URI; prepend the endpoint.
        return '{}{}'.format(self.endpoint_url, uri)

    def revert(self, snapshot):
        """
        Restore the server from a snapshot.

        Args:
            snapshot (object): A :class:`esu.Snapshot` instance
        """
        vm = self._call('POST', 'v2/snapshot/{}/revert'.format(snapshot.id))
        return vm

    def mount_iso(self, image):
        """
        Mount an ISO to the server as a CD-ROM.

        After a reboot the server boots from this disk if it is
        bootable.

        Args:
            image (object): An :class:`esu.Image` instance
        """
        image = {'image': image.id}
        self._call('POST', 'v1/vm/{}/mount_iso'.format(self.id), **image)

    def unmount_iso(self):
        """
        Unmount the ISO from the server.

        After a reboot the server boots from its primary disk.
        """
        self._call('POST', 'v1/vm/{}/unmount_iso'.format(self.id))
from esu.base import BaseAPI, Field, FieldList
class DomainAlias(BaseAPI):
    """
    Alternative name of a domain.

    Args:
        id (str): Identifier
        alias (str): Domain name

    .. warning:: The object is read-only and cannot be created, changed
                 or deleted.
    """
    class Meta:
        id = Field()
        alias = Field()
class Domain(BaseAPI):
    """
    Domain.

    Args:
        id (str): Domain identifier
        name (str): Domain name
        aliases (list): List of :class:`esu.DomainAlias` objects; the
                        names that belong to this domain

    .. warning:: The object is read-only and cannot be created, changed
                 or deleted.
    """
    class Meta:
        id = Field()
        aliases = FieldList(DomainAlias)
        name = Field()
class User(BaseAPI):
    """
    Platform user account.

    Args:
        id (str): User identifier
        domain (object): :class:`esu.Domain` instance indicating the
            domain the user belongs to
        login (str): User login
        username (str): User name
        email (str): E-mail address
        phone (str): Phone number

    .. warning:: The object is read-only and cannot be created, changed
        or deleted.
    """
    class Meta:
        id = Field()
        domain = Field(Domain)
        login = Field()
        username = Field(allow_none=True)
        email = Field(allow_none=True)
        phone = Field(allow_none=True)

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a user object by its ID; pass ``id='me'`` to fetch the
        account the token is bound to.

        Args:
            id (str): User identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.User` instance
        """
        account = cls(token=token, id=id)
        account._get_object('v1/account', account.id)
        return account
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class DnsRecord(BaseAPI):
    """
    A single record inside a DNS zone.

    Args:
        id (str): DNS record identifier
        dns (object): :class:`esu.Dns` instance; the DNS zone this
            record belongs to
        data (str): Record data
        flag (str): Record flag
        host (str): Record host
        port (str): Record port
        priority (str): Record priority
        tag (str): Record tag
        ttl (str): Record TTL
        type (str): Record type
        weight (str): Record weight
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: The fields ``data``, ``dns``, ``host``, ``ttl`` and
        ``type`` are required on creation.
    """
    class Meta:
        id = Field()
        dns = Field('esu.Dns')
        data = Field()
        flag = Field()
        host = Field()
        port = Field()
        priority = Field()
        tag = Field()
        ttl = Field()
        type = Field()
        weight = Field()

    @classmethod
    def get_object(cls, dns, id, token=None):
        """
        Fetch a DNS record object by its ID.

        Args:
            dns (object): :class:`esu.Dns` zone the record belongs to
            id (str): DNS record identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.DnsRecord` instance
        """
        record = cls(token=token, id=id, dns=dns)
        record._get_object('v1/dns/{}/record'.format(record.dns.id),
                           record.id)
        return record

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Every editable attribute of the record goes into the payload.
        payload = dict(data=self.data, flag=self.flag, host=self.host,
                       port=self.port, priority=self.priority,
                       type=self.type, tag=self.tag, ttl=self.ttl,
                       weight=self.weight)
        self._commit_object('v1/dns/{}/record'.format(self.dns.id),
                            **payload)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/dns/{}/record'.format(self.dns.id), self.id)
        self.id = None
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId
class LbaasPoolMember(BaseAPI):
    """
    A single member (backend server) of a load balancer pool.

    Args:
        port (str): Port of the pool member
        vm (object): :class:`esu.Vm` instance
        weight (str): Weight of the pool member
    """
    class Meta:
        port = Field()
        vm = Field('esu.Vm')
        weight = Field()
class LbaasPool(BaseAPI):
    """
    Load balancer pool.

    Args:
        id (str): Lbaas pool identifier
        name (str): Lbaas pool name
        lbaas (object): :class:`esu.Lbaas` instance; the load balancer
            this pool belongs to
        connlimit (str): Connection limit for the pool
        cookie_name (str): Cookie name
        members (list): List of :class:`esu.LbaasPoolMember` objects;
            the members connected to this pool
        method (str): Balancing method the pool uses
        port (str): Port the pool connects through
        protocol (str): Protocol the pool operates on
        session_persistence (str): Session persistence setting
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: The fields ``members`` and ``port`` are required on
        creation.
    """
    class Meta:
        id = Field()
        name = Field()
        lbaas = Field('esu.Lbaas')
        connlimit = Field()
        cookie_name = Field()
        members = FieldList(LbaasPoolMember)
        method = Field()
        port = Field()
        protocol = Field()
        session_persistence = Field()

    @classmethod
    def get_object(cls, lbaas, pool_id, token=None):
        """
        Fetch a balancer pool object by its ID.

        Args:
            lbaas (object): :class:`esu.Lbaas` the pool belongs to
            pool_id (str): Lbaas pool identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.LbaasPool` instance
        """
        pool = cls(token=token, id=pool_id, lbaas=lbaas)
        pool._get_object('v1/lbaas/{}/pool'.format(pool.lbaas.id), pool.id)
        return pool

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Members are serialized down to plain ids/values for the API.
        payload = {
            'members': [{
                'port': member.port,
                'vm': member.vm.id,
                'weight': member.weight
            } for member in self.members],
            'method': self.method,
            'port': self.port,
            'protocol': self.protocol,
            'session_persistence': self.session_persistence,
            'name': self.name,
        }
        # Optional attributes are only sent when explicitly set.
        if self.connlimit is not None:
            payload['connlimit'] = self.connlimit
        if self.cookie_name is not None:
            payload['cookie_name'] = self.cookie_name
        self._commit_object('v1/lbaas/{}/pool'.format(self.lbaas.id),
                            **payload)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/lbaas/{}/pool'.format(self.lbaas.id),
                             self.id)
        self.id = None
from esu.base import BaseAPI
class Manager(BaseAPI):
    """
    Entry point for listing every object visible to the current user.

    Args:
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.
    """
    class Meta:
        pass

    def get_all_clients(self):
        """
        List every client available to the user. If the current user
        was granted access to another client, the list contains both.

        Returns:
            list: A list of :class:`esu.Client` objects
        """
        return self._get_list('v1/client', 'esu.Client')

    def get_all_projects(self):
        """
        List every project available to the user, including projects
        shared by other users.

        Returns:
            list: A list of :class:`esu.Project` objects
        """
        return self._get_list('v1/project', 'esu.Project')

    def get_all_vdcs(self):
        """
        List every VDC available to the user, including VDCs from
        shared projects.

        Returns:
            list: A list of :class:`esu.Vdc` objects
        """
        return self._get_list('v1/vdc', 'esu.Vdc')

    def get_all_vms(self):
        """
        List every virtual server available to the user, including
        servers from shared projects.

        Returns:
            list: A list of :class:`esu.Vm` objects
        """
        return self._get_list('v1/vm', 'esu.Vm')

    def get_all_storage_profiles(self):
        """
        List every storage profile available to the user.

        Returns:
            list: A list of :class:`esu.StorageProfile` objects
        """
        return self._get_list('v1/storage_profile', 'esu.StorageProfile')

    def get_all_platforms(self):
        """
        List every platform available to the user.

        Returns:
            list: A list of :class:`esu.Platform` objects
        """
        # This endpoint is not paginated.
        return self._get_list('v1/platform', 'esu.Platform', with_pages=False)

    def get_all_firewall_templates(self):
        """
        List every firewall template available to the user.

        Returns:
            list: A list of :class:`esu.FirewallTemplate` objects
        """
        return self._get_list('v1/firewall', 'esu.FirewallTemplate')

    def get_all_networks(self):
        """
        List every network available to the user.

        Returns:
            list: A list of :class:`esu.Network` objects
        """
        return self._get_list('v1/network', 'esu.Network')
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
from esu.dns import Dns
from esu.kubernetes import Kubernetes
from esu.s3 import S3
from esu.vdc import Vdc
class Project(BaseAPI):
    """
    Project.

    Args:
        id (str): Identifier
        name (str): Name
        client (object): :class:`esu.Client` instance; the client this
            project belongs to
        token (str): API access token. Falls back to the
            **ESU_API_TOKEN** environment variable when omitted.

    .. note:: The fields ``name`` and ``client`` are required on
        creation.

        The field ``name`` can be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        client = Field('esu.Client')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a project object by its ID.

        Args:
            id (str): Project identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.Project` instance
        """
        project = cls(token=token, id=id)
        project._get_object('v1/project', project.id)
        return project

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        self._commit_object('v1/project', client=self.client.id,
                            name=self.name)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/project', self.id)
        self.id = None

    def get_vdcs(self):
        """
        List the VDCs in this project.

        Returns:
            list: A list of :class:`esu.Vdc` objects

        Raises:
            ObjectHasNoId: when the object does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/vdc', Vdc, project=self.id)

    def get_available_hypervisors(self):
        """
        List the hypervisors available in this project.

        Returns:
            list: A list of :class:`esu.Hypervisor` objects
        """
        return self.client.allowed_hypervisors

    def get_dns_zones(self):
        """
        List the DNS zones available in this project.

        Returns:
            list: A list of :class:`esu.Dns` objects

        Raises:
            ObjectHasNoId: when the object does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/dns', Dns, project=self.id)

    def get_k8s_clusters(self):
        """
        List the Kubernetes clusters available in this project.

        Returns:
            list: A list of :class:`esu.Kubernetes` objects

        Raises:
            ObjectHasNoId: when the object does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/kubernetes', Kubernetes, project=self.id)

    def get_s3_storages(self):
        """
        List the S3 storages available in this project.

        Returns:
            list: A list of :class:`esu.S3` objects

        Raises:
            ObjectHasNoId: when the object does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list('v1/s3_storage', S3, project=self.id)
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId, resolve
class Network(BaseAPI):
    """
    Network.

    Args:
        id (str): Network identifier
        name (str): Network name
        vdc (object): :class:`esu.Vdc` instance; the VDC this network
            belongs to
        is_default (bool): True for the default network
        subnets (object): List of :class:`esu.Subnet` objects

    .. note:: The fields ``name`` and ``vdc`` are required on creation.

        The field ``subnets`` is optional on creation.

        The field ``name`` can be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc', allow_none=True)
        is_default = Field()
        subnets = FieldList('esu.Subnet')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a network object by its ID.

        Args:
            id (str): Network identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.Network` instance
        """
        network = cls(token=token, id=id)
        network._get_object('v1/network', network.id)
        return network

    def create(self):
        """
        Create the object, then attach any pre-configured subnets.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        # Capture subnets before committing; the commit may refresh
        # object state.
        pending = self.subnets or []
        self._commit()
        for subnet in pending:
            self.add_subnet(subnet)

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        return self._commit_object('v1/network', vdc=self.vdc.id,
                                   name=self.name)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/network', self.id)
        self.id = None

    def add_subnet(self, subnet):
        """
        Add a subnet to the network.

        Args:
            subnet (object): A new :class:`esu.Subnet` instance
        """
        if subnet.id:
            raise ValueError('You must pass a new Subnet object')
        created = self._call('POST', 'v1/network/{}/subnet'.format(self.id),
                             cidr=subnet.cidr, gateway=subnet.gateway,
                             start_ip=subnet.start_ip, end_ip=subnet.end_ip,
                             enable_dhcp=subnet.enable_dhcp,
                             subnet_routes=[], dns_servers=[])
        # Wrap the raw API response in an esu.Subnet object.
        self.subnets.append(resolve('esu.Subnet')(token=self.token,
                                                  **created))

    def remove_subnet(self, subnet):
        """
        Remove a subnet from the network.

        Args:
            subnet (object): An existing :class:`esu.Subnet` instance
        """
        self._call('DELETE',
                   'v1/network/{}/subnet/{}'.format(self.id, subnet.id))
        self.subnets = [s for s in self.subnets if s.id != subnet.id]
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId
from esu.vm import Vm
class File(BaseAPI):
    """
    A single file inside an image.

    Args:
        id (str): Identifier
        name (str): Name
        size (str): File size
        type (str): File type
    """
    class Meta:
        id = Field()
        name = Field()
        type = Field()
        size = Field()
class Image(BaseAPI):
    """
    Disk image.

    Args:
        id (str): Identifier
        name (str): Name
        files (list): List of :class:`esu.File` objects; the files
            contained in the image
        size (str): Image size
        type (str): Image type
        vdc (object): :class:`esu.Vdc` instance; the VDC this image
            belongs to
    """
    class Meta:
        files = FieldList(File, allow_none=True)
        id = Field()
        name = Field()
        size = Field()
        type = Field()
        vdc = Field('esu.Vdc')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch an image object by its ID.

        Args:
            id (str): Image identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.Image` instance
        """
        image = cls(token=token, id=id)
        image._get_object('v1/image', image.id)
        return image

    def create_from_vm(self, vm):
        """
        Create an image from an existing server.

        Args:
            vm (object): Source :class:`esu.Vm` instance

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit(vm=vm)

    def create_for_upload(self):
        """
        Create an image object for a subsequent file upload.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        # Fix: enforce the documented contract. The guard was missing,
        # so an already-existing image could be committed again.
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def get_upload_link(self):
        """
        Return the URL for uploading the image's files.

        Returns:
            str: Upload URL
        """
        image = {'name': self.name, 'type': self.type}
        resp = self._call('POST', 'v1/image/{}/file'.format(self.id), **image)
        # Fix: use self.endpoint_url for consistency with
        # get_download_link (BaseAPI.endpoint_url bypassed any
        # instance-level endpoint override).
        return '{}{}'.format(self.endpoint_url, resp['url'])

    def commit_upload(self):
        """
        Confirm that the image's file upload has finished.

        Must be called after the files have been uploaded to the URL
        obtained from :meth:`get_upload_link`.

        Returns:
            object: this instance, refreshed from the API response
        """
        resp = self._call('POST', 'v1/image/{}/commit'.format(self.id))
        self.kwargs = resp
        self._fill()
        return self

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when the object does not exist yet.

        Returns:
            object: this instance
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()
        return self

    def _commit(self, vm=None):
        image = {'vdc': self.vdc.id, 'name': self.name}
        # Creating from a VM and creating an empty upload target are
        # mutually exclusive API shapes.
        if vm is not None:
            image['vm'] = vm.id
        else:
            image['type'] = self.type
        self._commit_object('v1/image', **image)

    def destroy(self):
        """
        Delete the image object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/image', self.id)
        self.id = None

    def get_download_link(self, file):
        """
        Return the URL for downloading an image file.

        Args:
            file (object): :class:`esu.File` instance to download

        Returns:
            str: Download URL
        """
        resp = self._call('GET',
                          'v1/image/{}/file/{}'.format(self.id, file.id))
        return '{}{}'.format(self.endpoint_url, resp['url'])

    def deploy_vm_from_image(self, vm):
        """
        Create a server from this image.

        Args:
            vm (object): The :class:`esu.Vm` to be created (name, vdc,
                cpu, ram, first port's network and first disk's storage
                profile are used)

        Returns:
            object: The :class:`esu.Vm` created from the image
        """
        payload = {
            'vdc': vm.vdc.id,
            'name': vm.name,
            'cpu': vm.cpu,
            'ram': vm.ram,
            'network': vm.ports[0].network.id,
            'storage_profile': vm.disks[0].storage_profile.id
        }
        resp = self._call('POST', 'v1/image/{}/deploy'.format(self.id),
                          **payload)
        return Vm.get_object(id=resp['id'])
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId, resolve
class Router(BaseAPI):
    """
    Router.

    Args:
        id (str): Identifier
        name (str): Name
        vdc (object): :class:`esu.Vdc` instance; the VDC this router
            belongs to
        is_default (bool): True for the default router
        floating (object): :class:`esu.Port` connecting the router to
            the external network
        ports (list): List of the router's connections

    .. note:: The fields ``name``, ``ports`` and ``vdc`` are required
        on creation.

        The field ``floating`` is optional on creation.

        The fields ``name`` and ``floating`` can be changed on an
        existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc')
        is_default = Field()
        floating = Field('esu.Port', allow_none=True)
        ports = FieldList('esu.Port')

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a router object by its ID.

        Args:
            id (str): Router identifier
            token (str): API access token. Falls back to the
                **ESU_API_TOKEN** environment variable when omitted.

        Returns:
            object: An :class:`esu.Router` instance
        """
        router = cls(token=token, id=id)
        router._get_object('v1/router', router.id)
        return router

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: when attempting to create an object
                that already exists.
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Persist changes.

        Raises:
            ObjectHasNoId: when attempting to save an object that does
                not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Existing ports are referenced by id; new ones by network.
        ports = [{'id': o.id} if o.id else {'network': o.network.id}
                 for o in self.ports]
        floating = None
        if self.floating:
            # keep/change or get a new IP ('0.0.0.0' requests a new one)
            floating = self.floating.id or '0.0.0.0'
        return self._commit_object('v1/router', vdc=self.vdc.id,
                                   name=self.name, ports=ports,
                                   floating=floating)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: when attempting to delete an object that
                does not exist yet.
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/router', self.id)
        self.id = None

    def add_port(self, port):
        """
        Add a connection.

        Args:
            port (object): A new :class:`esu.Port` instance
        """
        created = self._call('POST', 'v1/port', router=self.id,
                             network=port.network.id)
        # Wrap the raw API response in an esu.Port object.
        self.ports.append(resolve('esu.Port')(token=self.token, **created))

    def remove_port(self, port):
        """
        Remove a connection.

        Args:
            port (object): An existing :class:`esu.Port` instance
        """
        self._call('DELETE', 'v1/port/{}'.format(port.id))
        self.ports = [o for o in self.ports if o.id != port.id]
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class FirewallTemplateRule(BaseAPI):
    """
    A single rule of a firewall template.

    Args:
        id (str): Rule identifier
        name (str): Rule name
        firewall (str): :class:`esu.FirewallTemplate` object — the
            firewall template this rule belongs to
        direction (str): Rule direction
        destination_ip (str): Destination IP of the rule
        dst_port_range_max (str): Upper bound of the destination port range
        dst_port_range_min (str): Lower bound of the destination port range
        protocol (str): Rule protocol
        token (str): API access token. If not given, the **ESU_API_TOKEN**
            environment variable is used

    .. note:: Fields ``direction``, ``name`` and ``protocol`` are required
              for creation.
    """
    class Meta:
        id = Field()
        name = Field()
        firewall = Field('esu.FirewallTemplate')
        direction = Field()
        destination_ip = Field()
        dst_port_range_max = Field()
        dst_port_range_min = Field()
        protocol = Field()

    @classmethod
    def get_object(cls, firewall, rule_id, token=None):
        """
        Fetch a firewall template rule by its ID.

        Args:
            firewall (object): :class:`esu.FirewallTemplate` the rule
                belongs to
            rule_id (str): Firewall template rule identifier
            token (str): API access token. If not given, the
                **ESU_API_TOKEN** environment variable is used

        Returns:
            object: The firewall template rule
            :class:`esu.FirewallTemplateRule`
        """
        firewall_rule = cls(token=token, id=rule_id, firewall=firewall)
        firewall_rule._get_object(
            'v1/firewall/{}/rule'.format(firewall_rule.firewall.id),
            firewall_rule.id)
        return firewall_rule

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: If an attempt is made to create an object
                that already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Save changes.

        Raises:
            ObjectHasNoId: If an attempt is made to save an object that
                does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Create or update the rule under its parent template's endpoint.
        self._commit_object(
            'v1/firewall/{}/rule'.format(self.firewall.id),
            name=self.name,
            destination_ip=self.destination_ip,
            direction=self.direction,
            dst_port_range_max=self.dst_port_range_max,
            dst_port_range_min=self.dst_port_range_min,
            protocol=self.protocol,
        )

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: When an attempt is made to delete an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/firewall/{}/rule'.format(self.firewall.id),
                             self.id)
        self.id = None
from esu.base import BaseAPI, Field, ObjectAlreadyHasId, ObjectHasNoId
class RouterFirewallRule(BaseAPI):
    """
    A firewall rule attached directly to a router.

    Args:
        id (str): Firewall rule identifier
        name (str): Firewall rule name
        router (object): :class:`esu.Router` object — the router this
            firewall rule belongs to
        direction (str): Rule direction
        source_ip (str): Source address of the rule (an IP address or a
            CIDR; None = any)
        src_port_range_max (str): Upper bound of the source port range
        src_port_range_min (str): Lower bound of the source port range
        destination_ip (str): Destination address of the rule (an IP
            address or a CIDR; None = any)
        dst_port_range_max (str): Upper bound of the destination port range
        dst_port_range_min (str): Lower bound of the destination port range
        protocol (str): Rule protocol
        token (str): API access token. If not given, the **ESU_API_TOKEN**
            environment variable is used

    .. note:: Router firewall management is only available for VMware
              resource pools.
              Fields ``direction``, ``name`` and ``protocol`` are required
              for creation.
    """
    class Meta:
        router = Field('esu.Router')
        id = Field()
        name = Field()
        direction = Field()
        source_ip = Field(allow_none=True)
        src_port_range_max = Field(allow_none=True)
        src_port_range_min = Field(allow_none=True)
        destination_ip = Field(allow_none=True)
        dst_port_range_max = Field(allow_none=True)
        dst_port_range_min = Field(allow_none=True)
        protocol = Field()

    @classmethod
    def get_object(cls, router, rule_id, token=None):
        """
        Fetch a router firewall rule by its ID.

        Args:
            router (object): :class:`esu.Router` the rule belongs to
            rule_id (str): Router firewall rule identifier
            token (str): API access token. If not given, the
                **ESU_API_TOKEN** environment variable is used

        Returns:
            object: The router firewall rule
            :class:`esu.RouterFirewallRule`
        """
        firewall_rule = cls(token=token, id=rule_id, router=router)
        firewall_rule._get_object(
            'v1/router/{}/firewall_rule'.format(firewall_rule.router.id),
            firewall_rule.id)
        return firewall_rule

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: If an attempt is made to create an object
                that already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Save changes.

        Raises:
            ObjectHasNoId: If an attempt is made to save an object that
                does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # Full rule payload; None values mean "any" for address fields.
        router_fw_rule = {
            "name": self.name,
            "destination_ip": self.destination_ip,
            "source_ip": self.source_ip,
            "src_port_range_max": self.src_port_range_max,
            "src_port_range_min": self.src_port_range_min,
            "direction": self.direction,
            "dst_port_range_max": self.dst_port_range_max,
            "dst_port_range_min": self.dst_port_range_min,
            "protocol": self.protocol
        }
        self._commit_object(
            'v1/router/{}/'
            'firewall_rule'.format(self.router.id), **router_fw_rule)

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: When an attempt is made to delete an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object(
            'v1/router/{}/firewall_'
            'rule'.format(self.router.id), self.id)
        self.id = None
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId
class RestorePoint(BaseAPI):
    """
    A restore point created by a backup job.

    Args:
        id (str): Restore point identifier
        backup_type (str): Restore point type
        backup_size (int): Restore point size (bytes)
        vm (object): Server the restore point was created for
            :class:`esu.Vm`
    """
    class Meta:
        id = Field()
        backup_type = Field()
        backup_size = Field()
        vm = Field('esu.Vm')
class VmInBackup(BaseAPI):
    """
    A server as referenced inside a backup job.

    Args:
        id (str): Identifier of the server in the backup job
        name (str): Name of the server in the backup job
    """
    class Meta:
        id = Field()
        name = Field()
class Backup(BaseAPI):
    """
    A backup job.

    Args:
        id (str): Backup job identifier
        name (str): Backup job name
        size (int): Total size of the job's restore points (bytes)
        vdc (object): VDC object :class:`esu.Vdc`
        retain_cycles (int): Retention depth of the backup job
        time (str): Scheduled execution time, in UTC
        week_days (list): Week days the job runs on, e.g. [1, 2, 3]
        vms (list): Servers covered by the job

    .. note:: Fields ``name``, ``vms``, ``retain_cycles``, ``week_days``
              and ``time`` can be changed on an existing object.
    """
    class Meta:
        id = Field()
        name = Field()
        vdc = Field('esu.Vdc')
        vms = FieldList(VmInBackup)
        retain_cycles = Field()
        week_days = Field()
        time = Field()
        size = Field()

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a backup job object by its ID.

        Args:
            id (str): Backup job identifier
            token (str): API access token. If not given, the
                **ESU_API_TOKEN** environment variable is used

        Returns:
            object: The backup job :class:`esu.Backup`
        """
        job = cls(token=token, id=id)
        job._get_object('v1/backup', job.id)
        return job

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: If an attempt is made to create an object
                that already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Save changes.

        Raises:
            ObjectHasNoId: If an attempt is made to save an object that
                does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # The API expects a flat list of vm ids, not nested objects.
        vms = [i.id for i in self.vms]
        job = {
            'name': self.name,
            'vdc': self.vdc.id,
            'week_days': self.week_days,
            'time': self.time,
            'retain_cycles': self.retain_cycles,
            'vms': vms
        }
        self._commit_object('v1/backup', **job)

    def start_immediately(self):
        """
        Run the job now — create restore points outside of the schedule.
        """
        self._call('POST', 'v1/backup/{}/start_immediately'.format(self.id))

    def get_restore_points(self):
        """
        List the restore points of this backup job.

        Returns:
            list: List of :class:`esu.RestorePoint` objects

        Raises:
            ObjectHasNoId: If the job does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        return self._get_list(
            'v1/backup/{}/restore_points?'
            'sort=-ctime'.format(self.id), RestorePoint)

    def restore(self, restore_point, power_on=True, quick_restore=False):
        """
        Restore a server from a restore point.

        Args:
            restore_point (object): :class:`esu.RestorePoint` to restore
                from; it also identifies the server being restored
            power_on (bool): True if the server must be powered on after
                the restore
            quick_restore (bool): True for a quick restore (not
                recommended)

        .. warning:: In a KVM segment the restore creates a new server;
                     in a VMware segment it restores into the current
                     server.
        """
        restore = {
            "power_on": power_on,
            "quick_restore": quick_restore,
            "vm": restore_point.vm.id,
            "restore_point": restore_point.id
        }
        self._call('POST', 'v1/backup/{}/restore'.format(self.id), **restore)

    def get_backup_log(self):
        """
        Get the restore-point creation log for this job.

        Returns:
            dict: Restore-point creation report
        """
        log = self._call(
            'GET', 'v1/backup/log?backup={}'
            '&sort=-ctime'.format(self.id))
        return log

    def get_restore_log(self, vm):
        """
        Get the restore log of a server covered by this job.

        Args:
            vm (object): :class:`esu.Vm` whose restore log is requested

        Returns:
            dict: Server restore report
        """
        log = self._call('GET', 'v1/backup/log?vm={}'
                         '&sort=-ctime'.format(vm.id))
        return log

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: When an attempt is made to delete an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/backup', self.id)
        self.id = None
from esu.base import BaseAPI, Field, FieldList, ObjectAlreadyHasId, \
ObjectHasNoId, PortAlreadyConnected
class ConnectedObject(BaseAPI):
    """
    The entity a port is connected to.

    Args:
        id (str): Identifier of the connected object
        type (object): Type of the connected object
        name (object): Name of the connected object
        vdc (str): :class:`esu.Vdc` object
    """
    class Meta:
        id = Field(allow_none=True)
        type = Field()
        name = Field()
        vdc = Field('esu.Vdc', allow_none=True)
class Port(BaseAPI):
    """
    A network port.

    Args:
        id (str): Port identifier
        ip_address (str): IP address
        type (str): Type
        vdc (object): :class:`esu.Vdc` object — the VDC this port
            belongs to
        fw_templates (list): Enabled firewall templates
            :class:`esu.FirewallTemplate`
        network (object): Network :class:`esu.Network`

    .. note:: Field ``network`` is required to create a connection to a
              private VDC network.
              Fields ``ip_address`` and ``fw_templates`` are optional when
              creating a connection to a private VDC network.
              Fields ``ip_address`` and ``fw_templates`` can be changed on
              an existing object.
              When creating a floating IP connection there are no
              required fields.
    """
    class Meta:
        id = Field()
        ip_address = Field()
        type = Field()
        vdc = Field("esu.Vdc")
        fw_templates = FieldList('esu.FirewallTemplate', allow_none=True)
        network = Field('esu.Network')
        connected = Field(ConnectedObject, allow_none=True)
        vm = Field('esu.Vm')
        router = Field('esu.Router', allow_none=True)

    @classmethod
    def get_object(cls, id, token=None):
        """
        Fetch a port object by its ID.

        Args:
            id (str): Port identifier
            token (str): API access token. If not given, the
                **ESU_API_TOKEN** environment variable is used

        Returns:
            object: The port :class:`esu.Port`
        """
        port = cls(token=token, id=id)
        port._get_object('v1/port', port.id)
        return port

    def create_fip(self):
        """
        Allocate a new floating IP port in this port's VDC.

        Only the ``vdc`` reference is sent; the API assigns the address.
        """
        port = {'vdc': self.vdc.id}
        self._commit_object('v1/port', **port)

    def create(self):
        """
        Create the object.

        Raises:
            ObjectAlreadyHasId: If an attempt is made to create an object
                that already exists
        """
        if self.id is not None:
            raise ObjectAlreadyHasId
        self._commit()

    def save(self):
        """
        Save changes.

        Raises:
            ObjectHasNoId: If an attempt is made to save an object that
                does not exist yet
        """
        if self.id is None:
            raise ObjectHasNoId
        self._commit()

    def _commit(self):
        # '0.0.0.0' asks the API to auto-assign an address.
        port = {
            'ip_address': self.ip_address or '0.0.0.0',
            'fw_templates': [o.id for o in self.fw_templates or []]
        }
        if self.id is None:
            # NOTE(review): network/vm/router are only sent on creation;
            # attaching an existing port is done via connect().
            port['network'] = self.network.id
            if self.vm is not None:
                port['vm'] = self.vm.id
            elif self.router is not None:
                port['router'] = self.router.id
        self._commit_object('v1/port', **port)

    def connect(self):
        """
        Connect the port to its ``vm`` or ``router``.

        Raises:
            ObjectHasNoId: If an attempt is made to connect an object
                that does not exist
            PortAlreadyConnected: If an attempt is made to connect a port
                that is already connected
            ValueError: If neither ``vm`` nor ``router`` is set on the
                port
        """
        if self.id is None:
            raise ObjectHasNoId
        if self.connected is not None:
            raise PortAlreadyConnected

        if self.vm is not None:
            port = {'vm': self.vm.id}
        elif self.router is not None:
            port = {'router': self.router.id}
        else:
            # Previously this fell through to an UnboundLocalError on
            # `port`; fail with an explicit, catchable error instead.
            raise ValueError(
                'port must reference a vm or a router before connecting')

        self._commit_object('v1/port', **port)

    def disconnect(self):
        """
        Disconnect the port from whatever it is attached to.

        Raises:
            ObjectHasNoId: If an attempt is made to disconnect an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._call('PATCH', 'v1/port/{}/disconnect'.format(self.id))
        self._fill()
        # Clear local references so the object reflects the API state.
        self.connected = None
        self.vm = None
        self.router = None

    def destroy(self):
        """
        Delete the object.

        Raises:
            ObjectHasNoId: When an attempt is made to delete an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._destroy_object('v1/port', self.id)
        self.id = None

    def force_destroy(self):
        """
        Delete the object even if it is connected to an entity.

        Raises:
            ObjectHasNoId: If an attempt is made to delete an object
                that does not exist
        """
        if self.id is None:
            raise ObjectHasNoId
        self._call('DELETE', 'v1/port/{}/force'.format(self.id))
        self.id = None
from __future__ import annotations
from ctypes import (
c_size_t,
byref,
c_float,
c_void_p,
)
from rustfst.weight import weight_one
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from typing import Optional
class Tr:
    """
    Structure representing a transition from a state to another state in a FST.

    Attributes:
        ilabel: The input label.
        olabel: The output label.
        weight: The transition weight.
        nextstate: The destination state for the arc.
    """

    def __init__(
        self,
        ilabel: Optional[int] = None,
        olabel: Optional[int] = None,
        weight: Optional[float] = None,
        nextstate: Optional[int] = None,
    ):
        """
        Create a new transition.

        Args:
            ilabel: The input label.
            olabel: The output label.
            weight: The transition's weight.
            nextstate: The destination state for the transition.
        """
        # Overloaded constructor: when only the first argument is given it
        # is treated as an already-built FFI pointer, not a label.
        # NOTE(review): an ilabel of 0 with the other args None would also
        # take this branch — confirm callers never do that.
        if ilabel and olabel is None and weight is None and nextstate is None:
            self._ptr = ilabel
        else:
            if weight is None:
                weight = weight_one()
            ptr = c_void_p()
            exit_code = lib.tr_new(
                c_size_t(ilabel),
                c_size_t(olabel),
                c_float(weight),
                c_size_t(nextstate),
                byref(ptr),
            )
            err_msg = "Something went wrong when creating the Tr struct"
            check_ffi_error(exit_code, err_msg)
            self._ptr = ptr

    @property
    def ptr(self):
        # Raw FFI pointer to the underlying Rust Tr struct.
        return self._ptr

    @property
    def ilabel(self) -> int:
        ilabel = c_size_t()
        exit_code = lib.tr_ilabel(self._ptr, byref(ilabel))
        err_msg = "Something went wrong when reading Tr ilabel value"
        check_ffi_error(exit_code, err_msg)
        return int(ilabel.value)

    @ilabel.setter
    def ilabel(self, value: int):
        ilabel = c_size_t(value)
        exit_code = lib.tr_set_ilabel(self._ptr, ilabel)
        err_msg = "Something went wrong when setting Tr ilabel value"
        check_ffi_error(exit_code, err_msg)

    @property
    def olabel(self) -> int:
        olabel = c_size_t()
        exit_code = lib.tr_olabel(self._ptr, byref(olabel))
        # Fixed: message previously said "ilabel" (copy-paste).
        err_msg = "Something went wrong when reading Tr olabel value"
        check_ffi_error(exit_code, err_msg)
        return int(olabel.value)

    @olabel.setter
    def olabel(self, value: int):
        olabel = c_size_t(value)
        exit_code = lib.tr_set_olabel(self._ptr, olabel)
        err_msg = "Something went wrong when setting Tr olabel value"
        check_ffi_error(exit_code, err_msg)

    @property
    def weight(self) -> float:
        weight = c_float()
        exit_code = lib.tr_weight(self._ptr, byref(weight))
        # Fixed: message previously said "ilabel" (copy-paste).
        err_msg = "Something went wrong when reading Tr weight value"
        check_ffi_error(exit_code, err_msg)
        return weight.value

    @weight.setter
    def weight(self, value: float):
        weight = c_float(value)
        exit_code = lib.tr_set_weight(self._ptr, weight)
        err_msg = "Something went wrong when setting Tr weight value"
        check_ffi_error(exit_code, err_msg)

    @property
    def next_state(self) -> int:
        next_state = c_size_t()
        exit_code = lib.tr_next_state(self._ptr, byref(next_state))
        # Fixed: message previously said "ilabel" (copy-paste).
        err_msg = "Something went wrong when reading Tr next_state value"
        check_ffi_error(exit_code, err_msg)
        return int(next_state.value)

    @next_state.setter
    def next_state(self, next_state: int):
        next_state = c_size_t(next_state)
        exit_code = lib.tr_set_next_state(self._ptr, next_state)
        err_msg = "Something went wrong when setting Tr next_state value"
        check_ffi_error(exit_code, err_msg)

    def __eq__(self, other: Tr):
        return (
            self.ilabel == other.ilabel
            and self.olabel == other.olabel
            and self.weight == other.weight
            and self.next_state == other.next_state
        )

    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return f"<Tr ilabel={self.ilabel}, olabel={self.olabel}, weight={self.weight}, next_state={self.next_state}>"

    def __del__(self):
        # Free the Rust-side struct when the Python wrapper is collected.
        lib.tr_delete(self._ptr)
from __future__ import annotations
from typing import Union, Tuple, List
from rustfst.ffi_utils import lib, check_ffi_error
import ctypes
from pathlib import Path
class SymbolTable:
    """
    `SymbolTable` class. This class wraps the `SymbolTable` struct.
    """

    def __init__(self, ptr=None):
        """
        Creates an empty `SymbolTable`, or wraps an existing FFI pointer.
        """
        if ptr:
            self.ptr = ptr
        else:
            symt_ptr = ctypes.pointer(ctypes.c_void_p())
            ret_code = lib.symt_new(ctypes.byref(symt_ptr))
            err_msg = "__init__ failed"
            check_ffi_error(ret_code, err_msg)
            self.ptr = symt_ptr

    def add_symbol(self, symbol: str) -> int:
        """
        Adds a symbol to the table and returns the index.

        Args:
            symbol: A symbol unicode string.

        Returns:
            The integer key of the new symbol.
        """
        try:
            symbol = symbol.encode("utf-8")
        except AttributeError:
            # Fixed: previously caught UnicodeDecodeError, which
            # str.encode never raises; this branch handles bytes input.
            symbol = ctypes.c_char_p(symbol)
        integer_key = ctypes.c_size_t()
        ret_code = lib.symt_add_symbol(self.ptr, symbol, ctypes.byref(integer_key))
        err_msg = "`add_symbol` failed"
        check_ffi_error(ret_code, err_msg)
        return int(integer_key.value)

    def add_table(self, syms: SymbolTable):
        """
        This method merges another symbol table into the current table. All key
        values will be offset by the current available key.

        Args:
            syms: A `SymbolTable` to be merged with the current table.
        """
        ret_code = lib.symt_add_table(self.ptr, syms.ptr)
        err_msg = "`add_table` failed"
        check_ffi_error(ret_code, err_msg)

    def copy(self) -> SymbolTable:
        """
        Returns:
            A mutable copy of the `SymbolTable`.
        """
        clone = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.symt_copy(self.ptr, ctypes.byref(clone))
        err_msg = "`copy` failed."
        check_ffi_error(ret_code, err_msg)
        return SymbolTable(ptr=clone)

    def find(self, key: Union[int, str]) -> Union[int, str]:
        """
        Given a symbol or index, finds the other one.
        This method returns the index associated with a symbol key, or the symbol
        associated with a index key.

        Args:
            key: Either a string or an index.

        Returns:
            If key is a string, the associated index; if key is an integer, the
            associated symbol.

        Raises:
            KeyError: Key not found.
            TypeError: Key is neither a string nor an integer.
        """
        if isinstance(key, int):
            return self._find_index(key)
        if isinstance(key, str):
            return self._find_symbol(key)
        # Fixed: `raise <str>` is itself a TypeError; raise a real exception.
        raise TypeError(f"key can only be a string or integer. Not {type(key)}")

    def _find_index(self, key: int) -> str:
        key = ctypes.c_size_t(key)
        symbol = ctypes.c_void_p()
        ret_code = lib.symt_find_index(self.ptr, key, ctypes.byref(symbol))
        err_msg = "`find` failed"
        check_ffi_error(ret_code, err_msg)
        return ctypes.string_at(symbol).decode("utf8")

    def _find_symbol(self, symbol: str) -> int:
        symbol = symbol.encode("utf-8")
        index = ctypes.c_size_t()
        ret_code = lib.symt_find_symbol(self.ptr, symbol, ctypes.byref(index))
        err_msg = "`find` failed"
        check_ffi_error(ret_code, err_msg)
        return int(index.value)

    def member(self, key: Union[int, str]) -> bool:
        """
        Given a symbol or index, returns whether it is found in the table.
        This method returns a boolean indicating whether the given symbol or index
        is present in the table. If one intends to perform subsequent lookup, it is
        better to simply call the find method, catching the KeyError.

        Args:
            key: Either a string or an index.

        Returns:
            Whether or not the key is present (as a string or a index) in the table.

        Raises:
            TypeError: Key is neither a string nor an integer.
        """
        is_present = ctypes.c_size_t()
        if isinstance(key, int):
            index = ctypes.c_size_t(key)
            ret_code = lib.symt_member_index(self.ptr, index, ctypes.byref(is_present))
        elif isinstance(key, str):
            symbol = key.encode("utf-8")
            ret_code = lib.symt_member_symbol(
                self.ptr, symbol, ctypes.byref(is_present)
            )
        else:
            # Fixed: `raise <str>` is itself a TypeError; raise a real
            # exception.
            raise TypeError(f"key can only be a string or integer. Not {type(key)}")
        err_msg = "`member` failed"
        check_ffi_error(ret_code, err_msg)
        return bool(is_present.value)

    def num_symbols(self) -> int:
        """
        Returns:
            The number of symbols in the symbol table.
        """
        num_symbols = ctypes.c_size_t()
        ret_code = lib.symt_num_symbols(self.ptr, ctypes.byref(num_symbols))
        err_msg = "`num_symbols` failed"
        check_ffi_error(ret_code, err_msg)
        return int(num_symbols.value)

    @classmethod
    def read(cls, filename: Union[str, Path]) -> SymbolTable:
        """
        Reads symbol table from binary file.
        This class method creates a new SymbolTable from a symbol table binary file.

        Args:
            filename: The string location of the input binary file.

        Returns:
            A new SymbolTable instance.

        See also: `SymbolTable.read_text`.
        """
        symt = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.symt_from_path(
            ctypes.byref(symt), str(filename).encode("utf-8"), ctypes.c_size_t(1)
        )
        err_msg = f"Read failed for bin file : {filename}"
        check_ffi_error(ret_code, err_msg)
        return cls(ptr=symt)

    @classmethod
    def read_text(cls, filename: Union[str, Path]) -> SymbolTable:
        """
        Reads symbol table from text file.
        This class method creates a new SymbolTable from a symbol table text file.

        Args:
            filename: The string location of the input text file.

        Returns:
            A new SymbolTable instance.

        See also: `SymbolTable.read`.
        """
        symt = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.symt_from_path(
            ctypes.byref(symt), str(filename).encode("utf-8"), ctypes.c_size_t(0)
        )
        err_msg = f"Read failed for text file : {filename}"
        check_ffi_error(ret_code, err_msg)
        return cls(ptr=symt)

    def write(self, filename: Union[str, Path]):
        """
        Serializes symbol table to a file.
        This methods writes the SymbolTable to a file in binary format.

        Args:
            filename: The string location of the output file.

        Raises:
            FstIOError: Write failed.
        """
        ret_code = lib.symt_write_file(
            self.ptr, str(filename).encode("utf-8"), ctypes.c_size_t(1)
        )
        err_msg = f"Write failed for bin file : {filename}"
        check_ffi_error(ret_code, err_msg)

    def write_text(self, filename: Union[str, Path]):
        """
        Writes symbol table to text file.
        This method writes the SymbolTable to a file in human-readable format.

        Args:
            filename: The string location of the output file.

        Raises:
            FstIOError: Write failed.
        """
        ret_code = lib.symt_write_file(
            self.ptr, str(filename).encode("utf-8"), ctypes.c_size_t(0)
        )
        err_msg = f"Write failed for text file : {filename}"
        check_ffi_error(ret_code, err_msg)

    def equals(self, other: SymbolTable) -> bool:
        """
        Check if this SymbolTable is equal to the other

        Params:
            other: SymbolTable instance

        Returns:
            bool
        """
        is_equal = ctypes.c_size_t()
        ret_code = lib.symt_equals(self.ptr, other.ptr, ctypes.byref(is_equal))
        err_msg = "Error checking equality"
        check_ffi_error(ret_code, err_msg)
        return bool(is_equal.value)

    def __eq__(self, other: SymbolTable) -> bool:
        """
        Check if this `SymbolTable` is equal to the other

        Params:
            other: SymbolTable instance

        Returns:
            bool
        """
        return self.equals(other)

    def __iter__(self) -> SymbolTableIterator:
        """
        Returns an Iterator over the SymbolTable.

        Returns:
            An iterator over the SymbolTable.
        """
        return SymbolTableIterator(self)

    @classmethod
    def from_symbols(cls, symbols: List[str]) -> SymbolTable:
        """
        Constructs a SymbolTable from list of strings.

        Args:
            symbols: List of symbols

        Returns:
            A new `SymbolTable`.
        """
        symt = cls()
        for symbol in symbols:
            symt.add_symbol(symbol)
        return symt

    def __del__(self):
        # Free the Rust-side table when the Python wrapper is collected.
        lib.symt_destroy(self.ptr)
class SymbolTableIterator:
    """
    Iterator on a SymbolTable. Allows retrieving all the symbols along with their corresponding labels.
    """

    def __init__(self, symbol_table: SymbolTable):
        """
        Constructs an iterator from the `Symboltable`.

        Args:
            symbol_table:
        """
        self._table = symbol_table
        self._position = 0
        self._num_symbols = symbol_table.num_symbols()

    def __next__(self) -> Tuple[int, str]:
        """
        Iterator over the symbols in the `SymbolTable`.

        Returns:
            A pair label (int) and symbol (str).
        """
        if self._position >= self._num_symbols:
            raise StopIteration
        label = self._position
        self._position += 1
        return label, self._table.find(label)
from __future__ import annotations
import ctypes
from typing import Optional
from rustfst.ffi_utils import lib, check_ffi_error
from rustfst.tr import Tr
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from rustfst import Fst
class TrsIterator:
    """
    TrsIterator(fst, state)
    This class is used for iterating over the trs leaving some state of a FST.
    """

    def __init__(self, fst: Fst, state: int):
        self.ptr = fst  # reference fst to prolong its lifetime (prevent early gc)
        state = ctypes.c_size_t(state)
        iter_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.trs_iterator_new(fst.ptr, state, ctypes.byref(iter_ptr))
        err_msg = "`__init__` failed"
        check_ffi_error(ret_code, err_msg)
        self._ptr = iter_ptr

    def done(self) -> bool:
        """
        done(self)
        Indicates whether the iterator is exhausted or not.

        Returns:
            True if the iterator is exhausted, False otherwise.
        """
        done = ctypes.c_size_t()
        ret_code = lib.trs_iterator_done(self._ptr, ctypes.byref(done))
        err_msg = "`done` failed"
        check_ffi_error(ret_code, err_msg)
        return bool(done.value)

    def __next__(self) -> Optional[Tr]:
        """x.next() -> the next value, or raise StopIteration"""
        if self.done():
            raise StopIteration
        tr_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.trs_iterator_next(self._ptr, ctypes.byref(tr_ptr))
        err_msg = "`next` failed"
        check_ffi_error(ret_code, err_msg)
        # NOTE(review): tr_ptr is a freshly built ctypes pointer and is
        # never None here — this guard looks unreachable; confirm intent.
        if tr_ptr is None:
            return None
        return Tr(tr_ptr)

    def reset(self):
        """
        reset(self)
        Resets the iterator to the initial position.
        """
        ret_code = lib.trs_iterator_reset(self._ptr)
        err_msg = "`reset` failed"
        check_ffi_error(ret_code, err_msg)

    def __iter__(self) -> TrsIterator:
        """x.__iter__() <==> iter(x)"""
        return self

    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return f"<TrsIterator at 0x{id(self):x}>"

    def __del__(self):
        # Free the Rust-side iterator when the Python wrapper is collected.
        lib.trs_iterator_destroy(self._ptr)
class MutableTrsIterator:
    """
    MutableTrsIterator(ifst, state)
    This class is used for iterating over the trs leaving some state of a FST,
    also permitting mutation of the current tr.
    """

    def __init__(self, fst: Fst, state_id: int):
        # Fixed: __init__ previously carried an incorrect
        # `-> MutableTrsIterator` return annotation.
        self.ptr = fst  # reference fst to prolong its lifetime (prevent early gc)
        state_id = ctypes.c_size_t(state_id)
        iter_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.mut_trs_iterator_new(fst.ptr, state_id, ctypes.byref(iter_ptr))
        err_msg = "`__init__` failed"
        check_ffi_error(ret_code, err_msg)
        self._ptr = iter_ptr

    def done(self) -> bool:
        """
        done(self)
        Indicates whether the iterator is exhausted or not.

        Returns:
            True if the iterator is exhausted, False otherwise.
        """
        done = ctypes.c_size_t()
        ret_code = lib.mut_trs_iterator_done(self._ptr, ctypes.byref(done))
        err_msg = "`done` failed"
        check_ffi_error(ret_code, err_msg)
        return bool(done.value)

    def __next__(self):
        """
        Advances the internal tr iterator.

        :return: None
        """
        ret_code = lib.mut_trs_iterator_next(self._ptr)
        err_msg = "`next` failed"
        check_ffi_error(ret_code, err_msg)

    def reset(self):
        """
        reset(self)
        Resets the iterator to the initial position.
        """
        ret_code = lib.mut_trs_iterator_reset(self._ptr)
        # Fixed: message previously read "`reset`failed" (missing space).
        err_msg = "`reset` failed"
        check_ffi_error(ret_code, err_msg)

    def set_value(self, tr: Tr):
        """
        set_value(self, tr)
        Replace the current tr with a new tr.

        Args:
            tr: The tr to replace the current tr with.
        """
        ret_code = lib.mut_trs_iterator_set_value(self._ptr, tr.ptr)
        err_msg = "`set_value` failed"
        check_ffi_error(ret_code, err_msg)

    def value(self) -> Optional[Tr]:
        """
        value(self)
        Returns the current tr.
        """
        tr_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.mut_trs_iterator_value(self._ptr, ctypes.byref(tr_ptr))
        err_msg = "`value` failed"
        check_ffi_error(ret_code, err_msg)
        if tr_ptr is None:
            return None
        return Tr(tr_ptr)

    def __iter__(self) -> MutableTrsIterator:
        """x.__iter__() <==> iter(x)"""
        return self

    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return f"<MutableTrsIterator at 0x{id(self):x}>"

    def __del__(self):
        # Free the Rust-side iterator when the Python wrapper is collected.
        lib.mut_trs_iterator_destroy(self._ptr)
class StateIterator:
    """
    StateIterator(fst)
    This class is used for iterating over the states in a FST.
    """

    def __init__(self, fst: Fst):
        self.ptr = fst  # reference fst to prolong its lifetime (prevent early gc)
        iter_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.state_iterator_new(fst.ptr, ctypes.byref(iter_ptr))
        err_msg = "`__init__` failed"
        check_ffi_error(ret_code, err_msg)
        self._ptr = iter_ptr

    def done(self) -> bool:
        """
        done(self)
        Indicates whether the iterator is exhausted or not.

        Returns:
            True if the iterator is exhausted, False otherwise.
        """
        done = ctypes.c_size_t()
        ret_code = lib.state_iterator_done(self._ptr, ctypes.byref(done))
        err_msg = "`done` failed"
        check_ffi_error(ret_code, err_msg)
        return bool(done.value)

    def __next__(self) -> Optional[int]:
        """x.next() -> the next value, or raise StopIteration"""
        if self.done():
            raise StopIteration
        next_state = ctypes.c_size_t()
        ret_code = lib.state_iterator_next(self._ptr, ctypes.byref(next_state))
        err_msg = "`next` failed"
        check_ffi_error(ret_code, err_msg)
        # NOTE(review): next_state is a ctypes instance and is never None
        # here — this guard looks unreachable; confirm intent.
        if next_state is None:
            return None
        return int(next_state.value)

    def __iter__(self) -> StateIterator:
        """x.__iter__() <==> iter(x)"""
        return self

    def __repr__(self):
        """x.__repr__() <==> repr(x)"""
        return f"<StateIterator at 0x{id(self):x}>"

    def __del__(self):
        # Free the Rust-side iterator when the Python wrapper is collected.
        lib.state_iterator_destroy(self._ptr)
from __future__ import annotations
from typing import List
import ctypes
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from rustfst.fst.vector_fst import VectorFst
class LabelFstPair(ctypes.Structure):
# C-compatible (label, fst pointer) pair passed to `lib.fst_replace`.
_fields_ = [
("label", ctypes.c_size_t),
("fst", ctypes.POINTER(ctypes.c_void_p)),
]
def replace(
    root_idx: int, fst_list: List[tuple[int, VectorFst]], epsilon_on_replace: bool
) -> VectorFst:
    """
    Recursively replaces trs in the root FSTs with other FSTs.
    Replace supports replacement of trs in one Fst with another FST. This
    replacement is recursive. Replace takes an array of FST(s). One FST
    represents the root (or topology) machine. The root FST refers to other FSTs
    by recursively replacing trs labeled as non-terminals with the matching
    non-terminal FST. Currently Replace uses the output symbols of the trs to
    determine whether the transition is a non-terminal transition or not. A non-terminal can be
    any label that is not a non-zero terminal label in the output alphabet.
    Note that input argument is a vector of pairs. These correspond to the tuple
    of non-terminal Label and corresponding FST.
    Args:
        root_idx: Index (non-terminal label) of the root Fst in `fst_list`.
        fst_list: Pairs of (non-terminal label, Fst) used for replacement.
        epsilon_on_replace: Whether to replace with epsilon transitions.
    Returns:
        The resulting Fst.
    """
    # Fixed annotation: `List[(int, VectorFst)]` is not valid typing syntax;
    # a pair is spelled `tuple[int, VectorFst]`.
    # Marshal the python pairs into the C array expected by the FFI.
    pairs = [LabelFstPair(label, fst.ptr) for (label, fst) in fst_list]
    pairs_array = (LabelFstPair * len(pairs))(*pairs)
    res_fst = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.fst_replace(
        ctypes.c_size_t(root_idx),
        ctypes.byref(pairs_array),
        ctypes.c_size_t(len(pairs)),
        ctypes.c_bool(epsilon_on_replace),
        ctypes.byref(res_fst),
    )
    err_msg = "Error performing replace"
    check_ffi_error(ret_code, err_msg)
return VectorFst(ptr=res_fst) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/algorithms/replace.py | 0.89546 | 0.494202 | replace.py | pypi |
import ctypes
from rustfst.fst.vector_fst import Fst, VectorFst
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
def randgen(
    ifst: Fst,
    npath: int = 1,
    seed: int = 0,
    select: str = "uniform",
    max_length: int = 2147483647,
    weight: bool = False,
    remove_total_weight: bool = False,
) -> VectorFst:
    """
    Randomly generate successful paths in an FST.
    This operation randomly generates a set of successful paths in the input FST.
    This relies on a mechanism for selecting arcs, specified using the `select`
    argument. The default selector, "uniform", randomly selects a transition
    using a uniform distribution. The "log_prob" selector randomly selects a
    transition w.r.t. the weights treated as negative log probabilities after
    normalizing for the total weight leaving the state. In all cases, finality is
    treated as a transition to a super-final state.
    Args:
        ifst: The input FST.
        npath: The number of random paths to generate.
        seed: An optional seed value for random path generation; if zero, the
            current time and process ID is used.
        select: A string matching a known random arc selection type; one of:
            "uniform", "log_prob", "fast_log_prob".
        max_length: The maximum length of each random path.
        weight: Should the output be weighted by path count?
        remove_total_weight: Should the total weight be removed (ignored when
            `weighted` is False)?
    Returns:
        An FST containing one or more random paths.
    Raises:
        ValueError: when something wrong happened.
    """
    if select != "uniform":
        raise ValueError(
            f"Only the uniform distribution is supported for now. Found {select}"
        )
    randgen_fst = ctypes.pointer(ctypes.c_void_p())
    # Convert arguments inline instead of rebinding the parameter names.
    ret_code = lib.fst_randgen(
        ifst.ptr,
        ctypes.c_size_t(npath),
        ctypes.c_size_t(seed),
        ctypes.c_size_t(max_length),
        ctypes.c_bool(weight),
        ctypes.c_bool(remove_total_weight),
        ctypes.byref(randgen_fst),
    )
    check_ffi_error(ret_code, "Error during randgen")
return VectorFst(ptr=randgen_fst) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/algorithms/randgen.py | 0.849003 | 0.377627 | randgen.py | pypi |
from __future__ import annotations
import ctypes
from typing import Optional
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from rustfst.fst.vector_fst import VectorFst
from enum import Enum
KDELTA = 1.0 / 1024.0
class DeterminizeType(Enum):
"""
Enumeration defining the type of the determinization to perform.
"""
DETERMINIZE_FUNCTIONAL = 0
"""
Input transducer is known to be functional (or error).
"""
DETERMINIZE_NON_FUNCTIONAL = 1
"""
Input transducer is NOT known to be functional.
"""
DETERMINIZE_DISAMBIGUATE = 2
"""
Input transducer is not known to be functional but only keep the min
of ambiguous outputs.
"""
class DeterminizeConfig:
"""
Struct containing the parameters controlling the determinization algorithm.
"""
def __init__(self, det_type: DeterminizeType, delta: Optional[float] = None):
"""
Creates the configuration object.
Args:
det_type: Type of determinization to perform.
delta: Comparison delta used by the algorithm; defaults to KDELTA.
"""
if delta is None:
delta = KDELTA
config = ctypes.pointer(ctypes.c_void_p())
ret_code = lib.fst_determinize_config_new(
ctypes.c_float(delta),
ctypes.c_size_t(det_type.value),
ctypes.byref(config),
)
err_msg = "Error creating DeterminizeConfig"
check_ffi_error(ret_code, err_msg)
# NOTE(review): unlike MatcherConfig/ComposeConfig there is no __del__
# releasing the Rust-side config, so it is never freed — confirm whether
# a matching destroy function exists in the FFI.
self.ptr = config
def determinize(fst: VectorFst) -> VectorFst:
    """
    Make an Fst deterministic
    Args:
        fst: The Fst to make deterministic.
    Returns:
        The resulting Fst.
    """
    result = ctypes.pointer(ctypes.c_void_p())
    check_ffi_error(
        lib.fst_determinize(fst.ptr, ctypes.byref(result)),
        "Error during determinization",
    )
    return VectorFst(ptr=result)
def determinize_with_config(fst: VectorFst, config: DeterminizeConfig) -> VectorFst:
    """
    Make an Fst deterministic
    Args:
        fst: The Fst to make deterministic.
        config: Configuration of the determinization algorithm to use.
    Returns:
        The resulting Fst.
    """
    det_fst = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.fst_determinize_with_config(
        fst.ptr,
        config.ptr,
        ctypes.byref(det_fst),
    )
    check_ffi_error(ret_code, "Error during determinization")
return VectorFst(ptr=det_fst) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/algorithms/determinize.py | 0.902479 | 0.317387 | determinize.py | pypi |
from __future__ import annotations
import ctypes
from typing import Optional, List
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from rustfst.fst.vector_fst import VectorFst
from enum import Enum
class MatcherRewriteMode(Enum):
# Matcher rewrite behavior; the numeric value is passed to the FFI
# (see `lib.fst_matcher_config_new`).
AUTO = 0
ALWAYS = 1
NEVER = 2
class CIntArray(ctypes.Structure):
# C-compatible (data pointer, size) array of u32 values passed over the FFI.
_fields_ = [("data", ctypes.POINTER(ctypes.c_uint32)), ("size", ctypes.c_uint32)]
class MatcherConfig:
# Wraps the Rust-side matcher configuration used during composition.
def __init__(
self,
sigma_label: int,
rewrite_mode: MatcherRewriteMode = MatcherRewriteMode.AUTO,
sigma_allowed_matches: Optional[List[int]] = None,
):
"""
Creates the configuration object.
Args:
sigma_label: Label used as sigma — presumably the "match-any"
wildcard; confirm against the Rust-side semantics.
rewrite_mode: Rewrite behavior of the matcher.
sigma_allowed_matches: Optional list of labels sigma is allowed to match.
"""
array = []
if sigma_allowed_matches is not None:
array = sigma_allowed_matches
# Marshal the python list into the C (pointer, size) struct.
arr = CIntArray()
arr.size = ctypes.c_uint32(len(array))
arr.data = (ctypes.c_uint32 * len(array))(*array)
config = ctypes.pointer(ctypes.c_void_p())
ret_code = lib.fst_matcher_config_new(
ctypes.c_size_t(sigma_label),
ctypes.c_size_t(rewrite_mode.value),
arr,
ctypes.byref(config),
)
err_msg = "Error creating MatcherConfig"
check_ffi_error(ret_code, err_msg)
self.ptr = config
def __del__(self):
# Free the Rust-side config when garbage-collected.
lib.fst_matcher_config_destroy(self.ptr)
class ComposeFilter(Enum):
# Composition filter selection; the numeric value is passed to the FFI
# (see `lib.fst_compose_config_new`).
AUTOFILTER = 0
NULLFILTER = 1
TRIVIALFILTER = 2
SEQUENCEFILTER = 3
ALTSEQUENCEFILTER = 4
MATCHFILTER = 5
NOMATCHFILTER = 6
class ComposeConfig:
    """Parameters controlling the composition algorithm."""

    def __init__(
        self,
        compose_filter: ComposeFilter = ComposeFilter.AUTOFILTER,
        connect: bool = True,
        matcher1_config: Optional[MatcherConfig] = None,
        matcher2_config: Optional[MatcherConfig] = None,
    ):
        out = ctypes.pointer(ctypes.c_void_p())
        # Unwrap the optional matcher configs to raw pointers (or None).
        m1_ptr = matcher1_config.ptr if matcher1_config is not None else None
        m2_ptr = matcher2_config.ptr if matcher2_config is not None else None
        ret_code = lib.fst_compose_config_new(
            ctypes.c_size_t(compose_filter.value),
            ctypes.c_bool(connect),
            m1_ptr,
            m2_ptr,
            ctypes.byref(out),
        )
        check_ffi_error(ret_code, "Error creating ComposeConfig")
        self.ptr = out

    def __del__(self):
        # Free the Rust-side config when garbage-collected.
        lib.fst_compose_config_destroy(self.ptr)
def compose(fst: VectorFst, other_fst: VectorFst) -> VectorFst:
    """
    Compute the composition of two FSTs.
    Args:
        fst: Left fst.
        other_fst: Right fst.
    Returns:
        Resulting fst.
    """
    out = ctypes.pointer(ctypes.c_void_p())
    check_ffi_error(
        lib.fst_compose(fst.ptr, other_fst.ptr, ctypes.byref(out)),
        "Error Composing FSTs",
    )
    return VectorFst(ptr=out)
def compose_with_config(
    fst: VectorFst, other_fst: VectorFst, config: ComposeConfig
) -> VectorFst:
    """
    Compute the composition of two FSTs parametrized with a config.
    Args:
        fst: Left fst.
        other_fst: Right fst.
        config: Config parameters of the composition.
    Returns:
        Resulting fst.
    """
    composition = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.fst_compose_with_config(
        fst.ptr,
        other_fst.ptr,
        config.ptr,
        ctypes.byref(composition),
    )
    check_ffi_error(ret_code, "Error Composing FSTs")
return VectorFst(ptr=composition) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/algorithms/compose.py | 0.878406 | 0.231375 | compose.py | pypi |
from __future__ import annotations
from typing import Optional
from rustfst.weight import weight_one
from rustfst.fst.vector_fst import VectorFst
from rustfst.symbol_table import SymbolTable
import ctypes
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
def acceptor(
    astring: str, symbol_table: SymbolTable, weight: Optional[float] = None
) -> VectorFst:
    """
    Creates an acceptor from a string.
    This function creates a FST which accepts its input with a fixed weight
    (defaulting to semiring One).
    Args:
        astring: The input string.
        weight: A Weight or weight string indicating the desired path weight. If
            omitted or null, the path weight is set to semiring One.
        symbol_table: SymbolTable to be used to encode the string.
    Returns:
        An FST acceptor.
    """
    # Default to semiring One when no explicit weight is given.
    path_weight = weight_one() if weight is None else weight
    out = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.utils_string_to_acceptor(
        astring.encode("utf-8"),
        symbol_table.ptr,
        ctypes.c_float(path_weight),
        ctypes.byref(out),
    )
    check_ffi_error(ret_code, "Error creating acceptor FST")
    return VectorFst(ptr=out)
def transducer(
    istring: str,
    ostring: str,
    isymt: SymbolTable,
    osymt: SymbolTable,
    weight: Optional[float] = None,
) -> VectorFst:
    """
    Creates a transducer from a pair of strings or acceptor FSTs.
    This function creates a FST which transduces from the first string to
    the second with a fixed weight (defaulting to semiring One).
    Args:
        istring: The input string
        ostring: The output string
        weight: A Weight as float.
        isymt: SymbolTable to be used to encode the string.
        osymt: SymbolTable to be used to encode the string.
    Returns:
        An FST transducer.
    """
    if weight is None:
        weight = weight_one()
    # Use the same out-parameter shape as `acceptor` (pointer to c_void_p) so
    # the FFI out-pointer convention is consistent across both helpers; this
    # was previously a bare c_void_p, unlike every other call site.
    transducer_fst_ptr = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.utils_string_to_transducer(
        istring.encode("utf-8"),
        ostring.encode("utf-8"),
        isymt.ptr,
        osymt.ptr,
        ctypes.c_float(weight),
        ctypes.byref(transducer_fst_ptr),
    )
    # Typo fix: message previously read "tranducer".
    err_msg = "Error creating transducer FST"
    check_ffi_error(ret_code, err_msg)
    return VectorFst(ptr=transducer_fst_ptr)
def epsilon_machine(weight: Optional[float] = None) -> VectorFst:
    """
    Constructs a single-state, no-arc FST accepting epsilon.
    This function creates an unweighted FST with a single state which is both
    initial and final.
    Args:
        weight: A Weight. Default semiring One.
    Returns:
        An FST.
    """
    final_weight = weight_one() if weight is None else weight
    # One state that is simultaneously the start state and a final state.
    fst = VectorFst()
    state = fst.add_state()
    fst.set_start(state)
    fst.set_final(state, final_weight)
return fst | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/algorithms/__init__.py | 0.942507 | 0.479686 | __init__.py | pypi |
from __future__ import annotations
import ctypes
from rustfst.string_paths_iterator import StringPathsIterator
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from rustfst.fst import Fst
from rustfst.symbol_table import SymbolTable
from rustfst.drawing_config import DrawingConfig
from rustfst.iterators import MutableTrsIterator, StateIterator
from rustfst.tr import Tr
from rustfst.weight import weight_one
from typing import Optional, Union
from pathlib import Path
from typing import List
class VectorFst(Fst):
def __init__(self, ptr=None):
    """
    Creates an empty VectorFst, or wraps an existing raw pointer.

    Args:
        ptr: Optional raw pointer to an existing Rust-side VectorFst. When
            given, any symbol tables already attached to it are wrapped and
            cached on the Python side as well.
    """
    self._input_symbols = None
    self._output_symbols = None
    if ptr:
        self.ptr = ptr
        # Check if isymt inside
        isymt = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.fst_input_symbols(self.ptr, ctypes.byref(isymt))
        err_msg = "Error getting input symbols"
        check_ffi_error(ret_code, err_msg)
        if isymt.contents:
            self._input_symbols = SymbolTable(ptr=isymt)
        # Check if osymt inside
        osymt = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.fst_output_symbols(self.ptr, ctypes.byref(osymt))
        # Fixed copy-paste: this message previously said "input symbols".
        err_msg = "Error getting output symbols"
        check_ffi_error(ret_code, err_msg)
        if osymt.contents:
            self._output_symbols = SymbolTable(ptr=osymt)
    else:
        fst_ptr = ctypes.pointer(ctypes.c_void_p())
        ret_code = lib.vec_fst_new(ctypes.byref(fst_ptr))
        err_msg = "Something went wrong when creating the Fst struct"
        check_ffi_error(ret_code, err_msg)
        self.ptr = fst_ptr
    super().__init__(self.ptr, self._input_symbols, self._output_symbols)
def add_tr(self, state: int, tr: Tr) -> Fst:
"""
Adds a new tr to the FST and return self. Note the tr should be considered
consumed and is not safe to use it after.
Args:
state: The integer index of the source state.
tr: The tr to add.
Returns:
self.
Raises:
ValueError: If State index out of range.
See also: `add_state`.
"""
ret_code = lib.vec_fst_add_tr(self.ptr, ctypes.c_size_t(state), tr.ptr)
err_msg = "Error during `add_tr`"
check_ffi_error(ret_code, err_msg)
return self
def add_state(self) -> int:
    """
    Adds a new state to the FST and returns the state ID.
    Returns:
        The integer index of the new state.
    See also: `add_tr`, `set_start`, `set_final`.
    """
    new_id = ctypes.c_size_t()
    check_ffi_error(
        lib.vec_fst_add_state(self.ptr, ctypes.byref(new_id)),
        "Error during `add_state`",
    )
    return new_id.value
def set_final(self, state: int, weight: Optional[float] = None):
"""
Sets the final weight for a state.
Args:
state: The integer index of a state.
weight: A float indicating the desired final weight; if
omitted, it is set to semiring One.
Raises:
ValueError: State index out of range or Incompatible or invalid weight.
See also: `set_start`.
"""
if weight is None:
weight = weight_one()
state = ctypes.c_size_t(state)
weight = ctypes.c_float(weight)
ret_code = lib.vec_fst_set_final(self.ptr, state, weight)
err_msg = "Error setting final state"
check_ffi_error(ret_code, err_msg)
def unset_final(self, state: int):
    """
    Unset the final weight of a state. As a result, the state is no longer final.
    Args:
        state: The integer index of a state
    Raises:
        ValueError: State index out of range.
    """
    check_ffi_error(
        lib.vec_fst_del_final_weight(self.ptr, ctypes.c_size_t(state)),
        "Error unsetting final state",
    )
def mutable_trs(self, state: int) -> MutableTrsIterator:
"""
Returns a mutable iterator over trs leaving the specified state.
Args:
state: The source state ID.
Returns:
A MutableTrsIterator.
See also: `trs`, `states`.
"""
# The iterator keeps a reference to this fst so it stays alive while used.
return MutableTrsIterator(self, state)
def delete_states(self):
    """
    Delete all the states
    """
    check_ffi_error(
        lib.vec_fst_delete_states(self.ptr),
        "Error deleting states",
    )
def num_states(self) -> int:
    """
    Returns the number of states.
    Returns:
        Number of states present in the Fst.
    """
    count = ctypes.c_size_t()
    check_ffi_error(
        lib.vec_fst_num_states(self.ptr, ctypes.byref(count)),
        "Error getting number of states",
    )
    return int(count.value)
def set_start(self, state: int):
    """
    Sets a state to be the initial state state.
    Args:
        state: The integer index of a state.
    Raises:
        ValueError: If State index out of range.
    See also: `set_final`.
    """
    check_ffi_error(
        lib.vec_fst_set_start(self.ptr, ctypes.c_size_t(state)),
        "Error setting start state",
    )
def states(self) -> StateIterator:
"""
Returns an iterator over all states in the FST.
Returns:
A StateIterator object for the FST.
See also: `trs`, `mutable_trs`.
"""
# The iterator keeps a reference to this fst so it stays alive while used.
return StateIterator(self)
def relabel_tables(
self,
*,
old_isymbols: Optional[SymbolTable] = None,
new_isymbols: SymbolTable,
attach_new_isymbols: bool = True,
old_osymbols: Optional[SymbolTable] = None,
new_osymbols: SymbolTable,
attach_new_osymbols: bool = True,
) -> VectorFst:
"""
Destructively relabel the Fst with new Symbol Tables.
Relabelling refers to the operation where all the labels of an Fst are mapped to the equivalent labels
of a new `SymbolTable`.
If the Fst has a label `1` corresponding to the symbol "alpha" in the current symbol table and "alpha"
is mapped to 4 in a new SymbolTable, then all the 1 are going to be mapped to 4.
Args:
old_isymbols: Input `SymbolTable` used to build the Fst. If `None`, uses the Input `SymbolTable` attached to the Fst.
new_isymbols: New Input `SymbolTable` to use.
attach_new_isymbols: Whether to attach the new Input `SymbolTable` to the Fst. If False, the resulting Fst won't contain any attached Input `SymbolTable`.
old_osymbols: Output `SymbolTable` used to build the Fst. If `None`, uses the Output `SymbolTable` attached to the Fst
new_osymbols: New Output `SymbolTable` to use.
attach_new_osymbols: Whether to attach the new Output `SymbolTable` to the Fst. If False, the resulting Fst won't contain any attached Output `SymbolTable`.
Returns:
self
"""
old_isymbols_ptr = old_isymbols.ptr if old_isymbols is not None else None
old_osymbols_ptr = old_osymbols.ptr if old_osymbols is not None else None
# NOTE(review): the attach flags are passed as c_size_t rather than c_bool
# — presumably the Rust side takes an integer; confirm the FFI signature.
ret_code = lib.vec_fst_relabel_tables(
self.ptr,
old_isymbols_ptr,
new_isymbols.ptr,
ctypes.c_size_t(attach_new_isymbols),
old_osymbols_ptr,
new_osymbols.ptr,
ctypes.c_size_t(attach_new_osymbols),
)
err_msg = "Relabel tables failed"
check_ffi_error(ret_code, err_msg)
# Necessary because the symts are cached on the python side.
if attach_new_isymbols:
self._input_symbols = new_isymbols
else:
self._input_symbols = None
if attach_new_osymbols:
self._output_symbols = new_osymbols
else:
self._output_symbols = None
return self
def draw(
    self,
    filename: str,
    isymbols: Optional[SymbolTable] = None,
    osymbols: Optional[SymbolTable] = None,
    drawing_config: DrawingConfig = DrawingConfig(),
):
    """
    Writes out the FST in Graphviz text format.
    This method writes out the FST in the dot graph description language. The
    graph can be rendered using the `dot` executable provided by Graphviz.
    Args:
        filename: The string location of the output dot/Graphviz file.
        isymbols: An optional symbol table used to label input symbols.
        osymbols: An optional symbol table used to label output symbols.
        drawing_config: Drawing configuration to use.
    See also: `text`.
    """

    def _dim(value: Optional[float]) -> ctypes.c_float:
        # The FFI uses -1.0 as the sentinel for an unset dimension; this
        # replaces four identical if/else blocks.
        return ctypes.c_float(-1.0 if value is None else value)

    isymbols_ptr = isymbols.ptr if isymbols is not None else None
    osymbols_ptr = osymbols.ptr if osymbols is not None else None
    ret_code = lib.vec_fst_draw(
        self.ptr,
        isymbols_ptr,
        osymbols_ptr,
        filename.encode("utf-8"),
        drawing_config.title.encode("utf-8"),
        ctypes.c_size_t(drawing_config.acceptor),
        _dim(drawing_config.width),
        _dim(drawing_config.height),
        ctypes.c_size_t(drawing_config.portrait),
        ctypes.c_size_t(drawing_config.vertical),
        _dim(drawing_config.ranksep),
        _dim(drawing_config.nodesep),
        ctypes.c_size_t(drawing_config.fontsize),
        ctypes.c_size_t(drawing_config.show_weight_one),
        ctypes.c_size_t(drawing_config.print_weight),
    )
    check_ffi_error(ret_code, "fst draw failed")
@classmethod
def read(cls, filename: Union[str, Path]) -> VectorFst:
    """
    Read a Fst at a given path.
    Args:
        filename: The string location of the input file.
    Returns:
        An Fst.
    Raises:
        ValueError: Read failed.
    """
    fst = ctypes.pointer(ctypes.c_void_p())
    ret_code = lib.vec_fst_from_path(
        ctypes.byref(fst), str(filename).encode("utf-8")
    )
    # Include the offending path in the error; the f-string previously
    # carried no placeholder.
    err_msg = f"Read failed. file: {filename}"
    check_ffi_error(ret_code, err_msg)
    return cls(ptr=fst)
def write(self, filename: Union[str, Path]):
    """
    Serializes FST to a file.
    This method writes the FST to a file in vector binary format.
    Args:
        filename: The string location of the output file.
    Raises:
        ValueError: Write failed.
    """
    ret_code = lib.vec_fst_write_file(self.ptr, str(filename).encode("utf-8"))
    # Include the offending path in the error; the f-string previously
    # carried no placeholder.
    err_msg = f"Write failed. file: {filename}"
    check_ffi_error(ret_code, err_msg)
@classmethod
def from_bytes(cls, data: bytes) -> VectorFst:
"""
Load a `VectorFst` from a sequence of bytes.
Args:
data: Sequence of bytes.
Returns:
Loaded `VectorFst`.
"""
fst_ptr = ctypes.pointer(ctypes.c_void_p())
# Define a temporary struct to hold the bytes array
class BytesArray(ctypes.Structure):
_fields_ = [("data_ptr", ctypes.c_char_p), ("size", ctypes.c_size_t)]
# ctypes pins `data` for the duration of the call via c_char_p.
c_bytes = BytesArray(data, len(data))
ret_code = lib.vec_fst_from_bytes(ctypes.byref(c_bytes), ctypes.byref(fst_ptr))
error_msg = "`from_bytes` failed"
check_ffi_error(ret_code, error_msg)
return VectorFst(ptr=fst_ptr)
def to_bytes(self) -> bytes:
    """
    Turns the `VectorFst` into bytes.
    Returns:
        Sequence of bytes.
    """

    # Temporary struct matching the (data, size) pair returned by the FFI.
    class BytesArray(ctypes.Structure):
        _fields_ = [("data_ptr", ctypes.c_void_p), ("size", ctypes.c_size_t)]

    bytes_ptr = ctypes.pointer(BytesArray())
    ret_code = lib.vec_fst_to_bytes(self.ptr, ctypes.byref(bytes_ptr))
    error_msg = "`to_bytes` failed"
    check_ffi_error(ret_code, error_msg)
    # Copy the whole buffer in one C-level call instead of the previous
    # per-byte Python loop (O(n) ctypes calls).
    return ctypes.string_at(bytes_ptr.contents.data_ptr, bytes_ptr.contents.size)
def equals(self, other: Fst) -> bool:
    """
    Check if this Fst is equal to the other.
    Args:
        other: Fst instance
    Returns:
        Whether both Fst are equals.
    """
    flag = ctypes.c_size_t()
    check_ffi_error(
        lib.vec_fst_equals(self.ptr, other.ptr, ctypes.byref(flag)),
        "Error checking equality",
    )
    return bool(flag.value)
def copy(self) -> VectorFst:
    """
    Returns:
        A copy of the Fst.
    """
    duplicate = ctypes.pointer(ctypes.c_void_p())
    check_ffi_error(
        lib.vec_fst_copy(self.ptr, ctypes.byref(duplicate)),
        "Error copying fst",
    )
    return VectorFst(ptr=duplicate)
def compose(self, other: VectorFst, config=None) -> VectorFst:
"""Compose this Fst with `other`; `config` selects the configured variant."""
# Imported locally (presumably to avoid a circular import at module load).
from rustfst.algorithms.compose import compose, compose_with_config
if config:
return compose_with_config(self, other, config)
return compose(self, other)
def concat(self, other: VectorFst) -> VectorFst:
"""
Compute Fst Concatenation of this Fst with another Fst. Returning the resulting Fst.
Args:
other: Fst to concatenate with.
Returns:
The concatenated Fst.
"""
# Imported locally (presumably to avoid a circular import at module load).
from rustfst.algorithms.concat import concat
return concat(self, other)
def connect(self) -> VectorFst:
"""
This operation trims an Fst, removing states and trs that are not on successful paths.
Examples :
- Input

- Output

Returns:
self
"""
from rustfst.algorithms.connect import connect
return connect(self)
def top_sort(self) -> VectorFst:
"""
This operation topologically sorts its input. When sorted, all transitions are from
lower to higher state IDs.
Examples :
- Input

- Output

Returns:
Equivalent top sorted Fst. Modification also happens in-place.
"""
from rustfst.algorithms.top_sort import top_sort
return top_sort(self)
def determinize(self, config=None) -> VectorFst:
"""Make the Fst deterministic; pass `config` to tune the algorithm."""
from rustfst.algorithms.determinize import determinize, determinize_with_config
if config:
return determinize_with_config(self, config)
return determinize(self)
def project(self, proj_type=None) -> VectorFst:
    """
    Project the Fst onto its input or output labels.

    Args:
        proj_type: A `ProjectType` value; defaults to
            `ProjectType.PROJECT_INPUT` when omitted.

    Returns:
        The projected Fst.
    """
    from rustfst.algorithms.project import project, ProjectType

    # Compare against None explicitly so a falsy enum member (e.g. an
    # IntEnum whose value is 0) would still be honored.
    if proj_type is None:
        proj_type = ProjectType.PROJECT_INPUT
    return project(self, proj_type)
def replace(
    self,
    root_label: int,
    fst_list: List[tuple[int, VectorFst]],
    epsilon_on_replace: bool = False,
) -> VectorFst:
    """
    Recursively replace trs labeled with non-terminals by the given Fsts.

    Args:
        root_label: Non-terminal label associated with `self` (the root Fst).
        fst_list: Pairs of (non-terminal label, Fst) used for replacement.
        epsilon_on_replace: Whether to replace with epsilon transitions.

    Returns:
        The resulting Fst.
    """
    # Fixed annotation: `List[(int, VectorFst)]` is not valid typing syntax;
    # a pair is spelled `tuple[int, VectorFst]`.
    from rustfst.algorithms.replace import replace

    complete_fst_list = [(root_label, self)] + fst_list
    return replace(root_label, complete_fst_list, epsilon_on_replace)
def reverse(self) -> VectorFst:
"""Compute the reversal of the Fst (delegates to `rustfst.algorithms.reverse`)."""
from rustfst.algorithms.reverse import reverse
return reverse(self)
def rm_epsilon(self):
"""Remove epsilon transitions in-place (delegates to `rustfst.algorithms.rm_epsilon`)."""
from rustfst.algorithms.rm_epsilon import rm_epsilon
rm_epsilon(self)
def shortest_path(self, config=None) -> VectorFst:
"""Compute the shortest path(s); pass `config` to use the configured variant."""
from rustfst.algorithms.shortest_path import (
shortestpath,
shortestpath_with_config,
)
if config:
return shortestpath_with_config(self, config)
return shortestpath(self)
def union(self, other_fst: VectorFst) -> VectorFst:
"""Compute the union with `other_fst` (delegates to `rustfst.algorithms.union`)."""
from rustfst.algorithms.union import union
return union(self, other_fst)
def optimize(self) -> VectorFst:
"""Optimize the Fst in-place and return self."""
from rustfst.algorithms.optimize import optimize
optimize(self)
return self
def optimize_in_log(self) -> VectorFst:
"""Optimize the Fst in-place in the log semiring and return self."""
from rustfst.algorithms.optimize import optimize_in_log
optimize_in_log(self)
return self
def tr_sort(self, ilabel_cmp: bool = True):
"""Sort trs in-place; `ilabel_cmp` selects input- vs output-label order."""
from rustfst.algorithms.tr_sort import tr_sort
tr_sort(self, ilabel_cmp)
def tr_unique(self):
"""Remove duplicate trs in-place (delegates to `rustfst.algorithms.tr_unique`)."""
from rustfst.algorithms.tr_unique import tr_unique
tr_unique(self)
def isomorphic(self, other: VectorFst) -> bool:
"""Check whether this Fst is isomorphic to `other`."""
from rustfst.algorithms.isomorphic import isomorphic
return isomorphic(self, other)
def __add__(self, other: VectorFst) -> VectorFst:
    """
    `fst_1 + fst_2` is a shortcut to perform the concatenation of `fst_1` and `fst_2`.
    Args:
        other: VectorFst to concatenate after the current Fst.
    Returns:
        The concatenated Fst.
    """
    # Concatenate onto a copy so the original operand is left untouched.
    return self.copy().concat(other)
def __mul__(self, other: VectorFst) -> VectorFst:
"""
`fst_1 * fst_2` is a shortcut to perform the composition of `fst_1` and `fst_2`.
Args:
other: VectorFst to compose with.
Returns:
The composed Fst.
"""
# Uses the default composition configuration.
return self.compose(other)
def __or__(self, other: VectorFst) -> VectorFst:
    """
    `fst_1 | fst_2` is a shortcut to perform the union of `fst_1` and `fst_2`.
    Args:
        other: VectorFst to perform the union with.
    Returns:
        The resulting Fst.
    """
    # Union onto a copy so the original operand is left untouched.
    return self.copy().union(other)
def __str__(self):
# Render the fst through the Rust-side display implementation.
s = ctypes.c_void_p()
ret_code = lib.vec_fst_display(self.ptr, ctypes.byref(s))
err_msg = "Error displaying VectorFst"
check_ffi_error(ret_code, err_msg)
return ctypes.string_at(s).decode("utf8")
def string_paths(self) -> StringPathsIterator:
return StringPathsIterator(self) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/fst/vector_fst.py | 0.888227 | 0.273708 | vector_fst.py | pypi |
from __future__ import annotations
import ctypes
from rustfst.ffi_utils import (
lib,
check_ffi_error,
)
from rustfst.symbol_table import SymbolTable
from rustfst.iterators import TrsIterator
from typing import Optional
class Fst:
"""
This class is the base class for every Fst objects.
"""
def __init__(self, ptr, isymt=None, osymt=None):
# Raw pointer to the Rust-side fst plus optional cached SymbolTable wrappers.
self.ptr = ptr
self._input_symbols = isymt
self._output_symbols = osymt
def start(self) -> Optional[int]:
"""
Returns the start state.
Returns :
The start state or None.
"""
start = ctypes.c_size_t()
ret_code = lib.fst_start(self.ptr, ctypes.byref(start))
err_msg = "Error getting start state"
check_ffi_error(ret_code, err_msg)
# NOTE(review): `start` is a ctypes c_size_t and is never None, so this
# branch is dead code — confirm how the FFI signals "no start state".
if start is None:
return None
return int(start.value)
def final(self, state: int) -> Optional[float]:
"""
Returns the final weight of a state.
Args:
state: The integer index of a state.
Returns:
The final Weight of that state.
Raises:
Exception: If State index out of range.
"""
state = ctypes.c_size_t(state)
weight = ctypes.c_float()
ret_code = lib.fst_final_weight(self.ptr, state, ctypes.byref(weight))
err_msg = "Error getting final weight"
check_ffi_error(ret_code, err_msg)
# NOTE(review): `weight` is a ctypes c_float and is never None, so this
# branch is dead code — confirm how the FFI signals a non-final state.
if weight is None:
return None
return weight.value
def num_trs(self, state: int) -> int:
    """
    Returns the number of trs leaving a state.
    Args:
        state: The integer index of a state.
    Returns:
        The number of trs leaving that state.
    Raises:
        Exception: If State index out of range.
    See also: `num_states`.
    """
    count = ctypes.c_size_t()
    check_ffi_error(
        lib.fst_num_trs(self.ptr, ctypes.c_size_t(state), ctypes.byref(count)),
        "Error getting number of trs",
    )
    return int(count.value)
def trs(self, state: int) -> TrsIterator:
"""
Returns an iterator over trs leaving the specified state.
Args:
state: The source state ID.
Returns:
An TrsIterator.
See also: `mutable_trs`, `states`.
"""
# The iterator presumably keeps a reference to this fst (see TrsIterator).
return TrsIterator(self, state)
def is_final(self, state_id: int) -> bool:
    """
    Check if a state is final
    Args :
        state_id:
    Returns :
        bool
    """
    flag = ctypes.c_size_t()
    check_ffi_error(
        lib.fst_is_final(self.ptr, ctypes.c_size_t(state_id), ctypes.byref(flag)),
        "Error checking if state is final",
    )
    return bool(flag.value)
def is_start(self, state_id: int) -> bool:
    """
    Check if a state is a start state.
    Args :
        state_id: Integer index of the state.
    Returns :
        bool
    """
    state = ctypes.c_size_t(state_id)
    is_start = ctypes.c_size_t()
    ret_code = lib.fst_is_start(self.ptr, state, ctypes.byref(is_start))
    # Fixed copy-paste: the message previously said "final".
    err_msg = "Error checking if state is start"
    check_ffi_error(ret_code, err_msg)
    return bool(is_start.value)
def input_symbols(self) -> Optional[SymbolTable]:
"""
Returns the Fst's input symbol table, or None if none is present.
Returns :
The Fst's input symbol table, or None if none is present.
See also: `output_symbols`.
"""
# Prefer the wrapper cached on the Python side to avoid re-wrapping.
if self._input_symbols:
return self._input_symbols
table = ctypes.pointer(ctypes.c_void_p())
ret_code = lib.fst_input_symbols(self.ptr, ctypes.byref(table))
err_msg = "Error getting input symbols"
check_ffi_error(ret_code, err_msg)
# A null pointer from the FFI means no table is attached.
if table.contents:
return SymbolTable(ptr=table)
return None
def output_symbols(self) -> Optional[SymbolTable]:
"""
Returns the Fst's output symbol table, or None if none is present.
Returns :
The Fst's output symbol table, or None if none is present.
See also: `input_symbols`.
"""
# Prefer the wrapper cached on the Python side to avoid re-wrapping.
if self._output_symbols:
return self._output_symbols
table = ctypes.pointer(ctypes.c_void_p())
ret_code = lib.fst_output_symbols(self.ptr, ctypes.byref(table))
err_msg = "Error getting output symbols"
check_ffi_error(ret_code, err_msg)
# A null pointer from the FFI means no table is attached.
if table.contents:
return SymbolTable(ptr=table)
return None
def set_input_symbols(self, syms: Optional[SymbolTable]) -> Fst:
    """
    Sets the input symbol table.

    Passing None as a value will delete the input symbol table.

    Args:
        syms: A SymbolTable, or None to remove the current table.
    Returns:
        self.
    See also: `set_output_symbols`.
    """
    if syms is None:
        ret_code = lib.fst_unset_input_symbols(self.ptr)
        err_msg = "Error unsetting input symbols"
        check_ffi_error(ret_code, err_msg)
        # detach symbol table from fst
        self._input_symbols = None
        return self
    table = syms.ptr
    ret_code = lib.fst_set_input_symbols(self.ptr, table)
    err_msg = "Error setting input symbols"
    check_ffi_error(ret_code, err_msg)
    # attach symbol table to fst (prevent early gc of syms)
    self._input_symbols = syms
    return self
def set_output_symbols(self, syms: Optional[SymbolTable]) -> Fst:
    """
    Sets the output symbol table.

    Passing None as a value will delete the output symbol table.

    Args:
        syms: A SymbolTable, or None to remove the current table.
    Returns:
        self.
    See also: `set_input_symbols`.
    """
    if syms is None:
        ret_code = lib.fst_unset_output_symbols(self.ptr)
        err_msg = "Error unsetting output symbols"
        check_ffi_error(ret_code, err_msg)
        # detach symbol table from fst
        self._output_symbols = None
        return self
    table = syms.ptr
    ret_code = lib.fst_set_output_symbols(self.ptr, table)
    err_msg = "Error setting output symbols"
    check_ffi_error(ret_code, err_msg)
    # attach symbol table to fst (prevent early gc of syms)
    self._output_symbols = syms
    return self
def remove_input_symbols(self, symbols: list[int]) -> Fst:
    """
    Remove the given input symbols from this FST.

    The list is marshalled to a C int array and handed to the FFI layer;
    presumably the entries are input-label ids — confirm against the
    Rust `fst_remove_input_symbols` binding.

    Args:
        symbols: List of integer symbol ids to remove.
    Returns:
        self.
    """
    symbols_ptr = (ctypes.c_int * len(symbols))(*symbols)
    symbols_len = ctypes.c_size_t(len(symbols))
    ret_code = lib.fst_remove_input_symbols(self.ptr, symbols_ptr, symbols_len)
    err_msg = "Error during remove_input_symbols"
    check_ffi_error(ret_code, err_msg)
    return self
def remove_output_symbols(self, symbols: list[int]) -> Fst:
    """
    Remove the given output symbols from this FST.

    The list is marshalled to a C int array and handed to the FFI layer;
    presumably the entries are output-label ids — confirm against the
    Rust `fst_remove_output_symbols` binding.

    Args:
        symbols: List of integer symbol ids to remove.
    Returns:
        self.
    """
    symbols_ptr = (ctypes.c_int * len(symbols))(*symbols)
    symbols_len = ctypes.c_size_t(len(symbols))
    ret_code = lib.fst_remove_output_symbols(self.ptr, symbols_ptr, symbols_len)
    # Fixed typo in the error message: "outout" -> "output".
    err_msg = "Error during remove_output_symbols"
    check_ffi_error(ret_code, err_msg)
    return self
def __eq__(self, y: Fst) -> bool:
    """x.__eq__(y) <==> x==y

    Returns NotImplemented for non-Fst operands so that Python can fall
    back to the other operand's comparison instead of passing an
    arbitrary object to the FFI-backed ``equals``.
    """
    if not isinstance(y, Fst):
        return NotImplemented
    return self.equals(y)
def __repr__(self) -> str:
    """Return a short debug representation including the object's id."""
    return "<rustfst.fst.Fst at {}>".format(id(self))
def __del__(self):
lib.fst_destroy(self.ptr) | /rustfst_python-0.13.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl/rustfst/fst/__init__.py | 0.908229 | 0.325976 | __init__.py | pypi |
from __future__ import annotations
import typing as _t
import typing_extensions as _te
from ._core.bool_ import (bool_ as _bool,
try_construct_bool_ as _try_construct_bool)
if _t.TYPE_CHECKING:
from .result import (Err,
Ok)
_E = _t.TypeVar('_E')
_T = _t.TypeVar('_T')
_T2 = _t.TypeVar('_T2')
class None_:
    """The empty variant of ``Option`` (analogue of Rust's ``None``).

    Comparison operators implement the same total order as Rust's derived
    ``Ord`` for ``Option``: ``None_ < Some(...)``.  The chained
    ``_bool(...) and/or NotImplemented`` expressions rely on the truthiness
    of the custom ``bool_`` wrapper to either produce a ``bool_`` result or
    defer to the other operand via ``NotImplemented``.
    """

    def and_(self, _other: Option[_T]) -> _te.Self:
        # ``None.and_(x)`` is always ``None``; the argument is ignored.
        return self

    def and_then(self, _function: _t.Callable[[_T], Option[_T2]]) -> _te.Self:
        # There is no value to feed to ``_function``; stays ``None``.
        return self

    def expect(self, _message: str) -> _t.NoReturn:
        # Always fails: there is no contained value.
        raise ValueError(_message)

    def is_none(self) -> _bool:
        return _bool(True)

    def is_some(self) -> _bool:
        return _bool(False)

    def map(self, _function: _t.Callable[[_T], _T2]) -> None_:
        # Mapping over ``None`` is a no-op.
        return self

    def map_or(self, _default: _T2, _function: _t.Callable[[_T], _T2]) -> _T2:
        return _default

    def map_or_else(self,
                    _default: _t.Callable[[], _T2],
                    _function: _t.Callable[[_T], _T2]) -> _T2:
        return _default()

    def ok_or(self, _err: _E) -> Err[_E]:
        # Imported lazily to avoid a circular import with ``.result``.
        from .result import Err
        return Err(_err)

    def ok_or_else(self, _err: _t.Callable[[], _E]) -> Err[_E]:
        from .result import Err
        return Err(_err())

    def or_(self, _other: Option[_T]) -> Option[_T]:
        # The fallback must itself be an ``Option``.
        if not isinstance(_other, (None_, Some)):
            raise TypeError(type(_other))
        return _other

    def or_else(self,
                _function: _t.Callable[[], Option[_T]]) -> Option[_T]:
        result = _function()
        if not isinstance(result, (None_, Some)):
            raise TypeError(type(result))
        return result

    def unwrap(self) -> _t.NoReturn:
        raise ValueError('Called `unwrap` on a `None` value.')

    def unwrap_or(self, _default: _T) -> _T:
        return _default

    def unwrap_or_else(self, _function: _t.Callable[[], _T]) -> _T:
        return _function()

    __module__ = 'rustpy.option'
    __slots__ = ()

    def __init_subclass__(cls, **kwargs: _t.Any) -> _t.NoReturn:
        # ``None_`` is a closed type: subclassing is forbidden.
        raise TypeError(f'type \'{cls.__module__}{cls.__qualname__}\' '
                        f'is not an acceptable base type')

    @_t.overload
    def __eq__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __eq__(self, other: _t.Any) -> _t.Any:
        ...

    def __eq__(self, other: _t.Any) -> _t.Any:
        # None_ == None_ -> true; None_ == Some -> false; else defer.
        return (_bool(isinstance(other, None_))
                or (_bool(not isinstance(other, Some))
                    and NotImplemented))

    @_t.overload
    def __ge__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        # None_ >= None_ -> true; None_ >= Some -> false; else defer.
        return (_bool(isinstance(other, None_))
                or (_bool(not isinstance(other, Some))
                    and NotImplemented))

    @_t.overload
    def __gt__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        # ``None_`` is never strictly greater than any Option.
        return _bool(not isinstance(other, (None_, Some))) and NotImplemented

    @_t.overload
    def __le__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        # ``None_`` is <= every Option.
        return _bool(isinstance(other, (None_, Some))) or NotImplemented

    @_t.overload
    def __lt__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        # None_ < Some -> true; None_ < None_ -> false; else defer.
        return (_bool(not isinstance(other, None_))
                and (_bool(isinstance(other, Some))
                     or NotImplemented))

    def __repr__(self) -> str:
        return f'{type(self).__qualname__}()'
class Some(_t.Generic[_T]):
    """The value-carrying variant of ``Option`` (analogue of Rust's ``Some``).

    Ordering matches Rust's derived ``Ord`` for ``Option``:
    ``None_ < Some(x)``, and two ``Some`` values compare by their payloads.
    """

    def and_(self, _other: Option[_T]) -> Option[_T]:
        # ``Some.and_(x)`` yields ``x`` (after validating it is an Option).
        if not isinstance(_other, (None_, Some)):
            raise TypeError(type(_other))
        return _other

    def and_then(self,
                 _function: _t.Callable[[_T], Option[_T2]]) -> Option[_T2]:
        # Monadic bind: feed the payload to ``_function``.
        result = _function(self._value)
        if not isinstance(result, (None_, Some)):
            raise TypeError(type(result))
        return result

    def expect(self, _message: str) -> _T:
        # Never fails for ``Some``; the message is unused.
        return self._value

    def is_none(self) -> _bool:
        return _bool(False)

    def is_some(self) -> _bool:
        return _bool(True)

    def ok_or(self, _err: _E) -> Ok[_T]:
        # Imported lazily to avoid a circular import with ``.result``.
        from .result import Ok
        return Ok(self._value)

    def ok_or_else(self, _err: _t.Callable[[], _E]) -> Ok[_T]:
        from .result import Ok
        return Ok(self._value)

    def or_(self, _other: Option[_T]) -> _te.Self:
        # ``Some`` wins: the fallback is ignored.
        return self

    def or_else(self, _function: _t.Callable[[], Option[_T]]) -> _te.Self:
        return self

    def map(self, _function: _t.Callable[[_T], _T2]) -> Some[_T2]:
        return Some(_function(self._value))

    def map_or(self, _default: _T2, _function: _t.Callable[[_T], _T2]) -> _T2:
        return _function(self._value)

    def map_or_else(self,
                    _default: _t.Callable[[], _T2],
                    _function: _t.Callable[[_T], _T2]) -> _T2:
        return _function(self._value)

    def unwrap(self) -> _T:
        return self._value

    def unwrap_or(self, _default: _T) -> _T:
        return self._value

    def unwrap_or_else(self, _function: _t.Callable[[], _T]) -> _T:
        return self._value

    __module__ = 'rustpy.option'
    __slots__ = '_value',

    def __init__(self, value: _T) -> None:
        self._value = value

    def __init_subclass__(cls, **kwargs: _t.Any) -> _t.NoReturn:
        # ``Some`` is a closed type: subclassing is forbidden.
        raise TypeError(f'type \'{cls.__module__}{cls.__qualname__}\' '
                        f'is not an acceptable base type')

    @_t.overload
    def __eq__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __eq__(self, other: _t.Any) -> _t.Any:
        ...

    def __eq__(self, other: _t.Any) -> _t.Any:
        # Compare payloads for Some; Some == None_ -> false; else defer.
        return (_try_construct_bool(self._value == other._value)
                if isinstance(other, Some)
                else (_bool(not isinstance(other, None_))
                      and NotImplemented))

    @_t.overload
    def __ge__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        # Some >= None_ is always true.
        return (_try_construct_bool(self._value >= other._value)
                if isinstance(other, Some)
                else _bool(isinstance(other, None_)) or NotImplemented)

    @_t.overload
    def __gt__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        # Some > None_ is always true.
        return (_try_construct_bool(self._value > other._value)
                if isinstance(other, Some)
                else _bool(isinstance(other, None_)) or NotImplemented)

    @_t.overload
    def __le__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        # Some <= None_ is always false.
        return (_try_construct_bool(self._value <= other._value)
                if isinstance(other, Some)
                else _bool(not isinstance(other, None_)) and NotImplemented)

    @_t.overload
    def __lt__(self, other: Option[_T]) -> _bool:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        # Some < None_ is always false.
        return (_try_construct_bool(self._value < other._value)
                if isinstance(other, Some)
                else _bool(not isinstance(other, None_)) and NotImplemented)

    def __repr__(self) -> str:
        return f'{type(self).__qualname__}({self._value!r})'
Option = _t.Union[None_, Some[_T]] | /_rustpy/option.py | 0.774413 | 0.225353 | option.py | pypi |
from __future__ import annotations
import typing as _t
import typing_extensions as _te
from ._core.bool_ import (bool_ as _bool,
try_construct_bool_ as _try_construct_bool)
from .option import (None_ as _None,
Some as _Some)
_E = _t.TypeVar('_E')
_E2 = _t.TypeVar('_E2')
_T = _t.TypeVar('_T')
_T2 = _t.TypeVar('_T2')
class Err(_t.Generic[_E]):
    """The error variant of ``Result`` (analogue of Rust's ``Err``).

    Ordering matches Rust's derived ``Ord`` for ``Result``:
    ``Ok(...) < Err(...)`` (``Ok`` is the first-declared variant), and two
    ``Err`` values compare by their payloads.
    """

    def and_(self, _other: Result[_T, _E]) -> _te.Self:
        # ``Err.and_(x)`` short-circuits: the argument is ignored.
        return self

    def and_then(self,
                 _function: _t.Callable[[_T], Result[_T2, _E]]) -> _te.Self:
        # No success value to feed to ``_function``; stays ``Err``.
        return self

    def err(self) -> _Some[_E]:
        return _Some(self._value)

    def expect(self, _message: str) -> _t.NoReturn:
        raise ValueError(f'{_message}: {self._value!r}')

    def expect_err(self, _message: str) -> _E:
        # Never fails for ``Err``; the message is unused.
        return self._value

    def is_err(self) -> _bool:
        return _bool(True)

    def is_ok(self) -> _bool:
        return _bool(False)

    def map(self, _function: _t.Callable[[_T], _T2]) -> _te.Self:
        # Mapping the success value of an ``Err`` is a no-op.
        return self

    def map_err(self, _function: _t.Callable[[_E], _E2]) -> Err[_E2]:
        return Err(_function(self._value))

    def map_or(self,
               _default: _T2,
               _function: _t.Callable[[_T], _T2]) -> _T2:
        return _default

    def map_or_else(self,
                    _default: _t.Callable[[_E], _T2],
                    _function: _t.Callable[[_T], _T2]) -> _T2:
        return _default(self._value)

    def ok(self) -> _None:
        return _None()

    def or_(self, _other: Result[_T, _E]) -> Result[_T, _E]:
        # The fallback must itself be a ``Result``.
        if not isinstance(_other, (Err, Ok)):
            raise TypeError(type(_other))
        return _other

    def or_else(
            self, _function: _t.Callable[[_E], Result[_T, _E2]]
    ) -> Result[_T, _E2]:
        result = _function(self._value)
        if not isinstance(result, (Err, Ok)):
            raise TypeError(type(result))
        return result

    def unwrap(self) -> _t.NoReturn:
        raise ValueError('Called `unwrap` on an `Err` value: '
                         f'{self._value!r}.')

    def unwrap_or(self, _default: _T) -> _T:
        return _default

    def unwrap_err(self) -> _E:
        return self._value

    def unwrap_or_else(self, _function: _t.Callable[[_E], _T]) -> _T:
        return _function(self._value)

    __module__ = 'rustpy.result'
    __slots__ = '_value',

    def __init__(self, value: _E) -> None:
        self._value = value

    def __init_subclass__(cls, **kwargs: _t.Any) -> _t.NoReturn:
        # ``Err`` is a closed type: subclassing is forbidden.
        raise TypeError(f'type \'{cls.__module__}{cls.__qualname__}\' '
                        f'is not an acceptable base type')

    @_t.overload
    def __eq__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __eq__(self, other: _t.Any) -> _t.Any:
        ...

    def __eq__(self, other: _t.Any) -> _t.Any:
        # Compare payloads for Err; Err == Ok -> false; else defer.
        return (_try_construct_bool(self._value == other._value)
                if isinstance(other, Err)
                else (_bool(not isinstance(other, Ok))
                      and NotImplemented))

    @_t.overload
    def __ge__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        # Err >= Ok is always true (Ok sorts before Err).
        return (_try_construct_bool(self._value >= other._value)
                if isinstance(other, Err)
                else _bool(isinstance(other, Ok)) or NotImplemented)

    @_t.overload
    def __gt__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        # Err > Ok is always true.
        return (_try_construct_bool(self._value > other._value)
                if isinstance(other, Err)
                else _bool(isinstance(other, Ok)) or NotImplemented)

    @_t.overload
    def __le__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        # Err <= Ok is always false.
        return (_try_construct_bool(self._value <= other._value)
                if isinstance(other, Err)
                else _bool(not isinstance(other, Ok)) and NotImplemented)

    @_t.overload
    def __lt__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        # Err < Ok is always false.
        return (_try_construct_bool(self._value < other._value)
                if isinstance(other, Err)
                else _bool(not isinstance(other, Ok)) and NotImplemented)

    def __repr__(self) -> str:
        return f'{type(self).__qualname__}({self._value!r})'
class Ok(_t.Generic[_T]):
    """The success variant of ``Result`` (analogue of Rust's ``Ok``).

    Ordering matches Rust's derived ``Ord`` for ``Result``:
    ``Ok(...) < Err(...)``, and two ``Ok`` values compare by their payloads.
    """

    def and_(self, _other: Result[_T, _E]) -> Result[_T, _E]:
        # ``Ok.and_(x)`` yields ``x`` (after validating it is a Result).
        if not isinstance(_other, (Err, Ok)):
            raise TypeError(type(_other))
        return _other

    def and_then(
            self, _function: _t.Callable[[_T], Result[_T2, _E]]
    ) -> Result[_T2, _E]:
        # Monadic bind: feed the payload to ``_function``.
        result = _function(self._value)
        if not isinstance(result, (Err, Ok)):
            raise TypeError(type(result))
        return result

    def err(self) -> _None:
        return _None()

    def expect(self, _message: str) -> _T:
        # Never fails for ``Ok``; the message is unused.
        return self._value

    def expect_err(self, _message: str) -> _t.NoReturn:
        raise ValueError(f'{_message}: {self._value!r}')

    def is_err(self) -> _bool:
        return _bool(False)

    def is_ok(self) -> _bool:
        return _bool(True)

    def map(self, _function: _t.Callable[[_T], _T2]) -> Ok[_T2]:
        return Ok(_function(self._value))

    def map_err(self, _function: _t.Callable[[_E], _E2]) -> _te.Self:
        # Mapping the error value of an ``Ok`` is a no-op.
        return self

    def map_or(self,
               _default: _T2,
               _function: _t.Callable[[_T], _T2]) -> _T2:
        return _function(self._value)

    def map_or_else(self,
                    _default: _t.Callable[[_E], _T2],
                    _function: _t.Callable[[_T], _T2]) -> _T2:
        return _function(self._value)

    def ok(self) -> _Some[_T]:
        return _Some(self._value)

    def or_(self, _other: Result[_T, _E]) -> _te.Self:
        # ``Ok`` wins: the fallback is ignored.
        return self

    def or_else(self,
                _function: _t.Callable[[_E], Result[_T, _E2]]) -> _te.Self:
        return self

    def unwrap(self) -> _T:
        return self._value

    def unwrap_err(self) -> _t.NoReturn:
        raise ValueError('Called `unwrap_err` on an `Ok` value: '
                         f'{self._value!r}.')

    def unwrap_or(self, _default: _T) -> _T:
        return self._value

    def unwrap_or_else(self, _function: _t.Callable[[_E], _T]) -> _T:
        return self._value

    __module__ = 'rustpy.result'
    # Normalized to a tuple for consistency with Err/Some (a bare string is
    # legal for __slots__ but was inconsistent with the sibling classes).
    __slots__ = '_value',

    def __init__(self, value: _T) -> None:
        self._value = value

    def __init_subclass__(cls, **kwargs: _t.Any) -> _t.NoReturn:
        # ``Ok`` is a closed type: subclassing is forbidden.
        raise TypeError(f'type \'{cls.__module__}{cls.__qualname__}\' '
                        f'is not an acceptable base type')

    @_t.overload
    def __eq__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __eq__(self, other: _t.Any) -> _t.Any:
        ...

    def __eq__(self, other: _t.Any) -> _t.Any:
        # Compare payloads for Ok; Ok == Err -> false; else defer.
        return (_try_construct_bool(self._value == other._value)
                if isinstance(other, Ok)
                else (_bool(not isinstance(other, Err))
                      and NotImplemented))

    @_t.overload
    def __ge__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        # Ok >= Err is always false (Ok sorts before Err).
        return (_try_construct_bool(self._value >= other._value)
                if isinstance(other, Ok)
                else _bool(not isinstance(other, Err)) and NotImplemented)

    @_t.overload
    def __gt__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        # Ok > Err is always false.
        return (_try_construct_bool(self._value > other._value)
                if isinstance(other, Ok)
                else _bool(not isinstance(other, Err)) and NotImplemented)

    @_t.overload
    def __le__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        # Ok <= Err is always true.
        return (_try_construct_bool(self._value <= other._value)
                if isinstance(other, Ok)
                else _bool(isinstance(other, Err)) or NotImplemented)

    @_t.overload
    def __lt__(self, other: Result[_T, _E]) -> _bool:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        # Ok < Err is always true.
        return (_try_construct_bool(self._value < other._value)
                if isinstance(other, Ok)
                else _bool(isinstance(other, Err)) or NotImplemented)

    def __repr__(self) -> str:
        return f'{type(self).__qualname__}({self._value!r})'
Result = _t.Union[Ok[_T], Err[_E]] | /_rustpy/result.py | 0.738669 | 0.218138 | result.py | pypi |
from __future__ import annotations
import typing as _t
import typing_extensions as _te
from .bool_ import bool_ as _bool
from .wrapper import Wrapper as _Wrapper
class Ordered(_te.Protocol):
    """Structural protocol for values supporting ``<``, ``<=``, ``>``, ``>=``.

    The overloads declare that comparing against the same type yields a
    plain ``bool`` while any other operand may produce ``NotImplemented``.
    """

    @_t.overload
    def __ge__(self, other: _te.Self) -> bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    @_t.overload
    def __gt__(self, other: _te.Self) -> bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    @_t.overload
    def __le__(self, other: _te.Self) -> bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    @_t.overload
    def __lt__(self, other: _te.Self) -> bool:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        ...
_OrderedT = _t.TypeVar('_OrderedT',
bound=Ordered)
class OrderedWrapper(_Wrapper[_OrderedT]):
    """Wrapper that forwards comparison operators to the wrapped value.

    Comparisons are only defined between wrappers of the exact same
    concrete type; any other operand yields ``NotImplemented`` so Python
    can try the reflected operation.
    """

    @_t.overload
    def __ge__(self, other: _te.Self) -> _bool:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        return (_bool(self._value >= other._value)
                if isinstance(other, type(self))
                else NotImplemented)

    @_t.overload
    def __gt__(self, other: _te.Self) -> _bool:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        return (_bool(self._value > other._value)
                if isinstance(other, type(self))
                else NotImplemented)

    @_t.overload
    def __le__(self, other: _te.Self) -> _bool:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        return (_bool(self._value <= other._value)
                if isinstance(other, type(self))
                else NotImplemented)
@_t.overload
def __lt__(self, other: _te.Self) -> _bool:
...
@_t.overload
def __lt__(self, other: _t.Any) -> _t.Any:
...
def __lt__(self, other: _t.Any) -> _t.Any:
return (_bool(self._value < other._value)
if isinstance(other, type(self))
else NotImplemented) | /_rustpy/_core/ordered.py | 0.751876 | 0.257362 | ordered.py | pypi |
from __future__ import annotations
import typing as _t
import typing_extensions as _te
@_te.final
class bool_:
    """A Rust-like boolean wrapping exactly one Python ``bool``.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes instances
    unhashable (``__hash__`` is implicitly set to None) — confirm this is
    intended.
    """

    def as_(self, cls: _t.Type[_CastableFromBool]) -> _CastableFromBool:
        # Cast to ``bool_`` itself, or to any rustpy integer type
        # (false -> 0, true -> 1); anything else is rejected.
        from .integer import (BaseSignedInteger,
                              BaseUnsignedInteger)
        if issubclass(cls, type(self)):
            return self
        elif issubclass(cls, (BaseSignedInteger, BaseUnsignedInteger)):
            return cls(int(self._value))
        else:
            raise TypeError(cls)

    # The wrapped raw Python bool.
    _value: bool
    __slots__ = '_value',

    def __new__(cls, _value: bool) -> bool_:
        # Reject anything that is not exactly a ``bool`` (including ints).
        if not isinstance(_value, bool):
            raise TypeError(type(_value))
        self = super().__new__(cls)
        self._value = _value
        return self

    def __bool__(self) -> bool:
        return self._value

    @_t.overload
    def __eq__(self, other: _te.Self) -> _te.Self:
        ...

    @_t.overload
    def __eq__(self, other: _t.Any) -> _t.Any:
        ...

    def __eq__(self, other: _t.Any) -> _t.Any:
        # ``is`` suffices here: Python bools are singletons.
        return (bool_(self._value is other._value)
                if isinstance(other, bool_)
                else NotImplemented)

    @_t.overload
    def __ge__(self, other: _te.Self) -> _te.Self:
        ...

    @_t.overload
    def __ge__(self, other: _t.Any) -> _t.Any:
        ...

    def __ge__(self, other: _t.Any) -> _t.Any:
        return (bool_(self._value >= other._value)
                if isinstance(other, bool_)
                else NotImplemented)

    @_t.overload
    def __gt__(self, other: _te.Self) -> _te.Self:
        ...

    @_t.overload
    def __gt__(self, other: _t.Any) -> _t.Any:
        ...

    def __gt__(self, other: _t.Any) -> _t.Any:
        return (bool_(self._value > other._value)
                if isinstance(other, bool_)
                else NotImplemented)

    @_t.overload
    def __le__(self, other: _te.Self) -> _te.Self:
        ...

    @_t.overload
    def __le__(self, other: _t.Any) -> _t.Any:
        ...

    def __le__(self, other: _t.Any) -> _t.Any:
        return (bool_(self._value <= other._value)
                if isinstance(other, bool_)
                else NotImplemented)

    @_t.overload
    def __lt__(self, other: _te.Self) -> _te.Self:
        ...

    @_t.overload
    def __lt__(self, other: _t.Any) -> _t.Any:
        ...

    def __lt__(self, other: _t.Any) -> _t.Any:
        return (bool_(self._value < other._value)
                if isinstance(other, bool_)
                else NotImplemented)

    def __repr__(self) -> str:
        return f'{type(self).__qualname__}({self._value!r})'

    def __str__(self) -> str:
        # Rust-style rendering: ``true`` / ``false``.
        return 'true' if self._value else 'false'
if _t.TYPE_CHECKING:
from .integer import (BaseSignedInteger as _BaseSignedInteger,
BaseUnsignedInteger as _BaseUnsignedInteger)
_CastableFromBool = _t.TypeVar('_CastableFromBool', bool_,
_BaseSignedInteger,
_BaseUnsignedInteger)
_T = _t.TypeVar('_T')
def try_construct_bool_(value: _T) -> _t.Union[_T, bool_]:
return bool_(value) if isinstance(value, bool) else value | /_rustpy/_core/bool_.py | 0.812942 | 0.229794 | bool_.py | pypi |

<p align="center">
<a href="#license" alt="license">
<img alt="License" src="https://img.shields.io/github/license/avhz/RustQuant">
<a href="#build" alt="build">
<img alt="GitHub Workflow Status" src="https://img.shields.io/github/actions/workflow/status/avhz/RustQuant/rust.yml">
<a href="#downloads" alt="downloads">
<img alt="Crates.io" src="https://img.shields.io/crates/d/RustQuant">
<a href="#stars" alt="stars">
<img alt="GitHub Repo stars" src="https://img.shields.io/github/stars/avhz/RustQuant">
<a href="#version" alt="version">
<img alt="Crates.io" src="https://img.shields.io/crates/v/RustQuant">
<a href="#codecov" alt="codecov">
<img alt="Codecov" src="https://img.shields.io/codecov/c/gh/avhz/RustQuant">
<a href="#deps" alt="deps">
<img alt="Dependencies" src="https://deps.rs/repo/github/avhz/RustQuant/status.svg">
</p>
Rust library for quantitative finance tools.
:dart: I want to hit a stable and legitimate `v1.0.0` by the end of 2023, so any and all feedback, suggestions, or contributions are strongly welcomed!
Contact: rustquantcontact@gmail.com
**Disclaimer**: This is currently a free-time project and not a professional financial software library. Nothing in this library should be taken as financial advice, and I do not recommend you to use it for trading or making financial decisions.
## :newspaper: Latest features
+ Download time series data from [Yahoo! Finance](https://finance.yahoo.com/).
+ Read (write) from (to) `.csv`, `.json`, and `.parquet` files, using [Polars `DataFrames`](https://pola-rs.github.io/polars-book/).
+ Arithmetic Brownian Motion generator.
+ Gamma, exponential, and chi-squared distributions.
+ Forward start option pricer (Rubinstein 1990 formula).
+ Gap option and cash-or-nothing option pricers (currently adding more binary options).
+ Asian option pricer (closed-form solution for continuous geometric average).
+ Heston Model option pricer (uses the tanh-sinh quadrature numerical integrator).
+ Tanh-sinh (double exponential) quadrature for evaluating integrals.
+ Plus other basic numerical integrators (midpoint, trapezoid, Simpson's 3/8).
+ Characteristic functions and density functions for common distributions:
+ Gaussian, Bernoulli, Binomial, Poisson, Uniform, Chi-Squared, Gamma, and Exponential.
# Table of Contents
1. [Automatic Differentiation](#autodiff)
2. [Option Pricers](#options)
3. [Stochastic Processes and Short Rate Models](#stochastics)
4. [Bonds](#bonds)
5. [Distributions](#distributions)
6. [Mathematics](#maths)
7. [Helper Functions and Macros](#helpers)
8. [How-tos](#howto)
9. [References](#references)
## :link: Automatic Differentiation <a name="autodiff"></a>
Currently only gradients can be computed. Suggestions on how to extend the functionality to Hessian matrices are definitely welcome.
+ [x] Reverse (Adjoint) Mode
+ Implementation via Operator and Function Overloading.
+ Useful when number of outputs is *smaller* than number of inputs.
+ i.e for functions $f:\mathbb{R}^n \rightarrow \mathbb{R}^m$, where $m \ll n$
+ [ ] Forward (Tangent) Mode
+ Implementation via Dual Numbers.
+ Useful when number of outputs is *larger* than number of inputs.
+ i.e. for functions $f:\mathbb{R}^n \rightarrow \mathbb{R}^m$, where $m \gg n$
## :money_with_wings: Option Pricers <a name="options"></a>
+ Closed-form price solutions:
+ [x] Heston Model
+ [x] Barrier
+ [x] European
+ [x] Greeks/Sensitivities
+ [x] Lookback
+ [x] Asian: Continuous Geometric Average
+ [x] Forward Start
+ [ ] Basket
+ [ ] Rainbow
+ [ ] American
+ Lattice models:
+ [x] Binomial Tree (Cox-Ross-Rubinstein)
The stochastic process generators can be used to price path-dependent options via Monte-Carlo.
+ Monte Carlo pricing:
+ [x] Lookback
+ [ ] Asian
+ [ ] Chooser
+ [ ] Barrier
## :chart_with_upwards_trend: Stochastic Processes and Short Rate Models <a name="stochastics"></a>
The following is a list of stochastic processes that can be generated.
+ [x] Brownian Motion
+ [x] Arithmetic Brownian Motion
+ $dX_t = \mu dt + \sigma dW_t$
+ [x] Geometric Brownian Motion
+ $dX_t = \mu X_t dt + \sigma X_t dW_t$
+ Models: Black-Scholes (1973), Rendleman-Bartter (1980)
+ [x] Cox-Ingersoll-Ross (1985)
+ $dX_t = (\theta - \alpha X_t)dt + \sigma \sqrt{X_t} \, dW_t$
+ [x] Ornstein-Uhlenbeck process
+ $dX_t = \theta(\mu - X_t)dt + \sigma dW_t$
+ Models: Vasicek (1977)
+ [ ] Ho-Lee (1986)
+ $dX_t = \theta_t dt + \sigma dW_t$
+ [ ] Hull-White (1990)
+ $dX_t = (\theta - \alpha X_t)dt + \sigma_t dW_t$
+ [ ] Black-Derman-Toy (1990)
+ $d\ln(X) = \left[ \theta_t + \frac{\sigma_t'}{\sigma_t}\ln(X) \right]dt + \sigma_t dW_t$
+ $d\ln(X) = \theta_t dt + \sigma dW_t$
+ [ ] Merton's model (1973)
+ $X_t = X_0 + at + \sigma W_t^*$
+ $dX_t = adt + \sigma dW_t^*$
## :chart_with_downwards_trend: Bonds <a name="bonds"></a>
+ Prices:
+ [X] The Vasicek Model
+ [x] The Cox, Ingersoll, and Ross Model
+ [ ] The Rendleman and Bartter Model
+ [ ] The Ho–Lee Model
+ [ ] The Hull–White (One-Factor) Model
+ [ ] The Black–Derman–Toy Model
+ [ ] The Black–Karasinski Model
+ [ ] Duration
+ [ ] Convexity
## :bar_chart: Distributions <a name="distributions"></a>
Probability density/mass functions, distribution functions, characteristic functions, etc.
+ [x] Gaussian
+ [x] Bernoulli
+ [x] Binomial
+ [x] Poisson
+ [x] Uniform (discrete & continuous)
+ [x] Chi-Squared
+ [x] Gamma
+ [x] Exponential
## :triangular_ruler: Mathematics <a name="maths"></a>
+ Numerical Integration (needed for Heston model, for example):
+ [x] Tanh-Sinh (double exponential) quadrature
+ [x] Composite Midpoint Rule
+ [x] Composite Trapezoidal Rule
+ [x] Composite Simpson's 3/8 Rule
+ [x] Risk-Reward Measures (Sharpe, Treynor, Sortino, etc)
+ [x] Newton-Raphson
+ [x] Standard Normal Distribution (Distribution/Density functions, and generation of variates)
+ [ ] Interpolation
## :handshake: Helper Functions and Macros <a name="helpers"></a>
A collection of utility functions and macros.
+ [x] Plot a vector.
+ [x] Write vector to file.
+ [x] Cumulative sum of vector.
+ [x] Linearly spaced sequence.
+ [x] `assert_approx_equal!`
## :heavy_check_mark: How-tos <a name="howto"></a>
I would not recommend using RustQuant within any other libraries for some time, as it will most likely go through many breaking changes as I learn more Rust and settle on a decent structure for the library.
:pray: I would greatly appreciate contributions so it can get to the `v1.0.0` mark ASAP.
### Download data from Yahoo! Finance:
You can download data from Yahoo! Finance into a Polars `DataFrame`.
```rust
use RustQuant::data::*;
use time::macros::date;
fn main() {
// New YahooFinanceData instance.
// By default, date range is: 1970-01-01 to present.
let mut yfd = YahooFinanceData::new("AAPL".to_string());
// Can specify custom dates (optional).
yfd.set_start_date(time::macros::datetime!(2019 - 01 - 01 0:00 UTC));
yfd.set_end_date(time::macros::datetime!(2020 - 01 - 01 0:00 UTC));
// Download the historical data.
yfd.get_price_history();
println!("Apple's quotes: {:?}", yfd.price_history)
}
```
```bash
Apple's quotes: Some(shape: (252, 7)
┌────────────┬───────────┬───────────┬───────────┬───────────┬────────────┬───────────┐
│ date ┆ open ┆ high ┆ low ┆ close ┆ volume ┆ adjusted │
│ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- ┆ --- │
│ date ┆ f64 ┆ f64 ┆ f64 ┆ f64 ┆ f64 ┆ f64 │
╞════════════╪═══════════╪═══════════╪═══════════╪═══════════╪════════════╪═══════════╡
│ 2019-01-02 ┆ 38.7225 ┆ 39.712502 ┆ 38.557499 ┆ 39.48 ┆ 1.481588e8 ┆ 37.994499 │
│ 2019-01-03 ┆ 35.994999 ┆ 36.43 ┆ 35.5 ┆ 35.547501 ┆ 3.652488e8 ┆ 34.209969 │
│ 2019-01-04 ┆ 36.1325 ┆ 37.137501 ┆ 35.950001 ┆ 37.064999 ┆ 2.344284e8 ┆ 35.670372 │
│ 2019-01-07 ┆ 37.174999 ┆ 37.2075 ┆ 36.474998 ┆ 36.982498 ┆ 2.191112e8 ┆ 35.590965 │
│ … ┆ … ┆ … ┆ … ┆ … ┆ … ┆ … │
│ 2019-12-26 ┆ 71.205002 ┆ 72.495003 ┆ 71.175003 ┆ 72.477501 ┆ 9.31212e7 ┆ 70.798401 │
│ 2019-12-27 ┆ 72.779999 ┆ 73.4925 ┆ 72.029999 ┆ 72.449997 ┆ 1.46266e8 ┆ 70.771545 │
│ 2019-12-30 ┆ 72.364998 ┆ 73.172501 ┆ 71.305 ┆ 72.879997 ┆ 1.441144e8 ┆ 71.191582 │
│ 2019-12-31 ┆ 72.482498 ┆ 73.419998 ┆ 72.379997 ┆ 73.412498 ┆ 1.008056e8 ┆ 71.711739 │
└────────────┴───────────┴───────────┴───────────┴───────────┴────────────┴───────────┘)
```
### Read/write data:
```rust
use RustQuant::data::*;
fn main() {
// New `Data` instance.
let mut data = Data::new(
format: DataFormat::CSV, // Can also be JSON or PARQUET.
path: String::from("./file/path/read.csv")
)
// Read from the given file.
data.read().unwrap();
// New path to write the data to.
data.path = String::from("./file/path/write.csv")
data.write().unwrap();
println!("{:?}", data.data)
}
```
### Compute gradients:
```rust
use RustQuant::autodiff::*;
fn main() {
// Create a new Tape.
let t = Tape::new();
// Assign variables.
let x = t.var(0.5);
let y = t.var(4.2);
// Define a function.
let z = x * y + x.sin();
// Accumulate the gradient.
let grad = z.accumulate();
println!("Function = {}", z);
println!("Gradient = {:?}", grad.wrt([x, y]));
}
```
### Compute integrals:
```rust
use RustQuant::math::*;
fn main() {
// Define a function to integrate: e^(sin(x))
fn f(x: f64) -> f64 {
(x.sin()).exp()
}
// Integrate from 0 to 5.
let integral = integrate(f, 0.0, 5.0);
// ~ 7.18911925
println!("Integral = {}", integral);
}
```
### Price options:
```rust
use RustQuant::options::*;
fn main() {
let VanillaOption = EuropeanOption {
initial_price: 100.0,
strike_price: 110.0,
risk_free_rate: 0.05,
volatility: 0.2,
dividend_rate: 0.02,
time_to_maturity: 0.5,
};
let prices = VanillaOption.price();
println!("Call price = {}", prices.0);
println!("Put price = {}", prices.1);
}
```
### Generate stochastic processes:
```rust
use RustQuant::stochastics::*;
fn main() {
// Create new GBM with mu and sigma.
let gbm = GeometricBrownianMotion::new(0.05, 0.9);
// Generate path using Euler-Maruyama scheme.
// Parameters: x_0, t_0, t_n, n, sims, parallel.
let output = (&gbm).euler_maruyama(10.0, 0.0, 0.5, 10, 1, false);
println!("GBM = {:?}", output.trajectories);
}
```
## :book: References: <a name="references"></a>
+ John C. Hull - *Options, Futures, and Other Derivatives*
+ Damiano Brigo & Fabio Mercurio - *Interest Rate Models - Theory and Practice (With Smile, Inflation and Credit)*
+ Paul Glasserman - *Monte Carlo Methods in Financial Engineering*
+ Andreas Griewank & Andrea Walther - *Evaluating Derivatives - Principles and Techniques of Algorithmic Differentiation*
+ Steven E. Shreve - *Stochastic Calculus for Finance II: Continuous-Time Models*
+ Espen Gaarder Haug - *Option Pricing Formulas*
+ Antoine Savine - *Modern Computational Finance: AAD and Parallel Simulations*
| /RustQuant-0.0.17.tar.gz/RustQuant-0.0.17/README.md | 0.663124 | 0.829043 | README.md | pypi |
```
import pandas as pd
from sklearn.metrics import r2_score, accuracy_score
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
import rustrees.decision_tree as rt_dt
import rustrees.random_forest as rt_rf
import time
import numpy as np
datasets = {
"reg": ["diabetes", "housing", "dgp"],
"clf": ["breast_cancer", "titanic"]
}
def evaluate_dataset(dataset, problem, model, max_depth, n_repeats, n_estimators=None):
    """Benchmark sklearn against rustrees on one dataset.

    Args:
        dataset: dataset name; reads ../../datasets/{dataset}_{train,test}.csv,
            each expected to contain a `target` column.
        problem: "reg" (R^2 metric) or "clf" (accuracy metric).
        model: "dt" (single decision tree) or "rf" (random forest).
        max_depth: maximum tree depth for both libraries.
        n_repeats: number of fit/predict repetitions to average over.
        n_estimators: forest size; only used when model == "rf".

    Returns:
        (dataset, sk_mean, rt_mean, sk_std, rt_std, sk_time, rt_time, metric)
        where times are mean seconds per fit+score repetition.

    Raises:
        ValueError: on an unrecognized `problem` or `model`.
    """
    df_train = pd.read_csv(f"../../datasets/{dataset}_train.csv")
    df_test = pd.read_csv(f"../../datasets/{dataset}_test.csv")
    if problem == "reg":
        metric_fn = r2_score
        metric = "r2"
        if model == "dt":
            model_sk = DecisionTreeRegressor(max_depth=max_depth)
            model_rt = rt_dt.DecisionTreeRegressor(max_depth=max_depth)
        elif model == "rf":
            model_sk = RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1)
            model_rt = rt_rf.RandomForestRegressor(n_estimators=n_estimators, max_depth=max_depth)
        else:
            # BUG FIX: an unknown model previously fell through and raised an
            # opaque NameError on the unbound model variables further down.
            raise ValueError(f"unknown model: {model!r}")
    elif problem == "clf":
        metric_fn = accuracy_score
        metric = "acc"
        if model == "dt":
            model_sk = DecisionTreeClassifier(max_depth=max_depth)
            model_rt = rt_dt.DecisionTreeClassifier(max_depth=max_depth)
        elif model == "rf":
            model_sk = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, n_jobs=-1)
            model_rt = rt_rf.RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth)
        else:
            raise ValueError(f"unknown model: {model!r}")
    else:
        # BUG FIX: an unknown problem previously left metric_fn unbound.
        raise ValueError(f"unknown problem: {problem!r}")

    def _benchmark(m):
        # Fit and score `m` n_repeats times; return (mean, std, secs/repeat).
        start_time = time.time()
        scores = []
        for _ in range(n_repeats):
            m.fit(df_train.drop("target", axis=1), df_train.target)
            scores.append(metric_fn(df_test.target, m.predict(df_test.drop("target", axis=1))))
        elapsed = (time.time() - start_time) / n_repeats
        return np.mean(scores), np.std(scores), elapsed

    sk_mean, sk_std, sk_time = _benchmark(model_sk)
    rt_mean, rt_std, rt_time = _benchmark(model_rt)
    return (dataset, sk_mean, rt_mean, sk_std, rt_std, sk_time, rt_time, metric)
# Single decision trees: depth 5, averaged over 100 repeats.
results_reg = [evaluate_dataset(d, "reg", model="dt", max_depth=5, n_repeats=100) for d in datasets["reg"]]
results_clf = [evaluate_dataset(d, "clf", model="dt", max_depth=5, n_repeats=100) for d in datasets["clf"]]
results = results_reg + results_clf
cols = "dataset sk_mean rt_mean sk_std rt_std sk_time(s) rt_time(s) metric".split()
pd.DataFrame(results, columns=cols)
# Random forests: 100 trees of depth 2; fewer repeats since fitting is slower.
results_reg = [evaluate_dataset(d, "reg", model="rf", max_depth=2, n_estimators=100, n_repeats=10) for d in datasets["reg"]]
results_clf = [evaluate_dataset(d, "clf", model="rf", max_depth=2, n_estimators=100, n_repeats=10) for d in datasets["clf"]]
results = results_reg + results_clf
cols = "dataset sk_mean rt_mean sk_std rt_std sk_time(s) rt_time(s) metric".split()
pd.DataFrame(results, columns=cols)
```
| /rustrees-0.1.0.tar.gz/rustrees-0.1.0/python/notebooks/sklearn_vs_rustrees.ipynb | 0.642881 | 0.469338 | sklearn_vs_rustrees.ipynb | pypi |
# rustworkx
[](https://opensource.org/licenses/Apache-2.0)

[](https://travis-ci.com/Qiskit/rustworkx)
[](https://github.com/Qiskit/rustworkx/releases)
[](https://pypi.org/project/rustworkx/)
[](https://coveralls.io/github/Qiskit/rustworkx?branch=main)
[](https://rust-lang.github.io/rfcs/2495-min-rust-version.html)
[](https://arxiv.org/abs/2110.15221)
- You can see the full rendered docs at:
<https://qiskit.org/documentation/rustworkx/dev>
|:warning:| The retworkx project has been renamed to **rustworkx**. The use of the
retworkx package will still work for the time being but starting in the 1.0.0
release retworkx will no longer be supported
rustworkx is a general purpose graph library for Python written in Rust to
take advantage of the performance and safety that Rust provides. It is
designed to provide a high performance general purpose graph library for
any Python application.
## Project history
Rustworkx was originally called retworkx and was initially created to be
a replacement for [qiskit](https://qiskit.org/)'s previous (and current)
networkx usage (hence the original name). The project was originally started
to build a faster directed graph to use as the underlying data structure for
the DAG at the center of
[qiskit-terra](https://github.com/Qiskit/qiskit-terra/)'s transpiler. However,
since its initial introduction the project has grown substantially and now
covers all applications that need to work with graphs which includes
Qiskit.
## Installing rustworkx
rustworkx is published on pypi so on x86\_64, i686, ppc64le, s390x, and
aarch64 Linux systems, x86\_64 on Mac OSX, and 32 and 64 bit Windows
installing is as simple as running:
```bash
pip install rustworkx
```
This will install a precompiled version of rustworkx into your python
environment.
### Installing on a platform without precompiled binaries
If there are no precompiled binaries published for your system you'll have to
build the package from source. However, to be able to build the package
from the published source package you need to have Rust >= 1.48.0 installed (and
also [cargo](https://doc.rust-lang.org/cargo/) which is normally included with
rust) You can use [rustup](https://rustup.rs/) (a cross platform installer for
rust) to make this simpler, or rely on
[other installation methods](https://forge.rust-lang.org/infra/other-installation-methods.html).
A source package is also published on pypi, so you still can also run the above
`pip` command to install it. Once you have rust properly installed, running:
```bash
pip install rustworkx
```
will build rustworkx for your local system from the source package and install
it just as it would if there was a prebuilt binary available.
Note: To build from source you will need to ensure you have pip >=19.0.0
installed, which supports PEP-517, or that you have manually installed
`setuptools-rust` prior to running `pip install rustworkx`. If you receive an
error about `setuptools-rust` not being found you should upgrade pip with
`pip install -U pip` or manually install `setuptools-rust` with
`pip install setuptools-rust` and try again.
### Optional dependencies
If you're planning to use the `rustworkx.visualization` module you will need to
install optional dependencies to use the functions. The matplotlib based drawer
function `rustworkx.visualization.mpl_draw` requires that the
[matplotlib](https://matplotlib.org/) library is installed. This can be
installed with `pip install matplotlib` or when you're installing rustworkx with
`pip install 'rustworkx[mpl]'`. If you're going to use the graphviz based drawer
function `rustworkx.visualization.graphviz_drawer` first you will need to install
graphviz, instructions for this can be found here:
https://graphviz.org/download/#executable-packages. Then you
will need to install the [pillow](https://python-pillow.org/) Python library.
This can be done either with `pip install pillow` or when installing rustworkx
with `pip install 'rustworkx[graphviz]'`.
If you would like to install all the optional Python dependencies when you
install rustworkx you can use `pip install 'rustworkx[all]'` to do this.
## Using rustworkx
Once you have rustworkx installed you can use it by importing rustworkx.
All the functions and graph classes are off the root of the package.
For example, calculating the shortest path between A and C would be:
```python3
import rustworkx
graph = rustworkx.PyGraph()
# Each time add node is called, it returns a new node index
a = graph.add_node("A")
b = graph.add_node("B")
c = graph.add_node("C")
# add_edges_from takes tuples of node indices and weights,
# and returns edge indices
graph.add_edges_from([(a, b, 1.5), (a, c, 5.0), (b, c, 2.5)])
# Returns the path A -> B -> C
rustworkx.dijkstra_shortest_paths(graph, a, c, weight_fn=float)
```
## Building from source
The first step for building rustworkx from source is to clone it locally
with:
```bash
git clone https://github.com/Qiskit/rustworkx.git
```
rustworkx uses [PyO3](https://github.com/pyo3/pyo3) and
[setuptools-rust](https://github.com/PyO3/setuptools-rust) to build the
python interface, which enables using standard python tooling to work. So,
assuming you have rust installed, you can easily install rustworkx into your
python environment using `pip`. Once you have a local clone of the repo, change
your current working directory to the root of the repo. Then, you can install
rustworkx into your python env with:
```bash
pip install .
```
Assuming your current working directory is still the root of the repo.
Otherwise you can run:
```bash
pip install $PATH_TO_REPO_ROOT
```
which will install it the same way. Then rustworkx is installed in your
local python environment. There are 2 things to note when doing this
though, first if you try to run python from the repo root using this
method it will not work as you expect. There is a name conflict in the
repo root because of the local python package shim used in building the
package. Simply run your python scripts or programs using rustworkx
outside of the repo root. The second issue is that any local changes you
make to the rust code will not be reflected live in your python environment,
you'll need to recompile rustworkx by rerunning `pip install` to have any
changes reflected in your python environment.
### Develop Mode
If you'd like to build rustworkx in debug mode and use an interactive debugger
while working on a change you can use `python setup.py develop` to build
and install rustworkx in develop mode. This will build rustworkx without
optimizations and include debuginfo which can be handy for debugging. Do note
that installing rustworkx this way will be significantly slower than using
`pip install` and should only be used for debugging/development.
It's worth noting that `pip install -e` does not work, as it will link the python
packaging shim to your python environment but not build the rustworkx binary. If
you want to build rustworkx in debug mode you have to use
`python setup.py develop`.
## Authors and Citation
rustworkx is the work of [many people](https://github.com/Qiskit/rustworkx/graphs/contributors) who contribute
to the project at different levels. If you use rustworkx in your research, please cite our
[paper](https://arxiv.org/abs/2110.15221) as per the included [BibTeX file](CITATION.bib).
## Community
Besides Github interactions (such as opening issues) there are two locations
available to talk to other rustworkx users and developers. The first is a
public Slack channel in the Qiskit workspace,
[#rustworkx](https://qiskit.slack.com/messages/rustworkx/). You can join the
Qiskit Slack workspace [here](http://ibm.co/joinqiskitslack). Additionally,
there is an IRC channel `#rustworkx` on the [OFTC IRC network](https://www.oftc.net/)
| /rustworkx-0.12.0.tar.gz/rustworkx-0.12.0/README.md | 0.5794 | 0.918077 | README.md | pypi |
# Fix spaces to be logical units of code ONLY
# Document CLI/Main
# Optparse autogenerates help menu
import numpy as np
import glob
import sys
import os
import tempfile as tmp
from pathlib import Path
import io
import argparse
from time import sleep
import subprocess as sp
import rusty_axe.tree_reader as tr
# Absolute path to the compiled rust fitting binary, resolved relative
# to this module so it works regardless of the caller's working directory.
bin_path = os.path.join("bin", "rf_5")
RUST_PATH = str((Path(__file__).parent /
                 bin_path).resolve())
def main(location, input, output=None, ifh=None, ofh=None, **kwargs):
    """CLI entry point: load count matrices (and optional feature headers)
    from disk, then fit and save a forest via `save_trees`.

    :param location: directory prefix where intermediate files are written
    :param input: path to the input count matrix (whitespace-delimited text)
    :param output: path to the output count matrix; defaults to `input`
    :param ifh: optional path to an input feature header file
    :param ofh: optional path to an output feature header file
    :param kwargs: extra options forwarded to `save_trees` / the rust CLI
    """
    # With no separate output matrix, the input doubles as the output.
    output = input if output is None else output
    print("Running main")
    print("Trying to load")
    print(input)
    print(output)
    in_matrix = np.loadtxt(input)
    out_matrix = np.loadtxt(output)
    # Headers are only read when a path was actually supplied.
    ifh = np.loadtxt(ifh, dtype=str) if ifh is not None else None
    ofh = np.loadtxt(ofh, dtype=str) if ofh is not None else None
    print("Loaded counts")
    print(input)
    fit_return = save_trees(
        location, in_matrix, output_counts=out_matrix, ifh=ifh, ofh=ofh, **kwargs)
    print(fit_return)
def save_trees(location, input_counts, output_counts=None, ifh=None, ofh=None,
               header=None, lrg_mem=None, unsupervised="false", **kwargs):
    """Write count matrices and feature headers as ascii files under
    `location`, then invoke the rust fitting procedure on them.

    Missing headers fall back to plain integer indices, and a missing
    output matrix falls back to the input matrix itself.
    """
    if output_counts is None:
        output_counts = input_counts
    # A single shared header overrides both the input and output headers.
    if header is not None:
        ifh = header
        ofh = header
    np.savetxt(location + "input.counts", input_counts)
    np.savetxt(location + "output.counts", output_counts)
    # Write each header file, defaulting to 0..n-1 indices when absent.
    for handle, suffix, width in ((ifh, "tmp.ifh", input_counts.shape[1]),
                                  (ofh, "tmp.ofh", output_counts.shape[1])):
        if handle is None:
            np.savetxt(location + suffix, np.arange(width, dtype=int), fmt='%u')
        else:
            np.savetxt(location + suffix, handle, fmt="%s")
    print("Generating trees")
    return inner_fit(location, ifh=(location + "tmp.ifh"), ofh=(location + "tmp.ofh"),
                     lrg_mem=lrg_mem, unsupervised=unsupervised, **kwargs)
def load(location):
    """Load a previously saved forest; thin alias for `tree_reader.Forest.load`."""
    forest = tr.Forest.load(location)
    return forest
def fit(input_counts, cache=True, output_counts=None, ifh=None, ofh=None, header=None, backtrace=False, lrg_mem=None, location=None, **kwargs):
    """
    Fit a random forest. Start with this function if you are fitting a forest via the API in another python script or notebook.

    Arguments:
        input_counts: NxM numpy array, rows are samples columns are features.
        output_counts: Optional second matrix. When omitted the forest is fit
            unsupervised against `input_counts` itself.
        cache: whether the loaded forest caches node/sample encodings.
        location: directory where intermediate files are written; when omitted
            a temporary directory is created and cleaned up afterwards.
        kwargs: forwarded to the rust CLI (see io.rs in src for options).

    Returns:
        A `tree_reader.Forest`, with `arguments` set to the CLI argument list.
    """
    if output_counts is None:
        output_counts = input_counts
        unsupervised = True
    else:
        unsupervised = False
    tmp_dir = None
    if location is None:
        print("Input:" + str(input_counts.shape))
        print("Output:" + str(output_counts.shape))
        tmp_dir = tmp.TemporaryDirectory()
        location = tmp_dir.name + "/"
    # BUG FIX: save_trees was previously invoked only inside the temporary
    # directory branch, so passing an explicit `location` skipped fitting and
    # raised a NameError on `arguments` below. Always fit here instead.
    arguments = save_trees(location, input_counts=input_counts, output_counts=output_counts,
                           ifh=ifh, ofh=ofh, header=header, lrg_mem=lrg_mem,
                           unsupervised=unsupervised, **kwargs)
    forest = tr.Forest.load_from_rust(location, prefix="tmp", ifh="tmp.ifh", ofh="tmp.ofh",
                                      clusters="tmp.clusters", input="input.counts", output="output.counts")
    forest.set_cache(cache)
    forest.arguments = arguments
    if tmp_dir is not None:
        tmp_dir.cleanup()
    if '-reduce_output' in forest.arguments:
        ro_i = forest.arguments.index('-reduce_output')
        # BUG FIX: bool() of any non-empty string (including "false") is True;
        # parse the CLI flag value textually instead.
        reduced = str(forest.arguments[ro_i + 1]).strip().lower() in ("true", "1")
        if reduced:
            forest.reset_cache()
    return forest
def inner_fit(location, backtrace=False, unsupervised=False, lrg_mem=False, **kwargs):
    """
    This method calls out to rust via cli using files written to disk.

    The argument calls for the location of input.counts and output.counts,
    optionally the backtrace, which determines whether or not Rust backtraces
    errors. (NOTE(review): `backtrace` is currently unused here.)

    The remainder of keyword arguments should be rust keywords. For further
    details see io.rs in src.

    :param location: directory containing input.counts / output.counts
    :param unsupervised: bool or the strings "true"/"false"; appends the
        -unsupervised flag when true
    :param lrg_mem: appends the -lrg_mem flag when truthy
    :returns: the full CLI argument list that was executed
    """
    print("Running " + RUST_PATH)
    arg_list = [RUST_PATH, "-ic", location + "input.counts",
                "-oc", location + "output.counts", "-o", location + "tmp", "-auto"]
    # Forward arbitrary keyword options as "-key value" pairs.
    for key, value in kwargs.items():
        arg_list.append("-" + str(key))
        arg_list.append(str(value))
    # BUG FIX: the previous `lrg_mem is not None` check appended -lrg_mem even
    # for the default value of False; only append on a truthy value.
    if lrg_mem:
        arg_list.append("-lrg_mem")
    # BUG FIX: callers pass both booleans and the strings "true"/"false", and
    # the string "false" is truthy; normalize before testing.
    if isinstance(unsupervised, str):
        unsupervised = unsupervised.strip().lower() == "true"
    if unsupervised:
        arg_list.append("-unsupervised")
    print("Command: " + " ".join(arg_list))
    with sp.Popen(arg_list, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, universal_newlines=True) as cp:
        tree_count = 0
        while True:
            rc = cp.poll()
            if rc is not None:
                # Process finished: flush whatever remains on both streams.
                print(cp.stdout.read(), end='')
                print(cp.stderr.read(), end='')
                break
            output = cp.stdout.readline()
            if output.startswith("Ingest"):
                print(output.rstrip(), end="\r")
            elif output.startswith("Computing"):
                tree_count += 1
                print(f"Computing tree {tree_count}", end='\r')
            else:
                print(output)
    return arg_list
if __name__ == "__main__":
    # CLI usage: lumberjack.py <location> <input_path> [key=value ...]
    # Trailing key=value arguments become keyword arguments for main().
    kwargs = {x.split("=")[0]: x.split("=")[1] for x in sys.argv[3:]}
    main(sys.argv[1], sys.argv[2], **kwargs)
import numpy as np
# TEST THIS
class SampleCluster:
    """
    A cluster of samples within a fitted forest.

    Wraps a set of sample indices (`samples`) belonging to `forest` and
    provides summaries of the cluster's feature values, leaf membership,
    and discriminating features.
    """

    def __init__(self, forest, samples, id):
        self.id = id
        self.samples = samples
        self.forest = forest

    def name(self):
        """Return the user-assigned name if one was set, else the cluster id."""
        if hasattr(self, 'stored_name'):
            return self.stored_name
        else:
            return str(self.id)

    def set_name(self, name):
        """Assign a human-readable name returned by `name()`."""
        self.stored_name = name

    def mask(self):
        """Boolean mask over all forest samples, True for cluster members."""
        mask = np.zeros(len(self.forest.samples), dtype=bool)
        mask[self.samples] = True
        return mask

    def median_feature_values(self):
        """Per-feature medians of the output matrix restricted to this cluster."""
        return np.median(self.forest.output[self.samples], axis=0)

    def mean_feature_values(self):
        """Per-feature means of the output matrix restricted to this cluster."""
        return np.mean(self.forest.output[self.samples], axis=0)

    def _feature_differences(self):
        # Difference between this cluster's per-feature means and the global
        # mean of the output matrix, with features sorted ascending by change.
        # NOTE(review): np.mean over the full matrix yields a single scalar,
        # not per-feature means (axis=0); preserved as-is pending confirmation.
        initial_means = np.mean(self.forest.output)
        current_means = self.mean_feature_values()
        difference = current_means - initial_means
        feature_order = np.argsort(difference)
        ordered_features = np.array(self.forest.output_features)[feature_order]
        ordered_difference = difference[feature_order]
        return ordered_features, ordered_difference

    def increased_features(self, n=50, plot=True):
        """Features most increased in this cluster relative to the forest mean.

        :param n: number of top features to plot
        :param plot: if True, scatter-plot the top `n` increases
        :returns: (features, differences) sorted ascending by difference
        """
        import matplotlib.pyplot as plt
        ordered_features, ordered_difference = self._feature_differences()
        if plot:
            plt.figure(figsize=(10, 8))
            plt.title("Upregulated Genes")
            plt.scatter(np.arange(n), ordered_difference[-n:])
            plt.xlim(0, n)
            plt.xlabel("Gene Symbol")
            plt.ylabel("Increase (LogTPM)")
            plt.xticks(np.arange(n), ordered_features[-n:], rotation=45,
                       verticalalignment='top', horizontalalignment='right')
            plt.show()
        return ordered_features, ordered_difference

    def decreased_features(self, n=50, plot=True):
        """Features most decreased in this cluster relative to the forest mean.

        :param n: number of bottom features to plot
        :param plot: if True, scatter-plot the top `n` decreases
        :returns: (features, differences) sorted ascending by difference
        """
        import matplotlib.pyplot as plt
        ordered_features, ordered_difference = self._feature_differences()
        if plot:
            plt.figure(figsize=(10, 8))
            # BUG FIX: this plot previously reused the "Upregulated Genes"
            # title and "Increase" label from increased_features.
            plt.title("Downregulated Genes")
            plt.scatter(np.arange(n), ordered_difference[:n])
            plt.xlim(0, n)
            plt.xlabel("Gene Symbol")
            plt.ylabel("Decrease (LogTPM)")
            plt.xticks(np.arange(n), ordered_features[:n], rotation=45,
                       verticalalignment='top', horizontalalignment='right')
            plt.show()
        return ordered_features, ordered_difference

    def logistic_features(self, n=50):
        """Top `n` input features by logistic-regression coefficient for
        discriminating this cluster from the remaining forest samples."""
        from sklearn.linear_model import LogisticRegression
        # BUG FIX: `sklearn` itself was never imported, so the previous
        # `sklearn.preprocessing.scale` reference raised a NameError.
        from sklearn.preprocessing import scale
        mask = self.mask()
        scaled = scale(self.forest.input)
        model = LogisticRegression().fit(scaled, mask)
        coefficient_sort = np.argsort(model.coef_[0])
        sorted_features = self.forest.input_features[coefficient_sort][-n:]
        sorted_coefficients = model.coef_[0][coefficient_sort][-n:]
        return sorted_features, sorted_coefficients

    def leaf_encoding(self):
        """Leaf-by-sample encoding restricted to this cluster's samples."""
        leaves = self.forest.leaves()
        encoding = self.forest.node_sample_encoding(leaves)
        encoding = encoding[self.samples]
        return encoding

    def leaf_counts(self):
        """Number of cluster samples falling in each forest leaf."""
        encoding = self.leaf_encoding()
        return np.sum(encoding, axis=0)

    def leaf_cluster_frequency(self, plot=True):
        """Frequency of each leaf cluster among the leaves this sample
        cluster occupies; optionally plotted as a bar chart."""
        import matplotlib.pyplot as plt
        leaf_counts = self.leaf_counts()
        leaf_cluster_labels = self.forest.leaf_labels
        leaf_clusters = sorted(list(set(leaf_cluster_labels)))
        leaf_cluster_counts = []
        for leaf_cluster in leaf_clusters:
            cluster_mask = leaf_cluster_labels == leaf_cluster
            leaf_cluster_counts.append(np.sum(leaf_counts[cluster_mask]))
        if plot:
            plt.figure()
            plt.title(
                f"Distribution of Leaf Clusters in Sample Cluster {self.name()}")
            plt.bar(np.arange(len(leaf_clusters)), leaf_cluster_counts,)
            plt.ylabel("Frequency")
            plt.xlabel("Leaf Cluster")
            plt.xticks(np.arange(len(leaf_clusters)), leaf_clusters)
            plt.show()
        return leaf_clusters, leaf_cluster_counts

    def feature_median(self, feature):
        """Median value of a single named output feature within this cluster."""
        fi = self.forest.truth_dictionary.feature_dictionary[feature]
        vector = self.forest.output[self.samples][:, fi]
        return np.median(vector)

    def feature_mean(self, feature):
        """Mean value of a single named output feature within this cluster."""
        fi = self.forest.truth_dictionary.feature_dictionary[feature]
        vector = self.forest.output[self.samples][:, fi]
        return np.mean(vector)
from copy import copy, deepcopy

import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from scipy.cluster.hierarchy import dendrogram, linkage

from rusty_axe.node import Node
# Render figures at a higher resolution than matplotlib's default.
mpl.rcParams['figure.dpi'] = 100
class Tree:
    """One decision tree of a Forest.

    Thin wrapper around the root `Node` parsed from the rust output json,
    providing traversal helpers and several plotting utilities.
    """

    def __init__(self, tree_json, forest):
        self.root = Node(tree_json, self, forest, cache=forest.cache)
        self.forest = forest

    def nodes(self, root=True):
        """All nodes of the tree; the root itself is included unless root=False."""
        nodes = []
        nodes.extend(self.root.nodes())
        if root:
            nodes.append(self.root)
        return nodes

    def leaves(self, depth=None):
        """Leaf nodes, optionally truncated at `depth`. A tree whose root has
        no descendants reports the root as its single leaf."""
        leaves = self.root.leaves(depth=depth)
        if len(leaves) < 1:
            leaves.append(self.root)
        return leaves

    def stems(self):
        """Internal (non-leaf) nodes of the tree."""
        return self.root.stems()

    def level(self, target):
        """All nodes whose `level` attribute equals `target`."""
        return [node for node in self.nodes() if node.level == target]

    def descend(self, level):
        """Nodes reached by descending `level` steps from the root."""
        return self.root.descend(level)

    def seek(self, directions):
        """Follow a sequence of child indices starting from the root.

        BUG FIX: this previously indexed `self.children` (an attribute Tree
        never defines, so it raised AttributeError) and also discarded the
        recursive return value; it now delegates to the root node and
        propagates the result.
        NOTE(review): assumes Node implements an equivalent `seek` — confirm.
        """
        if len(directions) > 0:
            return self.root.seek(directions)
        return self

    def trim(self, limit):
        """Trim each subtree hanging off the root to `limit`."""
        for child in self.root.children:
            child.trim(limit)

    def derive_samples(self, samples):
        """Return a copy of this tree restricted to `samples`, with every
        copied node's back-reference pointed at the new tree."""
        root_copy = self.root.derive_samples(samples)
        self_copy = self.derived_copy()
        self_copy.root = root_copy
        for node in self_copy.root.nodes():
            node.tree = self_copy
        self_copy.root.tree = self_copy
        return self_copy

    def derived_copy(self):
        """Shallow copy with root/forest links cleared (filled in by caller)."""
        self_copy = copy(self)
        self_copy.root = None
        self_copy.forest = None
        return self_copy

    def feature_levels(self):
        return self.root.feature_levels()

    def plotting_representation(self, width=10, height=10):
        """Scatter/line sketch of the tree: one point per node, positioned by
        level and population-weighted offset, with parent-child connections."""
        coordinates = []
        connectivities = []
        levels = self.root.nodes_by_level()
        jump = height / len(levels)
        for i, level in enumerate(levels):
            level_samples = sum([node.pop() for node in level])
            next_level_samples = 0
            if i < (len(levels) - 1):
                next_level_samples = sum([node.pop()
                                          for node in levels[i + 1]])
            consumed_width = 0
            next_consumed_width = 0
            for j, node in enumerate(level):
                # Each node occupies a horizontal span proportional to its
                # share of the samples on its level.
                sample_weight = float(node.pop()) / float(level_samples)
                half_width = (width * sample_weight) / 2
                center = consumed_width + half_width
                consumed_width = consumed_width + (half_width * 2)
                coordinates.append((i * jump, center))
                if i < (len(levels) - 1):
                    for child in node.children:
                        child_sample_weight = float(
                            child.pop()) / float(next_level_samples)
                        child_half_width = (width * child_sample_weight) / 2
                        child_center = next_consumed_width + child_half_width
                        next_consumed_width = next_consumed_width + \
                            (child_half_width * 2)
                        connectivities.append(
                            ([i * jump, (i + 1) * jump], [center, child_center]))
        coordinates = np.array(coordinates)
        plt.figure()
        plt.scatter(coordinates[:, 0], coordinates[:, 1], s=1)
        for connection in connectivities:
            plt.plot(connection[0], connection[1])
        plt.show()

    def recursive_plotting_repesentation(self, axes, height=None, height_step=None, representation=None, limits=None):
        """Draw the nested node representation onto `axes`, recursing into
        children with progressively narrower x-limits.

        (Method-name typo preserved: external callers may depend on it.)
        """
        if limits is None:
            limits = axes.get_xlim()
        current_position = limits[0]
        width = float(limits[1] - limits[0])
        center = (limits[1] + limits[0]) / 2
        if representation is None:
            representation = self.root.plotting_representation()
            print(representation)
        if height_step is None or height is None:
            # First call: derive the vertical step from tree depth and axes.
            depth = self.root.depth()
            height_limits = axes.get_ylim()
            height = height_limits[1]
            height_step = -1 * (height_limits[1] - height_limits[0]) / depth
        for i, current_representation in enumerate(representation):
            width_proportion = current_representation[0]
            children = current_representation[1]
            node_start = current_position
            node_width = width_proportion * width
            padding = node_width * .05
            node_width = node_width - padding
            node_center = (node_width / 2) + current_position
            node_height = height + height_step
            node_end = (node_width) + current_position
            current_position = node_end + padding
            # Alternate red/blue so sibling spans are distinguishable.
            color = ['r', 'b'][(i % 2)]
            axes.plot([center, node_center], [height, node_height], c=color)
            axes.plot([node_start, node_end], [
                      node_height, node_height], c=color)
            self.recursive_plotting_repesentation(
                axes, height=node_height, height_step=height_step, representation=children, limits=(node_start, node_end))

    def plot(self):
        """Render the recursive representation in a fresh figure."""
        fig = plt.figure(figsize=(10, 20))
        ax = fig.add_subplot(111)
        self.recursive_plotting_repesentation(ax)
        fig.show()

    def tree_movie_frame(self, location, level=0, sorted=True, previous_frame=None, split_lines=True):
        """Save one heatmap frame of node counts at `level` to `location`.

        Returns the heatmap so callers can pass it as `previous_frame` to
        display the prior frame's image with the current frame's split lines.
        """
        descent_nodes = self.descend(level)
        total_samples = sum([node.pop() for node in descent_nodes])
        heatmap = np.zeros((total_samples, len(self.forest.output_features)))
        node_splits = []
        running_samples = 0
        for node in descent_nodes:
            if sorted:
                node_counts = node.sorted_node_counts()
            else:
                node_counts = node.node_counts()
            node_samples = node_counts.shape[0]
            heatmap[running_samples:running_samples +
                    node_samples] = node_counts
            running_samples += node_samples
            node_splits.append(running_samples)
        plt.figure(figsize=(10, 10))
        if previous_frame is None:
            plt.imshow(heatmap, aspect='auto')
        else:
            plt.imshow(previous_frame, aspect='auto')
        if split_lines:
            # White separators between node blocks; the last boundary is the
            # bottom edge and needs no line.
            for split in node_splits[:-1]:
                plt.plot([0, len(self.forest.output_features) - 1],
                         [split, split], color='w')
        plt.savefig(location)
        return heatmap

    def tree_movie(self, location):
        """Emit paired unsorted ("a") and sorted ("b") frames for each level,
        plus a final frame without split lines."""
        max_depth = max([leaf.level for leaf in self.leaves()])
        previous_frame = None
        for i in range(max_depth):
            self.tree_movie_frame(location + "." + str(i) + ".a.png",
                                  level=i, sorted=False, previous_frame=previous_frame)
            previous_frame = self.tree_movie_frame(
                location + "." + str(i) + ".b.png", level=i, sorted=True)
        self.tree_movie_frame(location + "." + str(i + 1) +
                              ".b.png", level=i, sorted=True, split_lines=False)

    def summary(self, verbose=True):
        """Optionally print the node and leaf counts of this tree.

        BUG FIX: previously called len() on the bound methods `self.nodes`
        and `self.leaves` themselves, which raises TypeError; the methods are
        now invoked.
        """
        nodes = len(self.nodes())
        leaves = len(self.leaves())
        if verbose:
            print("Nodes: {}".format(nodes))
            print("Leaves: {}".format(leaves))

    def aborting_sample_descent(self, sample):
        return self.root.aborting_sample_descent(sample)

    def plot_leaf_counts(self):
        """Heatmap of per-leaf sample counts, with features ordered by
        hierarchical clustering; returns the (samples x features) matrix."""
        leaves = self.leaves()
        total_samples = sum([x.pop() for x in leaves])
        heatmap = np.zeros((total_samples, len(self.forest.output_features)))
        running_samples = 0
        for leaf in leaves:
            leaf_counts = leaf.node_counts()
            leaf_samples = leaf_counts.shape[0]
            heatmap[running_samples:running_samples +
                    leaf_samples] = leaf_counts
            running_samples += leaf_samples
        # Reorder columns (features) by their dendrogram leaf order.
        ordering = dendrogram(linkage(heatmap.T), no_plot=True)['leaves']
        heatmap = heatmap.T[ordering].T
        plt.figure()
        plt.imshow(heatmap, aspect='auto')
        plt.colorbar()
        plt.show()
        return heatmap
from abc import abstractmethod
from dataclasses import dataclass
from typing import cast, TypeVar, Union, Callable, Generic, Iterator, Tuple, Dict, Any
from rusty_results.exceptions import UnwrapException
try:
from pydantic.fields import ModelField
except ImportError: # pragma: no cover
...
# base inner type generic (the wrapped Some value)
T = TypeVar('T')
# base error type generic (the Err payload of Result)
E = TypeVar('E')
# generic callable args for T -> U, E -> U
U = TypeVar('U')
# result type of zip_with-style combinators
R = TypeVar('R')
class OptionProtocol(Generic[T]):
    """Abstract interface shared by the `Some` and `Empty` Option variants.

    Mirrors Rust's `Option` API; concrete behavior lives in the subclasses.
    """

    @property
    @abstractmethod
    def is_some(self) -> bool:
        """
        :return: True if the option is `Some`.
        """
        ...  # pragma: no cover

    @property
    @abstractmethod
    def is_empty(self) -> bool:
        """
        :return: True if the option is `Empty`.
        """
        ...  # pragma: no cover

    @abstractmethod
    def contains(self, item: T) -> bool:
        """
        :param item: The value to check.
        :return: True if the option is `Some` containing the given value;
            always False for `Empty`.
        """
        ...  # pragma: no cover

    @abstractmethod
    def expects(self, msg: str) -> T:
        """
        :param msg: Attached message for `UnwrapException` if raised.
        :return: The contained `Some` value.
        :raises: `UnwrapException` if option is `Empty`.
        """
        ...  # pragma: no cover

    @abstractmethod
    def unwrap(self) -> T:
        """
        Because this function may panic, its use is generally discouraged.
        Instead, prefer to use pattern matching and handle the `Empty` case
        explicitly, or call unwrap_or or unwrap_or_else.

        :return: The contained `Some` value, consuming the self value.
        :raises: `UnwrapException` if option is `Empty`.
        """
        ...  # pragma: no cover
    @abstractmethod
    def unwrap_or(self, default: T) -> T:
        """
        Arguments passed to unwrap_or are eagerly evaluated; if you are passing the result of a function call,
        it is recommended to use unwrap_or_else, which is lazily evaluated.

        :param default: default value returned when the option is `Empty`.
        :return: The contained `Some` value or the provided default.
        """
        ...  # pragma: no cover

    @abstractmethod
    def unwrap_or_else(self, f: Callable[[], T]) -> T:
        """
        :param f: Compute function called only when the option is `Empty`.
        :return: The contained `Some` value or the value computed by `f`.
        """
        ...  # pragma: no cover

    @abstractmethod
    def map(self, f: Callable[[T], U]) -> "Option[U]":
        """
        Maps an `Option[T]` to `Option[U]` by applying a function to a contained value.

        :param f: Function to apply.
        :return: `Some(f(value))` if option is `Some(value)` else `Empty`
        """
        ...  # pragma: no cover

    @abstractmethod
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        """
        Applies a function to the contained value (if any), or returns the provided default (if not).
        Arguments passed to map_or are eagerly evaluated; if you are passing the result of a function call,
        it is recommended to use map_or_else, which is lazily evaluated.

        :param default: default value
        :param f: function to apply
        :return: `f(value)` if option is `Some(value)` else `default`
        """
        ...  # pragma: no cover

    @abstractmethod
    def map_or_else(self, default: Callable[[], U], f: Callable[[T], U]) -> U:
        """
        Applies a function to the contained value (if any), or computes a default (if not).

        :param default: Callable producing the default value.
        :param f: Function to apply to the contained value.
        :return: `f(value)` if option is `Some(value)` else `default()`
        """
        ...  # pragma: no cover
    @abstractmethod
    def iter(self) -> Iterator[T]:
        """
        :return: An iterator over the contained value if option is `Some(T)` or an empty iterator if not.
        """
        ...  # pragma: no cover

    @abstractmethod
    def filter(self, predicate: Callable[[T], bool]) -> "Option[T]":
        """
        :param predicate: Test applied to the contained value.
        :return: `Some(T)` if predicate returns `True` (where T is the wrapped value), `Empty` if predicate returns `False`
        """
        ...  # pragma: no cover

    @abstractmethod
    def ok_or(self, err: E) -> "Result[T, E]":
        """
        Transforms the `Option[T]` into a `Result[T, E]`, mapping `Some(v)` to `Ok(v)` and `Empty` to `Err(err)`.
        Arguments passed to ok_or are eagerly evaluated; if you are passing the result of a function call,
        it is recommended to use ok_or_else, which is lazily evaluated.

        :param err: `Err` value
        :return: `Ok(T)` if option is `Some(T)` else `Err(err)`
        """
        ...  # pragma: no cover

    @abstractmethod
    def ok_or_else(self, err: Callable[[], E]) -> "Result[T, E]":
        """
        Transforms the `Option[T]` into a `Result[T, E]`, mapping `Some(v)` to `Ok(v)` and `Empty` to `Err(err())`.

        :param err: Callable that return the `Err` value
        :return: `Ok(T)` if option is `Some(T)` else `Err(err())`
        """
        ...  # pragma: no cover

    @abstractmethod
    def and_then(self, f: Callable[[T], "Option[T]"]) -> "Option[T]":
        """
        Some languages call this operation flatmap.

        :param f: The function to call.
        :return: `Empty` if the option is `Empty`, otherwise calls f with the wrapped value and returns the result.
        """
        ...  # pragma: no cover

    @abstractmethod
    def or_else(self, f: Callable[[], "Option[T]"]) -> "Option[T]":
        """
        :param f: The function to call.
        :return: The option if it contains a value, otherwise calls f and returns the result.
        """
        ...  # pragma: no cover

    @abstractmethod
    def xor(self, optb: "Option[T]") -> "Option[T]":
        """
        :param optb: `Option` to compare with.
        :return: `Some` if exactly one of self or optb is `Some`, otherwise returns `Empty`.
        """
        ...  # pragma: no cover

    @abstractmethod
    def zip(self, value: "Option[U]") -> "Option[Tuple[T, U]]":
        """
        Zips self with another Option.

        :param value: `Option` to zip with.
        :return: If self is `Some[s]` and other is `Some[o]`, this method returns `Some[(s, o)]`.
            Otherwise, `Empty` is returned.
        """
        ...  # pragma: no cover
@abstractmethod
def zip_with(self, other: "Option[U]", f: Callable[[Tuple[T, U]], R]) -> "Option[R]":
"""
Zips self and another Option with function f.
:param other: `Option` to zip with.
:param f: Function to apply to the zipped options values.
:return: If self is `Some[s]` and other is `Some[o]`, this method returns `Some[f(s, o)]`.
Otherwise, `Empty` is returned.
"""
... # pragma: no cover
@abstractmethod
def expect_empty(self, msg: str):
"""
:param msg: Message to be wrapped by `UnwrapException` if raised
:raises: `UnwrapException` if option is `Some`
"""
... # pragma: no cover
@abstractmethod
def unwrap_empty(self):
"""
:raises: `UnwrapException` if option is `Some`
"""
... # pragma: no cover
@abstractmethod
def flatten_one(self) -> "Option[T]":
"""
Removes one level from a nested `Option` structure.
E.g.:
* `Some(Some(1))` becomes `Some(1)`.
* `Some(Some(Some(1)))` becomes `Some(Some(1))`.
:return: `Option[T]` if self is `Option[Option[T]]`, otherwise `self`
"""
... # pragma: no cover
    @abstractmethod
    def flatten(self) -> "Option[T]":
        """
        Removes all levels of nesting from a nested `Option` structure (repeated `flatten_one`).
        E.g.:
        * `Some(Some(1))` becomes `Some(1)`.
        * `Some(Some(Some(1)))` becomes `Some(1)`.
        * `Some(Some(Some(Empty())))` becomes `Empty()`.
        :return: `Option[T]` if self is `Option[ ... Option[T] ...]`, otherwise `self`
        """
        ... # pragma: no cover
    @abstractmethod
    def transpose(self) -> "Result[Option[T], E]":
        """
        Transposes an Option of a Result into a Result of an Option.
        Empty will be mapped to Ok(Empty). Some(Ok(_)) and Some(Err(_)) will be mapped to Ok(Some(_)) and Err(_).
        :return: `Result[Option[T], E]`
        :raises TypeError: if inner value is not a `Result`
        """
        ... # pragma: no cover
    @abstractmethod
    def __bool__(self) -> bool:
        """
        :return: True for `Some`, False for `Empty` (see the concrete implementations).
        """
        ... # pragma: no cover
    def __contains__(self, item: T) -> bool:
        """Support the `in` operator by delegating to `contains`."""
        return self.contains(item)
    def __iter__(self):
        """Iterate over zero (Empty) or one (Some) wrapped values."""
        return self.iter()
    @classmethod
    def __get_validators__(cls):
        # Custom validation hook — looks like the pydantic v1 protocol
        # (yields callables taking (value, ModelField)); confirm pydantic version.
        yield cls.__validate
    @classmethod
    def __validate(cls, value: Union["Some", "Empty", Dict], field: "ModelField"):
        """Dispatch validation on the concrete input form (Some, Empty, or raw dict)."""
        if isinstance(value, Some):
            return cls.__validate_some(value, field)
        elif isinstance(value, Empty):
            return cls.__validate_empty(value, field)
        elif isinstance(value, dict):
            # e.g. deserialized JSON: {} -> Empty, {"Some": v} -> Some(v)
            return cls.__validate_dict(value, field)
        raise TypeError("Unable to validate Option") # pragma: no cover
    @classmethod
    def __validate_some(cls, value: "Some", field: "ModelField"):
        """Re-validate the wrapped value against the field's first subfield."""
        import pydantic

        if not field.sub_fields:
            raise TypeError("No subfields found for Some")
        field_value = field.sub_fields[0]
        valid_value, error = field_value.validate(value.Some, {}, loc="")
        if error:
            # ignore type since the error does not come from a base model
            raise pydantic.ValidationError((error, ), Some) # type: ignore
        return Some(valid_value)
    @classmethod
    def __validate_empty(cls, _: "Empty", field: "ModelField"):
        """An Empty carries no value, so the field must not declare subfields."""
        if field.sub_fields:
            raise TypeError("Empty value cannot be bound to external types")
        return Empty()
@classmethod
def __validate_dict(cls, value: Dict, field: "ModelField"):
import pydantic
if value == {}:
return Empty()
if len(value) != 1:
raise TypeError(
"Extra object parameters found, Option can have strictly 0 (Empty) or 1 Value (Some)",
)
inner_value = value.get("Some")
if inner_value is None:
raise TypeError("Non Empty Option do not have a proper Value")
if not field.sub_fields:
raise TypeError("Cannot check Option pydantic subfields validations") # pragma: no cover
field_value = field.sub_fields[0]
valid_value, error = field_value.validate(value["Some"], {}, loc="")
if error:
# ignore type since it do not come from a base model
raise pydantic.ValidationError(error, Option) # type: ignore # pragma: no cover
return Some(valid_value)
@dataclass(eq=True, frozen=True)
class Some(OptionProtocol[T]):
    """
    The value-carrying variant of `Option`; the wrapped value lives in the
    `Some` field (named to mirror the Rust variant).
    """
    Some: T
    @property
    def is_some(self) -> bool:
        return True
    @property
    def is_empty(self) -> bool:
        return False
    def contains(self, item: T) -> bool:
        return item == self.Some
    def expects(self, msg: str) -> T:
        return self.Some
    def unwrap(self) -> T:
        return self.Some
    def unwrap_or(self, default: T) -> T:
        return self.Some
    def unwrap_or_else(self, f: Callable[[], T]) -> T:
        return self.Some
    def map(self, f: Callable[[T], U]) -> "Option[U]":
        return Some(f(self.Some))
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return f(self.Some)
    def map_or_else(self, default: Callable[[], U], f: Callable[[T], U]) -> U:
        return f(self.Some)
    def iter(self) -> Iterator[T]:
        # single-element iterator over the wrapped value
        def _iter():
            yield self.Some
        return iter(_iter())
    def filter(self, predicate: Callable[[T], bool]) -> "Option[T]":
        return self if predicate(self.Some) else Empty()
    def ok_or(self, err: E) -> "Result[T, E]":
        return Ok(self.Some)
    def ok_or_else(self, err: Callable[[], E]) -> "Result[T, E]":
        return Ok(self.Some)
    def and_then(self, f: Callable[[T], "Option[T]"]) -> "Option[T]":
        return f(self.Some)
    def or_else(self, f: Callable[[], "Option[T]"]) -> "Option[T]":
        return self
    def xor(self, optb: "Option[T]") -> "Option[T]":
        return self if optb.is_empty else Empty()
    def zip(self, other: "Option[U]") -> "Option[Tuple[T, U]]":
        if other.is_some:
            # function typing is correct, we really return an Option[Tuple] but mypy complains that
            # other may not have a Value attribute because it does not understand the previous line check.
            return Some((self.Some, other.Some)) # type: ignore[union-attr]
        return Empty()
    def zip_with(self, other: "Option[U]", f: Callable[[Tuple[T, U]], R]) -> "Option[R]":
        return self.zip(other).map(f)
    def expect_empty(self, msg: str):
        raise UnwrapException(msg)
    def unwrap_empty(self):
        self.expect_empty("")
    def flatten_one(self) -> "Option[T]":
        # strip a single level of Option nesting, if any
        inner: T = self.unwrap()
        if isinstance(inner, OptionProtocol):
            return cast(Option, inner)
        return self
    def flatten(self) -> "Option[T]":
        # repeatedly strip nesting until flatten_one is a no-op
        this: Option[T] = self
        inner: Option[T] = self.flatten_one()
        while inner is not this:
            this, inner = (inner, inner.flatten_one())
        return this
    def transpose(self) -> "Result[Option[T], E]":
        if not isinstance(self.Some, ResultProtocol):
            raise TypeError("Inner value is not a Result")
        value: "ResultProtocol[T, E]" = self.Some
        return value.map(Some)
    def __bool__(self) -> bool:
        return True
    @classmethod
    def __get_validators__(cls):
        # delegate to the shared pydantic hook defined on OptionProtocol
        yield from OptionProtocol.__get_validators__()
@dataclass(eq=True, frozen=True)
class Empty(OptionProtocol):
    """
    The valueless variant of `Option`; counterpart of `Some`.
    """
    @property
    def is_some(self) -> bool:
        return False
    @property
    def is_empty(self) -> bool:
        return True
    def contains(self, item: T) -> bool:
        return False
    def expects(self, msg: str) -> T:
        raise UnwrapException(msg)
    def unwrap(self) -> T:
        raise UnwrapException("Tried to unwrap on an Empty value")
    def unwrap_or(self, default: T) -> T:
        return default
    def unwrap_or_else(self, f: Callable[[], T]) -> T:
        return f()
    def map(self, f: Callable[[T], U]) -> "Option[U]":
        return self
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return default
    def map_or_else(self, default: Callable[[], U], f: Callable[[T], U]) -> U:
        return default()
    def iter(self) -> Iterator[T]:
        return iter([])
    def filter(self, predicate: Callable[[T], bool]) -> "Option[T]":
        return self
    def ok_or(self, err: E) -> "Result[T, E]":
        return Err(err)
    def ok_or_else(self, err: Callable[[], E]) -> "Result[T, E]":
        return Err(err())
    def and_then(self, f: Callable[[T], "Option[T]"]) -> "Option[T]":
        return self
    def or_else(self, f: Callable[[], "Option[T]"]) -> "Option[T]":
        return f()
    def xor(self, optb: "Option[T]") -> "Option[T]":
        return optb if optb.is_some else Empty()
    def zip(self, value: "Option[U]") -> "Option[Tuple[T, U]]":
        return Empty()
    def zip_with(self, other: "Option[U]", f: Callable[[Tuple[T, U]], R]) -> "Option[R]":
        return Empty()
    def expect_empty(self, msg: str):
        # no-op: an Empty option is already empty
        ...
    def unwrap_empty(self):
        # no-op: an Empty option is already empty
        ...
    def flatten_one(self) -> "Option[T]":
        return self
    def flatten(self) -> "Option[T]":
        return self
    def transpose(self) -> "Result[Option[T], E]":
        return Ok(self)
    def __bool__(self) -> bool:
        return False
    @classmethod
    def __get_validators__(cls):
        # delegate to the shared pydantic hook defined on OptionProtocol
        yield from OptionProtocol.__get_validators__()
Option = Union[Some[T], Empty]
class ResultProtocol(Generic[T, E]):
    """
    Abstract interface shared by `Ok` and `Err`, mirroring Rust's `Result` type.
    Also provides the shared pydantic validation hooks.
    """
    @property
    @abstractmethod
    def is_ok(self) -> bool:
        """
        :return: True if the result is Ok
        """
        ... # pragma: no cover
    @property
    @abstractmethod
    def is_err(self) -> bool:
        """
        :return: True if the result is Err
        """
        ... # pragma: no cover
    @abstractmethod
    def contains(self, value: T) -> bool:
        """
        :param value: Value to be checked
        :return: True if the result is an Ok value containing the given value
        """
        ... # pragma: no cover
    @abstractmethod
    def contains_err(self, err: E) -> bool:
        """
        :param err: Value to be checked
        :return: True if the result is an Err containing the given err value
        """
        ... # pragma: no cover
    @abstractmethod
    def ok(self) -> Option[T]:
        """
        Converts from `Result[T, E]` to `Option[T]`
        :return: `Some(T)` if result is `Ok(T)` otherwise `Empty` discarding the error, if any.
        """
        ... # pragma: no cover
    @abstractmethod
    def err(self) -> Option[E]:
        """
        Converts from `Result[T, E]` to `Option[E]`
        :return: `Some(E)` if result is `Err(E)` otherwise `Empty` discarding the success value, if any.
        """
        ... # pragma: no cover
    @abstractmethod
    def map(self, f: Callable[[T], U]) -> "Result[U, E]":
        """
        Maps a `Result[T, E]` to `Result[U, E]` by applying a function to a contained Ok value, leaving an Err value untouched.
        This function can be used to compose the results of two functions.
        :param f: Function to apply to the `Ok(T)`
        :return: A new result wrapping the new value, if applied.
        """
        ... # pragma: no cover
    @abstractmethod
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        """
        Applies a function to the contained value (if Ok), or returns the provided default (if Err).
        Arguments passed to map_or are eagerly evaluated; if you are passing the result of a function call,
        it is recommended to use map_or_else, which is lazily evaluated.
        :param default: Default value to be returned if result is Err
        :param f: Function to apply to the `Ok(T)`
        :return: A new value with the result of applying the function to the Ok(value) or the default value.
        """
        ... # pragma: no cover
    @abstractmethod
    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        """
        Maps a `Result[T, E]` to `U` by applying a function to a contained Ok value,
        or a fallback function to a contained Err value.
        This function can be used to unpack a successful result while handling an error.
        :param default: Callable to lazy load the default return value
        :param f: Function to apply to the `Ok(T)`
        :return: A new value with the result of applying the function to the Ok(value) or the default value loaded from the default function call.
        """
        ... # pragma: no cover
    @abstractmethod
    def map_err(self, f: Callable[[E], U]) -> "Result[T, U]":
        """
        Maps a `Result[T, E]` to `Result[T, F]` by applying a function to a contained `Err` value,
        leaving an Ok value untouched.
        This function can be used to pass through a successful result while handling an error.
        :param f: Function to apply to the `E`
        :return: A new result with the modified `Err` value if applies.
        """
        ... # pragma: no cover
    @abstractmethod
    def iter(self) -> Iterator[T]:
        """
        :return: An iterator with a value if the result is `Ok` otherwise an empty iterator.
        """
        ... # pragma: no cover
    @abstractmethod
    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        """
        Calls op if the result is `Ok`, otherwise returns the `Err` value of self.
        This function can be used for control flow based on Result values.
        :param op: Callable to apply if result value is `Ok`
        :return: A result from applying op if `Ok`, original `Err` if not
        """
        ... # pragma: no cover
    @abstractmethod
    def or_else(self, op: Callable[[E], U]) -> "Result[T, U]":
        """
        Calls op if the result is `Err`, otherwise returns the `Ok` value of self.
        This function can be used for control flow based on Result values.
        :param op: Callable to apply if result value is `Err`
        :return: A result from applying op if `Err`, original `Ok` if not
        """
        ... # pragma: no cover
    @abstractmethod
    def unwrap(self) -> T:
        """
        Returns the contained `Ok` value.
        Because this function may raise an exception, its use is generally discouraged. Instead, prefer to use
        pattern matching and handle the `Err` case explicitly, or call unwrap_or, unwrap_or_else, or unwrap_or_default.
        :return: Contained `Ok` value
        :raises: `UnwrapException` if result is `Err`
        """
        ... # pragma: no cover
    @abstractmethod
    def unwrap_or(self, default: T) -> T:
        """
        Returns the contained `Ok` value or a provided default.
        Arguments passed to unwrap_or are eagerly evaluated; if you are passing the result of a function call,
        it is recommended to use unwrap_or_else, which is lazily evaluated.
        :param default: Value to be returned if result is `Err`
        :return: `Ok` value or `default`
        """
        ... # pragma: no cover
    @abstractmethod
    def unwrap_or_else(self, default: Callable[[], T]) -> T:
        """
        :param default: Function to call for the default value
        :return: The contained `Ok` value or computes it from a closure.
        """
        ... # pragma: no cover
    @abstractmethod
    def expect(self, msg: str) -> T:
        """
        :param msg: Attached message in case result is `Err` and `UnwrapException` is raised
        :return: The contained `Ok` value
        :raises: `UnwrapException`
        """
        ... # pragma: no cover
    @abstractmethod
    def unwrap_err(self) -> E:
        """
        :return: The contained `Err` value.
        :raises: `UnwrapException` if result is `Ok`.
        """
        ... # pragma: no cover
    @abstractmethod
    def expect_err(self, msg: str) -> E:
        """
        :param msg: Attached message in case result is `Ok` and `UnwrapException` is raised
        :return: The contained `Err` value.
        :raises: `UnwrapException` if result is `Ok`.
        """
        ... # pragma: no cover
    @abstractmethod
    def flatten_one(self) -> "Result[T, E]":
        """
        Converts from Result[Result[T, E], E] to Result[T, E], one nested level.
        :return: Flattened Result[T, E]
        """
        ... # pragma: no cover
    @abstractmethod
    def flatten(self) -> "Result[T, E]":
        """
        Converts from Result[Result[T, E], E] to Result[T, E], any nested level
        :return: Flattened Result[T, E]
        """
        ... # pragma: no cover
    @abstractmethod
    def transpose(self) -> Option["Result[T, E]"]:
        """
        Transposes a Result of an Option into an Option of a Result.
        Ok(Empty) will be mapped to Empty. Ok(Some(_)) and Err(_) will be mapped to Some(Ok(_)) and Some(Err(_))
        :return: Option[Result[T, E]] as per the mapping above
        :raises TypeError: if inner value is not an `Option`
        """
        ... # pragma: no cover
    @abstractmethod
    def __bool__(self) -> bool:
        """
        :return: True for `Ok`, False for `Err` (see the concrete implementations).
        """
        ... # pragma: no cover
    def __contains__(self, item: T) -> bool:
        """Support the `in` operator by delegating to `contains`."""
        return self.contains(item)
    def __iter__(self) -> Iterator[T]:
        """Iterate over zero (Err) or one (Ok) contained values."""
        return self.iter()
    @classmethod
    def __get_validators__(cls):
        # pydantic v1-style custom validation hook
        yield cls.__validate
    @classmethod
    def __validate(cls, value: Union["Ok", "Err", Dict], field: "ModelField"):
        """Dispatch validation on the concrete input form (Ok, Err, or raw dict)."""
        if isinstance(value, Ok):
            return cls.__validate_ok(value, field)
        elif isinstance(value, Err):
            return cls.__validate_err(value, field)
        elif isinstance(value, dict):
            return cls.__validate_dict(value, field)
        raise TypeError("Unable to validate Result") # pragma: no cover
    @classmethod
    def __validate_ok(cls, value: "Ok", field: "ModelField"):
        """Re-validate the Ok value against the first declared subfield."""
        import pydantic

        if not field.sub_fields or len(field.sub_fields) != 2:
            raise TypeError("Wrong subfields found for Ok") # pragma: no cover
        field_value = field.sub_fields[0]
        valid_value, error = field_value.validate(value.Ok, {}, loc="")
        if error:
            # tuple-wrap for consistency with the Option validators; the error
            # does not come from a base model, hence the type ignore
            raise pydantic.ValidationError((error, ), Result) # type: ignore
        return Ok(valid_value)
    @classmethod
    def __validate_err(cls, value: "Err", field: "ModelField"):
        """Re-validate the Err value against the second declared subfield."""
        import pydantic

        if not field.sub_fields or len(field.sub_fields) != 2:
            # message fixed: this branch validates Err, not Ok
            raise TypeError("Wrong subfields found for Err") # pragma: no cover
        field_value = field.sub_fields[1]
        valid_value, error = field_value.validate(value.Error, {}, loc="")
        if error:
            # tuple-wrap for consistency with the Option validators; the error
            # does not come from a base model, hence the type ignore
            raise pydantic.ValidationError((error, ), Result) # type: ignore
        return Err(valid_value)
    @classmethod
    def __validate_dict(cls, value: Dict, field: "ModelField"):
        """Validate a plain dict as a Result: {"Ok": v} -> Ok(v), {"Error": e} -> Err(e)."""
        import pydantic

        if not field.sub_fields or len(field.sub_fields) != 2:
            raise TypeError("Wrong subfields found for Result") # pragma: no cover
        if len(value) != 1:
            raise TypeError(
                "Extra object parameters found, Results have strictly 1 value (either Value (Ok) or Error (Err))"
            ) # pragma: no cover
        return_class: Callable[[Any], Any]
        inner_value: Any
        if "Ok" in value:
            inner_value, return_class, subfield = value.get("Ok"), Ok, 0
        elif "Error" in value:
            inner_value, return_class, subfield = value.get("Error"), Err, 1
        else:
            # should never be able to reach here
            raise TypeError("Cannot find any Result correct value") # pragma: no cover
        field_value = field.sub_fields[subfield]
        valid_value, error = field_value.validate(inner_value, {}, loc="")
        if error:
            # tuple-wrap for consistency with the Option validators; the error
            # does not come from a base model, hence the type ignore
            raise pydantic.ValidationError((error, ), Result) # type: ignore # pragma: no cover
        return return_class(valid_value)
@dataclass(eq=True, frozen=True)
class Ok(ResultProtocol[T, E]):
    """
    The success variant of `Result`; the value lives in the `Ok` field
    (named to mirror the Rust variant).
    """
    Ok: T
    @property
    def is_ok(self) -> bool:
        return True
    @property
    def is_err(self) -> bool:
        return False
    def contains(self, value: T) -> bool:
        return self.Ok == value
    def contains_err(self, err: E) -> bool:
        return False
    def ok(self) -> Option[T]:
        return Some(self.Ok)
    def err(self) -> Option[E]:
        return Empty()
    def map(self, f: Callable[[T], U]) -> "Result[U, E]":
        return Ok(f(self.Ok))
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return f(self.Ok)
    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        return f(self.Ok)
    def map_err(self, f: Callable[[E], U]) -> "Result[T, U]":
        # Type ignored here. It complains that we do not transform error to U (E -> U)
        # since we do not really have an error, generic type remains the same.
        return self # type: ignore
    def iter(self) -> Iterator[T]:
        # single-element iterator over the contained value
        def _iter():
            yield self.Ok
        return iter(_iter())
    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        return op(self.Ok)
    def or_else(self, op: Callable[[E], U]) -> "Result[T, U]":
        # Type ignored here. It complains that we do not transform error to U (E -> U)
        # since we do not really have an error, generic type remains the same.
        return self # type: ignore
    def unwrap(self) -> T:
        return self.Ok
    def unwrap_or(self, default: T) -> T:
        return self.Ok
    def unwrap_or_else(self, default: Callable[[], T]) -> T:
        return self.Ok
    def expect(self, msg: str) -> T:
        return self.Ok
    def unwrap_err(self) -> E:
        raise UnwrapException(f"{self.Ok}")
    def expect_err(self, msg: str) -> E:
        raise UnwrapException(msg)
    def flatten_one(self) -> "Result[T, E]":
        # strip a single level of Result nesting, if any
        if isinstance(self.Ok, ResultProtocol):
            return cast(Result, self.unwrap())
        return self
    def flatten(self) -> "Result[T, E]":
        # repeatedly strip nesting until flatten_one is a no-op
        this: Result[T, E] = self
        inner: Result[T, E] = self.flatten_one()
        while inner is not this:
            this, inner = (inner, inner.flatten_one())
        return this
    def transpose(self) -> Option["Result[T, E]"]:
        if not isinstance(self.Ok, OptionProtocol):
            raise TypeError("Inner value is not of type Option")
        return cast(Option, self.unwrap()).map(Ok)
    def __repr__(self):
        return f"Ok({self.Ok})"
    def __bool__(self):
        return True
    @classmethod
    def __get_validators__(cls):
        # delegate to the shared pydantic hook defined on ResultProtocol
        yield from ResultProtocol.__get_validators__()
@dataclass(eq=True, frozen=True)
class Err(ResultProtocol[T, E]):
    """
    The failure variant of `Result`; the error lives in the `Error` field.
    """
    Error: E
    @property
    def is_ok(self) -> bool:
        return False
    @property
    def is_err(self) -> bool:
        return True
    def contains(self, value: T) -> bool:
        return False
    def contains_err(self, err: E) -> bool:
        return self.Error == err
    def ok(self) -> Option[T]:
        return Empty()
    def err(self) -> Option[E]:
        return Some(self.Error)
    def map(self, f: Callable[[T], U]) -> "Result[U, E]":
        # Type ignored here. In this case U is the same type as T, but mypy cannot understand that match.
        return self # type: ignore
    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return default
    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        return default(self.Error)
    def map_err(self, f: Callable[[E], U]) -> "Result[T, U]":
        return Err(f(self.Error))
    def iter(self) -> Iterator[T]:
        # an Err carries no success value to iterate over
        return iter(tuple())
    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        # Type ignored here. In this case U is the same type as T, but mypy cannot understand that match.
        return self # type: ignore
    def or_else(self, op: Callable[[E], U]) -> "Result[T, U]":
        return Err(op(self.Error))
    def unwrap(self) -> T:
        raise UnwrapException(self.Error)
    def unwrap_or(self, default: T) -> T:
        return default
    def unwrap_or_else(self, default: Callable[[], T]) -> T:
        return default()
    def expect(self, msg: str) -> T:
        raise UnwrapException(msg)
    def unwrap_err(self) -> E:
        return self.Error
    def expect_err(self, msg: str) -> E:
        return self.Error
    def flatten_one(self) -> "Result[T, E]":
        return self
    def flatten(self) -> "Result[T, E]":
        return self
    def transpose(self) -> Option["Result[T, E]"]:
        return Some(self)
    def __repr__(self):
        return f"Err({self.Error})"
    def __bool__(self):
        return False
    @classmethod
    def __get_validators__(cls):
        # delegate to the shared pydantic hook defined on ResultProtocol
        yield from ResultProtocol.__get_validators__()
Result = Union[Ok[T, E], Err[T, E]] | /rusty_results-1.1.1-py3-none-any.whl/rusty_results/prelude.py | 0.941674 | 0.440108 | prelude.py | pypi |
from mpi4py import MPI
import numpy as np
from rusty_tree import lib, ffi
from rusty_tree.types.iterator import Iterator
class DistributedTree:
    """
    Wrapper for a DistributedTree structure in Rust. Used to create octrees
    distributed via MPI from a set of N distributed Cartesian points with shape
    (N,3) stored in NumPy arrays on each processor. Trees can optionally be
    balanced.

    Example Usage:
    --------------
    >>> from mpi4py import MPI
    >>> import numpy as np
    >>> from rusty_tree.distributed import DistributedTree
    >>> # Setup MPI communicator
    >>> comm = MPI.COMM_WORLD
    >>> # Initialize points at the current processor
    >>> points = np.random.rand(1000, 3)
    >>> # Create a balanced, distributed, tree from a set of globally
    >>> # distributed points
    >>> tree = DistributedTree.from_global_points(points, True, comm)
    """

    def __init__(self, p_tree, comm, p_comm, raw_comm):
        """
        Don't directly use constructor, instead use the provided class method
        to create a DistributedTree from a set of points, distributed globally
        across the set of processors provided to the constructor via its
        communicator.

        Parameters
        ----------
        p_tree: cdata 'struct <DistributedTree> *'
            Pointer to a DistributedTree struct initialized in Rust.
        comm: mpi4py.MPI.Intracomm
            MPI world communicator, created using mpi4py.
        p_comm: cdata '*MPI_Comm'
            Pointer to underlying C communicator.
        raw_comm: cdata 'MPI_Comm'
            Underlying C communicator.
        """
        self.ctype = p_tree
        # Keys and points are exposed as lazy iterators over the Rust vectors.
        self.nkeys = lib.distributed_tree_nkeys(self.ctype)
        self.keys = Iterator.from_keys(
            lib.distributed_tree_keys(self.ctype), self.nkeys
        )
        self.npoints = lib.distributed_tree_npoints(self.ctype)
        self.points = Iterator.from_points(
            lib.distributed_tree_points(self.ctype), self.npoints
        )
        self.balanced = lib.distributed_tree_balanced(self.ctype)
        self.comm = comm
        self.p_comm = p_comm
        self.raw_comm = raw_comm

    @classmethod
    def from_global_points(cls, points, balanced, comm):
        """
        Construct a distributed tree from a set of globally distributed points.

        Parameters
        ----------
        points : np.array(shape=(n_points, 3), dtype=np.float64)
            Cartesian points at this processor.
        balanced : bool
            If 'True' constructs a balanced tree, if 'False' constructs an unbalanced tree.
        comm: mpi4py.MPI.Intracomm
            MPI world communicator, created using mpi4py.

        Returns
        -------
        DistributedTree
        """
        # np.asarray avoids a copy when possible; the old
        # np.array(..., copy=False) form raises under NumPy >= 2.0 when a
        # copy is actually required.
        points = np.asarray(points, dtype=np.float64, order="C")
        npoints, _ = points.shape
        points_data = ffi.from_buffer("double(*)[3]", points)
        # builtin bool: the np.bool alias was removed in NumPy 1.24
        balanced_data = ffi.cast("bool", bool(balanced))
        npoints_data = ffi.cast("size_t", npoints)
        p_comm = MPI._addressof(comm)
        raw_comm = ffi.cast("uintptr_t*", p_comm)
        return cls(
            lib.distributed_tree_from_points(
                points_data, npoints_data, balanced_data, raw_comm
            ),
            comm,
            p_comm,
            raw_comm,
        )
import numpy as np
from rusty_tree import lib, ffi
from rusty_tree.types.morton import MortonKey
from rusty_tree.types.point import Point
class IteratorProtocol:
    """
    Bundle of the CFFI callables that implement the iterator operations
    (clone a slice, advance one element, index an element) for one exposed
    Rust element type, together with its Python wrapper class and C name.
    """

    def __init__(self, p_type, c_name, clone, next, index):
        """
        Params:
        -------
        p_type : Object
            Python wrapper class mirroring the Rust struct.
        c_name : str
            Name of the Rust struct as known to the C layer.
        clone : _cffi_backend._CDataBase
            CFFI function cloning a slice of the Rust iterator.
        next : _cffi_backend._CDataBase
            CFFI function advancing the Rust iterator by one element.
        index : _cffi_backend._CDataBase
            CFFI function indexing a single element of the Rust iterator.
        """
        self.p_type, self.c_name = p_type, c_name
        self.clone, self.next, self.index = clone, next, index
# Pre-built protocol bindings, one per Rust element type exposed via CFFI.
MortonProtocol = IteratorProtocol(
    p_type=MortonKey,
    c_name="MortonKey",
    clone=lib.morton_key_clone,
    next=lib.morton_key_next,
    index=lib.morton_key_index,
)
PointProtocol = IteratorProtocol(
    p_type=Point,
    c_name="Point",
    clone=lib.point_clone,
    next=lib.point_next,
    index=lib.point_index,
)
class Iterator:
    """
    Wrapper for Rust iterators exposed via a raw pointer via CFFI.
    """

    def __init__(self, pointer, n, iterator_protocol):
        """
        This constructor should not be used outside the class. Instead
        use the provided class methods to construct an Iterator object.

        Parameters
        ----------
        pointer : cdata 'struct <Vec<T>> *'
            Pointer to a the first element in a Vec<T> in Rust where type 'T'
            has been exposed in Python.
        n : int
            Number of elements in Vec<T>.
        iterator_protocol : IteratorProtocol
            Helper class exposing the Rust iterator in Python.
        """
        self._pointer = pointer
        # removed a duplicate `self._n = n` assignment present in the original
        self._n = n
        self._head = pointer
        self._curr = self._head
        self._iter = 0
        self._iterator_protocol = iterator_protocol

    @classmethod
    def from_points(cls, pointer, n):
        """
        Construct an Iterator for an exposed Vec<Point>.

        Parameters
        ----------
        pointer : cdata 'struct <T> *'
            Pointer to a the first element in a Vec<T> in Rust where type 'T'
            has been exposed in Python.
        n : int
            Number of elements in Vec<T>.

        Returns
        -------
        Iterator
            Instance of Rust iterator now wrapped in Python.
        """
        return cls(pointer, n, PointProtocol)

    @classmethod
    def from_keys(cls, pointer, n):
        """
        Construct an Iterator for an exposed Vec<MortonKey>.

        Parameters
        ----------
        pointer : cdata 'struct <T> *'
            Pointer to a the first element in a Vec<T> in Rust where type 'T'
            has been exposed in Python.
        n : int
            Number of elements in Vec<T>.

        Returns
        -------
        Iterator
            Instance of Rust iterator now wrapped in Python.
        """
        return cls(pointer, n, MortonProtocol)

    def __len__(self):
        return self._n

    def __iter__(self):
        # rewind to the head so the iterator can be traversed repeatedly
        self._curr = self._head
        self._iter = 0
        return self

    def __next__(self):
        _curr = self._curr
        _next = self._iterator_protocol.next(self._curr)[0]
        if self._iter < len(self):
            if _curr != _next:
                self._curr = _next
            self._iter += 1
            # NOTE: returns the raw cdata pointer, not a wrapped p_type
            return _curr
        else:
            raise StopIteration

    def __repr__(self):
        """Printing to stdout forces a copy."""
        return str(self._clone(0, len(self)))

    @property
    def head(self):
        """Return head of iterator, wrapped in a new compatible Python type."""
        return self._iterator_protocol.p_type(self._head)

    @property
    def ctype(self):
        """Return the current head"""
        return self._head

    def _index(self, index):
        """
        Index into an element of the exposed Vec<T> without copy.
        """
        index = ffi.cast("size_t", index)
        ntot = ffi.cast("size_t", len(self))
        return self._iterator_protocol.index(self._head, ntot, index)

    def _clone(self, start, stop):
        """Clone a slice of the exposed Vec<T> into a Python datatype."""
        n = ffi.cast("size_t", len(self))
        nslice = stop - start
        start = ffi.cast("size_t", start)
        stop = ffi.cast("size_t", stop)
        # Rust writes one element pointer per slot into this uintptr buffer.
        ptr = np.empty(nslice, dtype=np.uint64)
        ptr_data = ffi.from_buffer("uintptr_t *", ptr)
        self._iterator_protocol.clone(self._head, ptr_data, n, start, stop)
        return [
            self._iterator_protocol.p_type(
                ffi.cast(f"{self._iterator_protocol.c_name} *", ptr[index])
            )
            for index in range(nslice)
        ]

    def _slice(self, start, stop):
        """
        Index into a slice of the exposed Vec<T> without copy by returning the
        slice inside a new Python Iterator.
        """
        nslice = stop - start
        ptr = self._index(start)[0]
        return Iterator(ptr, nslice, self._iterator_protocol)

    def __getitem__(self, key):
        if isinstance(key, slice):
            start, stop, _ = key.indices(len(self))
            return self._slice(start, stop)
        elif isinstance(key, int):
            # NOTE: integer indexing returns a length-1 Iterator, not the
            # element itself, preserving the zero-copy semantics.
            ptr = self._slice(key, key + 1)
            return ptr
        else:
            raise TypeError("Invalid argument type: {}".format(type(key)))
import numpy as np
from rusty_tree import lib, ffi
class Domain:
    """Python wrapper around a Rust Domain struct (an axis-aligned bounding box)."""

    def __init__(self, p_domain):
        """
        Initialize a domain from a pointer to a Domain struct in Rust.
        This constructor should not be used outside the class. Instead use the
        provided class methods to construct a Domain object.

        Parameters
        ----------
        p_domain: cdata 'struct <Domain> *'
            Pointer to a Domain struct initialized in Rust.
        """
        self._p_domain = p_domain

    @property
    def ctype(self):
        """Give access to the underlying ctype."""
        return self._p_domain

    @property
    def origin(self):
        """
        Coordinate corresponding to the origin of the domain.

        Returns
        -------
        np.array(shape=(3, 1), dtype=np.float64)
        """
        return np.array([*self.ctype.origin], dtype=np.float64)

    @property
    def diameter(self):
        """
        Width of domain along each axis.

        Returns
        --------
        np.array(shape=(3, 1), dtype=np.float64)
        """
        return np.array([*self.ctype.diameter], dtype=np.float64)

    @classmethod
    def from_local_points(cls, points):
        """
        Infer the domain from points on this processor.

        Parameters
        ----------
        points: np.array(shape=(n, 3), dtype=np.float64)
            Points on this processor

        Returns
        -------
        Domain
            Domain of points on this processor.
        """
        points = np.array(points, dtype=np.float64, order="C")
        npoints, _ = points.shape
        # plain string: the original had a stray f-prefix with no placeholder
        points_data = ffi.from_buffer("double(*)[3]", points)
        return cls(lib.domain_from_local_points(points_data, npoints))

    @classmethod
    def from_global_points(cls, points, comm):
        """
        Infer the domain from points on all processors.

        Parameters
        ----------
        points: np.array(shape=(n, 3), dtype=np.float64)
            Points on this processor.
        comm: cdata 'MPI_Comm'
            C communicator.

        Returns
        -------
        Domain
            Domain of points on all processors in communicator 'comm'.
        """
        points = np.array(points, dtype=np.float64, order="C")
        npoints, _ = points.shape
        points_data = ffi.from_buffer("double(*)[3]", points)
        return cls(lib.domain_from_global_points(points_data, npoints, comm))
import numpy as np
from rusty_tree import lib, ffi
class MortonKey:
    def __init__(self, p_key):
        """
        Initialize a MortonKey from a pointer to a MortonKey struct in Rust.
        This constructor should not be used outside the class. Instead
        use the provided class methods to construct a MortonKey object.

        Parameters
        ----------
        p_key: cdata 'struct <MortonKey> *'
            Pointer to a MortonKey struct initialized in Rust.
            NOTE: ownership is taken — the pointer is freed in `__del__`.
        """
        self._p_key = p_key
    def __del__(self):
        # Free the Rust-side MortonKey when the Python wrapper is collected.
        lib.morton_key_delete(self.ctype)
    def __repr__(self):
        # morton/anchor are methods (not properties), so they must be called
        return str({"morton": self.morton(), "anchor": self.anchor()})
def __eq__(self, other):
return self.morton == other.morton
def __ne__(self, other):
return self.morton != other.morton
def __lt__(self, other):
return self.morton < other.morton
def __le__(self, other):
return self.morton <= other.morton
def __gt__(self, other):
return self.morton > other.morton
def __ge__(self, other):
return self.morton >= other.morton
    def __hash__(self):
        # Hash on the Morton index so keys can be used in sets/dicts.
        return self.morton()
    @property
    def ctype(self):
        """
        Give access to the underlying ctype pointer.

        Returns
        -------
        cdata 'struct <MortonKey> *'
        """
        return self._p_key
    @classmethod
    def from_anchor(cls, anchor):
        """
        Create a Morton key from a given anchor (3 lattice coordinates,
        converted to uint64 before being handed to Rust).

        Returns
        -------
        MortonKey
        """
        anchor = np.array(anchor, dtype=np.uint64)
        data = ffi.from_buffer("uint64_t(*)[3]", anchor)
        return cls(lib.morton_key_from_anchor(data))
    @classmethod
    def from_morton(cls, morton):
        """
        Create a Morton key from a given (integer) Morton index.

        Returns
        -------
        MortonKey
        """
        return cls(lib.morton_key_from_morton(morton))
    @classmethod
    def from_point(cls, point, origin, diameter):
        """
        Create a Morton key from a Cartesian point at the deepest level,
        relative to a domain given by its origin and per-axis diameter.

        Returns
        -------
        MortonKey
        """
        point = np.array(point, dtype=np.float64)
        point_data = ffi.from_buffer("double(*)[3]", point)
        origin = np.array(origin, dtype=np.float64)
        origin_data = ffi.from_buffer("double(*)[3]", origin)
        diameter = np.array(diameter, dtype=np.float64)
        diameter_data = ffi.from_buffer("double(*)[3]", diameter)
        return cls(lib.morton_key_from_point(point_data, origin_data, diameter_data))
    def anchor(self):
        """Return the anchor as a fresh uint64 array (copies out of the struct)."""
        return np.array([*self.ctype.anchor], dtype=np.uint64)
    def morton(self):
        """Return the integer Morton index, without copy."""
        return self.ctype.morton
    def level(self):
        """Return the octree level of this key, without copy."""
        return lib.morton_key_level(self.ctype)
    def parent(self):
        """Return the parent key (new Rust allocation, wrapped)."""
        return MortonKey(lib.morton_key_parent(self.ctype))
    def first_child(self):
        """Return the first child key (new Rust allocation, wrapped)."""
        return MortonKey(lib.morton_key_first_child(self.ctype))
    def children(self):
        """Return the 8 children, with copy."""
        # The C call fills ``ptr`` with 8 raw pointers (as uintptr_t);
        # each is cast back to a MortonKey* and wrapped.
        ptr = np.empty(8, dtype=np.uint64)
        ptr_data = ffi.from_buffer("uintptr_t *", ptr)
        lib.morton_key_children(self.ctype, ptr_data)
        children = [
            MortonKey(ffi.cast("MortonKey *", ptr[index])) for index in range(8)
        ]
        return children
def ancestors(self):
"""Return all ancestors, with copy."""
curr = self
ancestors = set()
while curr.morton() != 0:
parent = curr.parent()
ancestors.add(parent)
curr = parent
return ancestors
    def siblings(self):
        """Return all children of the parent (including self), with copy."""
        return self.parent().children()
    def is_ancestor(self, other):
        """Check if the key is ancestor of `other`."""
        return lib.morton_key_is_ancestor(self.ctype, other.ctype)
    def is_descendent(self, other):
        """Check if the key is descendent of `other`."""
        # NOTE: spelling "descendent" matches the C API symbol; kept for
        # interface compatibility.
        return lib.morton_key_is_descendent(self.ctype, other.ctype)
    def to_coordinates(self, origin, diameter):
        """Return the physical coordinates of the anchor as a (3,) float64 array."""
        coords = np.empty(3, dtype=np.float64)
        coords_data = ffi.from_buffer("double(*)[3]", coords)
        origin = np.array(origin, dtype=np.float64)
        origin_data = ffi.from_buffer("double(*)[3]", origin)
        diameter = np.array(diameter, dtype=np.float64)
        diameter_data = ffi.from_buffer("double(*)[3]", diameter)
        # The C call writes the result in place into ``coords``.
        lib.morton_key_to_coordinates(
            self.ctype, origin_data, diameter_data, coords_data
        )
        return coords
    def box_coordinates(self, origin, diameter):
        """
        Return the 8 coordinates of the box associated with the key.
        Let the unit cube be described as follows:
        [
        [0, 0, 0],
        [1, 0, 0],
        [0, 1, 0],
        [1, 1, 0],
        [0, 0, 1],
        [1, 0, 1],
        [0, 1, 1],
        [1, 1, 1]
        ]
        The function returns the box coordinates in the same order
        as the above unit cube coordinates, as an (8, 3) float64 array.
        """
        coords = np.empty(24, dtype=np.float64)
        coords_data = ffi.from_buffer("double(*)[24]", coords)
        origin = np.array(origin, dtype=np.float64)
        origin_data = ffi.from_buffer("double(*)[3]", origin)
        diameter = np.array(diameter, dtype=np.float64)
        diameter_data = ffi.from_buffer("double(*)[3]", diameter)
        # The C call writes 24 doubles (8 corners x 3 dims) in place.
        lib.morton_key_box_coordinates(
            self.ctype, origin_data, diameter_data, coords_data
        )
        return coords.reshape(8, 3)
    def find_key_in_direction(self, direction):
        """
        Find a key in a given direction.
        Given an integer list `direction` containing
        3 elements, return the key obtained by moving
        from the current key `direction[j]` steps along
        dimension [j]. For example, if `direction = [2, -1, 1]`
        the method returns the key obtained by moving two boxes in the
        positive x-direction, one box in the negative y direction and one
        box in the positive z direction. Boxes are counted with respect to
        the current level.
        If there is no box in the given direction, i.e. the new coordinates
        are out of bounds, the method returns None.
        """
        direction = np.array(direction, dtype=np.int64)
        direction_data = ffi.from_buffer("int64_t(*)[3]", direction)
        ptr = lib.morton_key_key_in_direction(self.ctype, direction_data)
        # The C library signals "out of bounds" with a NULL pointer.
        if ptr == ffi.NULL:
            return None
        else:
            return MortonKey(ptr)
import re
def is_valid_rut(rut: str) -> bool:
    """Determines if a given rut is valid
    Arguments:
        rut {str} -- Complete rut, including verification digit. It might
        contain dots and a dash.
    Returns:
        bool -- True if rut is valid. False otherwise.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    __raise_error_if_rut_input_format_not_valid(rut)
    normalized = __clean_rut(rut)
    body, checker = normalized[:-1], normalized[-1]
    return get_verification_digit(body) == checker
def get_verification_digit(rut: str) -> str:
    """Calculates the verification digit for a given rut
    Arguments:
        rut {str} -- Rut containing digits only. No dots nor verification
        digit allowed.
    Returns:
        str -- Verification digit. It might be a digit or 'k'.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    __raise_error_if_input_to_get_verification_digit_not_valid(rut)
    return __get_verification_digit_from_partial_sum(
        __get_partial_sum_for_verification_digit_computation(rut)
    )
def get_capitalized_verification_digit(rut: str) -> str:
    """Calculates the capitalized verification digit for a given rut
    Arguments:
        rut {str} -- Rut containing digits only. No dots nor verification
        digit allowed.
    Returns:
        str -- Verification digit. It might be a digit or 'K'.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    digit = get_verification_digit(rut)
    return digit.upper()
def format_rut_with_dots(rut: str) -> str:
    """Formats RUT, adding dots and dash
    Arguments:
        rut {str} -- RUT to be formatted
    Returns:
        str -- Formatted RUT.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    __raise_error_if_rut_input_format_not_valid(rut)
    dashed = __add_dash_to_rut(__clean_rut(rut))
    # The last two characters are the dash and the verification digit;
    # thousands separators apply only to the body before them.
    return __add_thousands_separator(dashed[:-2]) + dashed[-2:]
def format_capitalized_rut_with_dots(rut: str) -> str:
    """Formats RUT, adding dots, dash and capitalized verification digit
    Arguments:
        rut {str} -- RUT to be formatted
    Returns:
        str -- Formatted RUT.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    formatted = format_rut_with_dots(rut)
    return formatted.upper()
def format_rut_without_dots(rut: str) -> str:
    """Formats RUT, adding dash
    Arguments:
        rut {str} -- RUT to be formatted
    Returns:
        str -- Formatted RUT.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    __raise_error_if_rut_input_format_not_valid(rut)
    return __add_dash_to_rut(__clean_rut(rut))
def format_capitalized_rut_without_dots(rut: str) -> str:
    """Formats RUT, adding dash and capitalized verification digit
    Arguments:
        rut {str} -- RUT to be formatted
    Returns:
        str -- Formatted RUT.
    Raises:
        ValueError: when input is not valid to be processed.
    """
    formatted = format_rut_without_dots(rut)
    return formatted.upper()
def __raise_error_if_rut_input_format_not_valid(rut: str):
    # Guard shared by every public entry point that accepts a full rut.
    if not __is_rut_input_valid(rut):
        raise ValueError("invalid input")
def __is_rut_input_valid(rut: str) -> bool:
    """Return True when *rut* is non-empty and well formatted.
    Coerces to bool so the annotated return type actually holds; the
    previous version returned the falsy input itself (e.g. '') instead
    of False.
    """
    return bool(rut) and __is_well_formatted(rut)
def __is_well_formatted(rut: str) -> bool:
    """Accepts '12.345.678-5', '12345678-5' or bare digits plus check digit."""
    format_regex = r"^((\d{1,3}(\.\d{3})+-)|\d+-?)(\d|k|K)$"
    return bool(re.match(format_regex, rut))
def __raise_error_if_input_to_get_verification_digit_not_valid(rut: str):
    # Guard for the verification-digit calculators (digits-only input).
    if not __is_rut_format_valid_to_get_verification_digit(rut):
        raise ValueError("invalid input")
def __is_rut_format_valid_to_get_verification_digit(rut: str) -> bool:
    """Return True when *rut* is a non-empty string of digits only.
    Returns an actual bool; the previous version returned the regex match
    object (or the falsy input), contradicting the ``-> bool`` annotation.
    """
    format_regex = r"^\d+$"
    return bool(rut) and re.match(format_regex, rut) is not None
def __get_partial_sum_for_verification_digit_computation(rut: str) -> int:
    """Modulo-11 weighted sum: digits from the right times cycling factors 2..7."""
    factors = [2, 3, 4, 5, 6, 7]
    total = 0
    for position, digit in enumerate(reversed(rut)):
        total += int(digit) * factors[position % 6]
    return total
def __get_verification_digit_from_partial_sum(partial_sum: int) -> str:
    """Map the weighted sum to the check character: '0'-'9', or 'k' for 10."""
    remainder = (11 - partial_sum % 11) % 11
    return "k" if remainder == 10 else str(remainder)
def __clean_rut(rut: str) -> str:
    # Strip dots and dash and lower-case a trailing 'K'.
    # (Return annotation corrected: this returns a str, not a bool.)
    return rut.replace(".", "").replace("-", "").lower()
def __add_thousands_separator(rut: str) -> str:
    """Insert a dot every three digits, counting from the right."""
    if len(rut) < 4:
        return rut
    return ".".join(__generate_digit_groups(rut))
def __generate_digit_groups(rut: str) -> list:
    # Leading short group (1-2 digits) followed by the full 3-digit groups.
    # (Return annotation corrected from the literal ``[]``.)
    digit_groups = __add_most_significant_digits_group(rut) + \
        __generate_least_significant_digit_groups(rut)
    return digit_groups
def __add_most_significant_digits_group(rut: str) -> list:
    """Return the leading group of 1-2 digits, or [] when len(rut) % 3 == 0.
    The annotation is now ``list``; the previous ``-> []`` instantiated a
    fresh list object at import time and conveyed no type information.
    """
    remainder = len(rut) % 3
    return [rut[:remainder]] if remainder else []
def __generate_least_significant_digit_groups(rut: str) -> list:
    """Split everything after the leading remainder into 3-digit groups.
    The annotation is now ``list``; the previous ``-> []`` instantiated a
    fresh list object at import time and conveyed no type information.
    """
    offset = len(rut) % 3
    return [rut[offset + 3 * i: offset + 3 * i + 3] for i in range(len(rut) // 3)]
def __add_dash_to_rut(rut: str) -> str:
    """Insert a dash before the last character (the verification digit)."""
    return rut[:-1] + "-" + rut[-1]
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Text
import yaml
from pydantic import BaseModel
from ruth.nlu.model import ElementBuilder, Interpreter
def get_config(pipeline_path: Path) -> Dict[Text, Any]:
    """Load the pipeline configuration YAML into a dictionary."""
    with open(pipeline_path) as stream:
        return yaml.safe_load(stream)
def load_json_data(path: Path) -> Dict[Text, Any]:
    """Read a JSON file and return its parsed contents."""
    with open(path) as stream:
        return json.load(stream)
def build_pipeline_from_metadata(
    metadata: Dict[Text, Any],
    model_dir: Path,
    element_builder: ElementBuilder = None,
):
    """Instantiate every pipeline element listed in a model's metadata.
    ``metadata["pipeline"]`` holds one config dict per element; each is
    loaded (with its persisted state from ``model_dir``) via the builder.
    A fresh ElementBuilder is created when none is supplied.
    """
    pipeline_element = []
    if not element_builder:
        element_builder = ElementBuilder()
    pipeline: List[Dict[Text, Any]] = metadata["pipeline"]
    for element in pipeline:
        pipeline_element.append(
            element_builder.load_element(element["name"], element, model_dir=model_dir)
        )
    return pipeline_element
def get_metadata_from_model(model_path: Path) -> Dict[Text, Any]:
    """Load ``metadata.json`` stored inside a trained model directory."""
    return load_json_data(model_path / "metadata.json")
def get_interpreter_from_model_path(model_path: str) -> Interpreter:
    """Resolve a model directory and build an Interpreter from its metadata."""
    model_path = check_model_path(model_path)
    metadata = get_metadata_from_model(model_path.absolute())
    pipeline = build_pipeline_from_metadata(metadata=metadata, model_dir=model_path)
    return Interpreter(pipeline)
def check_model_path(model_path: str) -> Path:
    """Resolve the model directory to use.
    When *model_path* is given it must exist; otherwise the newest
    ``ruth``-named directory under ``models/`` is picked.
    Raises FileNotFoundError when no usable model is found.
    """
    if model_path:
        candidate = Path(model_path)
        if not candidate.exists():
            raise FileNotFoundError(
                "Model does not exist in the given path.\nTo train: ruth train"
            )
        return candidate
    model_folder = Path("models")
    if not model_folder.exists():
        raise FileNotFoundError(
            "No models found.\nTrain new models using: ruth train"
        )
    models = [
        directory
        for directory in model_folder.iterdir()
        if directory.is_dir() and re.search("ruth", str(directory))
    ]
    if not models:
        raise FileNotFoundError(
            "No models found.\nTrain new models using: ruth train"
        )
    # Lexicographically greatest name == most recent timestamped model.
    return Path(max(models))
def local_example_path(output_path: Text) -> Path:
    """Ensure a ``data/`` directory exists and return the example-file path.
    Uses *output_path* as base when given, the current directory otherwise.
    """
    base = Path(output_path) if output_path else Path().absolute()
    data_dir = base / "data"
    data_dir.mkdir(exist_ok=True)
    return data_dir / "example.yml"
def local_pipeline_path(output_path: Text) -> Path:
    """Return the pipeline.yml path under *output_path* (or the cwd)."""
    base = Path(output_path) if output_path else Path().absolute()
    return base / "pipeline.yml"
class Item(BaseModel):
    """Request body for the ``/parse`` endpoint: the raw utterance text."""
    text: str
import os
from pathlib import Path
from typing import Text
from urllib import request
import click
import matplotlib.pyplot as plt
import uvicorn as uvicorn
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from progressbar import progressbar
from rich import print as rprint
from rich.console import Console
from rich.prompt import Confirm
from rich.table import Table
from ruth import VERSION
from ruth.cli.constants import (
BOLD_GREEN,
BOLD_GREEN_CLOSE,
BOLD_RED,
BOLD_RED_CLOSE,
BOLD_YELLOW,
BOLD_YELLOW_CLOSE,
FOLDER,
ROCKET,
TARGET,
)
from ruth.cli.utills import (
Item,
build_pipeline_from_metadata,
check_model_path,
get_config,
get_interpreter_from_model_path,
get_metadata_from_model,
local_example_path,
local_pipeline_path,
)
from ruth.constants import INTENT, INTENT_RANKING, TEXT
from ruth.nlu.model import Interpreter
from ruth.nlu.train import train_pipeline
from ruth.shared.constants import DATA_PATH, PIPELINE_PATH, RAW_GITHUB_URL
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_config import RuthConfig
from sklearn.metrics import confusion_matrix
from starlette.responses import JSONResponse
# Shared Rich console used by every CLI command for styled output.
console = Console()
def get_logo():
    """Return the ASCII-art banner bundled next to this module."""
    banner_path = Path(os.path.realpath(__file__)).parent / "data" / "banner.txt"
    return f"{banner_path.read_text()}"
def add_heading_to_description_table(table: Table) -> Table:
    """Add the three help-table columns (Command / Arguments / Description)."""
    table.add_column("Command", style="#c47900")
    table.add_column("Arguments ", style="#c47900")
    table.add_column("Description", style="#c47900")
    return table
def print_logo_and_description():
    """Render the banner, website link and a Rich table describing each command."""
    console.print(f"[bold purple]{get_logo()}[/bold purple]", style="#6E1CF3")
    console.print(
        "[bold magenta]Website: [/bold magenta][link]https://puretalk.ai[/link]"
    )
    console.print("[bold magenta]Commands: [/bold magenta]")
    table = Table(show_header=True, header_style="bold #c47900", show_lines=True)
    table = add_heading_to_description_table(table)
    # One row per CLI sub-command; markup strings are Rich style tags.
    table.add_row(
        "[bold]init[/bold]",
        "-o [bold red]output_file[/bold red]",
        "[green]Initialize new project with default pipeline and default dataset[/green]",
    )
    table.add_row(
        "[bold]train[/bold]",
        "-p [bold red]pipeline_file[/bold red], -d [bold red]data_file[/bold red]",
        "[green]Train a model with the given pipeline and data[/green]",
    )
    table.add_row(
        "[bold]parse[/bold]",
        "-m [bold red]model_file[/bold red], -t [bold red]text[/bold red]",
        "[green]Classify intend for a sentence from a trained model[/green]",
    )
    table.add_row(
        "[bold]evaluate[/bold]",
        "-m [bold red]model_file[/bold red], -d [bold red]data_file[/bold red]",
        "[green]Evaluates trained model for a test dataset[/green]",
    )
    table.add_row(
        "[bold]deploy[/bold]",
        "-m [bold red]model_file[/bold red], -p [bold red]port[/bold red], -h [bold red]host[/bold red]",
        "[green]Trained models can be served using deploy command[/green]",
    )
    console.print(table)
class RichGroup(click.Group):
    """Click group whose help output is the Rich-styled banner and table."""
    def format_help(self, ctx, formatter):
        print_logo_and_description()
# TODO: print the help description whenever the user calls `ruth --help`
@click.group(cls=RichGroup)
@click.version_option(VERSION)
def entrypoint():
    """Root command group for the ruth CLI."""
    pass
@entrypoint.command(name="train")
@click.option(
    "-d",
    "--data",
    type=click.Path(exists=True, dir_okay=False),
    required=True,
    help="Data for training as json",
)
@click.option(
    "-p",
    "--pipeline",
    type=click.Path(exists=True, dir_okay=False),
    required=True,
    help="pipeline for training as yaml",
)
def train(data: Path, pipeline: Path):
    """Train a model from a JSON dataset and a YAML pipeline definition."""
    config = get_config(pipeline)
    training_data = TrainData.build(data)
    config = RuthConfig(config)
    model_absolute_dir = train_pipeline(config, training_data)
    console.print(
        f"Training completed {ROCKET}..."
        f"\nModel is stored at {FOLDER} {BOLD_YELLOW} {model_absolute_dir} {BOLD_YELLOW_CLOSE} \n",
        f"\nTo evaluate model:{BOLD_GREEN} ruth parse --help{BOLD_GREEN_CLOSE}",
    )
@entrypoint.command(name="parse")
@click.option(
    "-t",
    "--text",
    type=click.STRING,
    required=True,
    help="Data that need to be get parsed",
)
@click.option(
    "-m",
    "--model_path",
    type=click.STRING,
    required=False,
    help="Directory where the model is stored",
)
def parse(text: Text, model_path: Text):
    """Classify the intent of a single sentence with a trained model."""
    model_file = check_model_path(model_path)
    console.print(f"Latest Model found {FOLDER} {model_file}")
    metadata = get_metadata_from_model(model_file.absolute())
    pipeline = build_pipeline_from_metadata(metadata=metadata, model_dir=model_file)
    interpreter = Interpreter(pipeline)
    output = interpreter.parse(text)
    console.print(
        f"{TARGET} Predicted intent is {output.get(INTENT)} \n",
        f"\nTo deploy your model run: {BOLD_GREEN}ruth deploy --help{BOLD_GREEN_CLOSE}",
    )
@entrypoint.command(name="evaluate")
@click.option(
    "-d",
    "--data",
    type=click.Path(exists=True, dir_okay=False),
    required=True,
    help="Data for testing as json",
)
@click.option(
    "-m",
    "--model_path",
    type=click.STRING,
    required=False,
    help="Directory where the model is stored",
)
@click.option(
    "-o",
    "--output_folder",
    type=click.Path(),
    default=Path("results"),
    help="Directory where the results is stored",
)
def evaluate(data: Path, model_path: Text, output_folder: Text):
    """Score a trained model on a test set and save a confusion-matrix plot."""
    model_file = check_model_path(model_path)
    console.print(f"Latest Model found {FOLDER} {model_file}")
    metadata = get_metadata_from_model(model_file.absolute())
    pipeline = build_pipeline_from_metadata(metadata=metadata, model_dir=model_file)
    interpreter = Interpreter(pipeline)
    training_data = TrainData.build(data)
    correct_predictions = 0
    y_pred = []
    y_actual = []
    for example in training_data.training_examples:
        output = interpreter.parse(example.get("text"))
        y_pred.append(output.get(INTENT).get("name"))
        y_actual.append(example.get("intent"))
        if output.get(INTENT).get("name") == example.get("intent"):
            correct_predictions += 1
    # Guard against an empty test set instead of raising ZeroDivisionError.
    accuracy = correct_predictions / len(training_data) if len(training_data) else 0.0
    conf_matrix = confusion_matrix(y_true=y_actual, y_pred=y_pred)
    fig, ax = plt.subplots(figsize=(7.5, 7.5))
    ax.matshow(conf_matrix, cmap=plt.cm.Blues, alpha=0.3)
    for i in range(conf_matrix.shape[0]):
        for j in range(conf_matrix.shape[1]):
            ax.text(
                x=j, y=i, s=conf_matrix[i, j], va="center", ha="center", size="xx-large"
            )
    plt.xlabel("Predictions", fontsize=18)
    plt.ylabel("Actual", fontsize=18)
    plt.title("Confusion Matrix", fontsize=18)
    if output_folder:
        result_path = Path(output_folder).absolute()
    else:
        result_path = Path().absolute() / output_folder
    result_path.mkdir(exist_ok=True)
    # Bug fix: use Path.name instead of str(model_file).split("/")[-1],
    # which broke on Windows path separators.
    model_name = model_file.name
    # Results are versioned as "<model_name>@<index>"; pick the next index.
    indexes = [
        int(result.split("@")[-1])
        for result in os.listdir(str(result_path))
        if model_name in result
    ]
    index = max(indexes) + 1 if indexes else 0
    folder_for_the_result = result_path / f"{model_name}@{index}"
    folder_for_the_result.mkdir(exist_ok=True)
    final_file_path = folder_for_the_result / "confusion_matrix.png"
    plt.savefig(final_file_path)
    rprint(f"{TARGET} accuracy: ", accuracy)
    rprint(f"{BOLD_GREEN} confusion matrix is created.{BOLD_GREEN_CLOSE}")
    rprint(" results are stored here: ", folder_for_the_result)
    rprint(
        f" To deploy your model run: {BOLD_GREEN}ruth deploy --help{BOLD_GREEN_CLOSE}"
    )
@entrypoint.command(name="deploy")
@click.option(
    "-m",
    "--model_path",
    type=click.STRING,
    required=False,
    help="Directory where the model is stored",
)
@click.option(
    "-p",
    "--port",
    type=click.INT,
    default=5500,
    help="Port where the application should run",
)
@click.option(
    "-h",
    "--host",
    type=click.STRING,
    default="localhost",
    help="host where the application should run",
)
def deploy(model_path: Text, port: int, host: str):
    """Serve a trained model over HTTP via FastAPI + uvicorn."""
    app = FastAPI()
    app.interpreter = get_interpreter_from_model_path(model_path)
    # NOTE(review): GET with a JSON request body is unconventional; most
    # clients expect POST for this shape of endpoint.
    @app.get("/parse")
    async def parse(item: Item):
        output = app.interpreter.parse(item.text)
        # Expose only the text, top intent and the intent ranking.
        output = {
            key: output[key] for key in output.keys() & {INTENT_RANKING, TEXT, INTENT}
        }
        json_compatible_item_data = jsonable_encoder(output)
        return JSONResponse(content=json_compatible_item_data)
    uvicorn.run(app, host=host, port=port)
# Module-level handle so successive urlretrieve callbacks share one bar.
pbar = None
def show_progress(block_num, block_size, total_size):
    """``urllib.request.urlretrieve`` reporthook rendering a progress bar."""
    global pbar
    if pbar is None:
        pbar = progressbar.ProgressBar(maxval=total_size)
        pbar.start()
    downloaded = block_num * block_size
    if downloaded < total_size:
        pbar.update(downloaded)
    else:
        # Final callback: close the bar and reset for the next download.
        pbar.finish()
        pbar = None
@entrypoint.command(name="init")
@click.option(
    "-o",
    "--output-path",
    type=click.STRING,
    required=False,
    help="Directory where the files should be stored",
)
def init(output_path: Text):
    """Bootstrap a new project by downloading the default pipeline and dataset."""
    global pbar
    pipeline_path = f"{RAW_GITHUB_URL}/{PIPELINE_PATH}"
    data_path = f"{RAW_GITHUB_URL}/{DATA_PATH}"
    files_in_dir = 0
    # Count existing entries so we can warn before overwriting a project.
    if output_path:
        Path(output_path).mkdir(exist_ok=True)
        for _ in Path(output_path).absolute().iterdir():
            files_in_dir += 1
    else:
        output_path = Path().absolute()
        for _ in Path().absolute().iterdir():
            files_in_dir += 1
    if files_in_dir:
        override_changes = Confirm.ask(
            f"{BOLD_RED}You already have project in the current directory. "
            f"Do you still want to create new project?{BOLD_RED_CLOSE}"
        )
        if not override_changes:
            return None
    rprint(f"{BOLD_GREEN}Downloading pipeline.yml {BOLD_GREEN_CLOSE}")
    request.urlretrieve(
        str(pipeline_path), str(local_pipeline_path(output_path)), show_progress
    )
    rprint(f"{BOLD_GREEN}Downloading data.yml{BOLD_GREEN_CLOSE}")
    request.urlretrieve(
        str(data_path), str(local_example_path(output_path)), show_progress
    )
    rprint(f"{BOLD_GREEN}Project is Successfully build{ROCKET}{BOLD_GREEN_CLOSE}")
    rprint(f" To train your model run: {BOLD_GREEN}ruth train --help{BOLD_GREEN_CLOSE}")
from typing import Any, Dict, Text, Union
import numpy as np
from numpy import ndarray
from ruth.constants import TEXT
from scipy import sparse
class Feature:
    """A feature matrix (sparse or dense) tagged with the element that produced it."""

    def __init__(self, feature: Union[sparse.spmatrix, ndarray], origin: Text):
        self.feature = feature
        self.origin = origin

    def is_sparse(self) -> bool:
        """True when the underlying matrix is a scipy sparse matrix."""
        return isinstance(self.feature, sparse.spmatrix)

    def is_dense(self) -> bool:
        """True when the underlying matrix is a dense ndarray."""
        return not self.is_sparse()

    def _combine_sparse_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        from scipy.sparse import hstack

        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot combine sparse features as sequence dimensions do not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}."
            )
        # Bug fix: the result was previously stored on ``self.features`` (a
        # typo), leaving ``self.feature`` unchanged.
        self.feature = hstack([self.feature, additional_features.feature])

    def _combine_dense_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        # Bug fix: the original called len() on int shape entries (TypeError),
        # concatenated the Feature wrapper instead of its matrix, and returned
        # a value the caller discarded. Mutate in place like the sparse path.
        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot concatenate dense features as sequence dimension does not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}. "
                f"Message: {message.get(TEXT, 'Text not available')}"
            )
        self.feature = np.concatenate(
            (self.feature, additional_features.feature), axis=-1
        )

    def combine_with_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        """Append ``additional_features`` to this feature in place.
        No-op when ``additional_features`` is None; raises ValueError when
        one side is sparse and the other dense.
        """
        if additional_features is None:
            return
        if self.is_dense() and additional_features.is_dense():
            self._combine_dense_features(additional_features, message)
        elif self.is_sparse() and additional_features.is_sparse():
            self._combine_sparse_features(additional_features, message)
        else:
            raise ValueError("Cannot combine sparse and dense features.")
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from ruth.constants import TEXT
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.tokenizer.constants import MAX_LENGTH_FOR_PADDING
from ruth.nlu.tokenizer.tokenizer import Tokenizer
from ruth.shared.constants import ATTENTION_MASKS, INPUT_IDS
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
from transformers import AutoTokenizer
class HFTokenizer(Tokenizer):
    """Tokenizer element backed by a pretrained Hugging Face ``AutoTokenizer``.
    Attaches ``input_ids`` and ``attention_masks`` to each message.
    """
    DO_LOWER_CASE = "do_lower_case"
    # NOTE(review): DO_LOWER_CASE appears only in the defaults; presumably it
    # is consumed by the underlying tokenizer config -- confirm.
    defaults = {MODEL_NAME: "bert-base-uncased", DO_LOWER_CASE: True}
    def __init__(self, element_config: Optional[Dict[Text, Any]], tokenizer=None):
        super(HFTokenizer, self).__init__(element_config)
        # An empty dict is only an "untrained" placeholder; the real
        # tokenizer is built in train() or restored in load().
        self.tokenizer = tokenizer or {}
    def _build_tokenizer(self):
        # Download/load the pretrained tokenizer named in the element config.
        return AutoTokenizer.from_pretrained(self.element_config[MODEL_NAME])
    def _create_tokens(self, examples: TrainData):
        """Batch-encode message texts; returns (input_ids, attention_masks)."""
        before_padding_text = [message.get(TEXT) for message in examples]
        encoded = self.tokenizer(
            before_padding_text,
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        input_ids = encoded["input_ids"]
        attention_masks = encoded["attention_mask"]
        return input_ids, attention_masks
    def tokenize(self, training_data: TrainData):
        """Encode every training example; returns (input_ids, attention_masks)."""
        return self._create_tokens(training_data.training_examples)
    @staticmethod
    def _add_tokens_to_data(
        training_examples: List[RuthData],
        input_ids: List[List[int]],
        attention_masks: List[List[int]],
    ):
        # Attach the encoded ids/masks back onto each message, in order.
        for message, input_id, attention_mask in tqdm(
            zip(training_examples, input_ids, attention_masks),
            desc="tokenization",
            total=len(training_examples),
        ):
            message.set(INPUT_IDS, input_id)
            message.set(ATTENTION_MASKS, attention_mask)
    def train(self, training_data: TrainData):
        """Build the tokenizer and annotate all training examples with tokens."""
        self.tokenizer = self._build_tokenizer()
        input_ids, attention_masks = self.tokenize(training_data)
        self._add_tokens_to_data(
            training_data.training_examples, input_ids, attention_masks
        )
    def persist(self, file_name: Text, model_dir: Path):
        """Save the tokenizer under ``<model_dir>/<file_name>_tokenizer``.
        Returns the metadata dict later consumed by :meth:`load`.
        """
        tokenizer_file_name = file_name + "_tokenizer"
        tokenizer_path = str(model_dir) + "/" + tokenizer_file_name
        if self.tokenizer:
            self.tokenizer.save_pretrained(tokenizer_path)
        return {"tokenizer": tokenizer_file_name}
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted tokenizer from a trained model directory."""
        tokenizer_file_name = model_dir / meta["tokenizer"]
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_file_name)
        return cls(meta, tokenizer=tokenizer)
    def parse(self, message: RuthData):
        """Tokenize a single inference-time message in place."""
        parser_token = self.tokenizer.encode_plus(
            message.get(TEXT),
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        message.set(INPUT_IDS, parser_token["input_ids"])
        message.set(ATTENTION_MASKS, parser_token["attention_mask"])
import logging
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple
import torch
from numpy import argsort, fliplr, ndarray
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.constants import BATCH_SIZE, EPOCHS, MODEL_NAME
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.nlu.tokenizer.hf_tokenizer import HFTokenizer
from ruth.shared.constants import (
ATTENTION_MASKS,
DEVICE,
INPUT_IDS,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
)
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import get_device, json_pickle, json_unpickle
from sklearn.preprocessing import LabelEncoder
from torch import nn
from torch.nn import Module
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification
from transformers import logging as transformer_logging
# Release any cached GPU memory from a previous run at import time.
torch.cuda.empty_cache()
logger = logging.getLogger(__name__)
# Silence the transformers library's verbose warnings.
transformer_logging.set_verbosity_error()
console = Console()
class HFClassifier(IntentClassifier):
    """Intent classifier fine-tuning a Hugging Face sequence-classification model.
    Consumes the ``input_ids``/``attention_masks`` produced by HFTokenizer.
    """
    defaults = {
        EPOCHS: 100,
        MODEL_NAME: "bert-base-uncased",
        BATCH_SIZE: 4,
        DEVICE: "cuda" if torch.cuda.is_available() else "cpu",
    }
    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: Module = None,
    ):
        super().__init__(element_config, le)
        self.model = model
        self.device = get_device(self.element_config[DEVICE])
    def required_element(self):
        """This classifier needs HFTokenizer to run earlier in the pipeline."""
        return [HFTokenizer]
    def _build_model(self, label_count):
        # Pretrained transformer with a fresh classification head of
        # ``label_count`` outputs.
        return AutoModelForSequenceClassification.from_pretrained(
            self.element_config[MODEL_NAME], num_labels=label_count
        )
    @staticmethod
    def get_input_ids(message: RuthData) -> Dict[Text, List[int]]:
        """Return the message's token ids; raise if tokenization never ran."""
        input_ids = message.get(INPUT_IDS)
        if input_ids is not None:
            return input_ids
        raise ValueError("There is no sentence. Not able to train HFClassifier")
    @staticmethod
    def get_attention_masks(message: RuthData) -> Dict[Text, List[int]]:
        """Return the message's attention mask; raise if tokenization never ran."""
        attention_masks = message.get(ATTENTION_MASKS)
        if attention_masks is not None:
            return attention_masks
        raise ValueError("There is no sentence. Not able to train HFClassifier")
    @staticmethod
    def get_optimizer(model):
        """AdamW over all model parameters with a fixed 5e-5 learning rate."""
        return AdamW(model.parameters(), lr=5e-5)
    @property
    def get_params(self):
        """Training hyper-parameters pulled from the element config."""
        return {
            EPOCHS: self.element_config[EPOCHS],
            BATCH_SIZE: self.element_config[BATCH_SIZE],
        }
    def train(self, training_data: TrainData):
        """Fine-tune the transformer on the tokenized training examples.
        Skips training (with a warning) when fewer than two distinct
        intents are present.
        """
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = {
            "input_ids": [
                self.get_input_ids(message) for message in training_data.intent_examples
            ],
            "attention_masks": [
                self.get_attention_masks(message)
                for message in training_data.intent_examples
            ],
        }
        y = self.encode_the_str_to_int(intents)
        label_count = len(Counter(y).keys())
        params = self.get_params
        loaded_data = HFDatasetLoader(X, y)
        batched_data = DataLoader(
            loaded_data, batch_size=params[BATCH_SIZE], shuffle=True
        )
        self.model = self._build_model(label_count)
        optimizer = self.get_optimizer(self.model)
        console.print("device: " + str(self.device) + " is used")
        self.model.to(self.device)
        self.model.train()
        # Standard fine-tuning loop: forward pass yields the loss as outputs[0].
        for epoch in range(params[EPOCHS]):
            for batch in tqdm(batched_data, desc="epoch " + str(epoch)):
                optimizer.zero_grad()
                input_ids = batch["input_ids"].to(self.device)
                attention_masks = batch["attention_masks"].to(self.device)
                labels = batch["labels"].to(self.device)
                outputs = self.model(
                    input_ids, attention_mask=attention_masks, labels=labels
                )
                loss = outputs[0]
                loss.backward()
                optimizer.step()
    def persist(self, file_name: Text, model_dir: Path):
        """Save the fine-tuned model and the fitted label encoder.
        Returns the metadata dict later consumed by :meth:`load`.
        """
        classifier_file_name = file_name + "_classifier"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = str(model_dir) + "/" + classifier_file_name
        encoder_path = str(model_dir) + "/" + encoder_file_name
        if self.model and self.le:
            # Unwrap DataParallel-style containers before saving.
            model_to_save = (
                self.model.module if hasattr(self.model, "module") else self.model
            )
            model_to_save.save_pretrained(classifier_path)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore the persisted classifier and label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        classifier = AutoModelForSequenceClassification.from_pretrained(
            classifier_file_name
        )
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=classifier, le=le)
    def _predict(self, input_ids, attention_masks) -> Tuple[ndarray, ndarray]:
        """Return (label indices sorted by confidence, sorted probabilities)."""
        predictions = self.predict_probabilities(input_ids, attention_masks)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index[0], predictions[:, sorted_index][0][0]
    def predict_probabilities(self, input_ids, attention_masks):
        """Softmax class probabilities for a batch of encoded inputs."""
        self.model.to(self.device)
        self.model.eval()
        probabilities = self.model(
            torch.tensor(input_ids, device=self.device),
            attention_mask=torch.tensor(attention_masks, device=self.device),
        )[0]
        probabilities = nn.functional.softmax(probabilities, dim=-1)
        probabilities = probabilities.to(torch.device("cpu"))
        probabilities = probabilities.detach().numpy()
        return probabilities
    def parse(self, message: RuthData):
        """Predict the intent of a message and attach it plus a ranking."""
        input_ids = [message.get(INPUT_IDS)]
        attention_masks = [message.get(ATTENTION_MASKS)]
        index, probabilities = self._predict(input_ids, attention_masks)
        intents = self._change_int_to_text(index)
        # NOTE(review): self-assignment below is a no-op left from refactoring.
        probabilities = probabilities
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {
                INTENT_NAME_KEY: intents[0],
                PREDICTED_CONFIDENCE_KEY: float(probabilities[0]),
            }
            intent_rankings = [
                {INTENT_NAME_KEY: name, PREDICTED_CONFIDENCE_KEY: float(probability)}
                for name, probability in ranking
            ]
        else:
            intent = {INTENT_NAME_KEY: None, PREDICTED_CONFIDENCE_KEY: 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
class HFDatasetLoader(Dataset):
    """Minimal torch Dataset over pre-tokenized encodings and integer labels.
    ``encodings`` maps field names (e.g. "input_ids") to per-example lists;
    ``labels`` holds one class id per example.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        sample = {
            name: torch.tensor(rows[idx]) for name, rows in self.encodings.items()
        }
        sample["labels"] = torch.tensor(self.labels[idx])
        return sample
import logging
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple, Union
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
# Rich console for user-facing training progress output.
console = Console()
class SVMClassifier(IntentClassifier):
    """Intent classifier that fits an SVM with grid-searched hyper-parameters."""

    # Candidate hyper-parameters for GridSearchCV plus the padding length
    # used for dense feature vectors at parse time.
    defaults = {
        "C": [1, 2, 5, 10, 20, 100],
        "kernel": ["linear", "rbf"],
        "gamma": ["auto", 0.1],
        "decision_function_shape": ["ovr"],
        "max_cross_validation_folds": 5,
        "scoring": "f1_weighted",
        "max_length": 30000,
    }
    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        clf: GridSearchCV = None,
    ):
        # ``clf`` is populated by train() or by load().
        self.clf = clf
        super().__init__(element_config, le)
    @staticmethod
    def get_features(message: RuthData) -> Union[sparse.spmatrix, ndarray]:
        """Return the first feature matrix attached to *message*.

        Raises:
            ValueError: if the message carries no features.
        """
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train SVMClassifier")
    @property
    def param_grids(self):
        """Grid-search parameter space assembled from the element config."""
        return {
            "C": self.element_config["C"],
            "kernel": self.element_config["kernel"],
            "gamma": self.element_config["gamma"],
            "decision_function_shape": self.element_config["decision_function_shape"],
        }
    def _create_gridsearch(self, X, y) -> "sklearn.model_selection.GridSearchCV":
        """Build a GridSearchCV over an SVC with probability estimates.

        NOTE(review): ``X`` and ``y`` are unused here; fitting happens in train().
        """
        from sklearn.svm import SVC
        clf = SVC(probability=True)
        param_grids = self.param_grids
        return GridSearchCV(
            clf,
            param_grids,
            scoring=self.element_config["scoring"],
            cv=self.element_config["max_cross_validation_folds"],
        )
    def train(self, training_data: TrainData):
        """Fit the grid-searched SVM on the training examples."""
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        # An SVM needs at least two classes; bail out quietly otherwise.
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = [self.get_features(message) for message in training_data.intent_examples]
        if self.check_dense(X[0]):
            # Dense features: remember the max length for parse-time padding,
            # then flatten each example.
            max_length = self.get_max_length(X)
            self.element_config["max_length"] = max_length
            X = [self.ravel_vector(x) for x in X]
            # X = [self.pad_vector(x, max_length) for x in X]
        else:
            # NOTE(review): ``message`` here is actually a sparse matrix, not
            # a RuthData — the loop variable name is misleading.
            X = [message.toarray() for message in X]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.clf = self._create_gridsearch(X, y)
        self.clf.fit(X, y)
        console.print(f"The Best parameter we got are {self.clf.best_params_}")
        console.print(f"score: {self.clf.best_score_}")
    def persist(self, file_name: Text, model_dir: Path):
        """Pickle the best estimator and label encoder under *model_dir*."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.clf and self.le:
            json_pickle(classifier_path, self.clf.best_estimator_)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}
    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return class indices sorted by confidence plus the probabilities."""
        predictions = self.predict_probabilities(x)
        # fliplr turns argsort's ascending order into descending confidence.
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]
    def predict_probabilities(self, x: ndarray) -> ndarray:
        """Class probabilities for a single flattened feature vector."""
        return self.clf.predict_proba(x.reshape(1, -1))
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted classifier and label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        clf = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, clf=clf, le=le)
    def parse(self, message: RuthData):
        """Predict the intent of *message* and attach intent + ranking to it."""
        x = self.get_features(message)
        if self.check_dense(x):
            x = self.ravel_vector(x)
            x = self.pad_vector(x, self.element_config["max_length"])
        else:
            x = x.toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            # Keep only the LABEL_RANKING_LIMIT most confident candidates.
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {"name": intents[0], "accuracy": probabilities[0]}
            intent_rankings = [
                {"name": name, "accuracy": probability} for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
import logging
from pathlib import Path
from typing import Any, Dict, Text, Tuple
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
class NaiveBayesClassifier(IntentClassifier):
    """Intent classifier backed by scikit-learn's Gaussian naive Bayes."""

    # Defaults mirror GaussianNB's constructor arguments.
    defaults = {"priors": None, "var_smoothing": 1e-9}
    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: "sklearn.naive_bayes.GaussianNB" = None,
    ):
        # ``model`` is populated by train() or by load().
        super(NaiveBayesClassifier, self).__init__(element_config=element_config, le=le)
        self.model = model
    def _create_classifier(self) -> "sklearn.naive_bayes.GaussianNB":
        """Instantiate a GaussianNB from the element config."""
        from sklearn.naive_bayes import GaussianNB
        priors = self.element_config["priors"]
        var_smoothing = self.element_config["var_smoothing"]
        return GaussianNB(priors=priors, var_smoothing=var_smoothing)
    def train(self, training_data: TrainData):
        """Fit the classifier on the (densified) training features."""
        intents = [message.get(INTENT) for message in training_data.intent_examples]
        # At least two classes are required to train a classifier.
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = [
            self.get_features(message).toarray()
            for message in training_data.intent_examples
        ]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.model = self._create_classifier()
        self.model.fit(X, y)
    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return class indices sorted by confidence plus the probabilities."""
        predictions = self.predict_probabilities(x)
        # fliplr turns argsort's ascending order into descending confidence.
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]
    def predict_probabilities(self, x: ndarray) -> ndarray:
        """Class probabilities for a single flattened feature vector."""
        return self.model.predict_proba(x.reshape(1, -1))
    def _change_int_to_text(self, prediction: ndarray) -> ndarray:
        """Map encoded label integers back to intent names."""
        return self.le.inverse_transform(prediction)
    @staticmethod
    def get_features(message: RuthData) -> sparse.spmatrix:
        """Return the first feature matrix attached to *message*.

        Raises:
            ValueError: if the message carries no features.
        """
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train NaiveBayesClassifier")
    def parse(self, message: RuthData):
        """Predict the intent of *message* and attach intent + ranking to it."""
        x = self.get_features(message).toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            # Keep only the LABEL_RANKING_LIMIT most confident candidates.
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {"name": intents[0], "accuracy": probabilities[0]}
            intent_rankings = [
                {"name": name, "accuracy": probability} for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
    def persist(self, file_name: Text, model_dir: Path):
        """Pickle the model and label encoder under *model_dir*."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.model and self.le:
            json_pickle(classifier_path, self.model)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path):
        """Restore a persisted classifier and label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        model = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=model, le=le)
import io
import os
from typing import Any, Dict, List, Optional, Text
from urllib import request
import numpy
from progressbar import progressbar
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.constants import ELEMENT_UNIQUE_NAME
from ruth.nlu.featurizers.dense_featurizers.dense_featurizer import DenseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
pbar = None
class FastTextFeaturizer(DenseFeaturizer):
    """Fasttext featurizer for dense featurizers.

    Downloads a pre-trained fastText ``.vec`` archive on first use and maps
    each token to its 300-dimensional embedding vector.
    """
    DO_LOWER_CASE = "do_lower_case"
    defaults = {MODEL_NAME: "wiki-news-300d-1M.vec.zip", DO_LOWER_CASE: True}
    # Downloaded archives are cached here across runs.
    DEFAULT_MODELS_DIR = os.path.join(
        os.path.expanduser("~"), ".cache", "ruth", "models"
    )
    MODELS = {
        "wiki-news-300d-1M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M.vec.zip",
        "wiki-news-300d-1M-subword.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M-subword.vec.zip",
        "crawl-300d-2M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M.vec.zip",
        "crawl-300d-2M-subword.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M-subword.zip",
    }
    def __init__(self, element_config: Optional[Dict[Text, Any]]):
        super(FastTextFeaturizer, self).__init__(element_config)
        self.vectors = None
        # token -> embedding vector; populated lazily by _build_featurizer().
        self.featurizer = {}
        if self.element_config[MODEL_NAME] not in self.MODELS:
            raise ValueError(
                "Model name not found. Please choose from the following: "
                "{}".format(list(self.MODELS.keys()))
            )
        self.file_path = self.download_models(self.element_config[MODEL_NAME])
        # All supported fastText models use 300-dimensional vectors.
        self.dimension = 300
    def download_models(self, specific_models=None):
        """Download (and unzip) the requested model; return the ``.vec`` path.

        Raises:
            ValueError: if *specific_models* matches none of the known models.
        """
        os.makedirs(self.DEFAULT_MODELS_DIR, exist_ok=True)
        def show_progress(block_num, block_size, total_size):
            # urlretrieve report hook: drive a progress bar off downloaded bytes.
            # Fix: ``from progressbar import progressbar`` (module level) binds
            # the convenience *function*, which has no ``ProgressBar`` attribute;
            # import the class directly instead.
            from progressbar import ProgressBar
            global pbar
            if pbar is None:
                pbar = ProgressBar(maxval=total_size)
                pbar.start()
            downloaded = block_num * block_size
            if downloaded < total_size:
                pbar.update(downloaded)
            else:
                pbar.finish()
                pbar = None
        for model_name, url in self.MODELS.items():
            if specific_models is not None and str(model_name) not in str(
                specific_models
            ):
                continue
            model_path = os.path.join(self.DEFAULT_MODELS_DIR, model_name)
            if os.path.exists(model_path):
                # Archive already downloaded; strip ".zip" to get the .vec path.
                model_path = model_path[:-4]
                return model_path
            request.urlretrieve(url, model_path, show_progress)
            import zipfile
            with zipfile.ZipFile(model_path, "r") as zip_ref:
                zip_ref.extractall(self.DEFAULT_MODELS_DIR)
            model_path = model_path[:-4]
            return model_path
        # Fix: the original raised a plain f-string, which is a TypeError in
        # Python 3 (exceptions must derive from BaseException).
        raise ValueError(
            f"Given model {specific_models} not found. "
            "Please check the documentation and give the right Fastext model name"
        )
    def train(self, training_data: TrainData):
        """Train the featurizer: embed every example and attach the vectors."""
        self.featurizer = self._build_featurizer()
        tokenized_data: List[List[Text]] = [
            message.get_tokenized_data() for message in training_data.intent_examples
        ]
        self.vectors = [
            self.get_vector_list(token_list) for token_list in tokenized_data
        ]
        for message, vector in zip(training_data.training_examples, self.vectors):
            message.add_features(
                Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME])
            )
    def _build_featurizer(self):
        """Parse the ``.vec`` file into a token -> vector dict."""
        model = {}
        # ``with`` fixes the original's unclosed file handle.
        with io.open(
            self.file_path, "r", encoding="utf-8", newline="\n", errors="ignore"
        ) as fasttext_corpus:
            for line in tqdm(fasttext_corpus, colour="red"):
                tokens = line.strip().split(" ")
                # Fix: convert the vector components to floats; the original
                # stored them as a string array, which breaks numeric use
                # downstream.
                model[tokens[0]] = numpy.array(tokens[1:], dtype=numpy.float32)
        return model
    def get_vector_list(self, token_list) -> numpy.ndarray:
        """Get the vector list for a sequence of tokens."""
        if self.featurizer == {}:
            # Fix: the original called _build_featurizer() but discarded the
            # returned mapping, leaving ``self.featurizer`` empty forever.
            self.featurizer = self._build_featurizer()
        if not token_list:
            return numpy.zeros(self.dimension)
        return numpy.array([self.get_vector(token) for token in token_list])
    def get_vector(self, token) -> numpy.ndarray:
        """Get the vector for one token; zeros for out-of-vocabulary tokens."""
        if token in self.featurizer and self.featurizer != {}:
            return self.featurizer[token]
        else:
            return numpy.zeros(self.dimension)
    def parse(self, message: RuthData):
        """Parse the message: embed its tokens and attach the feature."""
        tokens = message.get_tokenized_data()
        vector = self.get_vector_list(tokens)
        message.add_features(Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME]))
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import CountVectorizer
logger = logging.getLogger(__name__)
console = Console()
class CountVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer wrapping scikit-learn's ``CountVectorizer``."""

    # Defaults mirror CountVectorizer's constructor arguments.
    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "min_ngram": 1,
        "max_ngram": 1,
        "lowercase": True,
        "max_features": None,
        "use_lemma": True,
    }
    def _load_params(self):
        """Copy vectorizer settings from the element config onto attributes."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.min_ngram = self.element_config["min_ngram"]
        self.max_ngram = self.element_config["max_ngram"]
        self.lowercase = self.element_config["lowercase"]
        self.use_lemma = self.element_config["use_lemma"]
    def _verify_analyzer(self) -> None:
        """Warn about config combinations that are likely mistakes."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.max_ngram == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )
    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["CountVectorizer"] = None,
    ):
        super(CountVectorFeaturizer, self).__init__(element_config)
        # ``vectorizer`` is populated by train() or by load().
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()
    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vacabulary=None
    ) -> CountVectorizer:
        """Build a CountVectorizer from a parameter dict (optionally with a
        pre-trained vocabulary)."""
        return CountVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=(parameters["min_ngram"], parameters["max_ngram"]),
            lowercase=parameters["lowercase"],
            vocabulary=vacabulary,
        )
    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        try:
            return hasattr(self.vectorizer, "vocabulary_")
        except (AttributeError, KeyError):
            return False
    def create_vectors(self, examples: List[RuthData]):
        """Transform each example's text into a sparse count vector."""
        features = []
        for message in examples:
            features.append(self.vectorizer.transform([message.get(TEXT)]))
        return features
    def _get_featurizer_data(self, training_data: TrainData):
        """Vectorize the training examples, or return [] if untrained."""
        if self._check_attribute_vocabulary():
            return self.create_vectors(training_data.training_examples)
        else:
            return []
    def _add_features_to_data(self, training_examples: List[RuthData], features):
        """Attach one Feature per example."""
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )
    def train(self, training_data: TrainData) -> CountVectorizer:
        """Fit the vectorizer and attach features to the training examples."""
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "min_ngram": self.min_ngram,
                "max_ngram": self.max_ngram,
                "lowercase": self.lowercase,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        self._add_features_to_data(training_data.training_examples, features)
        return self.vectorizer
    def parse(self, message: RuthData):
        """Vectorize a single message and attach the feature."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )
    def get_vocablary_from_vectorizer(self):
        """Return the trained vocabulary mapping.

        Raises:
            RuntimeError: if the vectorizer has not been trained yet.
        """
        if self.vectorizer.vocabulary_:
            return self.vectorizer.vocabulary_
        # Fix: the original raised a plain string, which is a TypeError in
        # Python 3 (exceptions must derive from BaseException).
        raise RuntimeError(
            "CountVectorizer not got trained. Please check the training data and retrain the model"
        )
    def persist(self, file_name: Text, model_dir: Text):
        """Pickle the trained vocabulary under *model_dir*."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            vocab = self.vectorizer.vocabulary_
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, vocab)
        return {"file_name": file_name}
    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "CountVectorFeaturizer":
        """Restore a featurizer from its persisted vocabulary."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            return cls(meta)
        vocabulary = json_unpickle(featurizer_file)
        vectorizers = cls._build_vectorizer(parameters=meta, vacabulary=vocabulary)
        return cls(meta, vectorizers)
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import TfidfVectorizer
logger = logging.getLogger(__name__)
console = Console()
class TfidfVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer wrapping scikit-learn's ``TfidfVectorizer``."""

    # Defaults mirror TfidfVectorizer's constructor arguments.
    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "ngram_range": (1, 1),
        "lowercase": True,
        "max_features": None,
        "norm": "l2",
        "use_idf": True,
    }
    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["TfidfVectorizer"] = None,
    ):
        super(TfidfVectorFeaturizer, self).__init__(element_config)
        # ``vectorizer`` is populated by train() or by load().
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()
    def _load_params(self):
        """Copy vectorizer settings from the element config onto attributes."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.ngram_range = self.element_config["ngram_range"]
        self.lowercase = self.element_config["lowercase"]
        self.max_features = self.element_config["max_features"]
        self.norm = self.element_config["norm"]
        self.use_idf = self.element_config["use_idf"]
    def _verify_analyzer(self) -> None:
        """Warn about config combinations that are likely mistakes."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.ngram_range[1] == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )
    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vacabulary=None
    ) -> TfidfVectorizer:
        """Build a TfidfVectorizer from a parameter dict (optionally with a
        pre-trained vocabulary)."""
        return TfidfVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=parameters["ngram_range"],
            lowercase=parameters["lowercase"],
            max_features=parameters["max_features"],
            norm=parameters["norm"],
            use_idf=parameters["use_idf"],
            vocabulary=vacabulary,
        )
    def train(self, training_data: TrainData) -> TfidfVectorizer:
        """Fit the vectorizer and attach features to the training examples."""
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "ngram_range": self.ngram_range,
                "lowercase": self.lowercase,
                "max_features": self.max_features,
                "norm": self.norm,
                "use_idf": self.use_idf,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        self._add_featurizer_data(training_data, features)
        return self.vectorizer
    def parse(self, message: RuthData):
        """Vectorize a single message and attach the feature."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )
    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        try:
            return hasattr(self.vectorizer, "vocabulary_")
        except (AttributeError, KeyError):
            return False
    def create_vector(self, examples: List[RuthData]):
        """Transform each example's text into a sparse tf-idf vector."""
        features = []
        for message in examples:
            features.append(self.vectorizer.transform([message.get(TEXT)]))
        return features
    def _get_featurizer_data(self, training_data: TrainData):
        """Vectorize the training examples, or return [] if untrained."""
        if self._check_attribute_vocabulary():
            return self.create_vector(training_data.training_examples)
        else:
            return []
    def _add_featurizer_data(self, training_examples: List[RuthData], features):
        # NOTE(review): the parameter is actually a TrainData here (train()
        # passes ``training_data``), and zip() silently iterates nothing for
        # a non-iterable of examples — confirm against callers.
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )
    def persist(self, file_name: Text, model_dir: Text):
        """Pickle the whole fitted vectorizer under *model_dir*."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, self.vectorizer)
        return {"file_name": file_name}
    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "TfidfVectorFeaturizer":
        """Restore a featurizer from its persisted vectorizer."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            return cls(meta)
        vectorizer = json_unpickle(featurizer_file)
        return cls(meta, vectorizer)
from typing import Any, Dict, Text, Union
import numpy as np
from numpy import ndarray
from ruth.constants import TEXT
from scipy import sparse
class Feature:
    """A single feature matrix plus the name of the element that produced it."""

    def __init__(self, feature: Union[sparse.spmatrix, ndarray], origin: Text):
        # feature: scipy sparse matrix or dense ndarray
        self.feature = feature
        # origin: unique name of the featurizer element that created it
        self.origin = origin

    def is_sparse(self) -> bool:
        """True if the underlying matrix is a scipy sparse matrix."""
        return isinstance(self.feature, sparse.spmatrix)

    def is_dense(self) -> bool:
        """True if the underlying matrix is a dense ndarray."""
        return not self.is_sparse()

    def _combine_sparse_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        """Horizontally stack another sparse feature onto this one in place.

        Raises:
            ValueError: if the row counts do not match.
        """
        from scipy.sparse import hstack

        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot combine sparse features as sequence dimensions do not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}."
            )
        # Fix: the original assigned to ``self.features`` (typo), silently
        # dropping the combined result.
        self.feature = hstack([self.feature, additional_features.feature])

    def _combine_dense_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        """Concatenate another dense feature onto this one in place.

        Raises:
            ValueError: if the row counts do not match.
        """
        # Fix: the original called len() on an int (shape[0]) — a TypeError —
        # and concatenated the Feature object instead of its ndarray.
        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot concatenate dense features as sequence dimension does not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}. "
                f"Message: {message.get(TEXT, 'Text not available')}"
            )
        # Assign in place so behaviour matches the sparse path.
        self.feature = np.concatenate(
            (self.feature, additional_features.feature), axis=-1
        )

    def combine_with_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        """Merge *additional_features* into this feature in place.

        Raises:
            ValueError: when mixing sparse and dense features.
        """
        if additional_features is None:
            return
        if self.is_dense() and additional_features.is_dense():
            self._combine_dense_features(additional_features, message)
        elif self.is_sparse() and additional_features.is_sparse():
            self._combine_sparse_features(additional_features, message)
        else:
            raise ValueError("Cannot combine sparse and dense features.")
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from ruth.constants import TEXT
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.tokenizer.constants import MAX_LENGTH_FOR_PADDING
from ruth.nlu.tokenizer.tokenizer import Tokenizer
from ruth.shared.constants import ATTENTION_MASKS, INPUT_IDS
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
from transformers import AutoTokenizer
class HFTokenizer(Tokenizer):
    """Tokenizer wrapping a HuggingFace ``AutoTokenizer``."""

    DO_LOWER_CASE = "do_lower_case"
    defaults = {MODEL_NAME: "bert-base-uncased", DO_LOWER_CASE: True}
    def __init__(self, element_config: Optional[Dict[Text, Any]], tokenizer=None):
        super(HFTokenizer, self).__init__(element_config)
        # Placeholder dict until a real AutoTokenizer is built in train()/load().
        self.tokenizer = tokenizer or {}
    def _build_tokenizer(self):
        """Instantiate the pretrained tokenizer named in the config."""
        return AutoTokenizer.from_pretrained(self.element_config[MODEL_NAME])
    def _create_tokens(self, examples: TrainData):
        """Encode all example texts; returns (input_ids, attention_masks)."""
        before_padding_text = [message.get(TEXT) for message in examples]
        encoded = self.tokenizer(
            before_padding_text,
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        input_ids = encoded["input_ids"]
        attention_masks = encoded["attention_mask"]
        return input_ids, attention_masks
    def tokenize(self, training_data: TrainData):
        """Tokenize every training example."""
        return self._create_tokens(training_data.training_examples)
    @staticmethod
    def _add_tokens_to_data(
        training_examples: List[RuthData],
        input_ids: List[List[int]],
        attention_masks: List[List[int]],
    ):
        """Attach token ids and attention masks to each example."""
        for message, input_id, attention_mask in tqdm(
            zip(training_examples, input_ids, attention_masks),
            desc="tokenization",
            total=len(training_examples),
        ):
            message.set(INPUT_IDS, input_id)
            message.set(ATTENTION_MASKS, attention_mask)
    def train(self, training_data: TrainData):
        """Build the tokenizer and tokenize the whole training set."""
        self.tokenizer = self._build_tokenizer()
        input_ids, attention_masks = self.tokenize(training_data)
        self._add_tokens_to_data(
            training_data.training_examples, input_ids, attention_masks
        )
    def persist(self, file_name: Text, model_dir: Path):
        """Save the tokenizer files under *model_dir*."""
        tokenizer_file_name = file_name + "_tokenizer"
        tokenizer_path = str(model_dir) + "/" + tokenizer_file_name
        if self.tokenizer:
            self.tokenizer.save_pretrained(tokenizer_path)
        return {"tokenizer": tokenizer_file_name}
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted tokenizer."""
        tokenizer_file_name = model_dir / meta["tokenizer"]
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_file_name)
        return cls(meta, tokenizer=tokenizer)
    def parse(self, message: RuthData):
        """Tokenize a single message and attach ids + attention mask."""
        parser_token = self.tokenizer.encode_plus(
            message.get(TEXT),
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        message.set(INPUT_IDS, parser_token["input_ids"])
        message.set(ATTENTION_MASKS, parser_token["attention_mask"])
import logging
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple
import torch
from numpy import argsort, fliplr
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.constants import BATCH_SIZE, EPOCHS, MODEL_NAME
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.nlu.tokenizer.hf_tokenizer import HFTokenizer
from ruth.shared.constants import ATTENTION_MASKS, INPUT_IDS
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.preprocessing import LabelEncoder
from torch import nn
from torch.nn import Module
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification
from transformers import logging as transformer_logging
torch.cuda.empty_cache()
logger = logging.getLogger(__name__)
transformer_logging.set_verbosity_error()
console = Console()
class HFClassifier(IntentClassifier):
    """Intent classifier that fine-tunes a HuggingFace sequence classifier."""

    defaults = {EPOCHS: 100, MODEL_NAME: "bert-base-uncased", BATCH_SIZE: 1}
    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: Module = None,
    ):
        # ``model`` is populated by train() or by load().
        self.model = model
        super().__init__(element_config, le)
    def required_element(self):
        """This classifier consumes HFTokenizer output (ids / masks)."""
        return [HFTokenizer]
    def _build_model(self, label_count):
        """Instantiate the pretrained model with a classification head."""
        return AutoModelForSequenceClassification.from_pretrained(
            self.element_config[MODEL_NAME], num_labels=label_count
        )
    @staticmethod
    def get_input_ids(message: RuthData) -> Dict[Text, List[int]]:
        """Return the token ids attached by the tokenizer.

        Raises:
            ValueError: if the message has not been tokenized.
        """
        input_ids = message.get(INPUT_IDS)
        if input_ids is not None:
            return input_ids
        raise ValueError("There is no sentence. Not able to train HFClassifier")
    @staticmethod
    def get_attention_masks(message: RuthData) -> Dict[Text, List[int]]:
        """Return the attention masks attached by the tokenizer.

        Raises:
            ValueError: if the message has not been tokenized.
        """
        attention_masks = message.get(ATTENTION_MASKS)
        if attention_masks is not None:
            return attention_masks
        raise ValueError("There is no sentence. Not able to train HFClassifier")
    @staticmethod
    def get_optimizer(model):
        """AdamW optimizer over all model parameters."""
        return AdamW(model.parameters(), lr=5e-5)
    @staticmethod
    def get_device():
        """Prefer CUDA when available, else CPU."""
        return (
            torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
        )
    @property
    def get_params(self):
        """Training hyper-parameters taken from the element config."""
        return {
            EPOCHS: self.element_config[EPOCHS],
            BATCH_SIZE: self.element_config[BATCH_SIZE],
        }
    def train(self, training_data: TrainData):
        """Fine-tune the transformer on the tokenized training examples."""
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        # At least two classes are required to train a classifier.
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = {
            "input_ids": [
                self.get_input_ids(message) for message in training_data.intent_examples
            ],
            "attention_masks": [
                self.get_attention_masks(message)
                for message in training_data.intent_examples
            ],
        }
        y = self.encode_the_str_to_int(intents)
        label_count = len(Counter(y).keys())
        params = self.get_params
        loaded_data = HFDatasetLoader(X, y)
        batched_data = DataLoader(
            loaded_data, batch_size=params[BATCH_SIZE], shuffle=True
        )
        self.model = self._build_model(label_count)
        optimizer = self.get_optimizer(self.model)
        device = self.get_device()
        logger.info("device: " + str(device) + " is used")
        self.model.to(device)
        self.model.train()
        # Standard fine-tuning loop: forward, backward, step per batch.
        for epoch in range(params[EPOCHS]):
            for batch in tqdm(batched_data, desc="epoch " + str(epoch)):
                optimizer.zero_grad()
                input_ids = batch["input_ids"].to(device)
                attention_masks = batch["attention_masks"].to(device)
                labels = batch["labels"].to(device)
                outputs = self.model(
                    input_ids, attention_mask=attention_masks, labels=labels
                )
                loss = outputs[0]
                loss.backward()
                optimizer.step()
    def persist(self, file_name: Text, model_dir: Path):
        """Save the fine-tuned model and pickle the label encoder."""
        classifier_file_name = file_name + "_classifier"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = str(model_dir) + "/" + classifier_file_name
        encoder_path = str(model_dir) + "/" + encoder_file_name
        if self.model and self.le:
            # Unwrap DataParallel-style wrappers before saving.
            model_to_save = (
                self.model.module if hasattr(self.model, "module") else self.model
            )
            model_to_save.save_pretrained(classifier_path)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}
    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path):
        """Restore a persisted model and label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        classifier = AutoModelForSequenceClassification.from_pretrained(
            classifier_file_name
        )
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=classifier, le=le)
    def _predict(self, input_ids, attention_masks) -> Tuple[List[int], List[float]]:
        """Return class indices sorted by confidence plus the probabilities."""
        predictions = self.predict_probabilities(input_ids, attention_masks)
        # fliplr turns argsort's ascending order into descending confidence.
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index[0], predictions[:, sorted_index][0][0]
    def predict_probabilities(self, input_ids, attention_masks):
        """Softmax class probabilities for one tokenized message."""
        self.model.to(self.get_device())
        self.model.eval()
        probabilities = self.model(
            torch.tensor(input_ids, device=self.get_device()),
            attention_mask=torch.tensor(attention_masks, device=self.get_device()),
        )[0]
        probabilities = nn.functional.softmax(probabilities, dim=-1)
        probabilities = probabilities.to(torch.device("cpu"))
        probabilities = probabilities.detach().numpy()
        return probabilities
    def parse(self, message: RuthData):
        """Predict the intent of *message* and attach intent + ranking to it."""
        input_ids = [message.get(INPUT_IDS)]
        attention_masks = [message.get(ATTENTION_MASKS)]
        index, probabilities = self._predict(input_ids, attention_masks)
        intents = self._change_int_to_text(index)
        # (removed a redundant ``probabilities = probabilities`` no-op)
        if intents.size > 0 and probabilities.size > 0:
            # Keep only the LABEL_RANKING_LIMIT most confident candidates.
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {"name": intents[0], "accuracy": probabilities[0]}
            intent_rankings = [
                {"name": name, "accuracy": probability} for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
class HFDatasetLoader(Dataset):
    """Torch ``Dataset`` adapter over tokenizer encodings and integer labels.

    ``encodings`` maps field names (e.g. ``input_ids``) to per-example lists;
    ``labels`` holds the encoded intent for each example.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Convert every encoding field plus the label to tensors on access.
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item["labels"] = torch.tensor(self.labels[idx])
        return item
import logging
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple, Union
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
console = Console()
class SVMClassifier(IntentClassifier):
    """Intent classifier built on an SVC tuned with grid search.

    ``defaults`` holds the hyper-parameter grid handed to ``GridSearchCV``
    plus ``max_length``, the flattened dense-feature width used when padding
    single vectors at parse time.
    """

    defaults = {
        "C": [1, 2, 5, 10, 20, 100],
        "kernel": ["linear", "rbf"],
        "gamma": ["auto", 0.1],
        "decision_function_shape": ["ovr"],
        "max_cross_validation_folds": 5,
        "scoring": "f1_weighted",
        "max_length": 30000,
    }

    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        clf: GridSearchCV = None,
    ):
        """``le`` and ``clf`` are supplied when restoring a persisted model."""
        self.clf = clf
        super().__init__(element_config, le)

    @staticmethod
    def get_features(message: RuthData) -> Union[sparse.spmatrix, ndarray]:
        """Return the first feature attached to ``message``, or fail loudly."""
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train SVMClassifier")

    @property
    def param_grids(self):
        """Hyper-parameter grid for ``GridSearchCV``, taken from the element config."""
        return {
            "C": self.element_config["C"],
            "kernel": self.element_config["kernel"],
            "gamma": self.element_config["gamma"],
            "decision_function_shape": self.element_config["decision_function_shape"],
        }

    def _create_gridsearch(self, X, y) -> "sklearn.model_selection.GridSearchCV":
        """Build the (unfitted) grid search around a probability-enabled SVC."""
        from sklearn.svm import SVC

        clf = SVC(probability=True)
        param_grids = self.param_grids
        return GridSearchCV(
            clf,
            param_grids,
            scoring=self.element_config["scoring"],
            # cv=self.element_config["max_cross_validation_folds"],
        )

    def train(self, training_data: TrainData):
        """Fit the grid-searched SVC on the featurized training examples."""
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = [self.get_features(message) for message in training_data.intent_examples]
        if self.check_dense(X[0]):
            # Remember the flattened width so parse() can pad single vectors.
            max_length = self.get_max_length(X)
            self.element_config["max_length"] = max_length
            X = [self.ravel_vector(x) for x in X]
            # X = [self.pad_vector(x, max_length) for x in X]
        else:
            X = [message.toarray() for message in X]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.clf = self._create_gridsearch(X, y)
        self.clf.fit(X, y)
        console.print(f"The Best parameter we got are {self.clf.best_params_}")
        console.print(f"score: {self.clf.best_score_}")

    def persist(self, file_name: Text, model_dir: Path):
        """Pickle the best estimator and label encoder; return their file names."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.clf and self.le:
            json_pickle(classifier_path, self.clf.best_estimator_)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return label indices sorted by descending confidence and the scores."""
        predictions = self.predict_probabilities(x)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]

    def predict_probabilities(self, x: ndarray) -> ndarray:
        """Class probabilities for a single flattened feature vector."""
        return self.clf.predict_proba(x.reshape(1, -1))

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path):
        """Restore a persisted classifier and label encoder from ``model_dir``."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        clf = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, clf=clf, le=le)

    def parse(self, message: RuthData):
        """Predict the intent of ``message`` and attach intent + ranking."""
        x = self.get_features(message)
        if self.check_dense(x):
            # NOTE(review): parse pads dense vectors but train() only ravels
            # (padding is commented out there) — confirm widths always agree.
            x = self.ravel_vector(x)
            x = self.pad_vector(x, self.element_config["max_length"])
        else:
            x = x.toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {"name": intents[0], "accuracy": probabilities[0]}
            intent_rankings = [
                {"name": name, "accuracy": probability} for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
import logging
from pathlib import Path
from typing import Any, Dict, Text, Tuple
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
class NaiveBayesClassifier(IntentClassifier):
    """Intent classifier built on a Gaussian naive Bayes model."""

    # GaussianNB constructor arguments, overridable from the pipeline config.
    defaults = {"priors": None, "var_smoothing": 1e-9}

    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: "sklearn.naive_bayes.GaussianNB" = None,
    ):
        """``le`` and ``model`` are supplied when restoring a persisted model."""
        super(NaiveBayesClassifier, self).__init__(element_config=element_config, le=le)
        self.model = model

    def _create_classifier(self) -> "sklearn.naive_bayes.GaussianNB":
        """Build an unfitted GaussianNB from the configured hyper-parameters."""
        from sklearn.naive_bayes import GaussianNB

        priors = self.element_config["priors"]
        var_smoothing = self.element_config["var_smoothing"]
        return GaussianNB(priors=priors, var_smoothing=var_smoothing)

    def train(self, training_data: TrainData):
        """Fit the naive Bayes model on the densified training features."""
        intents = [message.get(INTENT) for message in training_data.intent_examples]
        if len(set(intents)) < 2:
            logger.warning(
                "There are no enough intent. "
                "At least two unique intent are needed to train the model"
            )
            return
        X = [
            self.get_features(message).toarray()
            for message in training_data.intent_examples
        ]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.model = self._create_classifier()
        self.model.fit(X, y)

    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return label indices sorted by descending confidence and the scores."""
        predictions = self.predict_probabilities(x)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]

    def predict_probabilities(self, x: ndarray) -> ndarray:
        """Class probabilities for a single flattened feature vector."""
        return self.model.predict_proba(x.reshape(1, -1))

    def _change_int_to_text(self, prediction: ndarray) -> ndarray:
        """Map encoded label ids back to intent names via the label encoder."""
        return self.le.inverse_transform(prediction)

    @staticmethod
    def get_features(message: RuthData) -> sparse.spmatrix:
        """Return the first feature attached to ``message``, or fail loudly."""
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train NaiveBayesClassifier")

    def parse(self, message: RuthData):
        """Predict the intent of ``message`` and attach intent + ranking."""
        x = self.get_features(message).toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {"name": intents[0], "accuracy": probabilities[0]}
            intent_rankings = [
                {"name": name, "accuracy": probability} for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)

    def persist(self, file_name: Text, model_dir: Path):
        """Pickle the fitted model and label encoder; return their file names."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.model and self.le:
            json_pickle(classifier_path, self.model)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path):
        """Restore a persisted classifier and label encoder from ``model_dir``."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        model = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=model, le=le)
import io
import os
from typing import Any, Dict, List, Optional, Text
from urllib import request
import numpy
from progressbar import progressbar
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.constants import ELEMENT_UNIQUE_NAME
from ruth.nlu.featurizers.dense_featurizers.dense_featurizer import DenseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
pbar = None
class FastTextFeaturizer(DenseFeaturizer):
    """Dense featurizer backed by pre-trained fastText word vectors.

    Downloads the configured ``.vec`` archive into ``~/.cache/ruth/models`` on
    first use, loads it into a token -> vector dict, and attaches one vector
    per token to each message.
    """

    DO_LOWER_CASE = "do_lower_case"
    defaults = {MODEL_NAME: "wiki-news-300d-1M.vec.zip", DO_LOWER_CASE: True}
    DEFAULT_MODELS_DIR = os.path.join(
        os.path.expanduser("~"), ".cache", "ruth", "models"
    )
    # Supported archives mapped to their official download URLs.
    MODELS = {
        "wiki-news-300d-1M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M.vec.zip",
        "wiki-news-300d-1M-subword.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M-subword.vec.zip",
        "crawl-300d-2M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M.vec.zip",
        "crawl-300d-2M-subword.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M-subword.zip",
    }

    def __init__(self, element_config: Optional[Dict[Text, Any]]):
        """Validate the configured model name and ensure the vectors are on disk."""
        super(FastTextFeaturizer, self).__init__(element_config)
        self.vectors = None
        self.featurizer = {}
        if self.element_config[MODEL_NAME] not in self.MODELS:
            raise ValueError(
                "Model name not found. Please choose from the following: "
                "{}".format(list(self.MODELS.keys()))
            )
        self.file_path = self.download_models(self.element_config[MODEL_NAME])
        self.dimension = 300

    def download_models(self, specific_models=None):
        """Download (if needed) and unzip the requested model; return the .vec path."""
        os.makedirs(self.DEFAULT_MODELS_DIR, exist_ok=True)

        def show_progress(block_num, block_size, total_size):
            # NOTE(review): relies on module-global ``pbar`` and on
            # ``progressbar.ProgressBar`` resolving through the
            # ``from progressbar import progressbar`` import — verify.
            global pbar
            if pbar is None:
                pbar = progressbar.ProgressBar(maxval=total_size)
                pbar.start()
            downloaded = block_num * block_size
            if downloaded < total_size:
                pbar.update(downloaded)
            else:
                pbar.finish()
                pbar = None

        for model_name, url in self.MODELS.items():
            if specific_models is not None and str(model_name) not in str(
                specific_models
            ):
                continue
            model_path = os.path.join(self.DEFAULT_MODELS_DIR, model_name)
            if os.path.exists(model_path):
                # Strip ".zip" to point at the previously extracted .vec file.
                model_path = model_path[:-4]
                return model_path
            request.urlretrieve(url, model_path, show_progress)
            import zipfile

            with zipfile.ZipFile(model_path, "r") as zip_ref:
                zip_ref.extractall(self.DEFAULT_MODELS_DIR)
            model_path = model_path[:-4]
            return model_path
        # Bug fix: the original raised an f-string (raising a non-exception is
        # itself a TypeError); raise a proper exception type instead.
        raise ValueError(
            f"Given model {specific_models} not found. "
            "Please check the documentation and give the right Fastext model name"
        )

    def train(self, training_data: TrainData):
        """Load the vector table and attach per-token vectors to every example."""
        self.featurizer = self._build_featurizer()
        tokenized_data: List[List[Text]] = [
            message.get_tokenized_data() for message in training_data.intent_examples
        ]
        self.vectors = [
            self.get_vector_list(token_list) for token_list in tokenized_data
        ]
        for message, vector in zip(training_data.training_examples, self.vectors):
            message.add_features(
                Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME])
            )

    def _build_featurizer(self):
        """Parse the .vec file into a ``{token: vector}`` dict."""
        fasttext_corpus = io.open(
            self.file_path, "r", encoding="utf-8", newline="\n", errors="ignore"
        )
        model = {}
        for line in tqdm(fasttext_corpus, colour="red"):
            tokens = line.strip().split(" ")
            # NOTE(review): vector components are kept as strings exactly as
            # read from the file; downstream numeric use likely expects
            # floats — confirm.
            model[tokens[0]] = numpy.array(list(tokens[1:]))
        return model

    def get_vector_list(self, token_list):
        """Return a vector per token, lazily loading the table when empty."""
        if self.featurizer == {}:
            # Bug fix: the original discarded the result, leaving the
            # featurizer empty and every lookup returning zeros.
            self.featurizer = self._build_featurizer()
        if not token_list:
            return numpy.zeros(self.dimension)
        return numpy.array([self.get_vector(token) for token in token_list])

    def get_vector(self, token):
        """Vector for ``token``; zeros for out-of-vocabulary tokens."""
        if token in self.featurizer and self.featurizer != {}:
            return self.featurizer[token]
        else:
            return numpy.zeros(self.dimension)

    def parse(self, message: RuthData):
        """Attach the token vectors for a single message at inference time."""
        tokens = message.get_tokenized_data()
        vector = self.get_vector_list(tokens)
        message.add_features(Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME]))
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import CountVectorizer
logger = logging.getLogger(__name__)
console = Console()
class CountVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer wrapping scikit-learn's ``CountVectorizer``."""

    # CountVectorizer settings, overridable from the pipeline config.
    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "min_ngram": 1,
        "max_ngram": 1,
        "lowercase": True,
        "max_features": None,
        "use_lemma": True,
    }

    def _load_params(self):
        """Copy vectorizer settings from the element config onto attributes."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.min_ngram = self.element_config["min_ngram"]
        self.max_ngram = self.element_config["max_ngram"]
        self.lowercase = self.element_config["lowercase"]
        self.use_lemma = self.element_config["use_lemma"]

    def _verify_analyzer(self) -> None:
        """Warn about setting combinations that are legal but probably mistakes."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.max_ngram == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )

    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["CountVectorizer"] = None,
    ):
        """``vectorizer`` is supplied when restoring a persisted featurizer."""
        super(CountVectorFeaturizer, self).__init__(element_config)
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()

    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vacabulary=None
    ) -> CountVectorizer:
        """Construct a ``CountVectorizer`` from a parameter dict (and optional vocab)."""
        return CountVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=(parameters["min_ngram"], parameters["max_ngram"]),
            lowercase=parameters["lowercase"],
            vocabulary=vacabulary,
        )

    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        try:
            return hasattr(self.vectorizer, "vocabulary_")
        except (AttributeError, KeyError):
            return False

    def create_vectors(self, examples: List[RuthData]):
        """Transform every example's text into a sparse count vector."""
        features = []
        for message in examples:
            features.append(self.vectorizer.transform([message.get(TEXT)]))
        return features

    def _get_featurizer_data(self, training_data: TrainData):
        """Vectorize the training examples, or return [] if not yet fitted."""
        if self._check_attribute_vocabulary():
            return self.create_vectors(training_data.training_examples)
        else:
            return []

    def _add_features_to_data(self, training_examples: List[RuthData], features):
        """Attach each computed feature to its corresponding message."""
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )

    def train(self, training_data: TrainData) -> CountVectorizer:
        """Fit the vectorizer on the training texts and featurize them."""
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "min_ngram": self.min_ngram,
                "max_ngram": self.max_ngram,
                "lowercase": self.lowercase,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        self._add_features_to_data(training_data.training_examples, features)
        return self.vectorizer

    def parse(self, message: RuthData):
        """Featurize a single message at inference time."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )

    def get_vocablary_from_vectorizer(self):
        """Return the fitted vocabulary, failing loudly when untrained."""
        if self.vectorizer.vocabulary_:
            return self.vectorizer.vocabulary_
        else:
            # Bug fix: the original raised a plain string, which is itself a
            # TypeError; raise a proper exception type instead.
            raise ValueError(
                "CountVectorizer not got trained. "
                "Please check the training data and retrain the model"
            )

    def persist(self, file_name: Text, model_dir: Text):
        """Pickle only the fitted vocabulary; return the file name for metadata."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            vocab = self.vectorizer.vocabulary_
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, vocab)
        return {"file_name": file_name}

    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "CountVectorFeaturizer":
        """Rebuild the featurizer from the persisted vocabulary (if present)."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            return cls(meta)
        vocabulary = json_unpickle(featurizer_file)
        vectorizers = cls._build_vectorizer(parameters=meta, vacabulary=vocabulary)
        return cls(meta, vectorizers)
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import TfidfVectorizer
logger = logging.getLogger(__name__)
console = Console()
class TfidfVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer wrapping scikit-learn's ``TfidfVectorizer``.

    Unlike ``CountVectorFeaturizer`` this persists the whole fitted
    vectorizer (idf weights included), not just the vocabulary.
    """

    # TfidfVectorizer settings, overridable from the pipeline config.
    # NOTE(review): ``ngram_range`` loaded from YAML may arrive as a list
    # rather than a tuple — confirm sklearn accepts it in this project.
    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "ngram_range": (1, 1),
        "lowercase": True,
        "max_features": None,
        "norm": "l2",
        "use_idf": True,
    }

    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["TfidfVectorizer"] = None,
    ):
        """``vectorizer`` is supplied when restoring a persisted featurizer."""
        super(TfidfVectorFeaturizer, self).__init__(element_config)
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()

    def _load_params(self):
        """Copy vectorizer settings from the element config onto attributes."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.ngram_range = self.element_config["ngram_range"]
        self.lowercase = self.element_config["lowercase"]
        self.max_features = self.element_config["max_features"]
        self.norm = self.element_config["norm"]
        self.use_idf = self.element_config["use_idf"]

    def _verify_analyzer(self) -> None:
        """Warn about setting combinations that are legal but probably mistakes."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.ngram_range[1] == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )

    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vacabulary=None
    ) -> TfidfVectorizer:
        """Construct a ``TfidfVectorizer`` from a parameter dict (and optional vocab)."""
        return TfidfVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=parameters["ngram_range"],
            lowercase=parameters["lowercase"],
            max_features=parameters["max_features"],
            norm=parameters["norm"],
            use_idf=parameters["use_idf"],
            vocabulary=vacabulary,
        )

    def train(self, training_data: TrainData) -> TfidfVectorizer:
        """Fit the vectorizer on the training texts and featurize them."""
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "ngram_range": self.ngram_range,
                "lowercase": self.lowercase,
                "max_features": self.max_features,
                "norm": self.norm,
                "use_idf": self.use_idf,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        self._add_featurizer_data(training_data, features)
        return self.vectorizer

    def parse(self, message: RuthData):
        """Featurize a single message at inference time."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )

    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        try:
            return hasattr(self.vectorizer, "vocabulary_")
        except (AttributeError, KeyError):
            return False

    def create_vector(self, examples: List[RuthData]):
        """Transform every example's text into a sparse tf-idf vector."""
        features = []
        for message in examples:
            features.append(self.vectorizer.transform([message.get(TEXT)]))
        return features

    def _get_featurizer_data(self, training_data: TrainData):
        """Vectorize the training examples, or return [] if not yet fitted."""
        if self._check_attribute_vocabulary():
            return self.create_vector(training_data.training_examples)
        else:
            return []

    def _add_featurizer_data(self, training_examples: List[RuthData], features):
        """Attach each computed feature to its corresponding message."""
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )

    def persist(self, file_name: Text, model_dir: Text):
        """Pickle the entire fitted vectorizer; return the file name for metadata."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, self.vectorizer)
        return {"file_name": file_name}

    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "TfidfVectorFeaturizer":
        """Rebuild the featurizer from the persisted vectorizer (if present)."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            return cls(meta)
        vectorizer = json_unpickle(featurizer_file)
        return cls(meta, vectorizer)
import json
import re
from pathlib import Path
from typing import Any, Dict, List, Text
import yaml
from pydantic import BaseModel
from ruth.nlu.model import ElementBuilder, Interpreter
def get_config(pipeline_path: Path) -> Dict[Text, Any]:
    """Read a YAML pipeline definition and return it as a dictionary."""
    with open(pipeline_path, "r") as config_file:
        return yaml.safe_load(config_file)
def load_json_data(path: Path) -> Dict[Text, Any]:
    """Load and return the JSON document stored at ``path``."""
    with open(path, "r") as json_file:
        return json.load(json_file)
def build_pipeline_from_metadata(
    metadata: Dict[Text, Any],
    model_dir: Path,
    element_builder: ElementBuilder = None,
):
    """Instantiate every pipeline element described in ``metadata``.

    A fresh ``ElementBuilder`` is created when none is supplied.
    """
    builder = element_builder or ElementBuilder()
    pipeline: List[Dict[Text, Any]] = metadata["pipeline"]
    return [
        builder.load_element(element["name"], element, model_dir=model_dir)
        for element in pipeline
    ]
def get_metadata_from_model(model_path: Path) -> Dict[Text, Any]:
    """Read ``metadata.json`` from a trained model directory."""
    return load_json_data(model_path / "metadata.json")
def get_interpreter_from_model_path(model_path: str) -> Interpreter:
    """Resolve ``model_path``, load its pipeline and wrap it in an Interpreter."""
    resolved = check_model_path(model_path)
    metadata = get_metadata_from_model(resolved.absolute())
    pipeline = build_pipeline_from_metadata(metadata=metadata, model_dir=resolved)
    return Interpreter(pipeline)
def check_model_path(model_path: str) -> Path:
    """Resolve ``model_path`` to an existing model directory.

    When no path is given, pick the last (lexically greatest) ``ruth*``
    entry under the local ``models`` folder; raise ``FileNotFoundError``
    when nothing usable is found.
    """
    if model_path:
        if not Path(model_path).exists():
            raise FileNotFoundError(
                "Model does not exist in the given path.\nTo train: ruth train"
            )
        return Path(model_path)
    models_root = Path("models")
    if not models_root.exists():
        raise FileNotFoundError(
            "No models found.\nTrain new models using: ruth train"
        )
    candidates = sorted(
        child
        for child in models_root.iterdir()
        if child.is_dir() and re.search("ruth", str(child))
    )
    if not candidates:
        raise FileNotFoundError(
            "No models found.\nTrain new models using: ruth train"
        )
    return Path(candidates[-1])
def local_example_path(output_path: Text) -> Path:
    """Return the example-data file path, creating ``./data`` when defaulting."""
    if output_path:
        return Path(output_path) / "data" / "example.yml"
    data_dir = Path().absolute() / "data"
    data_dir.mkdir(exist_ok=True)
    return data_dir / "example.yml"
def local_pipeline_path(output_path: Text) -> Path:
    """Return the pipeline file path, defaulting to ``./pipeline.yml``."""
    return Path(output_path) if output_path else Path().absolute() / "pipeline.yml"
class Item(BaseModel):
    """Request payload for the HTTP parse endpoint: one raw utterance."""

    text: str
from typing import Any, Dict, Text, Union
import numpy as np
from numpy import ndarray
from ruth.constants import TEXT
from scipy import sparse
class Feature:
    """Container for one featurizer output (sparse matrix or dense ndarray).

    ``origin`` records the unique name of the element that produced it.
    """

    def __init__(self, feature: Union[sparse.spmatrix, ndarray], origin: Text):
        self.feature = feature
        self.origin = origin

    def is_sparse(self):
        """Return True when the payload is a scipy sparse matrix."""
        return isinstance(self.feature, sparse.spmatrix)

    def is_dense(self):
        """Return True when the payload is a dense numpy array."""
        return not self.is_sparse()

    def _combine_sparse_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        from scipy.sparse import hstack

        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot combine sparse features as sequence dimensions do not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}."
            )
        # Bug fix: the result was previously stored on ``self.features`` (a
        # typo), leaving ``self.feature`` unchanged.
        self.feature = hstack([self.feature, additional_features.feature])

    def _combine_dense_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> Any:
        # Bug fix: the original compared ``len(shape[0])`` (len() of an int —
        # a TypeError) and concatenated the Feature object itself rather
        # than its ndarray payload.
        if self.feature.shape[0] != additional_features.feature.shape[0]:
            raise ValueError(
                f"Cannot concatenate dense features as sequence dimension does not "
                f"match: {self.feature.shape[0]} != "
                f"{additional_features.feature.shape[0]}. "
                f"Message: {message.get(TEXT, 'Text not available')}"
            )
        return np.concatenate((self.feature, additional_features.feature), axis=-1)

    def combine_with_features(
        self, additional_features: "Feature", message: Dict[Text, Any]
    ) -> None:
        """Append ``additional_features`` to this feature in place.

        Mixing sparse and dense features is not supported.
        """
        if additional_features is None:
            return

        if self.is_dense() and additional_features.is_dense():
            # Bug fix: the original discarded the concatenation result, so
            # dense combination silently did nothing.
            self.feature = self._combine_dense_features(additional_features, message)
        elif self.is_sparse() and additional_features.is_sparse():
            self._combine_sparse_features(additional_features, message)
        else:
            raise ValueError("Cannot combine sparse and dense features.")
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from ruth.constants import TEXT
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.tokenizer.constants import MAX_LENGTH_FOR_PADDING
from ruth.nlu.tokenizer.tokenizer import Tokenizer
from ruth.shared.constants import ATTENTION_MASKS, INPUT_IDS
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
from transformers import AutoTokenizer
class HFTokenizer(Tokenizer):
    """Tokenizer element backed by a HuggingFace ``AutoTokenizer``.

    Writes ``INPUT_IDS`` and ``ATTENTION_MASKS`` onto each message for the
    downstream ``HFClassifier``.
    """

    DO_LOWER_CASE = "do_lower_case"
    defaults = {MODEL_NAME: "bert-base-uncased", DO_LOWER_CASE: True}

    def __init__(self, element_config: Optional[Dict[Text, Any]], tokenizer=None):
        """``tokenizer`` is supplied when restoring a persisted element."""
        super(HFTokenizer, self).__init__(element_config)
        self.tokenizer = tokenizer or {}

    def _build_tokenizer(self):
        """Download/load the pre-trained tokenizer named in the config."""
        return AutoTokenizer.from_pretrained(self.element_config[MODEL_NAME])

    def _create_tokens(self, examples: TrainData):
        """Batch-encode all example texts; returns (input_ids, attention_masks)."""
        before_padding_text = [message.get(TEXT) for message in examples]
        encoded = self.tokenizer(
            before_padding_text,
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        input_ids = encoded["input_ids"]
        attention_masks = encoded["attention_mask"]
        return input_ids, attention_masks

    def tokenize(self, training_data: TrainData):
        """Tokenize every training example in one batch."""
        return self._create_tokens(training_data.training_examples)

    @staticmethod
    def _add_tokens_to_data(
        training_examples: List[RuthData],
        input_ids: List[List[int]],
        attention_masks: List[List[int]],
    ):
        """Attach the encoded ids/masks back onto their messages."""
        for message, input_id, attention_mask in tqdm(
            zip(training_examples, input_ids, attention_masks),
            desc="tokenization",
            total=len(training_examples),
        ):
            message.set(INPUT_IDS, input_id)
            message.set(ATTENTION_MASKS, attention_mask)

    def train(self, training_data: TrainData):
        """Build the tokenizer and tokenize the whole training set."""
        self.tokenizer = self._build_tokenizer()
        input_ids, attention_masks = self.tokenize(training_data)
        self._add_tokens_to_data(
            training_data.training_examples, input_ids, attention_masks
        )

    def persist(self, file_name: Text, model_dir: Path):
        """Save the tokenizer files; return the directory name for metadata."""
        tokenizer_file_name = file_name + "_tokenizer"
        tokenizer_path = str(model_dir) + "/" + tokenizer_file_name
        if self.tokenizer:
            self.tokenizer.save_pretrained(tokenizer_path)
        return {"tokenizer": tokenizer_file_name}

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted tokenizer from ``model_dir``."""
        tokenizer_file_name = model_dir / meta["tokenizer"]
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_file_name)
        return cls(meta, tokenizer=tokenizer)

    def parse(self, message: RuthData):
        """Tokenize a single message at inference time.

        NOTE(review): ``padding=True`` pads to the longest sequence in the
        batch — with a single text this applies no padding; confirm the
        classifier does not expect fixed-length input here.
        """
        parser_token = self.tokenizer.encode_plus(
            message.get(TEXT),
            add_special_tokens=True,
            max_length=MAX_LENGTH_FOR_PADDING,
            padding=True,
            truncation=True,
            return_attention_mask=True,
        )
        message.set(INPUT_IDS, parser_token["input_ids"])
        message.set(ATTENTION_MASKS, parser_token["attention_mask"])
import logging
from collections import Counter
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple
import torch
from numpy import argsort, fliplr, ndarray
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.constants import BATCH_SIZE, EPOCHS, MODEL_NAME
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.nlu.tokenizer.hf_tokenizer import HFTokenizer
from ruth.shared.constants import (
ATTENTION_MASKS,
DEVICE,
INPUT_IDS,
INTENT_NAME_KEY,
PREDICTED_CONFIDENCE_KEY,
)
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import get_device, json_pickle, json_unpickle
from sklearn.preprocessing import LabelEncoder
from torch import nn
from torch.nn import Module
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
from transformers import AutoModelForSequenceClassification
from transformers import logging as transformer_logging
torch.cuda.empty_cache()
logger = logging.getLogger(__name__)
transformer_logging.set_verbosity_error()
console = Console()
class HFClassifier(IntentClassifier):
    """Intent classifier that fine-tunes a HuggingFace transformer.

    Expects each message to have been processed by ``HFTokenizer`` first,
    so that ``INPUT_IDS`` and ``ATTENTION_MASKS`` are already attached.
    """

    defaults = {
        EPOCHS: 100,
        MODEL_NAME: "bert-base-uncased",
        BATCH_SIZE: 4,
        DEVICE: "cuda" if torch.cuda.is_available() else "cpu",
    }

    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: Module = None,
    ):
        super().__init__(element_config, le)
        self.model = model
        self.device = get_device(self.element_config[DEVICE])

    def required_element(self):
        # Tokenization must run before this classifier in the pipeline.
        return [HFTokenizer]

    def _build_model(self, label_count):
        """Create a fresh sequence-classification head with ``label_count`` labels."""
        return AutoModelForSequenceClassification.from_pretrained(
            self.element_config[MODEL_NAME], num_labels=label_count
        )

    @staticmethod
    def get_input_ids(message: RuthData) -> Dict[Text, List[int]]:
        """Return the token ids stored on the message, failing loudly if absent."""
        input_ids = message.get(INPUT_IDS)
        if input_ids is not None:
            return input_ids
        raise ValueError("There is no sentence. Not able to train HFClassifier")

    @staticmethod
    def get_attention_masks(message: RuthData) -> Dict[Text, List[int]]:
        """Return the attention mask stored on the message, failing loudly if absent."""
        attention_masks = message.get(ATTENTION_MASKS)
        if attention_masks is not None:
            return attention_masks
        raise ValueError("There is no sentence. Not able to train HFClassifier")

    @staticmethod
    def get_optimizer(model):
        """AdamW with the conventional fine-tuning learning rate."""
        return AdamW(model.parameters(), lr=5e-5)

    @property
    def get_params(self):
        return {
            EPOCHS: self.element_config[EPOCHS],
            BATCH_SIZE: self.element_config[BATCH_SIZE],
        }

    def train(self, training_data: TrainData):
        """Fine-tune the transformer on the tokenized training examples."""
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        if len(set(intents)) < 2:
            logger.warning(
                "There are not enough intents. "
                "At least two unique intents are needed to train the model"
            )
            return
        X = {
            "input_ids": [
                self.get_input_ids(message) for message in training_data.intent_examples
            ],
            "attention_masks": [
                self.get_attention_masks(message)
                for message in training_data.intent_examples
            ],
        }
        y = self.encode_the_str_to_int(intents)
        label_count = len(Counter(y).keys())
        params = self.get_params
        loaded_data = HFDatasetLoader(X, y)
        batched_data = DataLoader(
            loaded_data, batch_size=params[BATCH_SIZE], shuffle=True
        )
        self.model = self._build_model(label_count)
        optimizer = self.get_optimizer(self.model)
        console.print("device: " + str(self.device) + " is used")
        self.model.to(self.device)
        self.model.train()
        for epoch in range(params[EPOCHS]):
            for batch in tqdm(batched_data, desc="epoch " + str(epoch)):
                optimizer.zero_grad()
                input_ids = batch["input_ids"].to(self.device)
                attention_masks = batch["attention_masks"].to(self.device)
                labels = batch["labels"].to(self.device)
                outputs = self.model(
                    input_ids, attention_mask=attention_masks, labels=labels
                )
                # When labels are passed, the model returns the loss first.
                loss = outputs[0]
                loss.backward()
                optimizer.step()

    def persist(self, file_name: Text, model_dir: Path):
        """Save the fine-tuned model and label encoder under ``model_dir``.

        Uses ``Path`` arithmetic instead of string concatenation for
        consistency with the other pipeline components.
        """
        classifier_file_name = file_name + "_classifier"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.model and self.le:
            # Unwrap DataParallel-style wrappers before saving.
            model_to_save = (
                self.model.module if hasattr(self.model, "module") else self.model
            )
            model_to_save.save_pretrained(classifier_path)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted classifier and its label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        classifier = AutoModelForSequenceClassification.from_pretrained(
            classifier_file_name
        )
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=classifier, le=le)

    def _predict(self, input_ids, attention_masks) -> Tuple[ndarray, ndarray]:
        """Return label indices sorted by confidence and the sorted probabilities."""
        predictions = self.predict_probabilities(input_ids, attention_masks)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index[0], predictions[:, sorted_index][0][0]

    def predict_probabilities(self, input_ids, attention_masks):
        """Run a forward pass and return softmax probabilities as a numpy array."""
        self.model.to(self.device)
        self.model.eval()
        probabilities = self.model(
            torch.tensor(input_ids, device=self.device),
            attention_mask=torch.tensor(attention_masks, device=self.device),
        )[0]
        probabilities = nn.functional.softmax(probabilities, dim=-1)
        probabilities = probabilities.to(torch.device("cpu"))
        probabilities = probabilities.detach().numpy()
        return probabilities

    def parse(self, message: RuthData):
        """Attach the predicted intent and the intent ranking to the message."""
        input_ids = [message.get(INPUT_IDS)]
        attention_masks = [message.get(ATTENTION_MASKS)]
        index, probabilities = self._predict(input_ids, attention_masks)
        intents = self._change_int_to_text(index)
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            intent = {
                INTENT_NAME_KEY: intents[0],
                PREDICTED_CONFIDENCE_KEY: float(probabilities[0]),
            }
            intent_rankings = [
                {INTENT_NAME_KEY: name, PREDICTED_CONFIDENCE_KEY: float(probability)}
                for name, probability in ranking
            ]
        else:
            intent = {INTENT_NAME_KEY: None, PREDICTED_CONFIDENCE_KEY: 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
class HFDatasetLoader(Dataset):
    """torch ``Dataset`` over pre-tokenized encodings and integer labels.

    ``encodings`` maps field names (e.g. ``input_ids``) to per-example
    sequences; ``labels`` is the parallel list of encoded intent labels.
    """

    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        # Convert each field of the idx-th example to a tensor on demand.
        item = {}
        for field, values in self.encodings.items():
            item[field] = torch.tensor(values[idx])
        item["labels"] = torch.tensor(self.labels[idx])
        return item
import logging
from pathlib import Path
from typing import Any, Dict, List, Text, Tuple, Union
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from rich.console import Console
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
# Rich console used for user-facing training progress output.
console = Console()
class SVMClassifier(IntentClassifier):
    """Intent classifier based on a grid-searched sklearn ``SVC``.

    Works on either sparse featurizer output or dense vectors; dense
    vectors are raveled and padded to a fixed ``max_length`` so that
    training and inference see the same representation.
    """

    defaults = {
        "C": [1, 2, 5, 10, 20, 100],
        "kernel": ["linear", "rbf"],
        "gamma": ["auto", 0.1],
        "decision_function_shape": ["ovr"],
        "max_cross_validation_folds": 5,
        "scoring": "f1_weighted",
        "max_length": 30000,
    }

    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        clf: GridSearchCV = None,
    ):
        self.clf = clf
        super().__init__(element_config, le)

    @staticmethod
    def get_features(message: RuthData) -> Union[sparse.spmatrix, ndarray]:
        """Return the featurized representation of the message or fail loudly."""
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train SVMClassifier")

    @property
    def param_grids(self):
        """Hyper-parameter grid assembled from the element config."""
        return {
            "C": self.element_config["C"],
            "kernel": self.element_config["kernel"],
            "gamma": self.element_config["gamma"],
            "decision_function_shape": self.element_config["decision_function_shape"],
        }

    def _create_gridsearch(self, X, y) -> "sklearn.model_selection.GridSearchCV":
        """Build the cross-validated grid search over an SVC."""
        from sklearn.svm import SVC

        clf = SVC(probability=True)
        param_grids = self.param_grids
        return GridSearchCV(
            clf,
            param_grids,
            scoring=self.element_config["scoring"],
            cv=self.element_config["max_cross_validation_folds"],
        )

    def train(self, training_data: TrainData):
        """Fit the grid-searched SVC on the featurized training examples."""
        intents: List[Text] = [
            message.get(INTENT) for message in training_data.intent_examples
        ]
        if len(set(intents)) < 2:
            logger.warning(
                "There are not enough intents. "
                "At least two unique intents are needed to train the model"
            )
            return
        X = [self.get_features(message) for message in training_data.intent_examples]
        if self.check_dense(X[0]):
            max_length = self.get_max_length(X)
            # Remember the padded length so parse() can reproduce it.
            self.element_config["max_length"] = max_length
            X = [self.ravel_vector(x) for x in X]
            # Pad every vector to a common length so the matrix below is
            # rectangular and matches the padding applied at parse() time.
            X = [self.pad_vector(x, max_length) for x in X]
        else:
            X = [message.toarray() for message in X]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.clf = self._create_gridsearch(X, y)
        self.clf.fit(X, y)
        console.print(f"The Best parameter we got are {self.clf.best_params_}")
        console.print(f"score: {self.clf.best_score_}")

    def persist(self, file_name: Text, model_dir: Path):
        """Serialize the best estimator and the label encoder to ``model_dir``."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.clf and self.le:
            json_pickle(classifier_path, self.clf.best_estimator_)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return label indices sorted by confidence plus sorted probabilities."""
        predictions = self.predict_probabilities(x)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]

    def predict_probabilities(self, x: ndarray) -> ndarray:
        return self.clf.predict_proba(x.reshape(1, -1))

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted classifier and its label encoder."""
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        clf = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, clf=clf, le=le)

    def parse(self, message: RuthData):
        """Attach the predicted intent and the intent ranking to the message."""
        x = self.get_features(message)
        if self.check_dense(x):
            x = self.ravel_vector(x)
            x = self.pad_vector(x, self.element_config["max_length"])
        else:
            x = x.toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            # Cast numpy scalars to plain floats (consistent with HFClassifier
            # and safe for JSON serialization downstream).
            intent = {"name": intents[0], "accuracy": float(probabilities[0])}
            intent_rankings = [
                {"name": name, "accuracy": float(probability)}
                for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)
import logging
from pathlib import Path
from typing import Any, Dict, Text, Tuple
import sklearn
from numpy import argsort, fliplr, ndarray, reshape
from ruth.constants import INTENT, INTENT_RANKING
from ruth.nlu.classifiers import LABEL_RANKING_LIMIT
from ruth.nlu.classifiers.ruth_classifier import IntentClassifier
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from scipy import sparse
from sklearn.preprocessing import LabelEncoder
logger = logging.getLogger(__name__)
class NaiveBayesClassifier(IntentClassifier):
    """Intent classifier based on sklearn's Gaussian Naive Bayes.

    Consumes the sparse featurizer output, densified per example.
    """

    defaults = {"priors": None, "var_smoothing": 1e-9}

    def __init__(
        self,
        element_config: Dict[Text, Any],
        le: LabelEncoder = None,
        model: "sklearn.naive_bayes.GaussianNB" = None,
    ):
        super().__init__(element_config=element_config, le=le)
        self.model = model

    def _create_classifier(self) -> "sklearn.naive_bayes.GaussianNB":
        """Build a GaussianNB configured from the element config."""
        from sklearn.naive_bayes import GaussianNB

        priors = self.element_config["priors"]
        var_smoothing = self.element_config["var_smoothing"]
        return GaussianNB(priors=priors, var_smoothing=var_smoothing)

    def train(self, training_data: TrainData):
        """Fit the classifier on dense feature matrices of the examples."""
        intents = [message.get(INTENT) for message in training_data.intent_examples]
        if len(set(intents)) < 2:
            logger.warning(
                "There are not enough intents. "
                "At least two unique intents are needed to train the model"
            )
            return
        X = [
            self.get_features(message).toarray()
            for message in training_data.intent_examples
        ]
        y = self.encode_the_str_to_int(intents)
        X = reshape(X, (len(X), -1))
        self.model = self._create_classifier()
        self.model.fit(X, y)

    def _predict(self, x: ndarray) -> Tuple[ndarray, ndarray]:
        """Return label indices sorted by confidence plus sorted probabilities."""
        predictions = self.predict_probabilities(x)
        sorted_index = fliplr(argsort(predictions, axis=1))
        return sorted_index, predictions[:, sorted_index]

    def predict_probabilities(self, x: ndarray) -> ndarray:
        return self.model.predict_proba(x.reshape(1, -1))

    def _change_int_to_text(self, prediction: ndarray) -> ndarray:
        """Map encoded label indices back to intent names."""
        return self.le.inverse_transform(prediction)

    @staticmethod
    def get_features(message: RuthData) -> sparse.spmatrix:
        """Return the featurized representation of the message or fail loudly."""
        feature = message.get_features()
        if feature is not None:
            return feature.feature[0]
        raise ValueError("There is no sentence. Not able to train NaiveBayesClassifier")

    def parse(self, message: RuthData):
        """Attach the predicted intent and the intent ranking to the message."""
        x = self.get_features(message).toarray()
        index, probabilities = self._predict(x)
        intents = self._change_int_to_text(index.flatten())
        probabilities = probabilities.flatten()
        if intents.size > 0 and probabilities.size > 0:
            ranking = list(zip(list(intents), list(probabilities)))[
                :LABEL_RANKING_LIMIT
            ]
            # Cast numpy scalars to plain floats (consistent with HFClassifier
            # and safe for JSON serialization downstream).
            intent = {"name": intents[0], "accuracy": float(probabilities[0])}
            intent_rankings = [
                {"name": name, "accuracy": float(probability)}
                for name, probability in ranking
            ]
        else:
            intent = {"name": None, "accuracy": 0.0}
            intent_rankings = []
        message.set(INTENT, intent)
        message.set(INTENT_RANKING, intent_rankings)

    def persist(self, file_name: Text, model_dir: Path):
        """Serialize the trained model and label encoder to ``model_dir``."""
        classifier_file_name = file_name + "_classifier.pkl"
        encoder_file_name = file_name + "_encoder.pkl"
        classifier_path = model_dir / classifier_file_name
        encoder_path = model_dir / encoder_file_name
        if self.model and self.le:
            json_pickle(classifier_path, self.model)
            json_pickle(encoder_path, self.le)
        return {"classifier": classifier_file_name, "encoder": encoder_file_name}

    @classmethod
    def load(cls, meta: Dict[Text, Any], model_dir: Path, **kwargs):
        """Restore a persisted classifier and its label encoder.

        ``**kwargs`` is accepted (and ignored) for signature consistency
        with the ``load`` classmethods of the other classifiers.
        """
        classifier_file_name = model_dir / meta["classifier"]
        encoder_file_name = model_dir / meta["encoder"]
        model = json_unpickle(Path(classifier_file_name))
        le = json_unpickle(Path(encoder_file_name))
        return cls(meta, model=model, le=le)
import io
import os
from typing import Any, Dict, List, Optional, Text
from urllib import request
import numpy
from progressbar import progressbar
from ruth.nlu.classifiers.constants import MODEL_NAME
from ruth.nlu.constants import ELEMENT_UNIQUE_NAME
from ruth.nlu.featurizers.dense_featurizers.dense_featurizer import DenseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from tqdm import tqdm
pbar = None
class FastTextFeaturizer(DenseFeaturizer):
    """Dense featurizer that looks up pre-trained fastText word vectors.

    Downloads (once) and caches one of the official English fastText
    vector files, then maps each token of a message to its 300-d vector.
    """

    DO_LOWER_CASE = "do_lower_case"
    defaults = {MODEL_NAME: "wiki-news-300d-1M.vec.zip", DO_LOWER_CASE: True}
    DEFAULT_MODELS_DIR = os.path.join(
        os.path.expanduser("~"), ".cache", "ruth", "models"
    )
    MODELS = {
        "wiki-news-300d-1M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M.vec.zip",
        "wiki-news-300d-1M-subword.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/wiki-news-300d-1M-subword.vec.zip",
        "crawl-300d-2M.vec.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M.vec.zip",
        "crawl-300d-2M-subword.zip": "https://dl.fbaipublicfiles.com/"
        "fasttext/vectors-english/crawl-300d-2M-subword.zip",
    }

    def __init__(self, element_config: Optional[Dict[Text, Any]]):
        super(FastTextFeaturizer, self).__init__(element_config)
        self.vectors = None
        # Token -> embedding table, populated lazily by _build_featurizer().
        self.featurizer = {}
        if self.element_config[MODEL_NAME] not in self.MODELS:
            raise ValueError(
                "Model name not found. Please choose from the following: "
                "{}".format(list(self.MODELS.keys()))
            )
        self.file_path = self.download_models(self.element_config[MODEL_NAME])
        # All supported fastText models ship 300-dimensional vectors.
        self.dimension = 300

    def download_models(self, specific_models=None):
        """Download and unzip the requested model; return the .vec file path.

        If the archive is already cached under DEFAULT_MODELS_DIR the
        download is skipped.
        """
        os.makedirs(self.DEFAULT_MODELS_DIR, exist_ok=True)

        def show_progress(block_num, block_size, total_size):
            # urlretrieve reporthook driving the module-level progress bar.
            global pbar
            if pbar is None:
                pbar = progressbar.ProgressBar(maxval=total_size)
                pbar.start()
            downloaded = block_num * block_size
            if downloaded < total_size:
                pbar.update(downloaded)
            else:
                pbar.finish()
                pbar = None

        for model_name, url in self.MODELS.items():
            if specific_models is not None and str(model_name) not in str(
                specific_models
            ):
                continue
            model_path = os.path.join(self.DEFAULT_MODELS_DIR, model_name)
            if os.path.exists(model_path):
                # Already downloaded; strip the ".zip" suffix to get the
                # extracted .vec file path.
                model_path = model_path[:-4]
                return model_path
            request.urlretrieve(url, model_path, show_progress)
            import zipfile

            with zipfile.ZipFile(model_path, "r") as zip_ref:
                zip_ref.extractall(self.DEFAULT_MODELS_DIR)
            model_path = model_path[:-4]
            return model_path
        # Previously this raised a bare f-string, which is a TypeError at
        # runtime ("exceptions must derive from BaseException").
        raise ValueError(
            f"Given model {specific_models} not found. "
            "Please check the documentation and give the right Fastext model name"
        )

    def train(self, training_data: TrainData):
        """Build the vector table and attach vector features to each example."""
        self.featurizer = self._build_featurizer()
        tokenized_data: List[List[Text]] = [
            message.get_tokenized_data() for message in training_data.intent_examples
        ]
        self.vectors = [
            self.get_vector_list(token_list) for token_list in tokenized_data
        ]
        for message, vector in zip(training_data.training_examples, self.vectors):
            message.add_features(
                Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME])
            )

    def _build_featurizer(self):
        """Parse the .vec file into a token -> vector dict.

        NOTE(review): the first line of fastText .vec files is a
        "count dimension" header; it ends up as a bogus entry — harmless
        for lookups, but worth confirming.
        """
        fasttext_corpus = io.open(
            self.file_path, "r", encoding="utf-8", newline="\n", errors="ignore"
        )
        model = {}
        for line in tqdm(fasttext_corpus, colour="red"):
            tokens = line.strip().split(" ")
            # Store numeric embeddings; the previous code kept the raw
            # strings, producing unusable string arrays downstream.
            model[tokens[0]] = numpy.array(tokens[1:], dtype="float32")
        return model

    def get_vector_list(self, token_list):
        """Return a (num_tokens, 300) array of vectors for the tokens."""
        if self.featurizer == {}:
            # Lazily load the table; the previous code discarded the
            # return value, leaving self.featurizer empty forever.
            self.featurizer = self._build_featurizer()
        if not token_list:
            return numpy.zeros(self.dimension)
        return numpy.array([self.get_vector(token) for token in token_list])

    def get_vector(self, token):
        """Return the token's embedding, or a zero vector for OOV tokens."""
        if token in self.featurizer and self.featurizer != {}:
            return self.featurizer[token]
        else:
            return numpy.zeros(self.dimension)

    def parse(self, message: RuthData):
        """Attach fastText vector features to a single message."""
        tokens = message.get_tokenized_data()
        vector = self.get_vector_list(tokens)
        message.add_features(Feature(vector, self.element_config[ELEMENT_UNIQUE_NAME]))
import logging
from pathlib import Path
from typing import Any, Dict, List, Optional, Text
from rich.console import Console
from ruth.constants import TEXT
from ruth.nlu.featurizers.sparse_featurizers.constants import (
CLASS_FEATURIZER_UNIQUE_NAME,
)
from ruth.nlu.featurizers.sparse_featurizers.sparse_featurizer import SparseFeaturizer
from ruth.shared.nlu.training_data.collections import TrainData
from ruth.shared.nlu.training_data.feature import Feature
from ruth.shared.nlu.training_data.ruth_data import RuthData
from ruth.shared.utils import json_pickle, json_unpickle
from sklearn.feature_extraction.text import CountVectorizer
logger = logging.getLogger(__name__)
# Rich console for user-facing output (kept for parity with other modules).
console = Console()
class CountVectorFeaturizer(SparseFeaturizer):
    """Sparse featurizer wrapping sklearn's ``CountVectorizer``.

    Produces bag-of-words (or bag-of-char-ngrams) count vectors and
    attaches them to each message as sparse features.
    """

    defaults = {
        "analyzer": "word",
        "stop_words": None,
        "min_df": 1,
        "max_df": 1.0,
        "min_ngram": 1,
        "max_ngram": 1,
        "lowercase": True,
        "max_features": None,
        "use_lemma": True,
    }

    def _load_params(self):
        """Copy the element config values onto attributes for convenience."""
        self.analyzer = self.element_config["analyzer"]
        self.stop_words = self.element_config["stop_words"]
        self.min_df = self.element_config["min_df"]
        self.max_df = self.element_config["max_df"]
        self.min_ngram = self.element_config["min_ngram"]
        self.max_ngram = self.element_config["max_ngram"]
        self.lowercase = self.element_config["lowercase"]
        self.use_lemma = self.element_config["use_lemma"]

    def _verify_analyzer(self) -> None:
        """Warn about config combinations that are legal but probably wrong."""
        if self.analyzer != "word":
            if self.stop_words is not None:
                logger.warning(
                    "You specified the character wise analyzer."
                    " So stop words will be ignored."
                )
            if self.max_ngram == 1:
                logger.warning(
                    "You specified the character wise analyzer"
                    " but max n-gram is set to 1."
                    " So, the vocabulary will only contain"
                    " the single characters. "
                )

    def __init__(
        self,
        element_config: Optional[Dict[Text, Any]],
        vectorizer: Optional["CountVectorizer"] = None,
    ):
        super(CountVectorFeaturizer, self).__init__(element_config)
        self.vectorizer = vectorizer
        self._load_params()
        self._verify_analyzer()

    @staticmethod
    def _build_vectorizer(
        parameters: Dict[Text, Any], vacabulary=None
    ) -> CountVectorizer:
        # NOTE: the "vacabulary" spelling is kept for backward compatibility
        # with existing keyword callers.
        return CountVectorizer(
            analyzer=parameters["analyzer"],
            stop_words=parameters["stop_words"],
            min_df=parameters["min_df"],
            max_df=parameters["max_df"],
            ngram_range=(parameters["min_ngram"], parameters["max_ngram"]),
            lowercase=parameters["lowercase"],
            vocabulary=vacabulary,
        )

    def _check_attribute_vocabulary(self) -> bool:
        """Checks if trained vocabulary exists in attribute's count vectorizer."""
        try:
            return hasattr(self.vectorizer, "vocabulary_")
        except (AttributeError, KeyError):
            return False

    def create_vectors(self, examples: List[RuthData]):
        """Transform each example's text into a sparse count vector."""
        features = []
        for message in examples:
            features.append(self.vectorizer.transform([message.get(TEXT)]))
        return features

    def _get_featurizer_data(self, training_data: TrainData):
        """Vectorize all training examples, or nothing if not yet trained."""
        if self._check_attribute_vocabulary():
            return self.create_vectors(training_data.training_examples)
        else:
            return []

    def _add_features_to_data(self, training_examples: List[RuthData], features):
        """Attach the computed sparse features to their messages."""
        for message, feature in zip(training_examples, features):
            message.add_features(
                Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
            )

    def train(self, training_data: TrainData) -> CountVectorizer:
        """Fit the vectorizer on the training texts and featurize them."""
        self.vectorizer = self._build_vectorizer(
            parameters={
                "analyzer": self.analyzer,
                "stop_words": self.stop_words,
                "min_df": self.min_df,
                "max_df": self.max_df,
                "min_ngram": self.min_ngram,
                "max_ngram": self.max_ngram,
                "lowercase": self.lowercase,
            }
        )
        self.vectorizer.fit(self.get_data(training_data))
        features = self._get_featurizer_data(training_data)
        self._add_features_to_data(training_data.training_examples, features)
        return self.vectorizer

    def parse(self, message: RuthData):
        """Attach the count-vector feature for a single message."""
        feature = self.vectorizer.transform([message.get(TEXT)])
        message.add_features(
            Feature(feature, self.element_config[CLASS_FEATURIZER_UNIQUE_NAME])
        )

    def get_vocablary_from_vectorizer(self):
        """Return the fitted vocabulary, raising if the vectorizer is untrained.

        Previously this raised a bare string (a TypeError at runtime) and
        accessed ``vocabulary_`` directly, which would AttributeError on an
        untrained vectorizer before the raise was ever reached.
        """
        if self._check_attribute_vocabulary():
            return self.vectorizer.vocabulary_
        raise ValueError(
            "CountVectorizer not got trained. "
            "Please check the training data and retrain the model"
        )

    def persist(self, file_name: Text, model_dir: Text):
        """Pickle the fitted vocabulary under ``model_dir``."""
        file_name = file_name + ".pkl"
        if self.vectorizer:
            vocab = self.vectorizer.vocabulary_
            featurizer_path = Path(model_dir) / file_name
            json_pickle(featurizer_path, vocab)
        return {"file_name": file_name}

    @classmethod
    def load(
        cls, meta: Dict[Text, Any], model_dir: Path, **kwargs: Any
    ) -> "CountVectorFeaturizer":
        """Rebuild the featurizer from a persisted vocabulary, if present."""
        file_name = meta.get("file_name")
        featurizer_file = model_dir / file_name
        if not featurizer_file.exists():
            # No persisted vocabulary: return an untrained featurizer.
            return cls(meta)
        vocabulary = json_unpickle(featurizer_file)
        vectorizers = cls._build_vectorizer(parameters=meta, vacabulary=vocabulary)
        return cls(meta, vectorizers)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.