code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import logging
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.intent import Intent, Slot, SlotRange
from rhasspyhermes.nlu import (
NluError,
NluIntent,
NluIntentNotRecognized,
NluIntentParsed,
NluQuery,
NluTrain,
NluTrainSuccess,
)
from rhasspynlu import Sentence, recognize
_LOGGER = logging.getLogger("rhasspynlu_hermes")
# -----------------------------------------------------------------------------
class NluHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy NLU.

    Subscribes to ``NluQuery`` and ``NluTrain`` messages. Queries are
    answered with ``NluIntentParsed`` + ``NluIntent`` (on success),
    ``NluIntentNotRecognized`` (no match), or ``NluError`` (exception).
    Training loads a pre-built intent graph and replies with
    ``NluTrainSuccess`` or ``NluError``.
    """

    def __init__(
        self,
        client,
        intent_graph: typing.Optional[nx.DiGraph] = None,
        graph_path: typing.Optional[Path] = None,
        default_entities: typing.Optional[
            typing.Dict[str, typing.Iterable[Sentence]]
        ] = None,
        word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        fuzzy: bool = True,
        replace_numbers: bool = False,
        language: typing.Optional[str] = None,
        extra_converters: typing.Optional[
            typing.Dict[str, typing.Callable[..., typing.Any]]
        ] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        lang: typing.Optional[str] = None,
    ):
        super().__init__("rhasspynlu_hermes", client, site_ids=site_ids)
        self.subscribe(NluQuery, NluTrain)
        # Graph may be supplied directly (intent_graph) or lazily loaded
        # from graph_path on the first query / on training.
        self.graph_path = graph_path
        self.intent_graph = intent_graph
        # NOTE(review): default_entities is stored but not used elsewhere in
        # this module — presumably consumed by callers; verify before removing.
        self.default_entities = default_entities or {}
        self.word_transform = word_transform
        # fuzzy=True uses cost-based recognition; False requires exact match
        self.fuzzy = fuzzy
        # When True, digits in the query are rewritten as words before search
        self.replace_numbers = replace_numbers
        # Language passed to rhasspynlu.replace_numbers
        self.language = language
        self.extra_converters = extra_converters
        # Default "lang" reported on NluIntent when the query has none
        self.lang = lang

    # -------------------------------------------------------------------------
    async def handle_query(
        self, query: NluQuery
    ) -> typing.AsyncIterable[
        typing.Union[
            NluIntentParsed,
            typing.Tuple[NluIntent, TopicArgs],
            NluIntentNotRecognized,
            NluError,
        ]
    ]:
        """Do intent recognition.

        Yields an NluIntentParsed followed by an (NluIntent, topic-args)
        pair on success, NluIntentNotRecognized on no match, or NluError
        if anything raises.
        """
        # Keep the untouched input for NluIntent.raw_input and error context,
        # since query.input may be rewritten below (number replacement).
        original_input = query.input
        try:
            if not self.intent_graph and self.graph_path and self.graph_path.is_file():
                # Load graph from file
                _LOGGER.debug("Loading %s", self.graph_path)
                with open(self.graph_path, mode="rb") as graph_file:
                    self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)
            if self.intent_graph:

                def intent_filter(intent_name: str) -> bool:
                    """Filter out intents."""
                    # An empty/missing filter means "accept every intent"
                    if query.intent_filter:
                        return intent_name in query.intent_filter
                    return True

                # Replace digits with words
                if self.replace_numbers:
                    # Have to assume whitespace tokenization
                    words = rhasspynlu.replace_numbers(
                        query.input.split(), self.language
                    )
                    query.input = " ".join(words)
                input_text = query.input
                # Fix casing for output event
                if self.word_transform:
                    input_text = self.word_transform(input_text)
                # NOTE(review): input_text is computed above but not used
                # below (outputs use recognition.text) — confirm intent.
                # Pass in raw query input so raw values will be correct
                recognitions = recognize(
                    query.input,
                    self.intent_graph,
                    intent_filter=intent_filter,
                    word_transform=self.word_transform,
                    fuzzy=self.fuzzy,
                    extra_converters=self.extra_converters,
                )
            else:
                _LOGGER.error("No intent graph loaded")
                recognitions = []
            if recognitions:
                # Use first recognition only.
                recognition = recognitions[0]
                assert recognition is not None
                assert recognition.intent is not None
                intent = Intent(
                    intent_name=recognition.intent.name,
                    confidence_score=recognition.intent.confidence,
                )
                # Convert rhasspynlu entities to Hermes slots
                slots = [
                    Slot(
                        entity=(e.source or e.entity),
                        slot_name=e.entity,
                        confidence=1.0,
                        value=e.value_dict,
                        raw_value=e.raw_value,
                        range=SlotRange(
                            start=e.start,
                            end=e.end,
                            raw_start=e.raw_start,
                            raw_end=e.raw_end,
                        ),
                    )
                    for e in recognition.entities
                ]
                if query.custom_entities:
                    # Copy user-defined entities
                    for entity_name, entity_value in query.custom_entities.items():
                        slots.append(
                            Slot(
                                entity=entity_name,
                                confidence=1.0,
                                value={"value": entity_value},
                            )
                        )
                # intentParsed
                yield NluIntentParsed(
                    input=recognition.text,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    intent=intent,
                    slots=slots,
                )
                # intent
                yield (
                    NluIntent(
                        input=recognition.text,
                        id=query.id,
                        site_id=query.site_id,
                        session_id=query.session_id,
                        intent=intent,
                        slots=slots,
                        asr_tokens=[NluIntent.make_asr_tokens(recognition.tokens)],
                        asr_confidence=query.asr_confidence,
                        raw_input=original_input,
                        wakeword_id=query.wakeword_id,
                        lang=(query.lang or self.lang),
                        custom_data=query.custom_data,
                    ),
                    {"intent_name": recognition.intent.name},
                )
            else:
                # Not recognized
                yield NluIntentNotRecognized(
                    input=query.input,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    custom_data=query.custom_data,
                )
        except Exception as e:
            _LOGGER.exception("handle_query")
            yield NluError(
                site_id=query.site_id,
                session_id=query.session_id,
                error=str(e),
                context=original_input,
            )

    # -------------------------------------------------------------------------
    async def handle_train(
        self, train: NluTrain, site_id: str = "default"
    ) -> typing.AsyncIterable[
        typing.Union[typing.Tuple[NluTrainSuccess, TopicArgs], NluError]
    ]:
        """Transform sentences to intent graph"""
        try:
            # Replace the in-memory graph with the freshly trained one
            _LOGGER.debug("Loading %s", train.graph_path)
            with open(train.graph_path, mode="rb") as graph_file:
                self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)
            yield (NluTrainSuccess(id=train.id), {"site_id": site_id})
        except Exception as e:
            _LOGGER.exception("handle_train")
            yield NluError(
                site_id=site_id, session_id=train.id, error=str(e), context=train.id
            )

    # -------------------------------------------------------------------------
    async def on_message(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker."""
        # Dispatch by message type; results are re-published by HermesClient
        if isinstance(message, NluQuery):
            async for query_result in self.handle_query(message):
                yield query_result
        elif isinstance(message, NluTrain):
            assert site_id, "Missing site_id"
            async for train_result in self.handle_train(message, site_id=site_id):
                yield train_result
        else:
            _LOGGER.warning("Unexpected message: %s", message)
# Rhasspy Natural Language Understanding
[](https://github.com/rhasspy/rhasspy-nlu/actions)
[](https://pypi.org/project/rhasspy-nlu)
[](https://www.python.org)
[](https://github.com/rhasspy/rhasspy-nlu/blob/master/LICENSE)
Library for parsing Rhasspy sentence templates, doing intent recognition, and generating ARPA language models.
## Requirements
* Python 3.7
## Installation
```bash
$ git clone https://github.com/rhasspy/rhasspy-nlu
$ cd rhasspy-nlu
$ ./configure
$ make
$ make install
```
## Running
```bash
$ bin/rhasspy-nlu <ARGS>
```
## Parsing Sentence Templates
Rhasspy voice commands are stored in text files formatted like this:
```ini
[Intent1]
this is a sentence
this is another sentence
[Intent2]
a sentence in a different intent
```
You can parse these into a structured representation with `rhasspynlu.parse_ini` and then convert them to a graph using `rhasspynlu.intents_to_graph`:
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[LightOn]
turn on [the] (living room lamp | kitchen light){name}
"""
)
graph = rhasspynlu.intents_to_graph(intents)
```
The result is a [directed graph](https://networkx.github.io/documentation/networkx-2.3/reference/classes/digraph.html) whose states are words and edges are input/output labels.
You can pass an `intent_filter` function to `parse_ini` to return `True` for only the intent names you want to parse.
Additionally, a function can be provided for the `sentence_transform` argument that each sentence will be passed through (e.g., to lower case).
### Template Syntax
Sentence templates are based on the [JSGF](https://www.w3.org/TR/jsgf/) standard. The following constructs are available:
* Optional words
* `this is [a] test` - the word "a" may or may not be present
* Alternatives
* `set color to (red | green | blue)` - either "red", "green", or "blue" is possible
* Tags
* `turn on the [den | playroom]{location} light` - named entity `location` will be either "den" or "playroom"
* Substitutions
* `make ten:10 coffees` - output will be "make 10 coffees"
* `turn off the: (television | tele):tv` - output will be "turn off tv"
* `set brightness to (medium | half){brightness:50}` - named entity `brightness` will be "50"
* Rules
* `rule_name = rule body` can be referenced as `<rule_name>`
* Slots
* `$slot` will be replaced by a list of sentences in the `replacements` argument of `intents_to_graph`
#### Rules
Named rules can be added to your template file using the syntax:
```ini
rule_name = rule body
```
and then reference using `<rule_name>`. The body of a rule is a regular sentence, which may itself contain references to other rules.
You can reference rules from different intents by prefixing the rule name with the intent name and a dot:
```ini
[Intent1]
rule = a test
this is <rule>
[Intent2]
rule = this is
<rule> <Intent1.rule>
```
In the example above, `Intent2` uses its local `<rule>` as well as the `<rule>` from `Intent1`.
#### Slots
Slot names are prefixed with a dollar sign (`$`). When calling `intents_to_graph`, the `replacements` argument is a dictionary whose keys are slot names (with `$`) and whose values are lists of (parsed) `Sentence` objects. Each `$slot` will be replaced by the corresponding list of sentences, which may contain optional words, tags, rules, and other slots.
For example:
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetColor]
set color to $color
"""
)
graph = rhasspynlu.intents_to_graph(
intents, replacements = {
"$color": [rhasspynlu.Sentence.parse("red | green | blue")]
}
)
```
will replace `$color` with "red", "green", or "blue".
## Intent Recognition
After converting your sentence templates to a graph, you can recognize sentences. Assuming you have a `.ini` file like this:
```
[LightOn]
turn on [the] (living room lamp | kitchen light){name}
```
You can recognize sentences with:
```python
from pathlib import Path
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(Path("sentences.ini"))
graph = rhasspynlu.intents_to_graph(intents)
rhasspynlu.recognize("turn on living room lamp", graph)
```
will return a list of `Recognition` objects like:
```
[
Recognition(
intent=Intent(name='LightOn', confidence=1.0),
entities=[
Entity(
entity='name',
value='living room lamp',
raw_value='living room lamp',
start=8,
raw_start=8,
end=24,
raw_end=24,
tokens=['living', 'room', 'lamp'],
raw_tokens=['living', 'room', 'lamp']
)
],
text='turn on living room lamp',
raw_text='turn on living room lamp',
recognize_seconds=0.00010710899914556649,
tokens=['turn', 'on', 'living', 'room', 'lamp'],
raw_tokens=['turn', 'on', 'living', 'room', 'lamp']
)
]
```
An empty list means that recognition has failed. You can easily convert `Recognition` objects to JSON:
```python
...
import json
recognitions = rhasspynlu.recognize("turn on living room lamp", graph)
if recognitions:
recognition_dict = recognitions[0].asdict()
print(json.dumps(recognition_dict))
```
You can also pass an `intent_filter` function to `recognize` to return `True` only for intent names you want to include in the search.
#### Tokens
If your sentence is tokenized by something other than whitespace, pass the list of tokens into `recognize` instead of a string.
#### Recognition Fields
The `rhasspynlu.Recognition` object has the following fields:
* `intent` - a `rhasspynlu.Intent` instance
* `name` - name of recognized intent
* `confidence` - number from 0 to 1, with 1 being most confident
* `text` - substituted input text
* `raw_text` - input text
* `entities` - list of `rhasspynlu.Entity` objects
* `entity` - name of recognized entity ("name" in `(input:output){name}`)
* `value` - substituted value of recognized entity ("output" in `(input:output){name}`)
* `tokens` - list of words in `value`
* `start` - start index of `value` in `text`
* `end` - end index of `value` in `text` (exclusive)
* `raw_value` - value of recognized entity ("input" in `(input:output){name}`)
* `raw_tokens` - list of words in `raw_value`
* `raw_start` - start index of `raw_value` in `raw_text`
* `raw_end` - end index of `raw_value` in `raw_text` (exclusive)
* `recognize_seconds` - seconds taken for `recognize`
### Stop Words
You can pass a set of `stop_words` to `recognize`:
```python
rhasspynlu.recognize("turn on that living room lamp", graph, stop_words=set(["that"]))
```
Stop words in the input sentence will be skipped over if they don't match the graph.
### Strict Recognition
For faster, but less flexible recognition, set `fuzzy` to `False`:
```python
rhasspynlu.recognize("turn on the living room lamp", graph, fuzzy=False)
```
This is at least twice as fast, but will fail if the sentence is not precisely present in the graph.
Strict recognition also supports `stop_words` for a little added flexibility. If recognition without `stop_words` fails, a second attempt will be made using `stop_words`.
### Converters
Value conversions can be applied during recognition, such as converting the string "10" to the integer 10. Following a word, sequence, or tag name with "!converter" will run "converter" on the string value during `recognize`:
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetBrightness]
set brightness to (one: hundred:100)!int
"""
)
graph = rhasspynlu.intents_to_graph(intents)
recognitions = rhasspynlu.recognize("set brightness to one hundred", graph)
assert recognitions[0].tokens[-1] == 100
```
Converters can be applied to tags/entities as well:
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetBrightness]
set brightness to (one:1 | two:2){value!int}
"""
)
graph = rhasspynlu.intents_to_graph(intents)
recognitions = rhasspynlu.recognize("set brightness to two", graph)
assert recognitions[0].tokens[-1] == 2
```
The following default converters are available in `rhasspynlu`:
* int - convert to integer
* float - convert to real
* bool - convert to boolean
* lower - lower-case
* upper - upper-case
You may override these converters by passing a dictionary to the `converters` argument of `recognize`. To supply additional converters (instead of overriding), use `extra_converters`:
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetBrightness]
set brightness to (one:1 | two:2){value!myconverter}
"""
)
graph = rhasspynlu.intents_to_graph(intents)
recognitions = rhasspynlu.recognize(
"set brightness to two",
graph,
extra_converters={
"myconverter": lambda *values: [int(v)**2 for v in values]
}
)
assert recognitions[0].tokens[-1] == 4
```
Lastly, you can chain converters together with multiple "!":
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetBrightness]
set brightness to (one:1 | two:2){value!int!cube}
"""
)
graph = rhasspynlu.intents_to_graph(intents)
recognitions = rhasspynlu.recognize(
"set brightness to two",
graph,
extra_converters={
"cube": lambda *values: [v**3 for v in values]
}
)
assert recognitions[0].tokens[-1] == 8
```
## ARPA Language Models
You can compute [ngram counts](https://en.wikipedia.org/wiki/N-gram) from a `rhasspynlu` graph, useful for generating [ARPA language models](https://cmusphinx.github.io/wiki/arpaformat/). These models can be used by speech recognition systems, such as [Pocketsphinx](https://github.com/cmusphinx/pocketsphinx), [Kaldi](https://kaldi-asr.org), and [Julius](https://github.com/julius-speech/julius).
```python
import rhasspynlu
# Load and parse
intents = rhasspynlu.parse_ini(
"""
[SetColor]
set light to (red | green | blue)
"""
)
graph = rhasspynlu.intents_to_graph(intents)
counts = rhasspynlu.get_intent_ngram_counts(
graph,
pad_start="<s>",
pad_end="</s>",
order=3
)
# Print counts by intent
for intent_name in counts:
print(intent_name)
for ngram, count in counts[intent_name].items():
print(ngram, count)
print("")
```
will print something like:
```
SetColor
('<s>',) 3
('set',) 3
('<s>', 'set') 3
('light',) 3
('set', 'light') 3
('<s>', 'set', 'light') 3
('to',) 3
('light', 'to') 3
('set', 'light', 'to') 3
('red',) 1
('to', 'red') 1
('light', 'to', 'red') 1
('green',) 1
('to', 'green') 1
('light', 'to', 'green') 1
('blue',) 1
('to', 'blue') 1
('light', 'to', 'blue') 1
('</s>',) 3
('red', '</s>') 1
('green', '</s>') 1
('blue', '</s>') 1
('to', 'red', '</s>') 1
('to', 'green', '</s>') 1
('to', 'blue', '</s>') 1
```
### Opengrm
If you have the [Opengrm](http://www.opengrm.org/twiki/bin/view/GRM/NGramLibrary) command-line tools in your `PATH`, you can use `rhasspynlu` to generate language models in the [ARPA format](https://cmusphinx.github.io/wiki/arpaformat/).
The `graph_to_fst` and `fst_to_arpa` functions are used to convert between formats. Calling `fst_to_arpa` requires the following binaries to be present in your `PATH`:
* `fstcompile` (from [OpenFST](http://www.openfst.org))
* `ngramcount`
* `ngrammake`
* `ngrammerge`
* `ngramprint`
* `ngramread`
Example:
```python
# Convert to FST
graph_fst = rhasspynlu.graph_to_fst(graph)
# Write FST and symbol text files
graph_fst.write("my_fst.txt", "input_symbols.txt", "output_symbols.txt")
# Compile and convert to ARPA language model
rhasspynlu.fst_to_arpa(
"my_fst.txt",
"input_symbols.txt",
"output_symbols.txt",
"my_arpa.lm"
)
```
You can now use `my_arpa.lm` in any speech recognizer that accepts ARPA-formatted language models.
### Language Model Mixing
If you have an existing language model that you'd like to mix with Rhasspy voice commands, you will first need to convert it to an FST:
```python
rhasspynlu.fst_to_arpa("existing_arpa.lm", "existing_arpa.fst")
```
Now when you call `fst_to_arpa`, make sure to provide the `base_fst_weight` argument. This is a tuple with the path to your existing ARPA FST and a mixture weight between 0 and 1. A weight of 0.05 means that the base language model will receive 5% of the overall probability mass in the language model. The rest of the mass will be given to your custom voice commands.
Example:
```python
rhasspynlu.fst_to_arpa(
"my_fst.txt",
"input_symbols.txt",
"output_symbols.txt",
"my_arpa.lm",
base_fst_weight=("existing_arpa.fst", 0.05)
)
```
## Command Line Usage
The `rhasspynlu` module can be run directly to convert `sentences.ini` files into JSON graphs or FST text files:
```bash
python3 -m rhasspynlu sentences.ini > graph.json
```
You can pass multiple `.ini` files as arguments, and they will be combined. Adding a `--fst` argument will write out FST text files instead:
```bash
python3 -m rhasspynlu sentences.ini --fst
```
This will output three files in the current directory:
* `fst.txt` - finite state transducer as text
* `fst.isymbols.txt` - input symbols
* `fst.osymbols.txt` - output symbols
These file names can be changed with the `--fst-text`, `--fst-isymbols`, and `--fst-osymbols` arguments, respectively.
Compile to a binary FST using `fstcompile` (from [OpenFST](http://www.openfst.org)) with:
```bash
fstcompile \
--isymbols=fst.isymbols.txt \
--osymbols=fst.osymbols.txt \
--keep_isymbols=1 \
--keep_osymbols=1 \
fst.txt \
out.fst
```
## Word Pronunciations
Methods for loading and using phonetic pronunciation dictionaries are provided in `rhasspynlu.g2p` ("g2p" stands for "grapheme to phoneme").
Dictionaries are expected in the same format as the [CMU Pronouncing Dictionary](https://github.com/cmusphinx/cmudict), which is simply one word per line with whitespace separating words and phonemes:
```
yawn Y AO N
test T EH S T
say S EY
who HH UW
bee B IY
azure AE ZH ER
read R EH D
read(2) R IY D
```
When multiple pronunciations are available for a word (like "read" in the previous example), a `(N)` can be suffixed to the word.
You can load a phonetic dictionary into a Python dictionary with `rhasspynlu.g2p.read_pronunciations`:
```python
import rhasspynlu.g2p
with open("/path/to/file.dict", "r") as dict_file:
pronunciations = rhasspynlu.g2p.read_pronunciations(dict_file)
assert pronunciations == {
"yawn": [["Y", "AO", "N"]],
"test": [["T", "EH", "S", "T"]],
"say": [["S", "EY"]],
"who": [["HH", "UW"]],
"bee": [["B", "IY"]],
"azure": [["AE", "ZH", "ER"]],
"read": [["R", "EH", "D"], ["R", "IY", "D"]],
}
```
See [voice2json profiles](https://github.com/synesthesiam/voice2json-profiles) for pre-built phonetic dictionaries.
### Guessing Pronunciations
The `rhasspynlu.g2p.guess_pronunciations` function uses [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus) and a pre-trained grapheme to phoneme model to guess pronunciations for unknown words. You will need `phonetisaurus-apply` in your `$PATH` and the pre-trained model (`g2p.fst`) available:
```python
import rhasspynlu.g2p
guesses = rhasspynlu.g2p.guess_pronunciations(
["moogle", "ploop"], "/path/to/g2p.fst", num_guesses=1
)
print(list(guesses))
# Something like: [
# ('moogle', ['M', 'UW', 'G', 'AH', 'L']),
# ('ploop', ['P', 'L', 'UW', 'P'])
# ]
```
See [voice2json profiles](https://github.com/synesthesiam/voice2json-profiles) for pre-trained g2p models.
### Sounds Like Pronunciations
Rhasspy NLU supports an alternative way of specifying word pronunciations. Instead of specifying phonemes directly, you can describe how a word should be pronounced by referencing other words:
```
unknown_word1 known_word1 [known_word2] ...
...
```
For example, the singer [Beyoncé](https://www.beyonce.com/) sounds like a combination of the words "bee yawn say":
```
beyoncé bee yawn say
```
The `rhasspynlu.g2p.load_sounds_like` function will parse this text and, when given an existing pronunciation dictionary, generate a new pronunciation:
```python
import io
import rhasspynlu.g2p
# Load existing dictionary
pronunciations = rhasspynlu.g2p.read_pronunciations("/path/to/file.dict")
sounds_like = """
beyoncé bee yawn say
"""
with io.StringIO(sounds_like) as f:
rhasspynlu.g2p.load_sounds_like(f, pronunciations)
print(pronunciations["beyoncé"])
# Something like: [['B', 'IY', 'Y', 'AO', 'N', 'S', 'EY']]
```
You may reference a specific pronunciation for a known word using the `word(N)` syntax, where `N` is 1-based. Pronunciations are loaded in line order, so the order is predictable. For example, `read(2)` will reference the second pronunciation of the word "read". Without an `(N)`, all pronunciations found will be used.
#### Phoneme Literals
You can interject phonetic chunks into these pronunciations too. For example, the word "hooiser" sounds like "who" and the "-zure" in "azure":
```
hooiser who /Z 3/
```
Text between slashes (`/`) will be interpreted as phonemes in the configured speech system.
#### Word Segments
If a grapheme-to-phoneme alignment corpus is available, segments of words can also be used for pronunciations. Using the "hooiser" example above, we can replace the phonemes with:
```
hooiser who a>zure<
```
This will combine the pronunciation of "who" from the current phonetic dictionaries (`base_dictionary.txt` and `custom_words.txt`) and the "-zure" from the word "azure".
The brackets point `>at<` the segment of the word that you want to contribute to the pronunciation. This is accomplished using a grapheme-to-phoneme alignment corpus generated with [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus) and a pre-built phonetic dictionary. In the `a>zure<` example, the word "azure" is located in the alignment corpus, and the output phonemes corresponding to the letters "zure" are used.
```python
import io
import rhasspynlu.g2p
# Load existing dictionary
pronunciations = rhasspynlu.g2p.read_pronunciations("/path/to/file.dict")
# Example alignment corpus:
# a}AE z}ZH u|r}ER e}_
alignment = rhasspynlu.g2p.load_g2p_corpus("/path/to/g2p.corpus")
sounds_like = """
hooiser who a>zure<
"""
with io.StringIO(sounds_like) as f:
rhasspynlu.g2p.load_sounds_like(
f, pronunciations, g2p_alignment=alignment
)
print(pronunciations["hooiser"])
# Something like [["HH", "UW", "ZH", "ER"]]
```
See [voice2json profiles](https://github.com/synesthesiam/voice2json-profiles) for g2p alignment corpora.
| /rhasspy-nlu-0.4.0.tar.gz/rhasspy-nlu-0.4.0/README.md | 0.642881 | 0.979016 | README.md | pypi |
import base64
import itertools
import random
import time
import typing
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
import networkx as nx
from .intent import Entity, Intent, Recognition, RecognitionResult
from .jsgf_graph import get_start_end_nodes
from .utils import pairwise
# -----------------------------------------------------------------------------
# A step in a search path: either a bare graph node id, or a
# (node id, matched input tokens) pair once tokens have been consumed.
PathNodeType = typing.Union[int, typing.Tuple[int, typing.List[str]]]
# A complete path through the intent graph, as produced by the searches below
PathType = typing.List[PathNodeType]
def recognize(
    tokens: typing.Union[str, typing.List[str]],
    graph: nx.DiGraph,
    fuzzy: bool = True,
    stop_words: typing.Optional[typing.Set[str]] = None,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    converters: typing.Optional[
        typing.Dict[str, typing.Callable[..., typing.Any]]
    ] = None,
    extra_converters: typing.Optional[
        typing.Dict[str, typing.Callable[..., typing.Any]]
    ] = None,
    **search_args,
) -> typing.List[Recognition]:
    """Recognize one or more intents from tokens or a sentence.

    Returns a list of Recognition objects; an empty list means failure.
    """
    started_at = time.perf_counter()

    if isinstance(tokens, str):
        # Plain strings are assumed to be whitespace-tokenized
        tokens = tokens.split()

    if fuzzy:
        # Cost-based search: keep only the cheapest path(s) per intent
        best_results = best_fuzzy_cost(
            paths_fuzzy(
                tokens,
                graph,
                stop_words=stop_words,
                intent_filter=intent_filter,
                word_transform=word_transform,
                **search_args,
            )
        )
        finished_at = time.perf_counter()

        if not best_results:
            return []

        matches: typing.List[Recognition] = []
        for fuzzy_result in best_results:
            status, match = path_to_recognition(
                fuzzy_result.node_path,
                graph,
                cost=fuzzy_result.cost,
                converters=converters,
                extra_converters=extra_converters,
            )
            if status != RecognitionResult.SUCCESS:
                continue

            assert match is not None
            match.recognize_seconds = finished_at - started_at
            matches.append(match)

        return matches

    # Strict search: the sentence must be present in the graph exactly
    candidate_paths = list(
        paths_strict(
            tokens,
            graph,
            intent_filter=intent_filter,
            word_transform=word_transform,
            **search_args,
        )
    )
    if stop_words and not candidate_paths:
        # Second chance: drop stop words from the input and let the
        # search skip over them in the graph as well.
        tokens = [token for token in tokens if token not in stop_words]
        candidate_paths = list(
            paths_strict(
                tokens,
                graph,
                exclude_tokens=stop_words,
                intent_filter=intent_filter,
                word_transform=word_transform,
                **search_args,
            )
        )

    finished_at = time.perf_counter()
    matches = []
    for path in candidate_paths:
        status, match = path_to_recognition(
            path, graph, converters=converters, extra_converters=extra_converters
        )
        if status != RecognitionResult.SUCCESS:
            continue

        assert match is not None
        match.recognize_seconds = finished_at - started_at
        matches.append(match)

    return matches
# -----------------------------------------------------------------------------
def paths_strict(
    tokens: typing.List[str],
    graph: nx.DiGraph,
    exclude_tokens: typing.Optional[typing.Set[str]] = None,
    max_paths: typing.Optional[int] = None,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    word_transform: typing.Optional[typing.Callable[[str], str]] = None,
) -> typing.Iterable[PathType]:
    """Match a single path from the graph exactly if possible.

    Yields complete paths; tokens in exclude_tokens may be skipped when
    they fail to match an edge label.
    """
    if not tokens:
        return []

    accept_intent = intent_filter or (lambda _name: True)
    transform = word_transform or (lambda word: word)

    # node -> attribute dict
    node_attrs = graph.nodes(data=True)

    start_node, _end_node = get_start_end_nodes(graph)
    assert start_node is not None

    # Number of complete paths produced so far (for max_paths)
    yielded = 0

    # Breadth-first search over (node, path so far, unconsumed tokens)
    queue: typing.List[typing.Tuple[int, PathType, typing.List[str]]] = [
        (start_node, [], tokens)
    ]
    while queue:
        node, path, remaining = queue.pop(0)

        if node_attrs[node].get("final", False) and not remaining:
            # Reached a final state with every input token consumed
            yielded += 1
            yield path
            if max_paths and (yielded >= max_paths):
                break

        for successor, edge_attrs in graph[node].items():
            ilabel = edge_attrs.get("ilabel", "")
            olabel = edge_attrs.get("olabel", "")
            consumed: typing.List[str] = []

            # Intent boundary: drop edges for filtered-out intents
            if olabel.startswith("__label__") and not accept_intent(olabel[9:]):
                continue

            next_remaining = list(remaining)
            if ilabel:
                if not next_remaining:
                    # Edge requires a word but the input is exhausted
                    continue
                ilabel = transform(ilabel)
                if ilabel == transform(next_remaining[0]):
                    # Exact token match: consume it
                    consumed.append(next_remaining.pop(0))
                elif (not exclude_tokens) or (ilabel not in exclude_tokens):
                    # Mismatch that cannot be excused
                    continue
                # else: excluded label - follow edge without consuming

            queue.append((successor, path + [(node, consumed)], next_remaining))

    # No results
    return []
# -----------------------------------------------------------------------------
@dataclass
class FuzzyResult:
    """Single path for fuzzy recognition."""

    # Name of the intent this path belongs to
    intent_name: str
    # Nodes (with matched tokens) along the graph path
    node_path: PathType
    # Accumulated mismatch cost; lower is better
    cost: float
@dataclass
class FuzzyCostInput:
    """Input to fuzzy cost function."""

    # Input label of the graph edge being considered ("" for epsilon edges)
    ilabel: str
    # Remaining input tokens; the cost function pops tokens it consumes/skips
    tokens: typing.List[str]
    # Tokens that may be skipped at reduced cost
    stop_words: typing.Set[str]
    # Optional normalization (e.g. case folding) applied before comparison
    word_transform: typing.Optional[typing.Callable[[str], str]] = None
@dataclass
class FuzzyCostOutput:
    """Output from fuzzy cost function."""

    # Cost accumulated while trying to match the edge label
    cost: float
    # False when this search branch should be abandoned
    continue_search: bool = True
    # Input tokens consumed by a successful match
    matching_tokens: typing.List[str] = field(default_factory=list)
def default_fuzzy_cost(cost_input: FuzzyCostInput) -> FuzzyCostOutput:
    """Increases cost when input tokens fail to match graph. Marginal cost for stop words."""
    transform = cost_input.word_transform or (lambda x: x)
    # NOTE: remaining aliases cost_input.tokens; consumed/skipped tokens
    # are popped from the caller's list on purpose.
    remaining = cost_input.tokens
    stop_words = cost_input.stop_words
    label = cost_input.ilabel

    penalty = 0.0
    consumed: typing.List[str] = []

    if label:
        label = transform(label)

        # Skip leading tokens that do not match the edge label
        while remaining and (transform(remaining[0]) != label):
            skipped = transform(remaining.pop(0))
            # Stop words cost almost nothing so matching paths are preferred;
            # any other mismatch costs a full point.
            penalty += 0.1 if skipped in stop_words else 1

        if not remaining:
            # Input exhausted without a match: abandon this branch
            return FuzzyCostOutput(cost=penalty, continue_search=False)

        # Head of remaining now matches the label: consume it
        consumed.append(remaining.pop(0))

    return FuzzyCostOutput(cost=penalty, matching_tokens=consumed)
def paths_fuzzy(
    tokens: typing.List[str],
    graph: nx.DiGraph,
    stop_words: typing.Optional[typing.Set[str]] = None,
    cost_function: typing.Optional[
        typing.Callable[[FuzzyCostInput], FuzzyCostOutput]
    ] = None,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    word_transform: typing.Optional[typing.Callable[[str], str]] = None,
) -> typing.Dict[str, typing.List[FuzzyResult]]:
    """Do less strict matching using a cost function and optional stop words.

    Breadth-first search over the intent graph where input tokens that fail
    to match edge labels increase a path's cost instead of killing the path.

    Args:
        tokens: input words to recognize
        graph: intent graph (nodes flagged "start"/"final", edges carrying
            "ilabel"/"olabel" attributes)
        stop_words: words that are cheap to skip (see default_fuzzy_cost)
        cost_function: scores one edge against the remaining tokens
            (defaults to default_fuzzy_cost)
        intent_filter: returns True for intent names to keep
        word_transform: normalization applied before token comparison

    Returns:
        intent name -> list of the lowest-cost FuzzyResults for that intent
    """
    if not tokens:
        return {}
    intent_filter = intent_filter or (lambda x: True)
    cost_function = cost_function or default_fuzzy_cost
    stop_words = stop_words or set()
    # node -> attrs
    n_data = graph.nodes(data=True)
    # start state
    start_node: int = next(n for n, data in n_data if data.get("start"))
    # intent -> list of equally-cheap results
    intent_symbols_and_costs: typing.Dict[str, typing.List[FuzzyResult]] = defaultdict(
        list
    )
    # Lowest cost so far (node count is an upper bound)
    best_cost: float = float(len(n_data))
    # (node, in_tokens, out_path, out_count, cost, intent_name)
    node_queue: typing.List[
        typing.Tuple[int, typing.List[str], PathType, int, float, typing.Optional[str]]
    ] = [(start_node, tokens, [], 0, 0.0, None)]
    # BFS it up
    while node_queue:
        (
            q_node,
            q_in_tokens,
            q_out_path,
            q_out_count,
            q_cost,
            q_intent,
        ) = node_queue.pop(0)
        is_final: bool = n_data[q_node].get("final", False)
        # Update best intent cost on final state.
        # Don't bother reporting intents that failed to consume any tokens.
        if is_final and (q_cost < q_out_count):
            q_intent = q_intent or ""
            best_intent_cost: typing.Optional[float] = None
            best_intent_costs = intent_symbols_and_costs.get(q_intent)
            if best_intent_costs:
                best_intent_cost = best_intent_costs[0].cost
            final_cost = q_cost + len(q_in_tokens)  # remaining tokens count against cost
            final_path = q_out_path
            if (best_intent_cost is None) or (final_cost < best_intent_cost):
                # Overwrite best cost
                intent_symbols_and_costs[q_intent] = [
                    FuzzyResult(
                        intent_name=q_intent, node_path=final_path, cost=final_cost
                    )
                ]
            elif final_cost == best_intent_cost:
                # Add to existing list (tie)
                intent_symbols_and_costs[q_intent].append(
                    (
                        FuzzyResult(
                            intent_name=q_intent, node_path=final_path, cost=final_cost
                        )
                    )
                )
            if final_cost < best_cost:
                # Update best cost so far
                best_cost = final_cost
        if q_cost > best_cost:
            # Can't get any better; prune this path
            continue
        # Process child edges
        for next_node, edge_data in graph[q_node].items():
            in_label = edge_data.get("ilabel") or ""
            out_label = edge_data.get("olabel") or ""
            next_in_tokens = list(q_in_tokens)
            next_out_path = list(q_out_path)
            next_out_count = q_out_count
            next_cost = q_cost
            next_intent = q_intent
            if out_label:
                if out_label[:9] == "__label__":
                    # Intent name edge
                    next_intent = out_label[9:]
                    if not intent_filter(next_intent):
                        # Skip intent
                        continue
                elif out_label[:2] != "__":
                    # Real output word (not a meta label)
                    next_out_count += 1
            cost_output = cost_function(
                FuzzyCostInput(
                    ilabel=in_label,
                    tokens=next_in_tokens,
                    stop_words=stop_words,
                    word_transform=word_transform,
                )
            )
            next_cost += cost_output.cost
            if not cost_output.continue_search:
                continue
            # Extend current path
            next_out_path.append((q_node, cost_output.matching_tokens))
            node_queue.append(
                (
                    next_node,
                    next_in_tokens,
                    next_out_path,
                    next_out_count,
                    next_cost,
                    next_intent,
                )
            )
    return intent_symbols_and_costs
def best_fuzzy_cost(
    intent_symbols_and_costs: typing.Dict[str, typing.List["FuzzyResult"]]
) -> typing.List["FuzzyResult"]:
    """Return the fuzzy results with the lowest cost across all intents.

    Args:
        intent_symbols_and_costs: intent name -> results, where all results
            for a given intent share the same cost

    Returns:
        All results tied at the overall lowest cost (empty when input is empty)
    """
    if not intent_symbols_and_costs:
        return []

    best_cost: typing.Optional[float] = None
    best_results: typing.List["FuzzyResult"] = []

    # Find all results with the lowest cost
    for fuzzy_results in intent_symbols_and_costs.values():
        if not fuzzy_results:
            continue

        # All results for a given intent should have the same cost
        candidate_cost = fuzzy_results[0].cost
        if (best_cost is None) or (candidate_cost < best_cost):
            # New lowest cost: replace results AND update the threshold.
            # (Previously best_cost was not updated here, so later intents
            # matching the stale higher cost were wrongly included.)
            best_cost = candidate_cost
            best_results = list(fuzzy_results)
        elif candidate_cost == best_cost:
            # Tie with current best: keep both
            best_results.extend(fuzzy_results)

    return best_results
# -----------------------------------------------------------------------------
@dataclass
class ConverterInfo:
    """Local info for converter stack in path_to_recognition.

    Tracks one open __convert__ .. __converted__ span while walking the
    substituted tokens.
    """

    # Name + args exactly as they appeared after __convert__
    key: str

    # Name of converter (key without arguments)
    name: str

    # Optional arguments passed using name,arg1,arg2,...
    args: typing.Optional[typing.List[str]] = None

    # List of (raw, substituted, original) token triples inside this span
    tokens: typing.List[typing.Tuple[str, str, typing.List[str]]] = field(
        default_factory=list
    )
def path_to_recognition(
    node_path: typing.Iterable[PathNodeType],
    graph: nx.DiGraph,
    cost: typing.Optional[float] = None,
    converters: typing.Optional[
        typing.Dict[str, typing.Callable[..., typing.Any]]
    ] = None,
    extra_converters: typing.Optional[
        typing.Dict[str, typing.Callable[..., typing.Any]]
    ] = None,
) -> typing.Tuple[RecognitionResult, typing.Optional[Recognition]]:
    """Transform node path in graph to an intent recognition object.

    Args:
        node_path: nodes (ints or (node, matched_tokens) pairs) along a path
            through the intent graph
        graph: intent graph the path came from
        cost: fuzzy-matching cost; when > 0 it lowers intent confidence
        converters: name -> conversion function (defaults to built-ins)
        extra_converters: additional converters merged over ``converters``

    Returns:
        (RecognitionResult.SUCCESS, Recognition), or
        (RecognitionResult.FAILURE, None) for an empty path
    """
    if not node_path:
        # Empty path indicates failure
        return RecognitionResult.FAILURE, None
    converters = converters or get_default_converters()
    if extra_converters:
        # Merge in extra converters.
        # NOTE(review): this mutates the caller's dict when one was passed
        # in — confirm that is intended.
        converters.update(extra_converters)
    node_attrs = graph.nodes(data=True)
    recognition = Recognition(intent=Intent("", confidence=1.0))
    # Step 1: go through path pairwise and gather input/output labels
    raw_sub_tokens: typing.List[typing.Tuple[str, str, typing.List[str]]] = []
    for last_node_tokens, next_node_tokens in pairwise(node_path):
        # Unpack path nodes (fuzzy paths carry matched tokens, strict are ints)
        if isinstance(last_node_tokens, int):
            last_node = last_node_tokens
            last_tokens = []
        else:
            last_node, last_tokens = last_node_tokens
        if isinstance(next_node_tokens, int):
            next_node = next_node_tokens
        else:
            next_node, _ = next_node_tokens
        # Get raw text
        word = node_attrs[next_node].get("word") or ""
        # Get output label
        edge_data = graph[last_node][next_node]
        olabel = edge_data.get("olabel") or ""
        if olabel[:10] == "__unpack__":
            # Decode payload as base64-encoded bytestring
            olabel = base64.decodebytes(olabel[10:].encode()).decode()
        if olabel[:9] == "__label__":
            # Intent name
            assert recognition.intent is not None
            recognition.intent.name = olabel[9:]
        elif word or olabel:
            # Keep non-empty words
            raw_sub_tokens.append((word, olabel, last_tokens))
    # Step 2: apply converters
    converter_stack: typing.List[ConverterInfo] = []
    raw_conv_tokens: typing.List[typing.Tuple[str, typing.Any, typing.List[str]]] = []
    for raw_token, sub_token, original_tokens in raw_sub_tokens:
        if sub_token and converter_stack and (sub_token[:2] != "__"):
            # Add to existing (innermost) converter
            converter_stack[-1].tokens.append((raw_token, sub_token, original_tokens))
        elif sub_token[:11] == "__convert__":
            # Begin converter
            converter_key = sub_token[11:]
            converter_name = converter_key
            converter_args: typing.Optional[typing.List[str]] = None
            # Detect arguments (name,arg1,arg2,...)
            if "," in converter_name:
                converter_name, *converter_args = converter_name.split(",")
            converter_stack.append(
                ConverterInfo(
                    key=converter_key, name=converter_name, args=converter_args
                )
            )
        elif sub_token[:13] == "__converted__":
            # End converter
            assert converter_stack, "Found __converted__ without a __convert__"
            last_converter = converter_stack.pop()
            actual_key = sub_token[13:]
            assert (
                last_converter.key == actual_key
            ), f"Mismatched converter name (expected {last_converter.key}, got {actual_key})"
            # Convert and add directly
            raw_tokens: typing.List[str] = []
            sub_tokens: typing.List[str] = []
            orig_tokens: typing.List[typing.List[str]] = []
            for t in last_converter.tokens:
                if t[0]:
                    raw_tokens.append(t[0])
                if t[1]:
                    sub_tokens.append(t[1])
                if t[2]:
                    orig_tokens.append(t[2])
            # Run substituted tokens through conversion function
            converter_func = converters[last_converter.name]
            # Pass arguments as keyword "converter_args"
            converter_kwargs = (
                {"converter_args": last_converter.args} if last_converter.args else {}
            )
            converted_tokens = converter_func(*sub_tokens, **converter_kwargs)
            if converter_stack:
                # Nested: add to parent converter
                target_list = converter_stack[-1].tokens
            else:
                # Add directly to output list
                target_list = raw_conv_tokens
            # Re-pair raw/converted/original tokens positionally
            target_list.extend(
                itertools.zip_longest(
                    raw_tokens, converted_tokens, orig_tokens, fillvalue=""
                )
            )
        else:
            raw_conv_tokens.append((raw_token, sub_token, original_tokens))
    assert not converter_stack, f"Converter(s) remaining on stack ({converter_stack})"
    # Step 3: collect entities
    entity_stack: typing.List[Entity] = []
    raw_index = 0
    sub_index = 0
    for raw_token, conv_token, original_tokens in raw_conv_tokens:
        # Handle raw (input) token
        if original_tokens:
            # Use tokens from recognition string
            recognition.raw_tokens.extend(original_tokens)
            # +1 per token accounts for the joining spaces
            raw_index += sum(len(t) for t in original_tokens) + len(original_tokens)
            if entity_stack:
                last_entity = entity_stack[-1]
                last_entity.raw_tokens.extend(original_tokens)
        elif raw_token:
            # Use word itself
            recognition.raw_tokens.append(raw_token)
            raw_index += len(raw_token) + 1
            if entity_stack:
                last_entity = entity_stack[-1]
                last_entity.raw_tokens.append(raw_token)
        # Handle converted (output) token
        if conv_token is not None:
            conv_token_str = str(conv_token)
            if conv_token_str:
                if conv_token_str[:9] == "__begin__":
                    # Begin tag/entity
                    entity_name = conv_token[9:]
                    entity_stack.append(
                        Entity(
                            entity=entity_name,
                            value="",
                            start=sub_index,
                            raw_start=raw_index,
                        )
                    )
                elif conv_token_str[:7] == "__end__":
                    # End tag/entity
                    assert entity_stack, "Found __end__ without a __begin__"
                    last_entity = entity_stack.pop()
                    actual_name = conv_token[7:]
                    assert (
                        last_entity.entity == actual_name
                    ), "Mismatched entity name (expected {last_entity.entity}, got {actual_name})"
                    # NOTE(review): the assert message above is missing its
                    # f-prefix, so the placeholders are never interpolated
                    # (compare the __converted__ assert in step 2) — fix by
                    # making it an f-string.
                    # Assign end indexes
                    last_entity.end = sub_index - 1
                    last_entity.raw_end = raw_index - 1
                    # Create values
                    if len(last_entity.tokens) == 1:
                        # Use Python object
                        last_entity.value = last_entity.tokens[0]
                    else:
                        # Join as string
                        last_entity.value = " ".join(str(t) for t in last_entity.tokens)
                    last_entity.raw_value = " ".join(last_entity.raw_tokens)
                    # Add to recognition
                    recognition.entities.append(last_entity)
                elif conv_token_str[:10] == "__source__":
                    if entity_stack:
                        last_entity = entity_stack[-1]
                        last_entity.source = conv_token_str[10:]
                elif entity_stack:
                    # Add to most recent named entity
                    last_entity = entity_stack[-1]
                    last_entity.tokens.append(conv_token)
                    recognition.tokens.append(conv_token)
                    sub_index += len(conv_token_str) + 1
                else:
                    # Substituted text
                    recognition.tokens.append(conv_token)
                    sub_index += len(conv_token_str) + 1
    # Step 4: create text fields and compute confidence
    recognition.text = " ".join(str(t) for t in recognition.tokens)
    recognition.raw_text = " ".join(recognition.raw_tokens)
    if cost and cost > 0:
        # Set fuzzy confidence
        assert recognition.intent is not None
        recognition.intent.confidence = 1.0 - float(cost / len(recognition.raw_tokens))
    return RecognitionResult.SUCCESS, recognition
# -----------------------------------------------------------------------------
def bool_converter(obj: typing.Any) -> bool:
    """Convert to bool: zero and the (case-insensitive) string 'false' are False."""
    if obj == 0:
        return False
    return str(obj).lower() != "false"
def get_default_converters() -> typing.Dict[str, typing.Callable[..., typing.Any]]:
    """Get built-in fsticuffs converters (name -> conversion function)."""

    def _object(*args):
        # Wrap value(s) in a single {"value": ...} dict
        value = args[0] if len(args) == 1 else " ".join(str(a) for a in args)
        return [{"value": value}]

    def _kind(*args, converter_args=None):
        # Tag each dict argument with a "kind" from converter_args
        return [{"kind": converter_args[0], **a} for a in args]

    def _unit(*args, converter_args=None):
        # Tag each dict argument with a "unit" from converter_args
        return [{"unit": converter_args[0], **a} for a in args]

    def _datetime(*args, converter_args=None):
        # Parse joined arguments with strptime using converter_args as format
        text = " ".join(str(a) for a in args)
        return [datetime.strptime(text, *(converter_args or []))]

    return {
        "int": lambda *args: map(int, args),
        "float": lambda *args: map(float, args),
        "bool": lambda *args: map(bool_converter, args),
        "lower": lambda *args: map(str.lower, args),
        "upper": lambda *args: map(str.upper, args),
        "object": _object,
        "kind": _kind,
        "unit": _unit,
        "datetime": _datetime,
    }
# -----------------------------------------------------------------------------
def sample_by_intent(
    intent_graph: nx.DiGraph,
    num_samples: typing.Optional[int] = None,
    **recognition_args,
) -> typing.Dict[str, typing.List[Recognition]]:
    """Sample sentences from a graph.

    Args:
        intent_graph: graph with start/end nodes (see get_start_end_nodes)
        num_samples: if given, randomly sample this many paths;
            otherwise generate every simple path lazily
        **recognition_args: extra keyword arguments for path_to_recognition

    Returns:
        intent name -> list of sampled Recognitions

    Raises:
        AssertionError: if start/end nodes are missing or a path fails
    """
    sentences_by_intent: typing.Dict[str, typing.List[Recognition]] = defaultdict(list)
    start_node, end_node = get_start_end_nodes(intent_graph)
    assert (start_node is not None) and (
        end_node is not None
    ), "Missing start/end node(s)"
    if num_samples is not None:
        # Randomly sample.
        # NOTE(review): this materializes every simple path first, which can
        # be very expensive on large graphs.
        paths = random.sample(
            list(nx.all_simple_paths(intent_graph, start_node, end_node)), num_samples
        )
    else:
        # Use generator (lazy)
        paths = nx.all_simple_paths(intent_graph, start_node, end_node)
    for path in paths:
        _, recognition = path_to_recognition(path, intent_graph, **recognition_args)
        assert recognition, "Path failed"
        if recognition.intent:
            sentences_by_intent[recognition.intent.name].append(recognition)
    return sentences_by_intent
import itertools
import json
import logging
import re
import shutil
import subprocess
import tempfile
import typing
from collections import defaultdict
from enum import Enum
from pathlib import Path
PronunciationsType = typing.Dict[str, typing.List[typing.List[str]]]
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class MissingWordPronunciationsException(Exception):
    """Raised when word pronunciations are missing and no g2p model is available.

    Attributes:
        words: the words that have no known pronunciation
    """

    def __init__(self, words: typing.List[str]):
        # Pass a real message to the base class instead of the exception
        # object itself (the original passed ``self``, leaving a circular
        # self-reference in ``args``).
        super().__init__(f"Missing pronunciations for: {words}")
        self.words = words

    def __str__(self):
        return f"Missing pronunciations for: {self.words}"
# -----------------------------------------------------------------------------
class PronunciationAction(str, Enum):
    """Action taken when multiple pronunciations for the same word are found."""

    # Keep existing pronunciations and add the new one
    APPEND = "append"

    # Replace existing pronunciations on first occurrence, then append
    OVERWRITE_ONCE = "overwrite_once"

    # Always replace existing pronunciations
    OVERWRITE_ALWAYS = "overwrite_always"
def read_pronunciations(
    dict_file: typing.Iterable[str],
    word_dict: typing.Optional[PronunciationsType] = None,
    action: PronunciationAction = PronunciationAction.APPEND,
) -> PronunciationsType:
    """Load a CMU-style pronunciation dictionary from lines of text.

    Each line is "word PHONEME PHONEME ...". A "word(n)" suffix is stripped
    so repeated pronunciations collect under the plain word. Results are
    merged into word_dict (created if None) according to ``action``.
    """
    word_actions: typing.Dict[str, PronunciationAction] = {}
    if word_dict is None:
        word_dict = {}

    for line_num, raw_line in enumerate(dict_file, start=1):
        text = raw_line.strip()
        if not text:
            continue

        try:
            # Split on explicit spaces/tabs only (avoid 0xA0)
            word, *phonemes = re.split(r"[ \t]+", text)

            # Strip "(n)" index so all pronunciations share one key
            word = word.split("(")[0]

            current_action = word_actions.get(word, action)
            if (word in word_dict) and (
                current_action == PronunciationAction.APPEND
            ):
                # Append to list of pronunciations
                word_dict[word].append(phonemes)
            elif current_action == PronunciationAction.OVERWRITE_ONCE:
                # Overwrite just once, then append for this word
                word_dict[word] = [phonemes]
                word_actions[word] = PronunciationAction.APPEND
            else:
                # Overwrite (also covers brand-new words)
                word_dict[word] = [phonemes]
        except Exception as e:
            _LOGGER.warning("read_pronunciations: %s (line %s)", e, line_num)

    return word_dict
def write_pronunciations(
    vocabulary: typing.Set[str],
    pronunciations: PronunciationsType,
    dictionary: typing.Union[str, Path],
    g2p_model: typing.Optional[typing.Union[str, Path]] = None,
    g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    phonetisaurus_apply: typing.Optional[typing.Union[str, Path]] = None,
    missing_words_path: typing.Optional[typing.Union[str, Path]] = None,
    number_repeated_words: bool = True,
):
    """Create pronunciation dictionary. Guess missing words if g2p model is available.

    Args:
        vocabulary: words to include in the output dictionary
        pronunciations: known word -> phoneme lists
        dictionary: path of the CMU-style dictionary file to write
        g2p_model: grapheme-to-phoneme model used to guess unknown words
        g2p_word_transform: transform applied to words before g2p guessing
        phonetisaurus_apply: path to phonetisaurus-apply (searched in PATH if None)
        missing_words_path: if set, guessed pronunciations are also written here
        number_repeated_words: write repeated pronunciations as "word(2)", etc.

    Raises:
        MissingWordPronunciationsException: words are missing and no g2p model
    """
    # Words with no known pronunciation
    missing_words: typing.Set[str] = set()
    # Look up each word (NOTE: set iteration order is arbitrary)
    with open(dictionary, "w") as dictionary_file:
        for word in vocabulary:
            word_phonemes = pronunciations.get(word)
            if not word:
                _LOGGER.warning(
                    "Empty word in vocabulary with pronunciations: %s", word_phonemes
                )
                continue
            if not word_phonemes:
                # Add to missing word list
                _LOGGER.warning("Missing word '%s'", word)
                missing_words.add(word)
                continue
            # Write CMU format
            for i, phonemes in enumerate(word_phonemes):
                phoneme_str = " ".join(phonemes).strip()
                if (not number_repeated_words) or (i == 0):
                    # word
                    print(word, phoneme_str, file=dictionary_file)
                else:
                    # word(n)
                    print(f"{word}({i+1})", phoneme_str, file=dictionary_file)
        # Open missing words file
        missing_file: typing.Optional[typing.TextIO] = None
        if missing_words_path:
            missing_file = open(missing_words_path, "w")
        try:
            if missing_words:
                # Fail if no g2p model is available
                if not g2p_model:
                    raise MissingWordPronunciationsException(list(missing_words))
                if not phonetisaurus_apply:
                    # Find in PATH
                    phonetisaurus_apply = shutil.which("phonetisaurus-apply")
                    assert phonetisaurus_apply, "phonetisaurus-apply not found in PATH"
                # Guess word pronunciations
                _LOGGER.debug("Guessing pronunciations for %s", missing_words)
                guesses = guess_pronunciations(
                    missing_words,
                    g2p_model,
                    phonetisaurus_apply,
                    g2p_word_transform=g2p_word_transform,
                    num_guesses=1,
                )
                # Output is a pronunciation dictionary.
                # Append to existing dictionary file.
                for guess_word, guess_phonemes in guesses:
                    guess_phoneme_str = " ".join(guess_phonemes).strip()
                    print(guess_word, guess_phoneme_str, file=dictionary_file)
                    if missing_file:
                        print(guess_word, guess_phoneme_str, file=missing_file)
        finally:
            # Always close the missing-words file, even on failure
            if missing_file:
                missing_file.close()
                _LOGGER.debug("Wrote missing words to %s", str(missing_words_path))
def guess_pronunciations(
    words: typing.Iterable[str],
    g2p_model: typing.Union[str, Path],
    phonetisaurus_apply: typing.Optional[typing.Union[str, Path]] = None,
    g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    num_guesses: int = 1,
) -> typing.Iterable[typing.Tuple[str, typing.List[str]]]:
    """Guess phonetic pronunciations for words. Yields (word, phonemes) pairs.

    Dispatches on the model file suffix: ".npz" models are loaded with the
    local GeepersG2P predictor; anything else is passed to the external
    phonetisaurus-apply tool.

    Args:
        words: words to guess pronunciations for
        g2p_model: path to the grapheme-to-phoneme model
        phonetisaurus_apply: tool path (searched in PATH if None)
        g2p_word_transform: transform applied to each word before guessing
            (phonetisaurus branch only)
        num_guesses: number of guesses per word (phonetisaurus branch only)
    """
    g2p_model = Path(g2p_model)
    if g2p_model.suffix == ".npz":
        from .g2p_geepers import GeepersG2P
        # Use geepers
        # Model config is expected next to the .npz file
        g2p_config_path = g2p_model.parent / "g2p_config.json"
        with open(g2p_config_path, "r") as g2p_config_file:
            g2p_config = json.load(g2p_config_file)
        g2p = GeepersG2P(
            graphemes=g2p_config["model"]["graphemes"],
            phonemes=g2p_config["model"]["phonemes"],
        )
        g2p.load_variables(g2p_model)
        for word in words:
            word = word.strip()
            if not word:
                continue
            try:
                pron = g2p.predict(word)
                yield word, pron
            except Exception:
                # Best-effort: log and keep guessing remaining words
                _LOGGER.exception("No pronunciation for %s", word)
    else:
        # Use phonetisaurus
        if not phonetisaurus_apply:
            # Find in PATH
            phonetisaurus_apply = shutil.which("phonetisaurus-apply")
            assert phonetisaurus_apply, "phonetisaurus-apply not found in PATH"
        g2p_word_transform = g2p_word_transform or (lambda s: s)
        with tempfile.NamedTemporaryFile(mode="w") as wordlist_file:
            for word in words:
                word = g2p_word_transform(word)
                print(word, file=wordlist_file)
            # seek() flushes buffered writes so the subprocess sees the data
            wordlist_file.seek(0)
            g2p_command = [
                str(phonetisaurus_apply),
                "--model",
                str(g2p_model),
                "--word_list",
                wordlist_file.name,
                "--nbest",
                str(num_guesses),
            ]
            _LOGGER.debug(g2p_command)
            g2p_lines = subprocess.check_output(
                g2p_command, universal_newlines=True
            ).splitlines()
            # Output is a pronunciation dictionary; yield each entry
            for line in g2p_lines:
                line = line.strip()
                if line:
                    word, *phonemes = line.split()
                    yield (word.strip(), phonemes)
# -----------------------------------------------------------------------------
# Sounds Like Pronunciations
# -----------------------------------------------------------------------------
G2PAlignmentType = typing.Dict[
str, typing.List[typing.List[typing.Tuple[typing.List[str], typing.List[str]]]]
]
_SOUNDS_LIKE_WORD_N = re.compile(r"^([^(]+)\(([0-9]+)\)$")
_SOUNDS_LIKE_PARTIAL = re.compile(r"^([^>]*)>([^<]+)<.*$")
# -----------------------------------------------------------------------------
def load_sounds_like(
    sounds_like: typing.Union[str, Path, typing.TextIO],
    pronunciations: PronunciationsType,
    action: PronunciationAction = PronunciationAction.APPEND,
    g2p_alignment: typing.Optional[G2PAlignmentType] = None,
    g2p_corpus: typing.Optional[Path] = None,
) -> typing.Optional[G2PAlignmentType]:
    """Loads file with unknown word pronunciations based on known words.

    Each line is "<unknown_word> <known_word> [<known_word> ...]" where a
    known word may also be a literal phoneme string (/P1 P2/) or a partial
    word (pre>fix<). Generated pronunciations are merged into
    ``pronunciations`` in place according to ``action``.

    Returns:
        The (possibly lazily loaded) g2p alignment, for reuse by the caller.

    Raises:
        Exception: re-raises any per-line failure after logging it.
    """
    original_action = action
    # word -> [[(["grapheme", ...], ["phoneme", ...])], ...]
    g2p_alignment = g2p_alignment or {}
    if isinstance(sounds_like, (str, Path)):
        _LOGGER.debug("Loading sounds-like pronunciations from %s", sounds_like)
        sounds_like_file = open(sounds_like, "r")
    else:
        # Already-open TextIO
        sounds_like_file = sounds_like
    # File with <unknown_word> <known_word> [<known_word> ...]
    # Pronunciation is derived from phonemes of known words.
    # Phonemes can be included with the syntax /P1 P2/
    with sounds_like_file:
        for i, line in enumerate(sounds_like_file):
            line = line.strip()
            if not line:
                continue
            try:
                # Restore word action (may have been downgraded to APPEND
                # by OVERWRITE_ONCE on a previous line)
                action = original_action
                # Parse line of <unknown> <known> [<known> ...]
                unknown_word, *known_words = line.split()
                assert known_words, f"No known words for {unknown_word}"
                # Identify literal phonemes
                in_phoneme = False
                # line -> alternatives -> phoneme sequence
                known_phonemes: typing.List[typing.List[typing.List[str]]] = []
                # ongoing phoneme sequence
                current_phonemes: typing.List[str] = []
                # Process space-separated tokens
                for known_word in known_words:
                    if known_word.startswith("/"):
                        # Begin literal phoneme string
                        # /P1 P2 P3/
                        in_phoneme = True
                        known_word = known_word[1:]
                        current_phonemes = []
                    end_slash = known_word.endswith("/")
                    if end_slash:
                        # End literal phoneme string
                        # /P1 P2 P3/
                        known_word = known_word[:-1]
                    if in_phoneme:
                        # Literal phonemes
                        # P_N of /P1 P2 P3/
                        current_phonemes.append(known_word)
                    else:
                        # Check for >part<ial word
                        partial_match = _SOUNDS_LIKE_PARTIAL.match(known_word)
                        if partial_match:
                            partial_prefix, partial_body = (
                                partial_match.group(1),
                                partial_match.group(2),
                            )
                            if not g2p_alignment:
                                # Lazily load g2p alignment corpus on first use
                                assert (
                                    g2p_corpus
                                ), f"No G2P corpus given for partial word: {known_word}"
                                assert (
                                    g2p_corpus.is_file()
                                ), f"Missing G2P corpus for {known_word}: {g2p_corpus}"
                                g2p_alignment = load_g2p_corpus(g2p_corpus)
                            # Align graphemes with phonemes
                            word = re.sub(r"[<>]", "", known_word)
                            aligned_phonemes = get_aligned_phonemes(
                                g2p_alignment, word, partial_prefix, partial_body
                            )
                            # Add all possible alignments (phoneme sequences) as alternatives
                            known_phonemes.append(list(aligned_phonemes))
                        else:
                            # Known word with one or more pronunciations
                            known_prons = get_nth_word(pronunciations, known_word)
                            assert known_prons, f"No pronunciations for {known_word}"
                            # Add all pronunciations as alternatives
                            known_phonemes.append(known_prons)
                    if end_slash:
                        in_phoneme = False
                        if current_phonemes:
                            known_phonemes.append([current_phonemes])
                # Collect pronunciations from known words:
                # cartesian product over the alternatives of every token
                for word_phonemes in itertools.product(*known_phonemes):
                    # Generate all possible pronunciations.
                    word_pron = list(itertools.chain(*word_phonemes))
                    has_word = unknown_word in pronunciations
                    # Handle according to custom words action
                    if has_word and (action == PronunciationAction.APPEND):
                        # Append to list of pronunciations
                        pronunciations[unknown_word].append(word_pron)
                    elif action == PronunciationAction.OVERWRITE_ONCE:
                        # Overwrite just once, then append
                        pronunciations[unknown_word] = [word_pron]
                        action = PronunciationAction.APPEND
                    else:
                        # Overwrite
                        pronunciations[unknown_word] = [word_pron]
            except Exception as e:
                # Log with the failing line number, then propagate
                _LOGGER.warning("load_sounds_like: %s (line %s)", e, i + 1)
                raise e
    return g2p_alignment
def load_g2p_corpus(
    g2p_corpus: typing.Union[str, typing.TextIO, Path]
) -> G2PAlignmentType:
    """Load a grapheme-to-phoneme alignment corpus generated by Phonetisaurus.

    Returns a dict mapping each word to its possible alignments, where an
    alignment is a list of (input_graphemes, output_phonemes) pairs.
    """
    alignment: G2PAlignmentType = defaultdict(list)
    if isinstance(g2p_corpus, (str, Path)):
        _LOGGER.debug("Loading g2p corpus from %s", g2p_corpus)
        corpus_file = open(g2p_corpus, "r")
    else:
        # Already an open TextIO
        corpus_file = g2p_corpus
    with corpus_file:
        for raw_line in corpus_file:
            stripped = raw_line.strip()
            if not stripped:
                continue
            graphemes = ""
            segment_pairs = []
            for segment in stripped.split():
                # Default Phonetisaurus delimiters:
                #   } separates input from output
                #   | separates tokens within input/output
                #   _ marks an empty output
                seg_in, seg_out = segment.split("}")
                in_tokens = seg_in.split("|")
                if seg_out == "_":
                    out_tokens = []
                else:
                    out_tokens = seg_out.split("|")
                segment_pairs.append((in_tokens, out_tokens))
                graphemes += "".join(in_tokens)
            # One alignment (list of input/output pairs) per corpus line
            alignment[graphemes].append(segment_pairs)
    return alignment
def get_aligned_phonemes(
    g2p_alignment: G2PAlignmentType, word: str, prefix: str, body: str
) -> typing.Iterable[typing.List[str]]:
    """Yields lists of phonemes that comprise the body of the word. Prefix graphemes are skipped.

    Args:
        g2p_alignment: word -> possible (graphemes, phonemes) alignments
        word: known word, optionally "word(N)" to select the Nth alignment
        prefix: graphemes before the desired segment (consumed, not emitted)
        body: graphemes of the desired segment (their phonemes are emitted)
    """
    # Optional 1-based alignment index from "word(N)"
    word_index: typing.Optional[int] = None
    match = _SOUNDS_LIKE_WORD_N.match(word)
    if match:
        # word(N)
        word, word_index = (match.group(1), int(match.group(2)))
    # Loop through possible alignments for this word
    for io_index, inputs_outputs in enumerate(g2p_alignment.get(word, [])):
        if (word_index is not None) and (word_index != (io_index + 1)):
            # Not the requested alignment
            continue
        can_match = True
        prefix_chars = list(prefix)
        body_chars = list(body)
        phonemes = []
        for word_input, word_output in inputs_outputs:
            word_input = list(word_input)
            word_output = list(word_output)
            while prefix_chars and word_input:
                # Exhaust characters before desired word segment first
                if word_input[0] != prefix_chars[0]:
                    can_match = False
                    break
                prefix_chars = prefix_chars[1:]
                word_input = word_input[1:]
            while body_chars and word_input:
                # Match desired word segment, collecting its phonemes
                if word_input[0] != body_chars[0]:
                    can_match = False
                    break
                body_chars = body_chars[1:]
                word_input = word_input[1:]
                if word_output:
                    phonemes.append(word_output[0])
                    word_output = word_output[1:]
            if not can_match or not body_chars:
                # Mismatch or done with word segment
                break
        if can_match and phonemes:
            yield phonemes
def get_nth_word(
    pronunciations: PronunciationsType, word: str
) -> typing.List[typing.List[str]]:
    """Get all pronunciations for a word or a single(n) pronunciation.

    Args:
        pronunciations: word -> list of phoneme lists
        word: plain word, or "word(N)" to select the Nth (1-based) pronunciation

    Returns:
        All pronunciations for the word, or a one-element list with the
        selected pronunciation; empty list when the word is unknown.
    """
    # Check for explicit word index (1-based)
    word_index: typing.Optional[int] = None
    match = _SOUNDS_LIKE_WORD_N.match(word)
    if match:
        # word(N)
        word, word_index = (match.group(1), int(match.group(2)))
    known_prons = pronunciations.get(word, [])
    if (not known_prons) or (word_index is None):
        # Return all known pronunciations
        return known_prons
    # Return indexed pronunciation only.
    # Clip to within bounds of list.
    i = min(max(1, word_index), len(known_prons)) - 1
    return [known_prons[i]]
import re
import typing
from dataclasses import dataclass, field
from enum import Enum
@dataclass
class Substitutable:
    """Mixin for expressions whose matched text may be replaced."""

    # Replacement text (single token or list of tokens)
    substitution: typing.Optional[typing.Union[str, typing.List[str]]] = None

    # Names of converters to apply after substitution
    converters: typing.List[str] = field(default_factory=list)

    @staticmethod
    def parse_substitution(sub_text: str) -> typing.Union[str, typing.List[str]]:
        """Parse substitution text into a token list (multi-word) or string."""
        text = sub_text.strip()

        # Strip optional surrounding parentheses
        if text.startswith("("):
            text = text[1:]

        if text.endswith(")"):
            text = text[:-1]

        # Multi-word substitutions become token lists
        return text.split() if " " in text else text
@dataclass
class Tag(Substitutable):
    """{tag} attached to an expression.

    Inherits substitution/converters, which apply to the tag's value.
    """

    # Name of tag (entity)
    tag_text: str = ""
@dataclass
class Taggable:
    """Mixin indicating an expression may be tagged."""

    # Tag to be applied (None when untagged)
    tag: typing.Optional[Tag] = None
@dataclass
class Expression:
    """Base class for most JSGF types."""

    # Original text this expression was parsed from
    text: str = ""
@dataclass
class Word(Substitutable, Taggable, Expression):
    """Single word/token; may carry a tag, substitution, and converters."""
class SequenceType(str, Enum):
    """Type of a sequence. Optionals are alternatives with an empty option."""

    # Sequence of expressions, all of which must be recognized in order
    GROUP = "group"

    # Expressions where only one will be recognized
    ALTERNATIVE = "alternative"
@dataclass
class Sequence(Substitutable, Taggable, Expression):
    """Ordered sequence of expressions. Supports groups, optionals, and alternatives."""

    # Items in the sequence
    items: typing.List[Expression] = field(default_factory=list)

    # Group or alternative (see SequenceType)
    type: SequenceType = SequenceType.GROUP
@dataclass
class RuleReference(Taggable, Expression):
    """Reference to a rule by <name> or <grammar.name>."""

    # Name of referenced rule
    rule_name: str = ""

    # Grammar name of referenced rule (None for local references)
    grammar_name: typing.Optional[str] = None

    @property
    def full_rule_name(self) -> str:
        """Get fully qualified rule name ("grammar.rule" or just "rule")."""
        if self.grammar_name:
            return f"{self.grammar_name}.{self.rule_name}"
        return self.rule_name
@dataclass
class SlotReference(Substitutable, Taggable, Expression):
    """Reference to a slot by $name."""

    # Name of referenced slot (without the leading "$")
    slot_name: str = ""
@dataclass
class ParseMetadata:
    """Debug metadata for more helpful parsing errors."""

    # File the expression came from
    file_name: str

    # 1-based line number within the file
    line_number: int

    # Intent the expression belongs to, if known
    intent_name: typing.Optional[str] = None
@dataclass
class Sentence(Sequence):
    """Sequence representing a complete sentence template."""

    @staticmethod
    def parse(text: str, metadata: typing.Optional[ParseMetadata] = None) -> "Sentence":
        """Parse a single sentence.

        A scratch Sentence is filled in by parse_expression, then copied
        into a fresh instance so only the parsed fields are carried over.
        """
        s = Sentence(text=text)
        parse_expression(s, text, metadata=metadata)
        return Sentence(
            text=s.text,
            items=s.items,
            type=s.type,
            tag=s.tag,
            substitution=s.substitution,
        )
@dataclass
class Rule:
    """Named rule with body."""

    # Matches: [public] <RuleName> = body [;]
    RULE_DEFINITION = re.compile(r"^(public)?\s*<([^>]+)>\s*=\s*([^;]+)(;)?$")

    rule_name: str
    rule_body: Sentence
    public: bool = False
    text: str = ""

    @staticmethod
    def parse(text: str, metadata: typing.Optional[ParseMetadata] = None) -> "Rule":
        """Parse a single rule.

        Raises:
            AssertionError: if text does not look like a rule definition
        """
        # public <RuleName> = rule body;
        # <RuleName> = rule body;
        rule_match = Rule.RULE_DEFINITION.match(text)
        assert rule_match is not None, f"No rule was found in {text}"
        public = rule_match.group(1) is not None
        rule_name = rule_match.group(2)
        rule_text = rule_match.group(3)
        s = Sentence.parse(rule_text, metadata=metadata)
        return Rule(rule_name=rule_name, rule_body=s, public=public, text=text)
# -----------------------------------------------------------------------------
def walk_expression(
    expression: Expression,
    visit: typing.Callable[
        [Expression], typing.Union[bool, typing.Optional[Expression]]
    ],
    replacements: typing.Optional[typing.Dict[str, typing.List[Expression]]] = None,
) -> typing.Union[bool, typing.Optional[Expression]]:
    """Recursively visit/replace nodes in expression.

    The visit callable may return:
      * False to stop the walk immediately,
      * a replacement Expression for the visited node,
      * None to keep the node unchanged.

    replacements maps "<rule.name>" and "$slot" keys to expression lists
    that are walked (and possibly replaced) in place.
    """
    result = visit(expression)
    if result is False:
        # Caller requested an early stop
        return False
    if result is not None:
        assert isinstance(result, Expression), f"Expected Expression, got {result}"
        expression = result
    if isinstance(expression, Sequence):
        # Walk children, replacing items in place
        for i in range(len(expression.items)):
            new_item = walk_expression(expression.items[i], visit, replacements)
            if new_item:
                assert isinstance(
                    new_item, Expression
                ), f"Expected Expression, got {new_item}"
                expression.items[i] = new_item
    elif isinstance(expression, Rule):
        # Walk the rule body as a whole
        new_body = walk_expression(expression.rule_body, visit, replacements)
        if new_body:
            assert isinstance(new_body, Sentence), f"Expected Sentence, got {new_body}"
            expression.rule_body = new_body
    elif isinstance(expression, RuleReference):
        # Walk the expressions this rule reference expands to
        key = f"<{expression.full_rule_name}>"
        if replacements and (key in replacements):
            key_replacements = replacements[key]
            for i in range(len(key_replacements)):
                new_item = walk_expression(key_replacements[i], visit, replacements)
                if new_item:
                    assert isinstance(
                        new_item, Expression
                    ), f"Expected Expression, got {new_item}"
                    key_replacements[i] = new_item
    elif isinstance(expression, SlotReference):
        # Walk the expressions this slot reference expands to
        key = f"${expression.slot_name}"
        if replacements and (key in replacements):
            key_replacements = replacements[key]
            for i in range(len(key_replacements)):
                new_item = walk_expression(key_replacements[i], visit, replacements)
                if new_item:
                    assert isinstance(
                        new_item, Expression
                    ), f"Expected Expression, got {new_item}"
                    key_replacements[i] = new_item
    return expression
# -----------------------------------------------------------------------------
def maybe_remove_parens(s: str) -> str:
    """Strip one pair of surrounding parentheses from s, if present."""
    is_wrapped = (len(s) > 1) and s.startswith("(") and s.endswith(")")
    return s[1:-1] if is_wrapped else s
def split_words(text: str) -> typing.Iterable[Expression]:
    """Split words by whitespace. Detect slot references and substitutions."""
    # Pass 1: tokenize.  A substitution sequence like "input:(output words)"
    # must stay in one token, so whitespace inside ":(...)" does not split.
    parts: typing.List[str] = []
    current = ""
    prev_char = ""
    inside_sub = False

    for char in text:
        flush = False
        if (char == "(") and (prev_char == ":"):
            # Opening paren of a substitution sequence (paren is dropped)
            inside_sub = True
        elif inside_sub and (char == ")"):
            # Closing paren ends the sequence and the token (paren dropped)
            inside_sub = False
            flush = True
        elif (char == " ") and (not inside_sub):
            # Plain whitespace boundary
            flush = True
        else:
            # Accumulate into the current token
            current += char

        if flush and current:
            parts.append(current)
            current = ""

        prev_char = char

    if current:
        # Trailing token
        parts.append(current)

    # Pass 2: classify each token
    for part in parts:
        if part.startswith("$"):
            slot_name = part[1:]
            if ":" in slot_name:
                # $slot:substitution
                slot_name, sub = slot_name.split(":", maxsplit=1)
                yield SlotReference(
                    text=part,
                    slot_name=slot_name,
                    substitution=Substitutable.parse_substitution(sub),
                )
            else:
                # Bare $slot
                yield SlotReference(text=part, slot_name=slot_name)
        else:
            word = Word(text=part)
            if "!" in part:
                # Word with converter(s), e.g. twenty:20!int
                pieces = part.split("!")
                word.text = pieces[0]
                word.converters = pieces[1:]

            if ":" in word.text:
                # Word with substitution, e.g. twenty:20
                spoken, emitted = word.text.split(":", maxsplit=1)
                word.text = spoken
                word.substitution = Substitutable.parse_substitution(emitted)

            yield word
def parse_expression(
    root: typing.Optional[Sequence],
    text: str,
    end: typing.Optional[typing.List[str]] = None,
    is_literal: bool = True,
    metadata: typing.Optional[ParseMetadata] = None,
) -> typing.Optional[int]:
    """Parse a full expression. Return index in text where current expression ends.

    Args:
        root: sequence that parsed sub-expressions are appended to; None when
            parsing a non-literal body (substitution, tag text, rule name)
        text: text to parse
        end: characters that terminate this (sub-)expression, e.g. [")"]
        is_literal: when True, leftover text is split into Word expressions
        metadata: optional file/line information for parse error messages

    Returns:
        Index in text just past this expression, or None if one of the
        expected end characters was never found.
    """
    end = end or []
    found: bool = False
    next_index: int = 0
    literal: str = ""
    last_taggable: typing.Optional[Taggable] = None
    last_group: typing.Optional[Sequence] = root

    # Process text character-by-character
    for current_index, c in enumerate(text):
        if current_index < next_index:
            # Skip ahead: a recursive call already consumed these characters.
            # (The += 1 is a no-op since enumerate resets current_index.)
            current_index += 1
            continue

        # Get previous character
        if current_index > 0:
            last_c = text[current_index - 1]
        else:
            last_c = ""

        next_index = current_index + 1
        if c in end:
            # Found end character of expression (e.g., ])
            next_index += 1
            found = True
            break

        if (c in {":", "!"}) and (last_c in {")", "]"}):
            # Substitution (":") or converter ("!") attached to the
            # group/optional that just closed.
            assert isinstance(last_taggable, Substitutable), parse_error(
                f"Expected Substitutable, got {last_taggable}",
                text,
                current_index,
                metadata=metadata,
            )

            # Check for substitution sequence.
            # e.g., (input words):(output words)
            if text[next_index] == "(":
                # Find end of group
                next_end = [")"] + end
                next_seq_sub = True
            else:
                # Find end of word
                next_end = [" "] + end
                next_seq_sub = False

            # Recursively consume the right-hand side (non-literal text)
            next_index = parse_expression(
                None,
                text[current_index + 1 :],
                next_end,
                is_literal=False,
                metadata=metadata,
            )
            if next_index is None:
                # End of text
                next_index = len(text) + 1
            else:
                next_index += current_index - 1

            if next_seq_sub:
                # Consume end paren
                next_index += 1
                assert text[next_index - 1] == ")", parse_error(
                    "Missing end parenthesis", text, current_index, metadata=metadata
                )

            if c == ":":
                # Substitution/conversion
                sub_text = text[current_index + 1 : next_index].strip()
                if "!" in sub_text:
                    # Extract converter(s)
                    sub_text, *converters = sub_text.split("!")
                    last_taggable.converters = converters

                last_taggable.substitution = Substitutable.parse_substitution(sub_text)
            else:
                # Conversion only
                conv_text = maybe_remove_parens(
                    text[current_index + 1 : next_index].strip()
                )
                last_taggable.converters = conv_text.split("!")
        elif (c == "(" and last_c != ":") or (c in {"<", "[", "{", "|"}):
            # Begin group/tag/alt/etc.

            # Break literal here: flush accumulated plain words into the
            # current group before starting the new sub-expression.
            literal = literal.strip()
            if literal:
                assert last_group is not None, parse_error(
                    "No group preceeding literal",
                    text,
                    current_index,
                    metadata=metadata,
                )
                words = list(split_words(literal))
                last_group.items.extend(words)
                last_word = words[-1]
                assert isinstance(last_word, Taggable), parse_error(
                    f"Expected Taggable, got {last_word}",
                    text,
                    current_index,
                    metadata=metadata,
                )
                last_taggable = last_word
                literal = ""

            if c == "<":
                # Rule reference, e.g. <GrammarName.rule>
                assert last_group is not None, parse_error(
                    "No group preceeding rule reference",
                    text,
                    current_index,
                    metadata=metadata,
                )
                rule = RuleReference()
                end_index = parse_expression(
                    None,
                    text[current_index + 1 :],
                    end=[">"],
                    is_literal=False,
                    metadata=metadata,
                )
                assert end_index, parse_error(
                    f"Failed to find ending '>'", text, current_index, metadata=metadata
                )
                next_index = end_index + current_index

                rule_name = text[current_index + 1 : next_index - 1]
                if "." in rule_name:
                    # Split by last dot
                    last_dot = rule_name.rindex(".")
                    rule.grammar_name = rule_name[:last_dot]
                    rule.rule_name = rule_name[last_dot + 1 :]
                else:
                    # Use entire name
                    rule.rule_name = rule_name
                    if metadata:
                        # Use intent name for grammar name
                        rule.grammar_name = metadata.intent_name

                rule.text = text[current_index:next_index]
                last_group.items.append(rule)
                last_taggable = rule
            elif c == "(":
                # Group (expression)
                if last_group is not None:
                    # Parse group into sequence.
                    # If last_group is None, we're on the right-hand side of a
                    # ":" and the text will be interpreted as a substitution
                    # instead.
                    group = Sequence(type=SequenceType.GROUP)
                    end_index = parse_expression(
                        group, text[current_index + 1 :], end=[")"], metadata=metadata
                    )
                    assert end_index, parse_error(
                        f"Failed to find ending ')'",
                        text,
                        current_index,
                        metadata=metadata,
                    )
                    next_index = end_index + current_index

                    group.text = text[current_index + 1 : next_index - 1]
                    last_group.items.append(group)
                    last_taggable = group
            elif c == "[":
                # Optional
                # Recurse with group sequence to capture multi-word children.
                optional_seq = Sequence(type=SequenceType.GROUP)
                end_index = parse_expression(
                    optional_seq,
                    text[current_index + 1 :],
                    end=["]"],
                    metadata=metadata,
                )
                assert end_index, parse_error(
                    f"Failed to find ending ']'", text, current_index, metadata=metadata
                )
                next_index = end_index + current_index

                # An optional is modeled as an alternative with an empty branch
                optional = Sequence(type=SequenceType.ALTERNATIVE)
                if optional_seq.items:
                    if (
                        (len(optional_seq.items) == 1)
                        and (not optional_seq.tag)
                        and (not optional_seq.substitution)
                    ):
                        # Unpack inner item
                        inner_item = optional_seq.items[0]
                        optional.items.append(inner_item)
                    elif optional_seq.type == SequenceType.ALTERNATIVE:
                        # Unwrap inner alternative
                        optional.items.extend(optional_seq.items)
                    else:
                        # Keep inner group
                        optional_seq.text = text[current_index + 1 : next_index - 1]
                        optional.items.append(optional_seq)

                # Empty alternative
                optional.items.append(Word(text=""))
                optional.text = text[current_index + 1 : next_index - 1]
                assert last_group is not None, parse_error(
                    "Expected group preceeding optional",
                    text,
                    current_index,
                    metadata=metadata,
                )
                last_group.items.append(optional)
                last_taggable = optional
            elif c == "{":
                # Tag attached to the previous taggable expression
                assert last_taggable is not None, parse_error(
                    "Expected expression preceeding tag",
                    text,
                    current_index,
                    metadata=metadata,
                )
                tag = Tag()

                # Tag
                end_index = parse_expression(
                    None,
                    text[current_index + 1 :],
                    end=["}"],
                    is_literal=False,
                    metadata=metadata,
                )
                assert end_index, parse_error(
                    f"Failed to find ending '}}'",
                    text,
                    current_index,
                    metadata=metadata,
                )
                next_index = end_index + current_index

                # Exclude {}
                tag.tag_text = text[current_index + 1 : next_index - 1]

                # Handle substitution/converter(s)
                if "!" in tag.tag_text:
                    # Word with converter(s)
                    # e.g., twenty:20!int
                    parts = tag.tag_text.split("!")
                    tag.tag_text = parts[0]
                    tag.converters = parts[1:]

                if ":" in tag.tag_text:
                    # Word with substitution
                    # e.g., twenty:20
                    lhs, rhs = tag.tag_text.split(":", maxsplit=1)
                    tag.tag_text = lhs
                    tag.substitution = Substitutable.parse_substitution(rhs)

                last_taggable.tag = tag
            elif c == "|":
                assert root is not None, parse_error(
                    "Unexpected '|' outside of group/alternative",
                    text,
                    current_index,
                    metadata=metadata,
                )
                if root.type != SequenceType.ALTERNATIVE:
                    # Create alternative
                    alternative = Sequence(type=SequenceType.ALTERNATIVE)
                    if len(root.items) == 1:
                        # Add directly
                        alternative.items.append(root.items[0])
                    else:
                        # Wrap in group
                        last_group = Sequence(type=SequenceType.GROUP, items=root.items)
                        alternative.items.append(last_group)

                    # Modify original sequence
                    root.items = [alternative]

                    # Overwrite root
                    root = alternative

                # NOTE: on subsequent "|" characters root is already the
                # alternative, so "alternative" here still refers to the one
                # created in an earlier loop iteration.
                assert last_group is not None, parse_error(
                    "Expected group preceeding alternative",
                    text,
                    current_index,
                    metadata=metadata,
                )
                if not last_group.text:
                    # Fix text
                    last_group.text = " ".join(item.text for item in last_group.items)

                # Create new group for any follow-on expressions
                last_group = Sequence(type=SequenceType.GROUP)
                alternative.items.append(last_group)
        else:
            # Accumulate into current literal
            literal += c

    # End of expression
    current_index = len(text)

    # Break literal
    literal = literal.strip()
    if is_literal and literal:
        assert root is not None, parse_error(
            "Literal outside parent expression", text, current_index, metadata=metadata
        )
        words = list(split_words(literal))
        assert last_group is not None, parse_error(
            "Expected group preceeding literal", text, current_index, metadata=metadata
        )
        last_group.items.extend(words)

    if last_group:
        if not last_group.text:
            # Fix text
            last_group.text = " ".join(item.text for item in last_group.items)

        if len(last_group.items) == 1:
            # Simplify final group
            assert root is not None, parse_error(
                "Group outside parent expression",
                text,
                current_index,
                metadata=metadata,
            )
            root.items[-1] = last_group.items[0]

            # Force text to be fixed
            root.text = ""

    if root and (not root.text):
        # Fix text
        if root.type == SequenceType.ALTERNATIVE:
            # Pipe separated
            root.text = " | ".join(item.text for item in root.items)
        else:
            # Space separated
            root.text = " ".join(item.text for item in root.items)

    if end and (not found):
        # Signal end not found
        return None

    return next_index
def parse_error(
    error: str, text: str, column: int, metadata: typing.Optional[ParseMetadata] = None
) -> str:
    """Generate helpful parsing error if metadata is available."""
    if not metadata:
        # Nothing to add
        return error

    return (
        f"{error} (text='{text}', file={metadata.file_name}, "
        f"column={column}, line={metadata.line_number})"
    )
import logging
import re
import shutil
import subprocess
import tempfile
import typing
from pathlib import Path
import networkx as nx
from .jsgf_graph import graph_to_fst
from .ngram import get_intent_ngram_counts
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def graph_to_arpa(
    graph: nx.DiGraph,
    arpa_path: typing.Union[str, Path],
    vocab_path: typing.Optional[typing.Union[str, Path]] = None,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    **fst_to_arpa_args,
):
    """Convert intent graph to ARPA language model using opengrm."""
    with tempfile.TemporaryDirectory() as work_dir_name:
        work_dir = Path(work_dir_name)
        fst_text_path = work_dir / "graph.fst.txt"
        isymbols_path = work_dir / "isymbols.txt"
        osymbols_path = work_dir / "osymbols.txt"

        # Write the graph out as a text FST plus input/output symbol tables
        fst = graph_to_fst(graph, intent_filter=intent_filter)
        fst.write_fst(fst_text_path, isymbols_path, osymbols_path)

        if vocab_path:
            # Vocabulary = input symbols minus meta symbols (_..., <...>)
            with open(vocab_path, "w") as vocab_file, open(
                isymbols_path, "r"
            ) as isymbols_file:
                for raw_line in isymbols_file:
                    stripped = raw_line.strip()
                    if not stripped:
                        continue

                    # Each line is "symbol N"
                    isymbol = stripped[: stripped.rfind(" ")]
                    if isymbol and (isymbol[0] not in ("_", "<")):
                        print(isymbol, file=vocab_file)

            _LOGGER.debug("Wrote vocabulary to %s", vocab_path)

        # Text FST -> ARPA
        fst_to_arpa(
            fst_text_path, isymbols_path, osymbols_path, arpa_path, **fst_to_arpa_args
        )
def fst_to_arpa(
    fst_text_path: typing.Union[str, Path],
    isymbols_path: typing.Union[str, Path],
    osymbols_path: typing.Union[str, Path],
    arpa_path: typing.Union[str, Path],
    **kwargs,
):
    """Convert text FST to ARPA language model using opengrm."""
    # Run each conversion step in order
    tasks = fst_to_arpa_tasks(
        fst_text_path, isymbols_path, osymbols_path, arpa_path, **kwargs
    )
    for task in tasks:
        run_task(task)
def graph_to_arpa_small(
    graph: nx.DiGraph,
    arpa_path: typing.Union[str, Path],
    vocab_path: typing.Optional[typing.Union[str, Path]] = None,
    dictionary_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    balance_counts: bool = True,
    estimate_ngram: typing.Optional[typing.Union[str, Path]] = None,
):
    """Convert intent graph to ARPA language model using MITLM. Works better for small graphs.

    Args:
        graph: intent graph to model
        arpa_path: where the ARPA language model is written
        vocab_path: optional path for the extracted vocabulary
        dictionary_word_transform: optional per-word transform (e.g. casing)
        balance_counts: balance n-gram counts across intents
        estimate_ngram: path to MITLM's estimate-ngram (searched on PATH
            when not given)
    """
    estimate_ngram = estimate_ngram or shutil.which("estimate-ngram")
    assert estimate_ngram, "Missing estimate-ngram in PATH"

    # Generate counts
    _LOGGER.debug("Generating ngram counts")
    intent_counts = get_intent_ngram_counts(graph, balance_counts=balance_counts)

    # Create ngram counts file
    with tempfile.NamedTemporaryFile(mode="w+") as count_file:
        for intent_name in intent_counts:
            for ngram, count in intent_counts[intent_name].items():
                if dictionary_word_transform:
                    ngram = [dictionary_word_transform(w) for w in ngram]

                # word [word] ... <TAB> count
                # NOTE(review): print() inserts a space between "\t" and the
                # count, so the actual separator is tab+space -- presumably
                # estimate-ngram tolerates this; confirm if counts misparse.
                print(*ngram, file=count_file, end="")
                print("\t", count, file=count_file)

        # Rewind so estimate-ngram can read the counts
        count_file.seek(0)
        with tempfile.NamedTemporaryFile(mode="w+") as vocab_file:
            ngram_command = [
                str(estimate_ngram),
                "-order",
                "3",
                "-counts",
                count_file.name,
                "-write-lm",
                str(arpa_path),
                "-write-vocab",
                vocab_file.name,
            ]
            _LOGGER.debug(ngram_command)
            subprocess.check_call(ngram_command)

            if vocab_path:
                # Copy over real file, dropping meta symbols (_..., <...>)
                vocab_file.seek(0)
                with open(vocab_path, "w") as real_vocab_file:
                    for line in vocab_file:
                        line = line.strip()
                        if line and (line[0] not in ["_", "<"]):
                            print(line, file=real_vocab_file)
# -----------------------------------------------------------------------------
def arpa_to_fst(arpa_path: typing.Union[str, Path], fst_path: typing.Union[str, Path]):
    """Convert ARPA language model to FST. Typically for language model mixing."""
    task = arpa_to_fst_task(arpa_path, fst_path)
    run_task(task)
def arpa_to_fst_task(
    arpa_path: typing.Union[str, Path], fst_path: typing.Union[str, Path]
) -> typing.Dict[str, typing.Any]:
    """Generate doit compatible task for ARPA to FST conversion."""
    # ngramread converts the ARPA text format into a binary ngram FST
    task: typing.Dict[str, typing.Any] = {
        "name": "base_lm_to_fst",
        "file_dep": [arpa_path],
        "targets": [fst_path],
        "actions": ["ngramread --ARPA %(dependencies)s %(targets)s"],
    }
    return task
# -----------------------------------------------------------------------------
def fst_to_arpa_tasks(
    fst_text_path: typing.Union[str, Path],
    isymbols_path: typing.Union[str, Path],
    osymbols_path: typing.Union[str, Path],
    arpa_path: typing.Union[str, Path],
    fst_path: typing.Optional[typing.Union[str, Path]] = None,
    counts_path: typing.Optional[typing.Union[str, Path]] = None,
    model_path: typing.Optional[typing.Union[str, Path]] = None,
    base_fst_weight: typing.Optional[
        typing.Tuple[typing.Union[str, Path], float]
    ] = None,
    merge_path: typing.Optional[typing.Union[str, Path]] = None,
) -> typing.Iterable[typing.Dict[str, typing.Any]]:
    """Generate doit compatible tasks for FST to ARPA conversion."""
    # Step 1: text FST + symbol tables -> compiled (binary) FST
    fst_text_path = Path(fst_text_path)
    if fst_path:
        fst_path = Path(fst_path)
    else:
        fst_path = fst_text_path.with_suffix(".fst")

    yield {
        "name": "compile_fst",
        "file_dep": [fst_text_path, isymbols_path, osymbols_path],
        "targets": [fst_path],
        "actions": [
            "fstcompile "
            "--keep_isymbols --keep_osymbols "
            f"--isymbols={isymbols_path} --osymbols={osymbols_path} "
            f"{fst_text_path} %(targets)s"
        ],
    }

    # Step 2: FST -> n-gram counts
    counts_path = counts_path or Path(str(fst_path) + ".counts")
    yield {
        "name": "intent_counts",
        "file_dep": [fst_path],
        "targets": [counts_path],
        "actions": ["ngramcount %(dependencies)s %(targets)s"],
    }

    # Step 3: n-gram counts -> smoothed model
    model_path = model_path or Path(str(fst_path) + ".model")
    yield {
        "name": "intent_model",
        "file_dep": [counts_path],
        "targets": [model_path],
        "actions": ["ngrammake --method=witten_bell %(dependencies)s %(targets)s"],
    }

    # Optional: mix with a pre-built base language model
    if base_fst_weight:
        base_path_str, base_weight = base_fst_weight
        if base_weight > 0:
            base_path = Path(base_path_str)
            if not base_path.is_file():
                # Major assumption: base language model has the same stem as
                # the FST, just with a .txt suffix.
                yield arpa_to_fst_task(base_path.with_suffix(".txt"), base_path)

            merge_path = merge_path or Path(str(fst_path) + ".merge")
            yield {
                "name": "lm_merge",
                "file_dep": [base_path, model_path],
                "targets": [merge_path],
                "actions": [
                    f"ngrammerge --alpha={base_weight} %(dependencies)s %(targets)s"
                ],
            }

            # Use merged model for the final step instead
            model_path = merge_path

    # Final step: model -> ARPA text format
    yield {
        "name": "intent_arpa",
        "file_dep": [model_path],
        "targets": [arpa_path],
        "actions": ["ngramprint --ARPA %(dependencies)s > %(targets)s"],
    }
# -----------------------------------------------------------------------------
def mix_language_models(
    small_arpa_path: typing.Union[str, Path],
    large_fst_path: typing.Union[str, Path],
    mix_weight: float,
    mixed_arpa_path: typing.Union[str, Path],
    small_fst_path: typing.Optional[typing.Union[str, Path]] = None,
    mixed_fst_path: typing.Optional[typing.Union[str, Path]] = None,
):
    """Mix a large pre-built FST language model with a small ARPA model."""
    with tempfile.NamedTemporaryFile(suffix=".fst", mode="w+") as small_fst:
        # Step 1: small ARPA -> ngram FST
        if not small_fst_path:
            small_fst_path = small_fst.name

        read_command = [
            "ngramread",
            "--ARPA",
            str(small_arpa_path),
            str(small_fst_path),
        ]
        _LOGGER.debug(read_command)
        subprocess.check_call(read_command)

        with tempfile.NamedTemporaryFile(suffix=".fst", mode="w+") as mixed_fst:
            # Step 2: merge the large and small ngram FSTs
            small_fst.seek(0)
            if not mixed_fst_path:
                mixed_fst_path = mixed_fst.name

            merge_command = [
                "ngrammerge",
                f"--alpha={mix_weight}",
                str(large_fst_path),
                str(small_fst_path),
                str(mixed_fst_path),
            ]
            _LOGGER.debug(merge_command)
            subprocess.check_call(merge_command)

            # Step 3: merged FST -> final ARPA
            mixed_fst.seek(0)
            print_command = [
                "ngramprint",
                "--ARPA",
                str(mixed_fst_path),
                str(mixed_arpa_path),
            ]
            _LOGGER.debug(print_command)
            subprocess.check_call(print_command)
# -----------------------------------------------------------------------------
def get_perplexity(
    text: str, language_model_fst: typing.Union[str, Path], debug: bool = False
) -> typing.Optional[float]:
    """Compute perplexity of text with ngramperplexity."""
    with tempfile.TemporaryDirectory() as work_dir_name:
        work_dir = Path(work_dir_name)

        # Write the sentence to a temporary file
        sentence_path = work_dir / "sentence.txt"
        sentence_path.write_text(text)

        # Derive a symbol table from the sentence
        symbols_path = work_dir / "sentence.syms"
        symbols_command = ["ngramsymbols", str(sentence_path), str(symbols_path)]
        _LOGGER.debug(symbols_command)
        subprocess.check_call(symbols_command)

        # Compile the sentence into a FAR archive
        far_path = work_dir / "sentence.far"
        compile_command = [
            "farcompilestrings",
            f"-symbols={symbols_path}",
            "-keep_symbols=1",
            str(sentence_path),
            str(far_path),
        ]
        _LOGGER.debug(compile_command)
        subprocess.check_call(compile_command)

        # Score the sentence against the language model
        perplexity_command = [
            "ngramperplexity",
            f"--v={1 if debug else 0}",
            str(language_model_fst),
            str(far_path),
        ]
        _LOGGER.debug(perplexity_command)
        output = subprocess.check_output(perplexity_command).decode()
        _LOGGER.debug(output)

        # The perplexity value is on the last output line: "... perplexity = X"
        last_line = output.strip().splitlines()[-1]
        match = re.match(r"^.*perplexity\s*=\s*(.+)$", last_line)
        if not match:
            return None

        return float(match.group(1))
# -----------------------------------------------------------------------------
def run_task(task: typing.Dict[str, typing.Any]):
    """Execute a doit compatible task."""
    name = task.get("name", "task")

    # Substitutions are the same for every action, so compute them once
    deps_str = " ".join(f'"{d}"' for d in task.get("file_dep", []))
    targets_str = " ".join(f'"{t}"' for t in task.get("targets", []))

    for action in task["actions"]:
        command = action % {"dependencies": deps_str, "targets": targets_str}
        _LOGGER.debug("%s: %s", name, command)
        subprocess.check_call(command, shell=True)
import typing
from enum import Enum
from pathlib import Path
import grapheme
import numpy as np
# -----------------------------------------------------------------------------
class FixedSymbols(str, Enum):
    """Pseudo-graphemes with fixed indexes"""

    # Padding symbol (index 0 in both symbol tables; see GeepersG2P.__init__)
    PAD = "<pad>"
    # Start-of-sequence marker (phoneme index 2)
    SOS = "<s>"
    # End-of-sequence marker (index 1 in both symbol tables)
    EOS = "</s>"
def _sigmoid(x):
return 1 / (1 + np.exp(-x))
def _grucell(x, h, w_ih, w_hh, b_ih, b_hh):
    """Single GRU step combining input x with previous hidden state h."""
    # Input/hidden projections; channels pack [reset|update|candidate] gates
    gates_ih = np.matmul(x, w_ih.T) + b_ih
    gates_hh = np.matmul(h, w_hh.T) + b_hh

    # First 2/3 of the channels are reset+update, the last 1/3 is the candidate
    split_at = gates_ih.shape[-1] * 2 // 3
    rz_ih, n_ih = gates_ih[:, :split_at], gates_ih[:, split_at:]
    rz_hh, n_hh = gates_hh[:, :split_at], gates_hh[:, split_at:]

    # pylint: disable=unbalanced-tuple-unpacking
    r, z = np.split(_sigmoid(rz_ih + rz_hh), 2, -1)
    candidate = np.tanh(n_ih + r * n_hh)

    # Update gate z interpolates between candidate and previous state
    return (1 - z) * candidate + z * h
def _gru(x, steps, w_ih, w_hh, b_ih, b_hh, h0=None):
    """Run a GRU for `steps` time steps, returning all hidden states."""
    batch_size = x.shape[0]
    hidden_size = w_hh.shape[1]

    # Default initial hidden state is all zeros
    state = np.zeros((batch_size, hidden_size), np.float32) if h0 is None else h0

    outputs = np.zeros((batch_size, steps, hidden_size), np.float32)
    for t in range(steps):
        state = _grucell(x[:, t, :], state, w_ih, w_hh, b_ih, b_hh)  # (b, h)
        outputs[:, t, ::] = state

    return outputs
# -----------------------------------------------------------------------------
class GeepersG2P:
    """Phoneme prediction from a pre-trained model using pure numpy.

    A small GRU encoder/decoder whose weights are loaded from an ``.npz``
    file with :meth:`load_variables`; :meth:`predict` then maps a word's
    graphemes to a phoneme sequence by greedy decoding.
    """

    def __init__(
        self,
        graphemes: typing.List[str],
        phonemes: typing.List[str],
        dec_maxlen: int = 20,
    ):
        # Fixed pseudo-symbols occupy the first indexes so pad/eos/sos have
        # stable ids regardless of the symbol inventory.
        self.graphemes = [FixedSymbols.PAD.value, FixedSymbols.EOS.value] + graphemes
        self.phonemes = [
            FixedSymbols.PAD.value,
            FixedSymbols.EOS.value,
            FixedSymbols.SOS.value,
        ] + phonemes

        # Symbol <-> index lookups
        self.g2idx = {g: idx for idx, g in enumerate(self.graphemes)}
        self.p2idx = {p: idx for idx, p in enumerate(self.phonemes)}

        # Indexes of the fixed pseudo-symbols (match the list prefixes above)
        self.pad_idx = 0
        self.eos_idx = 1
        self.sos_idx = 2

        # Maximum number of greedy decoding steps
        self.dec_maxlen = dec_maxlen

        # Empty until load_variables is called
        self.enc_emb = np.empty((1,))
        self.enc_w_ih = np.empty((1,))
        self.enc_w_hh = np.empty((1,))
        self.enc_b_ih = np.empty((1,))
        self.enc_b_hh = np.empty((1,))
        self.dec_emb = np.empty((1,))
        self.dec_w_ih = np.empty((1,))
        self.dec_w_hh = np.empty((1,))
        self.dec_b_ih = np.empty((1,))
        self.dec_b_hh = np.empty((1,))
        self.fc_w = np.empty((1,))
        self.fc_b = np.empty((1,))

    def load_variables(self, npz_path: typing.Union[str, Path]):
        """Load encoder/decoder weights from .npz file"""
        variables = np.load(str(npz_path))

        self.enc_emb = variables["encoder.emb.weight"]  # (len(graphemes), emb)
        self.enc_w_ih = variables["encoder.rnn.weight_ih_l0"]  # (3*128, 64)
        self.enc_w_hh = variables["encoder.rnn.weight_hh_l0"]  # (3*128, 128)
        self.enc_b_ih = variables["encoder.rnn.bias_ih_l0"]  # (3*128,)
        self.enc_b_hh = variables["encoder.rnn.bias_hh_l0"]  # (3*128,)
        self.dec_emb = variables["decoder.emb.weight"]  # (len(phonemes), emb)
        self.dec_w_ih = variables["decoder.rnn.weight_ih_l0"]  # (3*128, 64)
        self.dec_w_hh = variables["decoder.rnn.weight_hh_l0"]  # (3*128, 128)
        self.dec_b_ih = variables["decoder.rnn.bias_ih_l0"]  # (3*128,)
        self.dec_b_hh = variables["decoder.rnn.bias_hh_l0"]  # (3*128,)
        self.fc_w = variables["decoder.fc.weight"]  # (74, 128)
        self.fc_b = variables["decoder.fc.bias"]  # (74,)

    def _encode(self, graphemes: typing.List[str]) -> np.ndarray:
        """Embed graphemes plus a trailing </s> as a (1, len+1, emb) array."""
        indexes = [self.g2idx[g] for g in graphemes] + [self.eos_idx]
        return np.take(self.enc_emb, np.expand_dims(indexes, 0), axis=0)

    def predict(self, word: str) -> typing.List[str]:
        """Predict phonemes for the given word (greedy decoding)."""
        # encoder: run the GRU over the embedded graphemes
        graphemes = list(grapheme.graphemes(word))
        enc = self._encode(graphemes)
        enc = _gru(
            enc,
            len(graphemes) + 1,
            self.enc_w_ih,
            self.enc_w_hh,
            self.enc_b_ih,
            self.enc_b_hh,
            h0=np.zeros((1, self.enc_w_hh.shape[-1]), np.float32),
        )
        last_hidden = enc[:, -1, :]

        # decoder: start from <s> and greedily emit until </s> or dec_maxlen.
        # (Previously the start symbol index was hard-coded as 2; use
        # self.sos_idx so the decoder stays consistent with __init__.)
        dec = np.take(self.dec_emb, [self.sos_idx], axis=0)
        h = last_hidden
        preds = []
        for _ in range(self.dec_maxlen):
            h = _grucell(
                dec, h, self.dec_w_ih, self.dec_w_hh, self.dec_b_ih, self.dec_b_hh
            )  # (b, h)
            logits = np.matmul(h, self.fc_w.T) + self.fc_b

            # Batch size is 1, so a flattened argmax picks the best phoneme
            pred = logits.argmax()
            if pred == self.eos_idx:
                break  # </s>

            preds.append(pred)
            dec = np.take(self.dec_emb, [pred], axis=0)

        return [self.phonemes[idx] for idx in preds]
import copy
import itertools
import typing
from collections import Counter, defaultdict, deque
import networkx as nx
from .jsgf_graph import get_start_end_nodes, lcm
def get_intent_ngram_counts(
    graph: nx.DiGraph,
    pad_start="<s>",
    pad_end="</s>",
    order=3,
    balance_counts: bool = True,
) -> typing.Dict[str, Counter]:
    """Gets ngram counts per intent for a JSGF graph.

    Args:
        graph: intent graph with a __label__INTENT edge per intent
        pad_start: sentence-start padding word
        pad_end: sentence-end padding word
        order: maximum n-gram length
        balance_counts: scale each intent's counts so intents with fewer
            sentences are not under-represented

    Returns:
        Mapping from intent name to a Counter of n-gram tuples.
    """
    intent_counts: typing.Dict[str, Counter] = defaultdict(Counter)
    start_node, end_node = get_start_end_nodes(graph)
    assert (start_node is not None) and (end_node is not None)

    # Word-only view of the graph (meta nodes spliced out)
    word_graph = to_word_graph(
        graph, start_node, end_node, pad_start=pad_start, pad_end=pad_end
    )

    sentence_counts: typing.Dict[str, int] = {}
    intent_nodes = set(graph.successors(start_node))
    for intent_node in intent_nodes:
        # __label__INTENT
        edge_data = graph.edges[(start_node, intent_node)]
        olabel = edge_data["olabel"]
        assert olabel[:9] == "__label__", "Not an intent graph"
        intent_name = olabel[9:]
        sentence_counts[intent_name] = edge_data.get("sentence_count", 1)

        # First word(s) of intent: every word-graph node reachable from the
        # intent node belongs to this intent
        valid_nodes = set([start_node])
        for maybe_word_node in nx.descendants(graph, intent_node):
            if maybe_word_node in word_graph:
                valid_nodes.add(maybe_word_node)

        # Filter out nodes not part of this intent
        def filter_node(n):
            # pylint: disable=W0640
            return n in valid_nodes

        # Compute ngram counts using a view of the main word graph restricted to
        # nodes from this intent.
        subgraph = nx.subgraph_view(word_graph, filter_node=filter_node)
        intent_counts[intent_name] = count_ngrams(
            subgraph,
            start_node,
            end_node,
            pad_start=pad_start,
            pad_end=pad_end,
            order=order,
        )

    # Balance all n-gram counts by intent
    if balance_counts:
        num_sentences_lcm = lcm(*sentence_counts.values())

        # Multiply all counts by LCM/count for each intent
        for intent_name, sentence_count in sentence_counts.items():
            multiplier = num_sentences_lcm // sentence_count
            ngram_counts = intent_counts[intent_name]
            for ngram in ngram_counts:
                ngram_counts[ngram] *= multiplier

    return intent_counts
# -----------------------------------------------------------------------------
def count_ngrams(
    word_graph: nx.DiGraph,
    start_node: int,
    end_node: int,
    pad_start="<s>",
    pad_end="</s>",
    label="word",
    order=3,
) -> Counter:
    """Compute n-gram counts in a word graph.

    Counts n-grams (up to ``order``) over all start->end paths using path
    counts through each node, without enumerating sentences explicitly.
    """
    assert order > 0, "Order must be greater than zero"
    n_data = word_graph.nodes(data=True)

    # Counts from a node to <s>
    up_counts: Counter = Counter()

    # Counts from a node to </s>
    down_counts: Counter = Counter()

    # Top/bottom = 1
    up_counts[start_node] = 1
    down_counts[end_node] = 1

    # up_counts[n] = number of distinct paths from <s> to n.
    # Skip start node
    for n in itertools.islice(nx.topological_sort(word_graph), 1, None):
        for n2 in word_graph.predecessors(n):
            up_counts[n] += up_counts[n2]

    # Down: same computation on the reversed graph.
    reverse_graph = nx.reverse_view(word_graph)

    # down_counts[n] = number of distinct paths from n to </s>.
    # Skip end node
    for n in itertools.islice(nx.topological_sort(reverse_graph), 1, None):
        for n2 in reverse_graph.predecessors(n):
            down_counts[n] += down_counts[n2]

    # Compute counts: an n-gram ending at node n occurs once per sentence
    # that passes through its whole node sequence.
    ngram_counts: Counter = Counter()
    for n in word_graph:
        # Unigram
        word = n_data[n][label]
        ngram = [word]
        ngram_counts[tuple(ngram)] += up_counts[n] * down_counts[n]
        if order == 1:
            continue

        # Higher order: breadth-first walk backwards from n, extending the
        # n-gram to the left up to `order` words.
        q = deque([(n, ngram)])
        while q:
            current_node, current_ngram = q.popleft()
            for n2 in word_graph.predecessors(current_node):
                word_n2 = n_data[n2][label]
                ngram_n2 = [word_n2] + current_ngram
                ngram_counts[tuple(ngram_n2)] += up_counts[n2] * down_counts[n]
                if len(ngram_n2) < order:
                    q.append((n2, ngram_n2))

    return ngram_counts
# -----------------------------------------------------------------------------
def to_word_graph(
    graph: nx.DiGraph,
    start_node: int,
    end_node: int,
    pad_start: str = "<s>",
    pad_end: str = "</s>",
    label: str = "word",
) -> nx.DiGraph:
    """Converts a JSGF graph with meta nodes to just words."""
    # Work on a copy so the caller's graph is untouched
    graph = copy.deepcopy(graph)
    node_data = graph.nodes(data=True)

    # Label the start/end nodes as sentence padding words
    node_data[start_node][label] = pad_start
    node_data[end_node][label] = pad_end

    # Splice out every node with no word label, reconnecting its
    # predecessors directly to its successors.
    for node in list(graph):
        if node_data[node].get(label, ""):
            continue

        preds = list(graph.predecessors(node))
        succs = list(graph.successors(node))
        graph.add_edges_from(itertools.product(preds, succs))
        graph.remove_node(node)

    return graph
import configparser
import io
import logging
import re
import typing
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from .const import IntentsType, ReplacementsType, SentencesType
from .jsgf import (
Expression,
ParseMetadata,
Rule,
RuleReference,
Sentence,
Sequence,
SequenceType,
SlotReference,
Word,
)
_LOGGER = logging.getLogger(__name__)
@dataclass
class Grammar:
    """Named JSGF grammar with rules."""

    grammar_name: str = ""
    rules: typing.List[Rule] = field(default_factory=list)

    GRAMMAR_DECLARATION = re.compile(r"^grammar ([^;]+);$")

    @classmethod
    def parse(cls, source: typing.TextIO) -> "Grammar":
        """Parse single JSGF grammar."""
        grammar = Grammar()

        # Read line-by-line
        for raw_line in source:
            line = raw_line.strip()
            if (not line) or line.startswith("#"):
                # Skip comments/blank lines
                continue

            declaration = Grammar.GRAMMAR_DECLARATION.match(line)
            if declaration:
                # grammar GrammarName;
                grammar.grammar_name = declaration.group(1)
                continue

            # public <RuleName> = rule body;
            # <RuleName> = rule body;
            grammar.rules.append(Rule.parse(line))  # pylint: disable=E1101

        return grammar
# -----------------------------------------------------------------------------
def parse_ini(
    source: typing.Union[str, Path, typing.TextIO],
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    sentence_transform: typing.Optional[typing.Callable[[str], str]] = None,
    file_name: typing.Optional[str] = None,
) -> IntentsType:
    """Parse multiple JSGF grammars from an ini file.

    Args:
        source: ini text, path to an ini file, or an open text stream
        intent_filter: optional predicate; sections it rejects are skipped
        sentence_transform: optional transform applied to each sentence string
        file_name: name used in parse errors (defaults based on source type)

    Returns:
        Mapping from intent (section) name to parsed Sentence/Rule expressions.
    """
    intent_filter = intent_filter or (lambda x: True)
    if isinstance(source, str):
        source = io.StringIO(source)
        file_name = file_name or "<StringIO>"
    elif isinstance(source, Path):
        # Capture the path for error messages *before* replacing source with
        # the open file handle.  Previously str() was taken of the file
        # object, yielding "<_io.TextIOWrapper ...>" instead of the path.
        file_name = file_name or str(source)
        source = open(source, "r")
    else:
        file_name = file_name or "<TextIO>"

    # Process configuration sections
    sentences: IntentsType = defaultdict(list)

    try:
        # Create ini parser
        config = configparser.ConfigParser(
            allow_no_value=True, strict=False, delimiters=["="]
        )

        # case sensitive
        config.optionxform = str  # type: ignore
        config.read_file(source)

        _LOGGER.debug("Loaded ini file")

        # Parse each section (intent).  line_number is a best-effort line
        # tracker used only in parse error messages.
        line_number: int = 1
        for sec_name in config.sections():
            # Exclude if filtered out.
            if not intent_filter(sec_name):
                _LOGGER.debug("Skipping %s", sec_name)
                continue

            # Section header
            line_number += 1

            # Process settings (sentences/rules)
            for k, v in config[sec_name].items():
                if v is None:
                    # Collect non-valued keys as sentences
                    sentence = k.strip()

                    # Fix \[ escape sequence
                    sentence = sentence.replace("\\[", "[")

                    if sentence_transform:
                        # Do transform
                        sentence = sentence_transform(sentence)

                    sentences[sec_name].append(
                        Sentence.parse(
                            sentence,
                            metadata=ParseMetadata(
                                file_name=file_name,
                                line_number=line_number,
                                intent_name=sec_name,
                            ),
                        )
                    )
                else:
                    sentence = v.strip()
                    if sentence_transform:
                        # Do transform
                        sentence = sentence_transform(sentence)

                    # Collect key/value pairs as JSGF rules
                    rule = f"<{k.strip()}> = ({sentence});"

                    # Fix \[ escape sequence
                    rule = rule.replace("\\[", "[")

                    sentences[sec_name].append(
                        Rule.parse(
                            rule,
                            metadata=ParseMetadata(
                                file_name=file_name,
                                line_number=line_number,
                                intent_name=sec_name,
                            ),
                        )
                    )

                # Sentence
                line_number += 1

            # Blank line
            line_number += 1
    finally:
        source.close()

    return sentences
# -----------------------------------------------------------------------------
def split_rules(
    intents: IntentsType, replacements: typing.Optional[ReplacementsType] = None
) -> typing.Tuple[SentencesType, ReplacementsType]:
    """Separate rules from plain sentences in each intent.

    Rules are folded into the replacements dictionary under a
    "<intent.rule>" key; everything else is kept as a sentence.

    Returns:
        Tuple of (sentences by intent, updated replacements).
    """
    replacements = replacements or {}
    sentences: SentencesType = {}

    for name, expressions in intents.items():
        plain = []
        for expression in expressions:
            if isinstance(expression, Rule):
                # Fold rule body into replacements under <intent.rule>
                key = f"<{name}.{expression.rule_name}>"
                replacements[key] = [expression.rule_body]
            else:
                plain.append(expression)

        sentences[name] = plain

    return sentences, replacements
# -----------------------------------------------------------------------------
def get_intent_counts(
    sentences: SentencesType,
    replacements: typing.Optional[ReplacementsType] = None,
    exclude_slots: bool = True,
    count_dict: typing.Optional[typing.Dict[Expression, int]] = None,
) -> typing.Dict[str, int]:
    """Compute the number of possible sentences per intent (minimum 1)."""
    intent_counts: typing.Dict[str, int] = defaultdict(int)

    for intent_name, intent_sentences in sentences.items():
        # Sum counts over all top-level sentences of this intent
        total = sum(
            get_expression_count(
                sentence,
                replacements,
                exclude_slots=exclude_slots,
                count_dict=count_dict,
            )
            for sentence in intent_sentences
        )

        # Never report fewer than one possible sentence
        intent_counts[intent_name] = max(1, total)

    return intent_counts
# -----------------------------------------------------------------------------
def get_expression_count(
    expression: Expression,
    replacements: typing.Optional[ReplacementsType] = None,
    exclude_slots: bool = True,
    count_dict: typing.Optional[typing.Dict[Expression, int]] = None,
) -> int:
    """Get the number of possible sentences in an expression.

    Args:
        expression: expression whose sentence count is computed
        replacements: "<rule>"/"$slot" replacement dictionary
        exclude_slots: if True, slot replacements are not expanded when
            counting (SlotReference falls through to the unknown case)
        count_dict: optional dictionary recording the count computed for
            each visited expression

    Returns:
        Number of possible sentences (0 for unknown expression types).
    """

    def record(count: int) -> int:
        # Record the count for this expression (if requested) and return it.
        # Replaces six copies of the same record-then-return snippet.
        if count_dict is not None:
            count_dict[expression] = count

        return count

    if isinstance(expression, Sequence):
        if expression.type == SequenceType.GROUP:
            # Counts multiply down the sequence
            count = 1
            for sub_item in expression.items:
                count *= get_expression_count(
                    sub_item,
                    replacements,
                    exclude_slots=exclude_slots,
                    count_dict=count_dict,
                )

            return record(count)

        if expression.type == SequenceType.ALTERNATIVE:
            # Counts sum across the alternatives
            return record(
                sum(
                    get_expression_count(
                        sub_item,
                        replacements,
                        exclude_slots=exclude_slots,
                        count_dict=count_dict,
                    )
                    for sub_item in expression.items
                )
            )
    elif isinstance(expression, RuleReference):
        # Get substituted sentences for <rule>
        key = f"<{expression.full_rule_name}>"
        assert replacements, key
        return record(
            sum(
                get_expression_count(
                    value, replacements, exclude_slots=exclude_slots, count_dict=count_dict
                )
                for value in replacements[key]
            )
        )
    elif (not exclude_slots) and isinstance(expression, SlotReference):
        # Get substituted sentences for $slot
        key = f"${expression.slot_name}"
        assert replacements, key
        return record(
            sum(
                get_expression_count(
                    value, replacements, exclude_slots=exclude_slots, count_dict=count_dict
                )
                for value in replacements[key]
            )
        )
    elif isinstance(expression, Word):
        # Single word
        return record(1)

    # Unknown expression type (also Sequence of an unrecognized type)
    return record(0)
import logging
import re
import typing
from num2words import num2words
from .jsgf import Expression, Sequence, SequenceType, SlotReference, Word
# Module logger for the number-transform utilities
_LOGGER = logging.getLogger(__name__)

# Matches integer ranges like "0..100" or "-100..100", with an optional
# ",step" suffix (e.g. "0..100,2").
# Groups: (1) lower bound, (2) upper bound, (3) ",step" including the comma
NUMBER_RANGE_PATTERN = re.compile(r"^(-?[0-9]+)\.\.(-?[0-9]+)(,[0-9]+)?$")

# Matches a bare (optionally negative) integer such as "75" or "-3"
NUMBER_PATTERN = re.compile(r"^(-?[0-9]+)$")
# -----------------------------------------------------------------------------
def number_to_words(
    number: int, language: typing.Optional[str] = None
) -> typing.List[str]:
    """Convert number to list of words (75 -> seventy five)"""
    text = num2words(number, lang=(language or "en"))

    # Drop hyphens and commas so each spoken word stands alone
    text = text.replace("-", " ").replace(",", "")

    return text.strip().split()
def replace_numbers(
    words: typing.Iterable[str], language: typing.Optional[str] = None
) -> typing.Iterable[str]:
    """Replace numbers with words in a sentence (75 hats -> seventy five hats)"""
    language = language or "en"

    for word in words:
        if not NUMBER_PATTERN.match(word):
            # Pass non-numbers through unchanged
            yield word
            continue

        # Expand the number into its component words
        yield from number_to_words(int(word), language=language)
def number_range_transform(word: Expression, slot_name="rhasspy/number"):
    """Automatically transform number ranges to slot reference (e.g., 0..100).

    A Word whose text looks like "A..B" or "A..B,S" becomes a SlotReference
    to the built-in number slot with lower bound, upper bound, and step as
    comma-separated arguments.

    Returns:
        The new SlotReference, or None if the word is not a number range.
    """
    if not isinstance(word, Word):
        # Skip anything besides words
        return

    match = NUMBER_RANGE_PATTERN.match(word.text)
    if not match:
        return

    try:
        lower_bound = int(match.group(1))
        upper_bound = int(match.group(2))

        # Optional third group is the step with its leading comma (",2").
        # BUGFIX: the previous check "len(match.groups()) > 3" could never
        # be true (the pattern has exactly 3 groups), so the step was
        # silently ignored; test the group's value instead.
        step = 1
        step_group = match.group(3)
        if step_group:
            # Exclude leading ","
            step = int(step_group[1:])

        # Transform to $rhasspy/number
        return SlotReference(
            text=word.text,
            slot_name=f"{slot_name},{lower_bound},{upper_bound},{step}",
            converters=["int"],
        )
    except ValueError:
        # Not a number
        pass
    except Exception:
        _LOGGER.exception("number_range_transform")
def number_transform(word: Expression, language: typing.Optional[str] = None):
    """Automatically transform numbers to words (e.g., 75).

    A Word whose text is a bare integer is rewritten in place (single
    spoken word) or replaced by a GROUP Sequence (multiple spoken words),
    with the original integer attached as an "int"-converted substitution.

    Returns:
        The transformed expression, or None if no transform applies.
    """
    if not isinstance(word, Word):
        # Skip anything besides words
        return

    match = NUMBER_PATTERN.match(word.text)
    if not match:
        return

    try:
        n = int(match.group(1))

        # 75 -> (seventy five):75!int
        number_words = number_to_words(n, language=language)
        if len(number_words) == 1:
            # Easy case, single word: mutate the Word in place so it is
            # spoken as the word but substitutes the original integer
            word.text = number_words[0]
            word.substitution = [str(n)]
            word.converters = ["int"]
            return word

        # Hard case, split into multiple Words grouped in a Sequence that
        # substitutes the original integer as a whole
        number_text = " ".join(number_words)
        return Sequence(
            text=number_text,
            type=SequenceType.GROUP,
            substitution=[str(n)],
            converters=["int"],
            items=[Word(w) for w in number_words],
        )
    except ValueError:
        # Not a number
        pass
    except Exception:
        # Log unexpected failures but don't break sentence processing
        _LOGGER.exception("number_transform")
import base64
import gzip
import io
import math
import typing
from dataclasses import dataclass
from pathlib import Path
import networkx as nx
from .const import IntentsType, ReplacementsType, SentencesType
from .ini_jsgf import get_intent_counts, split_rules
from .jsgf import (
Expression,
RuleReference,
Sentence,
Sequence,
SequenceType,
SlotReference,
Substitutable,
Taggable,
Word,
)
from .slots import split_slot_args
# -----------------------------------------------------------------------------
def expression_to_graph(
    expression: Expression,
    graph: nx.DiGraph,
    source_state: int,
    replacements: typing.Optional[ReplacementsType] = None,
    empty_substitution: int = 0,
    grammar_name: typing.Optional[str] = None,
    count_dict: typing.Optional[typing.Dict[Expression, int]] = None,
    rule_grammar: str = "",
    expand_slots: bool = True,
) -> int:
    """Insert JSGF expression into a graph. Return final state.

    Recursively adds states and edges for the expression starting at
    source_state. New node ids are allocated as len(graph). Edges carry
    "ilabel" (input word), "olabel" (output token, base64-packed by
    maybe_pack when it contains whitespace), and a display "label".

    Args:
        expression: JSGF expression to insert
        graph: graph being built (mutated in place)
        source_state: state to attach this expression to
        replacements: "<rule>"/"$slot" replacement dictionary
        empty_substitution: when > 0, word outputs are suppressed because
            an enclosing substitution will emit the output instead
        grammar_name: grammar (intent) used to resolve local rule names
        count_dict: per-expression counts; only passed through to
            recursive calls, never read or written here
        rule_grammar: grammar name of the rule currently being expanded
        expand_slots: if False, slot references are not expanded

    Returns:
        Final state id after inserting the expression.
    """
    replacements = replacements or {}

    # Handle sequence substitution
    if isinstance(expression, Substitutable) and expression.substitution:
        # Ensure everything downstream outputs nothing
        empty_substitution += 1

    # Handle tag begin
    if isinstance(expression, Taggable) and expression.tag:
        # Begin tag
        next_state = len(graph)
        tag = expression.tag.tag_text
        olabel = f"__begin__{tag}"
        label = f":{olabel}"
        graph.add_edge(
            source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
        )
        source_state = next_state

        if expression.tag.substitution:
            # Ensure everything downstream outputs nothing
            empty_substitution += 1

    # Handle converters begin
    begin_converters: typing.List[str] = []
    if isinstance(expression, Taggable) and expression.tag:
        begin_converters.extend(reversed(expression.tag.converters))

    if isinstance(expression, Substitutable) and expression.converters:
        begin_converters.extend(reversed(expression.converters))

    # Create begin transitions for each converter (in reverse order)
    for converter_name in begin_converters:
        next_state = len(graph)
        olabel = f"__convert__{converter_name}"
        label = f"!{olabel}"
        graph.add_edge(
            source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
        )
        source_state = next_state

    if isinstance(expression, Sequence):
        # Group, optional, or alternative
        seq: Sequence = expression
        if seq.type == SequenceType.ALTERNATIVE:
            # Optional or alternative
            final_states = []
            for item in seq.items:
                # Branch alternatives from source state
                next_state = expression_to_graph(
                    item,
                    graph,
                    source_state,
                    replacements=replacements,
                    empty_substitution=empty_substitution,
                    grammar_name=grammar_name,
                    count_dict=count_dict,
                    rule_grammar=rule_grammar,
                    expand_slots=expand_slots,
                )
                final_states.append(next_state)

            # Connect all paths to a common final state
            next_state = len(graph)
            for final_state in final_states:
                graph.add_edge(final_state, next_state, ilabel="", olabel="", label="")

            source_state = next_state
        else:
            # Group: chain items one after another
            next_state = source_state
            for item in seq.items:
                # Create sequence of states
                next_state = expression_to_graph(
                    item,
                    graph,
                    next_state,
                    replacements=replacements,
                    empty_substitution=empty_substitution,
                    grammar_name=grammar_name,
                    count_dict=count_dict,
                    rule_grammar=rule_grammar,
                    expand_slots=expand_slots,
                )

            source_state = next_state
    elif isinstance(expression, Word):
        # State for single word
        word: Word = expression
        next_state = len(graph)
        graph.add_node(next_state, word=word.text)

        if (word.substitution is None) and (empty_substitution <= 0):
            # Single word input/output on one edge
            graph.add_edge(
                source_state,
                next_state,
                ilabel=word.text,
                olabel=word.text,
                label=word.text,
            )
            source_state = next_state
        else:
            # Loading edge: consume the word, emit nothing here
            graph.add_edge(
                source_state,
                next_state,
                ilabel=word.text,
                olabel="",
                label=f"{word.text}:",
            )
            source_state = next_state

            # Add word output(s) unless suppressed by an enclosing substitution
            olabels = [word.text] if (word.substitution is None) else word.substitution
            if empty_substitution <= 0:
                source_state = add_substitution(graph, olabels, source_state)
    elif isinstance(expression, RuleReference):
        # Reference to a local or remote rule
        rule_ref: RuleReference = expression
        if rule_ref.grammar_name:
            # Fully resolved rule name
            rule_name = f"{rule_ref.grammar_name}.{rule_ref.rule_name}"
            rule_grammar = rule_ref.grammar_name
        elif rule_grammar:
            # Nested rule: resolve against the rule currently expanding
            rule_name = f"{rule_grammar}.{rule_ref.rule_name}"
        elif grammar_name:
            # Local rule: resolve against the current grammar (intent)
            rule_name = f"{grammar_name}.{rule_ref.rule_name}"
            rule_grammar = grammar_name
        else:
            # Unresolved rule name
            rule_name = rule_ref.rule_name

        # Surround with <>
        rule_name_brackets = f"<{rule_name}>"
        rule_replacements = replacements.get(rule_name_brackets)
        assert rule_replacements, f"Missing rule {rule_name}"

        rule_body = next(iter(rule_replacements))
        assert isinstance(rule_body, Sentence), f"Invalid rule {rule_name}: {rule_body}"
        source_state = expression_to_graph(
            rule_body,
            graph,
            source_state,
            replacements=replacements,
            empty_substitution=empty_substitution,
            grammar_name=grammar_name,
            count_dict=count_dict,
            rule_grammar=rule_grammar,
            expand_slots=expand_slots,
        )
    elif isinstance(expression, SlotReference):
        # Reference to slot values
        slot_ref: SlotReference = expression

        # Prefix with $
        slot_name = "$" + slot_ref.slot_name

        if expand_slots:
            slot_values = replacements.get(slot_name)
            assert slot_values, f"Missing slot {slot_name}"

            # Interpret slot values as an alternative of sentences
            slot_seq = Sequence(type=SequenceType.ALTERNATIVE, items=list(slot_values))
            source_state = expression_to_graph(
                slot_seq,
                graph,
                source_state,
                replacements=replacements,
                empty_substitution=(
                    empty_substitution + (1 if slot_ref.substitution else 0)
                ),
                grammar_name=grammar_name,
                count_dict=count_dict,
                rule_grammar=rule_grammar,
                expand_slots=expand_slots,
            )

        # Emit __source__ with slot name (no arguments)
        slot_name_noargs = split_slot_args(slot_ref.slot_name)[0]
        next_state = len(graph)
        olabel = f"__source__{slot_name_noargs}"
        # NOTE(review): unlike every other edge in this function, olabel is
        # NOT passed through maybe_pack here while label is — the two
        # arguments look swapped; confirm against downstream unpacking.
        graph.add_edge(
            source_state, next_state, ilabel="", olabel=olabel, label=maybe_pack(olabel)
        )
        source_state = next_state

    # Handle sequence substitution
    if isinstance(expression, Substitutable) and expression.substitution:
        # Output substituted word(s) once the innermost substitution is reached
        empty_substitution -= 1
        if empty_substitution <= 0:
            source_state = add_substitution(
                graph, expression.substitution, source_state
            )

    # Handle converters end
    end_converters: typing.List[str] = []
    if isinstance(expression, Substitutable) and expression.converters:
        end_converters.extend(expression.converters)

    if isinstance(expression, Taggable) and expression.tag:
        end_converters.extend(expression.tag.converters)

    # Handle tag end
    if isinstance(expression, Taggable) and expression.tag:
        # Handle tag substitution
        if expression.tag.substitution:
            # Output substituted word(s)
            source_state = add_substitution(
                graph, expression.tag.substitution, source_state
            )

        # Create end transitions for each converter
        for converter_name in end_converters:
            next_state = len(graph)
            olabel = f"__converted__{converter_name}"
            label = f"!{olabel}"
            graph.add_edge(
                source_state,
                next_state,
                ilabel="",
                olabel=maybe_pack(olabel),
                label=label,
            )
            source_state = next_state

        # End tag
        next_state = len(graph)
        tag = expression.tag.tag_text
        olabel = f"__end__{tag}"
        label = f":{olabel}"
        graph.add_edge(
            source_state, next_state, ilabel="", olabel=maybe_pack(olabel), label=label
        )
        source_state = next_state
    else:
        # Create end transitions for each converter
        for converter_name in end_converters:
            next_state = len(graph)
            olabel = f"__converted__{converter_name}"
            label = f"!{olabel}"
            graph.add_edge(
                source_state,
                next_state,
                ilabel="",
                olabel=maybe_pack(olabel),
                label=label,
            )
            source_state = next_state

    return source_state
def add_substitution(
    graph: nx.DiGraph,
    substitution: typing.Union[str, typing.List[str]],
    source_state: int,
) -> int:
    """Append epsilon-input transitions that emit the substitution tokens.

    Returns:
        Final state after all tokens have been added.
    """
    # Normalize a single token to a one-element list
    tokens = [substitution] if isinstance(substitution, str) else substitution

    state = source_state
    for token in tokens:
        next_state = len(graph)
        graph.add_edge(
            state,
            next_state,
            ilabel="",
            olabel=maybe_pack(token),
            label=f":{token}",
        )
        state = next_state

    return state
def maybe_pack(olabel: str) -> str:
    """Pack output label as base64 if it contains whitespace.

    FST symbol tables cannot contain whitespace, so such labels are
    emitted as "__unpack__<base64>" and decoded downstream.
    """
    if " " in olabel:
        # BUGFIX: use b64encode instead of encodebytes. encodebytes wraps
        # its output with a newline every 76 characters, so any label
        # longer than ~57 bytes got embedded newlines that corrupted the
        # single-line symbol. b64encode emits identical base64 without
        # any newlines (and matches encodebytes+strip for short labels).
        return "__unpack__" + base64.b64encode(olabel.encode()).decode()

    return olabel
# -----------------------------------------------------------------------------
def intents_to_graph(
    intents: IntentsType,
    replacements: typing.Optional[ReplacementsType] = None,
    add_intent_weights: bool = True,
    exclude_slots_from_counts: bool = True,
) -> nx.DiGraph:
    """Convert sentences/rules grouped by intent into a directed graph."""
    # Fold rules into the replacement dictionary first
    plain_sentences, all_replacements = split_rules(intents, replacements)

    return sentences_to_graph(
        plain_sentences,
        replacements=all_replacements,
        add_intent_weights=add_intent_weights,
        exclude_slots_from_counts=exclude_slots_from_counts,
    )
def sentences_to_graph(
    sentences: SentencesType,
    replacements: typing.Optional[ReplacementsType] = None,
    add_intent_weights: bool = True,
    exclude_slots_from_counts: bool = True,
    expand_slots: bool = True,
) -> nx.DiGraph:
    """Convert sentences grouped by intent into a directed graph.

    The graph has a single start node, one "__label__<intent>" branch per
    intent, and a single shared final node.

    Args:
        sentences: sentences grouped by intent name
        replacements: "<rule>"/"$slot" replacement dictionary
        add_intent_weights: attach normalized per-intent edge weights so
            intents with fewer sentences are not out-weighed
        exclude_slots_from_counts: don't expand slots when counting
        expand_slots: if False, slot references are left unexpanded

    Returns:
        Directed graph with "start"/"final" node attributes set.
    """
    num_intents = len(sentences)
    intent_weights: typing.Dict[str, float] = {}

    # NOTE(review): count_dict is always None here and never populated, so
    # per-expression counts are not recorded — confirm this is intended.
    count_dict: typing.Optional[typing.Dict[Expression, int]] = None

    if add_intent_weights:
        # Count number of posssible sentences per intent
        intent_counts = get_intent_counts(
            sentences,
            replacements,
            exclude_slots=exclude_slots_from_counts,
            count_dict=count_dict,
        )

        # Fix zero counts
        for intent_name in intent_counts:
            intent_counts[intent_name] = max(intent_counts[intent_name], 1)

        # Weight each intent by LCM / count (intents with fewer sentences
        # get proportionally larger weights), then normalize to sum to 1
        num_sentences_lcm = lcm(*intent_counts.values())
        intent_weights = {
            intent_name: (
                num_sentences_lcm // max(intent_counts.get(intent_name, 1), 1)
            )
            for intent_name in sentences
        }

        # Normalize
        weight_sum = max(sum(intent_weights.values()), 1)
        for intent_name in intent_weights:
            intent_weights[intent_name] /= weight_sum
    else:
        intent_counts = {}

    # Create initial graph
    graph: nx.DiGraph = nx.DiGraph()
    root_state: int = 0
    graph.add_node(root_state, start=True)

    final_states: typing.List[int] = []

    for intent_name, intent_sentences in sentences.items():
        # Branch off for each intent from start state
        intent_state = len(graph)
        olabel = f"__label__{intent_name}"
        label = f":{olabel}"

        edge_kwargs: typing.Dict[str, typing.Any] = {}
        if add_intent_weights and (num_intents > 1):
            # Weights are only meaningful with more than one intent
            edge_kwargs["sentence_count"] = intent_counts.get(intent_name, 1)
            edge_kwargs["weight"] = intent_weights.get(intent_name, 0)

        graph.add_edge(
            root_state,
            intent_state,
            ilabel="",
            olabel=olabel,
            label=label,
            **edge_kwargs,
        )

        for sentence in intent_sentences:
            # Insert all sentences for this intent
            next_state = expression_to_graph(  # type: ignore
                sentence,
                graph,
                intent_state,
                replacements=replacements,
                grammar_name=intent_name,
                count_dict=count_dict,
                expand_slots=expand_slots,
            )
            final_states.append(next_state)

    # Create final state and join all sentences to it
    final_state = len(graph)
    graph.add_node(final_state, final=True)

    for next_state in final_states:
        graph.add_edge(next_state, final_state, ilabel="", olabel="", label="")

    return graph
# -----------------------------------------------------------------------------
def graph_to_json(graph: nx.DiGraph) -> typing.Dict[str, typing.Any]:
    """Convert graph to a node-link dict suitable for JSON serialization."""
    return nx.readwrite.json_graph.node_link_data(graph)
def json_to_graph(json_dict: typing.Dict[str, typing.Any]) -> nx.DiGraph:
    """Convert a deserialized node-link JSON dict back to a graph."""
    return nx.readwrite.json_graph.node_link_graph(json_dict)
def graph_to_gzip_pickle(graph: nx.DiGraph, out_file: typing.BinaryIO):
    """Write graph to out_file in binary gzipped pickle format.

    NOTE(review): nx.readwrite.gpickle was removed in networkx 3.0, so
    this requires networkx < 3 — confirm the pinned dependency version.
    """
    with gzip.GzipFile(fileobj=out_file, mode="wb") as graph_gzip:
        nx.readwrite.gpickle.write_gpickle(graph, graph_gzip)
def gzip_pickle_to_graph(in_file: typing.BinaryIO) -> nx.DiGraph:
    """Read a graph from a binary gzipped pickle file.

    NOTE(review): relies on nx.readwrite.gpickle (removed in networkx 3.0);
    also, unpickling is only safe on trusted input.
    """
    with gzip.GzipFile(fileobj=in_file, mode="rb") as graph_gzip:
        return nx.readwrite.gpickle.read_gpickle(graph_gzip)
# -----------------------------------------------------------------------------
@dataclass
class GraphFsts:
    """Result from graph_to_fsts."""

    # OpenFST text (one FST per intent), keyed by intent name
    intent_fsts: typing.Dict[str, str]
    # Combined symbol table: symbol -> integer id (eps is id 0)
    symbols: typing.Dict[str, int]
    # Input-side symbol table (subset of symbols)
    input_symbols: typing.Dict[str, int]
    # Output-side symbol table (subset of symbols)
    output_symbols: typing.Dict[str, int]
def graph_to_fsts(
    graph: nx.DiGraph,
    eps="<eps>",
    weight_key="weight",
    default_weight=0,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
) -> GraphFsts:
    """Convert graph to OpenFST text format, one per intent.

    Args:
        graph: intent graph (from sentences_to_graph)
        eps: symbol used for epsilon (empty) transitions
        weight_key: edge attribute holding the arc weight ("" to omit)
        default_weight: weight used when an edge lacks weight_key
        intent_filter: optional predicate selecting intents by name

    Returns:
        GraphFsts with per-intent FST text and shared symbol tables.
    """
    intent_fsts: typing.Dict[str, str] = {}
    symbols: typing.Dict[str, int] = {eps: 0}
    input_symbols: typing.Dict[str, int] = {}
    output_symbols: typing.Dict[str, int] = {}
    n_data = graph.nodes(data=True)

    # start state
    start_node: int = next(n for n, data in n_data if data.get("start"))

    for _, intent_node, edge_data in graph.edges(start_node, data=True):
        # Strip the "__label__" prefix (9 characters) to get the intent name
        intent_name: str = edge_data["olabel"][9:]

        # Filter intents by name
        if intent_filter and not intent_filter(intent_name):
            continue

        final_states: typing.Set[int] = set()
        state_map: typing.Dict[int, int] = {}

        with io.StringIO() as intent_file:
            # Transitions
            for edge in nx.edge_bfs(graph, intent_node):
                edge_data = graph.edges[edge]
                from_node, to_node = edge

                # Map states starting from 0
                from_state = state_map.get(from_node, len(state_map))
                state_map[from_node] = from_state

                to_state = state_map.get(to_node, len(state_map))
                state_map[to_node] = to_state

                # Get input/output labels.
                # Empty string indicates epsilon transition (eps)
                ilabel = edge_data.get("ilabel", "") or eps
                olabel = edge_data.get("olabel", "") or eps

                # Map labels (symbols) to integers
                isymbol = symbols.get(ilabel, len(symbols))
                symbols[ilabel] = isymbol
                input_symbols[ilabel] = isymbol

                osymbol = symbols.get(olabel, len(symbols))
                symbols[olabel] = osymbol
                output_symbols[olabel] = osymbol

                if weight_key:
                    weight = edge_data.get(weight_key, default_weight)
                    print(
                        f"{from_state} {to_state} {ilabel} {olabel} {weight}",
                        file=intent_file,
                    )
                else:
                    # No weight
                    print(
                        f"{from_state} {to_state} {ilabel} {olabel}", file=intent_file
                    )

                # Check if final state
                if n_data[from_node].get("final", False):
                    final_states.add(from_state)

                if n_data[to_node].get("final", False):
                    final_states.add(to_state)

            # Record final states (one state id per line, OpenFST text format)
            for final_state in final_states:
                print(final_state, file=intent_file)

            intent_fsts[intent_name] = intent_file.getvalue()

    return GraphFsts(
        intent_fsts=intent_fsts,
        symbols=symbols,
        input_symbols=input_symbols,
        output_symbols=output_symbols,
    )
# -----------------------------------------------------------------------------
@dataclass
class GraphFst:
    """Result from graph_to_fst."""

    # Complete FST in OpenFST text format
    intent_fst: str
    # Combined symbol table: symbol -> integer id (eps is id 0)
    symbols: typing.Dict[str, int]
    # Input-side symbol table (subset of symbols)
    input_symbols: typing.Dict[str, int]
    # Output-side symbol table (subset of symbols)
    output_symbols: typing.Dict[str, int]

    def write_fst(
        self,
        fst_text_path: typing.Union[str, Path],
        isymbols_path: typing.Union[str, Path],
        osymbols_path: typing.Union[str, Path],
    ):
        """Write FST text and input/output symbol table files.

        Args:
            fst_text_path: destination for the FST text
            isymbols_path: destination for the input symbol table
            osymbols_path: destination for the output symbol table
        """
        # Write FST
        Path(fst_text_path).write_text(self.intent_fst)

        # Write input symbols ("<symbol> <id>" per line)
        with open(isymbols_path, "w") as isymbols_file:
            # pylint: disable=E1101
            for symbol, num in self.input_symbols.items():
                print(symbol, num, file=isymbols_file)

        # Write output symbols ("<symbol> <id>" per line)
        with open(osymbols_path, "w") as osymbols_file:
            # pylint: disable=E1101
            for symbol, num in self.output_symbols.items():
                print(symbol, num, file=osymbols_file)
def graph_to_fst(
    graph: nx.DiGraph,
    eps="<eps>",
    weight_key="weight",
    default_weight=0,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
) -> GraphFst:
    """Convert graph to OpenFST text format (single FST for all intents).

    Args:
        graph: intent graph (from sentences_to_graph)
        eps: symbol used for epsilon (empty) transitions
        weight_key: edge attribute holding the arc weight ("" to omit)
        default_weight: weight used when an edge lacks weight_key
        intent_filter: optional predicate selecting intents by name

    Returns:
        GraphFst with the combined FST text and symbol tables.
    """
    symbols: typing.Dict[str, int] = {eps: 0}
    input_symbols: typing.Dict[str, int] = {}
    output_symbols: typing.Dict[str, int] = {}
    n_data = graph.nodes(data=True)

    # start state
    start_node: int = next(n for n, data in n_data if data.get("start"))

    # Generate FST text
    with io.StringIO() as fst_file:
        final_states: typing.Set[int] = set()
        state_map: typing.Dict[int, int] = {}

        # Transitions
        for _, intent_node, intent_edge_data in graph.edges(start_node, data=True):
            intent_olabel: str = intent_edge_data["olabel"]
            # Strip the "__label__" prefix (9 characters)
            intent_name: str = intent_olabel[9:]

            # Filter intents by name
            if intent_filter and not intent_filter(intent_name):
                continue

            assert (
                " " not in intent_olabel
            ), f"Output symbol cannot contain whitespace: {intent_olabel}"

            # Map states starting from 0
            from_state = state_map.get(start_node, len(state_map))
            state_map[start_node] = from_state

            to_state = state_map.get(intent_node, len(state_map))
            state_map[intent_node] = to_state

            # Map labels (symbols) to integers
            isymbol = symbols.get(eps, len(symbols))
            symbols[eps] = isymbol
            input_symbols[eps] = isymbol

            osymbol = symbols.get(intent_olabel, len(symbols))
            symbols[intent_olabel] = osymbol
            output_symbols[intent_olabel] = osymbol

            if weight_key:
                weight = intent_edge_data.get(weight_key, default_weight)
                print(
                    f"{from_state} {to_state} {eps} {intent_olabel} {weight}",
                    file=fst_file,
                )
            else:
                # No weight
                print(f"{from_state} {to_state} {eps} {intent_olabel}", file=fst_file)

            # Add intent sub-graphs
            for edge in nx.edge_bfs(graph, intent_node):
                edge_data = graph.edges[edge]
                from_node, to_node = edge

                # Get input/output labels.
                # Empty string indicates epsilon transition (eps)
                ilabel = edge_data.get("ilabel", "") or eps
                olabel = edge_data.get("olabel", "") or eps

                # Check for whitespace
                assert (
                    " " not in ilabel
                ), f"Input symbol cannot contain whitespace: {ilabel}"
                assert (
                    " " not in olabel
                ), f"Output symbol cannot contain whitespace: {olabel}"

                # Map states starting from 0
                from_state = state_map.get(from_node, len(state_map))
                state_map[from_node] = from_state

                to_state = state_map.get(to_node, len(state_map))
                state_map[to_node] = to_state

                # Map labels (symbols) to integers
                isymbol = symbols.get(ilabel, len(symbols))
                symbols[ilabel] = isymbol
                input_symbols[ilabel] = isymbol

                osymbol = symbols.get(olabel, len(symbols))
                symbols[olabel] = osymbol
                output_symbols[olabel] = osymbol

                if weight_key:
                    weight = edge_data.get(weight_key, default_weight)
                    print(
                        f"{from_state} {to_state} {ilabel} {olabel} {weight}",
                        file=fst_file,
                    )
                else:
                    # No weight
                    print(f"{from_state} {to_state} {ilabel} {olabel}", file=fst_file)

                # Check if final state
                if n_data[from_node].get("final", False):
                    final_states.add(from_state)

                if n_data[to_node].get("final", False):
                    final_states.add(to_state)

        # Record final states (one state id per line, OpenFST text format)
        for final_state in final_states:
            print(final_state, file=fst_file)

        return GraphFst(
            intent_fst=fst_file.getvalue(),
            symbols=symbols,
            input_symbols=input_symbols,
            output_symbols=output_symbols,
        )
# -----------------------------------------------------------------------------
def lcm(*nums: int) -> int:
    """Returns the least common multiple of the given integers (1 if none)."""
    # Fold from the multiplicative identity: lcm(1, n) == n
    result = 1
    for n in nums:
        result = (result * n) // math.gcd(result, n)

    return result
# -----------------------------------------------------------------------------
def get_start_end_nodes(
    graph: nx.DiGraph,
) -> typing.Tuple[typing.Optional[int], typing.Optional[int]]:
    """Return (start, final) node ids; None for any that is missing."""
    start_node: typing.Optional[int] = None
    end_node: typing.Optional[int] = None

    for node, data in graph.nodes(data=True):
        if data.get("start", False):
            start_node = node
        elif data.get("final", False):
            end_node = node

        if (start_node is not None) and (end_node is not None):
            # Both found; no need to scan remaining nodes
            break

    return (start_node, end_node)
import logging
import subprocess
import typing
from dataclasses import dataclass
from pathlib import Path
from .const import IntentsType, ReplacementsType
from .jsgf import Expression, Rule, Sentence, Sequence, SlotReference, walk_expression
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
@dataclass
class StaticSlotInfo:
    """Name/path to a static slot text file."""

    # Slot name (file name inside a slots directory)
    name: str
    # Path to the file holding one slot value per line
    path: Path
@dataclass
class SlotProgramInfo:
    """Name/path/arguments for a slot program."""

    # Full slot key, including any ",arg" suffixes
    key: str
    # Program name (slot key without arguments)
    name: str
    # Path to the slot program executable
    path: Path
    # Arguments parsed from the slot key (text after commas), if any
    args: typing.Optional[typing.List[str]] = None
# -----------------------------------------------------------------------------
def get_slot_replacements(
    sentences: IntentsType,
    slots_dirs: typing.Optional[typing.List[Path]] = None,
    slot_programs_dirs: typing.Optional[typing.List[Path]] = None,
    slot_visitor: typing.Optional[
        typing.Callable[[Expression], typing.Union[bool, Expression]]
    ] = None,
) -> ReplacementsType:
    """Create replacement dictionary for referenced slots.

    Args:
        sentences: sentences/rules grouped by intent
        slots_dirs: directories containing static slot value files
        slot_programs_dirs: directories containing slot programs
        slot_visitor: optional visitor applied to each parsed slot value

    Returns:
        Dictionary mapping "$slot" keys to lists of parsed sentences.
    """
    replacements: ReplacementsType = {}
    slots_dirs = slots_dirs or []
    slot_programs_dirs = slot_programs_dirs or []

    # Gather used slot names
    slot_names: typing.Set[str] = set()
    for intent_name in sentences:
        for item in sentences[intent_name]:
            slot_names.update(get_slot_names(item))

    # Load slot values
    for slot_key in slot_names:
        # BUGFIX: keys in replacements are "$"-prefixed; the previous check
        # tested the bare slot_key and could never match.
        if f"${slot_key}" in replacements:
            # Skip already loaded slot
            continue

        # Find slot file/program in file system
        slot_info = find_slot(slot_key, slots_dirs, slot_programs_dirs)
        slot_values: typing.List[Expression] = []

        if isinstance(slot_info, StaticSlotInfo):
            # Parse each non-empty line as a JSGF sentence
            _LOGGER.debug("Loading slot %s from %s", slot_key, str(slot_info.path))
            with open(slot_info.path, "r") as slot_file:
                slot_values = _parse_slot_lines(slot_file, slot_visitor)
        elif isinstance(slot_info, SlotProgramInfo):
            # Generate values by running the slot program
            slot_command = [str(slot_info.path)] + (slot_info.args or [])
            _LOGGER.debug("Running program for slot %s: %s", slot_key, slot_command)

            # Parse each non-empty output line as a JSGF sentence
            output_lines = subprocess.check_output(
                slot_command, universal_newlines=True
            ).splitlines()
            slot_values = _parse_slot_lines(output_lines, slot_visitor)
            assert slot_values, f"No output from {slot_command}"
        else:
            _LOGGER.warning(
                "Failed to load file/program for slot %s (tried: %s, %s)",
                slot_key,
                slots_dirs,
                slot_programs_dirs,
            )

        # Replace $slot with sentences
        replacements[f"${slot_key}"] = slot_values

    return replacements


def _parse_slot_lines(
    lines: typing.Iterable[str],
    slot_visitor: typing.Optional[
        typing.Callable[[Expression], typing.Union[bool, Expression]]
    ] = None,
) -> typing.List[Expression]:
    """Parse non-empty lines as JSGF sentences, applying slot_visitor to each."""
    values: typing.List[Expression] = []
    for line in lines:
        line = line.strip()
        if line:
            sentence = Sentence.parse(line)
            if slot_visitor:
                walk_expression(sentence, slot_visitor)

            values.append(sentence)

    return values
# -----------------------------------------------------------------------------
def get_slot_names(item: typing.Union[Expression, Rule]) -> typing.Iterable[str]:
    """Yield referenced slot names from an expression or rule."""
    if isinstance(item, SlotReference):
        yield item.slot_name
    elif isinstance(item, Sequence):
        # Recurse into every item of the sequence
        for sub_item in item.items:
            yield from get_slot_names(sub_item)
    elif isinstance(item, Rule):
        # Rules reference slots through their bodies
        yield from get_slot_names(item.rule_body)
def split_slot_args(
    slot_name: str,
) -> typing.Tuple[str, typing.Optional[typing.List[str]]]:
    """Split slot name and arguments out (slot,arg1,arg2,...)"""
    if "," not in slot_name:
        # No arguments present
        return slot_name, None

    # First component is the name; the rest are arguments
    name, *args = slot_name.split(",")
    return name, args
# -----------------------------------------------------------------------------
def find_slot(
    slot_key: str, slots_dirs: typing.List[Path], slot_programs_dirs: typing.List[Path]
) -> typing.Optional[typing.Union[StaticSlotInfo, SlotProgramInfo]]:
    """Look up a static slot file or a slot program by key."""
    # Static slot files take precedence
    for slots_dir in slots_dirs:
        candidate = slots_dir / slot_key
        if candidate.is_file():
            return StaticSlotInfo(name=slot_key, path=candidate)

    # Fall back to slot programs; arguments follow the name after commas
    slot_name, slot_args = split_slot_args(slot_key)
    for programs_dir in slot_programs_dirs:
        candidate = programs_dir / slot_name
        if candidate.is_file():
            return SlotProgramInfo(
                key=slot_key, name=slot_name, path=candidate, args=slot_args
            )

    return None
import dataclasses
import datetime
import typing
from collections.abc import Mapping
from dataclasses import dataclass, field
from enum import Enum
from numbers import Number
from . import utils
@dataclass
class Entity:
    """Named entity from intent."""

    # Entity name
    entity: str
    # Resolved value (any type)
    value: typing.Any
    # Original text before substitution
    raw_value: str = ""
    # Origin of the value (e.g. slot name) — presumably; confirm with callers
    source: str = ""
    # Start/end positions in substituted text
    start: int = 0
    raw_start: int = 0
    end: int = 0
    raw_end: int = 0
    # Substituted and raw token lists
    tokens: typing.List[typing.Any] = field(default_factory=list)
    raw_tokens: typing.List[str] = field(default_factory=list)

    @property
    def value_dict(self):
        """Get dictionary representation of value with a "kind" label."""
        if isinstance(self.value, Mapping):
            return self.value

        kind = "Unknown"
        if isinstance(self.value, Number):
            kind = "Number"
        elif isinstance(self.value, datetime.datetime):
            # BUGFIX: datetime.datetime is a subclass of datetime.date, so
            # checking date first made the "Datetime" branch unreachable —
            # datetime values were mislabeled as "Date".
            kind = "Datetime"
        elif isinstance(self.value, datetime.date):
            kind = "Date"
        elif isinstance(self.value, datetime.time):
            kind = "Time"
        elif isinstance(self.value, datetime.timedelta):
            kind = "Duration"

        return {"kind": kind, "value": self.value}

    @classmethod
    def from_dict(cls, entity_dict: typing.Dict[str, typing.Any]) -> "Entity":
        """Create Entity from dictionary, dropping unknown fields."""
        return Entity(**utils.only_fields(cls, entity_dict))
@dataclass
class Intent:
    """Named intention with entities and slots."""

    # Intent name
    name: str
    # Recognition confidence (presumably in [0, 1] — confirm with recognizers)
    confidence: float = 0

    @classmethod
    def from_dict(cls, intent_dict: typing.Dict[str, typing.Any]) -> "Intent":
        """Create Intent from dictionary, dropping unknown fields."""
        return Intent(**utils.only_fields(cls, intent_dict))
@dataclass
class TagInfo:
    """Information used to process FST tags."""

    # Tag text
    tag: str
    # Start index (in substituted symbols — presumably; confirm with callers)
    start_index: int = 0
    # Start index in raw symbols
    raw_start_index: int = 0
    # Substituted symbols covered by the tag
    symbols: typing.List[str] = field(default_factory=list)
    # Raw symbols covered by the tag
    raw_symbols: typing.List[str] = field(default_factory=list)

    @classmethod
    def from_dict(cls, tag_dict: typing.Dict[str, typing.Any]) -> "TagInfo":
        """Create TagInfo from dictionary, dropping unknown fields."""
        return TagInfo(**utils.only_fields(cls, tag_dict))
class RecognitionResult(str, Enum):
    """Result of a recognition."""

    # Recognition produced a result
    SUCCESS = "success"
    # Recognition did not produce a result
    FAILURE = "failure"
@dataclass
class Recognition:
    """Output of intent recognition."""

    # Recognized intent (None when nothing was recognized)
    intent: typing.Optional[Intent] = None
    # Named entities extracted from the input
    entities: typing.List[Entity] = field(default_factory=list)
    # Recognized text (with substitutions — presumably; confirm)
    text: str = ""
    # Original input text
    raw_text: str = ""
    # Seconds spent recognizing
    recognize_seconds: float = 0
    # Substituted and raw token lists
    tokens: typing.List[typing.Any] = field(default_factory=list)
    raw_tokens: typing.List[str] = field(default_factory=list)

    # Transcription details
    wav_seconds: typing.Optional[float] = None
    transcribe_seconds: typing.Optional[float] = None
    speech_confidence: typing.Optional[float] = None
    wav_name: typing.Optional[str] = None

    def asdict(self) -> typing.Dict[str, typing.Any]:
        """Convert to dictionary (recursively, via dataclasses.asdict)."""
        return dataclasses.asdict(self)

    @classmethod
    def empty(cls) -> "Recognition":
        """Return an empty recognition (intent with an empty name)."""
        return Recognition(intent=Intent(name=""))

    @classmethod
    def from_dict(cls, recognition_dict: typing.Dict[str, typing.Any]) -> "Recognition":
        """Create Recognition from dictionary.

        Nested "intent", "entities", and "slots" keys are converted to
        their dataclass equivalents; unknown fields are dropped.

        NOTE: mutates recognition_dict (pops "intent"/"entities"/"slots").
        """
        intent_dict = recognition_dict.pop("intent", None)
        entity_dicts = recognition_dict.pop("entities", None)
        slots_dict = recognition_dict.pop("slots", None)
        recognition = Recognition(**utils.only_fields(cls, recognition_dict))

        if intent_dict:
            recognition.intent = Intent.from_dict(intent_dict)

        if entity_dicts:
            recognition.entities = [Entity.from_dict(e) for e in entity_dicts]

        if slots_dict:
            # NOTE(review): a "slots" key REPLACES any entities built from
            # "entities" above rather than extending them — confirm intended.
            recognition.entities = [
                Entity(entity=key, value=value) for key, value in slots_dict.items()
            ]

        return recognition
import io
import logging
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspynlu.jsgf import Expression, Word
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def sentences_to_graph(
    sentences_dict: typing.Dict[str, str],
    slots_dirs: typing.Optional[typing.List[Path]] = None,
    slot_programs_dirs: typing.Optional[typing.List[Path]] = None,
    replace_numbers: bool = True,
    language: str = "en",
    word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    add_intent_weights: bool = True,
) -> typing.Tuple[nx.DiGraph, typing.Dict[str, typing.Any]]:
    """Transform JSGF sentences into an intent graph.

    Args:
        sentences_dict: maps a source name to its sentences.ini content
        slots_dirs: directories containing static slot files
        slot_programs_dirs: directories containing slot programs
        replace_numbers: replace number ranges/words for the given language
        language: language code used for number replacement
        word_transform: optional transform applied to each word (e.g. str.lower)
        add_intent_weights: passed through to rhasspynlu.sentences_to_graph

    Returns:
        Tuple of (intent graph, slot replacements dictionary).
        NOTE: the original annotation claimed only a graph was returned;
        both values have always been returned.
    """
    slots_dirs = slots_dirs or []
    slot_programs_dirs = slot_programs_dirs or []

    # Parse sentences and convert to graph
    with io.StringIO() as ini_file:
        # Join as single ini file
        for lines in sentences_dict.values():
            print(lines, file=ini_file)
            print("", file=ini_file)

        # Parse JSGF sentences
        intents = rhasspynlu.parse_ini(ini_file.getvalue())

    # Split into sentences and rule/slot replacements
    sentences, replacements = rhasspynlu.ini_jsgf.split_rules(intents)

    word_visitor: typing.Optional[
        typing.Callable[[Expression], typing.Union[bool, Expression]]
    ] = None

    if word_transform:
        # Apply transformation to words
        def transform_visitor(word: Expression):
            if isinstance(word, Word):
                assert word_transform
                new_text = word_transform(word.text)

                # Preserve case by using original text as substitution
                if (word.substitution is None) and (new_text != word.text):
                    word.substitution = word.text

                word.text = new_text

            return word

        word_visitor = transform_visitor

    # Apply case/number transforms
    if word_visitor or replace_numbers:
        for intent_sentences in sentences.values():
            for sentence in intent_sentences:
                if replace_numbers:
                    # Replace number ranges with slot references
                    # type: ignore
                    rhasspynlu.jsgf.walk_expression(
                        sentence, rhasspynlu.number_range_transform, replacements
                    )

                if word_visitor:
                    # Do case transformation
                    # type: ignore
                    rhasspynlu.jsgf.walk_expression(
                        sentence, word_visitor, replacements
                    )

    # Load slot values
    slot_replacements = rhasspynlu.get_slot_replacements(
        intents,
        slots_dirs=slots_dirs,
        slot_programs_dirs=slot_programs_dirs,
        slot_visitor=word_visitor,
    )

    # Merge with existing replacements
    for slot_key, slot_values in slot_replacements.items():
        replacements[slot_key] = slot_values

    if replace_numbers:
        # Do single number transformations
        for intent_sentences in sentences.values():
            for sentence in intent_sentences:
                rhasspynlu.jsgf.walk_expression(
                    sentence,
                    lambda w: rhasspynlu.number_transform(w, language),
                    replacements,
                )

    # Convert to directed graph
    intent_graph = rhasspynlu.sentences_to_graph(
        sentences, replacements=replacements, add_intent_weights=add_intent_weights
    )

    return intent_graph, slot_replacements
import io
import logging
import re
import subprocess
import typing
import wave
from pathlib import Path
import rhasspynlu
WHITESPACE_PATTERN = re.compile(r"\s+")
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class FunctionLoggingHandler(logging.Handler):
    """Logging handler that calls a function for each formatted log message.

    Args:
        func: callable that receives each formatted message string
        log_format: logging format string used to format records
    """

    def __init__(
        self,
        func,
        log_format: str = "[%(levelname)s:%(asctime)s] %(name)s: %(message)s",
    ):
        logging.Handler.__init__(self)
        self.func = func
        self.formatter = logging.Formatter(log_format)

    def emit(self, record):
        """Format the record and pass it to the callback.

        Overrides emit() (the documented logging.Handler extension point)
        instead of handle(), so the base handle() still applies handler-level
        filters and I/O locking before delegating here.
        """
        self.func(self.formatter.format(record))
# -----------------------------------------------------------------------------
def read_dict(
    dict_file: typing.Iterable[str],
    word_dict: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    transform: typing.Optional[typing.Callable[[str], str]] = None,
    silence_words: typing.Optional[typing.Set[str]] = None,
) -> typing.Dict[str, typing.List[str]]:
    """
    Loads a CMU/Julius word dictionary, optionally into an existing Python dictionary.
    """
    pronunciations = word_dict if word_dict is not None else {}

    for line_number, raw_line in enumerate(dict_file, start=1):
        text = raw_line.strip()
        if not text:
            continue

        try:
            # Split on explicit whitespace only (avoid 0xA0)
            first_field, *rest = re.split(r"[ \t]+", text)

            # Drop Julius extras ("[...]", "@...") from the pronunciation
            pronounce = " ".join(token for token in rest if token[0] not in {"[", "@"})

            # Strip variant marker, e.g. "word(2)" -> "word"
            base_word = first_field.split("(")[0]

            # Julius compound format: word1+word2
            for sub_word in base_word.split("+"):
                # Don't transform silence words
                if transform and (
                    (silence_words is None) or (sub_word not in silence_words)
                ):
                    sub_word = transform(sub_word)

                pronunciations.setdefault(sub_word, []).append(pronounce)
        except Exception as e:
            _LOGGER.warning("read_dict: %s (line %s)", e, line_number)

    return pronunciations
# -----------------------------------------------------------------------------
def recursive_remove(
    base_dict: typing.Dict[typing.Any, typing.Any],
    new_dict: typing.Dict[typing.Any, typing.Any],
) -> None:
    """Recursively removes values from new dictionary that are already in base dictionary"""
    # Snapshot the keys so deletion during iteration is safe
    for key in list(new_dict.keys()):
        if key not in base_dict:
            continue

        value = new_dict[key]
        if isinstance(value, dict):
            # Prune nested dictionary, then drop it entirely if emptied
            recursive_remove(base_dict[key], value)
            if not value:
                del new_dict[key]
        elif value == base_dict[key]:
            # Identical leaf value already present in base
            del new_dict[key]
# -----------------------------------------------------------------------------
def buffer_to_wav(buffer: bytes) -> bytes:
    """Wraps a buffer of raw audio data (16-bit, 16Khz mono) in a WAV"""
    with io.BytesIO() as wav_io:
        writer: wave.Wave_write = wave.open(wav_io, mode="wb")
        with writer:
            # Fixed format: 16-bit (2 bytes) mono at 16 kHz
            writer.setnchannels(1)
            writer.setsampwidth(2)
            writer.setframerate(16000)
            writer.writeframes(buffer)

        return wav_io.getvalue()
def get_wav_duration(wav_bytes: bytes) -> float:
    """Return the real-time duration of a WAV file"""
    with io.BytesIO(wav_bytes) as wav_io:
        reader: wave.Wave_read = wave.open(wav_io, "rb")
        with reader:
            # Duration in seconds = frame count / frame rate
            return reader.getnframes() / float(reader.getframerate())
def wav_to_buffer(wav_bytes: bytes) -> bytes:
    """Return the raw audio of a WAV file"""
    with io.BytesIO(wav_bytes) as wav_io:
        reader: wave.Wave_read = wave.open(wav_io, "rb")
        with reader:
            # Read every frame in one call
            return reader.readframes(reader.getnframes())
# -----------------------------------------------------------------------------
def load_phoneme_examples(
    path: typing.Union[str, Path]
) -> typing.Dict[str, typing.Dict[str, str]]:
    """Loads example words and pronunciations for each phoneme.

    Each non-comment line looks like: "<phoneme> <word> <phonemes...>".
    """
    examples = {}
    with open(path, "r") as example_file:
        for raw_line in example_file:
            text = raw_line.strip()
            if (not text) or text.startswith("#"):
                # Skip blanks and comments
                continue

            fields = split_whitespace(text)
            examples[fields[0]] = {"word": fields[1], "phonemes": " ".join(fields[2:])}

    return examples
def load_phoneme_map(path: typing.Union[str, Path]) -> typing.Dict[str, str]:
    """Load phoneme map from CMU (Sphinx) phonemes to eSpeak phonemes."""
    phoneme_map = {}
    with open(path, "r") as phoneme_file:
        for raw_line in phoneme_file:
            text = raw_line.strip()
            if (not text) or text.startswith("#"):
                # Skip blanks and comments
                continue

            # "<sphinx phoneme> <espeak phoneme(s)>": split only on the first gap
            fields = split_whitespace(text, maxsplit=1)
            phoneme_map[fields[0]] = fields[1]

    return phoneme_map
# -----------------------------------------------------------------------------
def get_ini_paths(
    sentences_ini: Path, sentences_dir: typing.Optional[Path] = None
) -> typing.List[Path]:
    """Get paths to all .ini files in profile."""
    paths: typing.List[Path] = []

    # Main sentences.ini file, if present
    if sentences_ini.is_file():
        paths.append(sentences_ini)

    # Additional .ini files from the intents directory (recursive)
    if sentences_dir and sentences_dir.is_dir():
        paths.extend(sentences_dir.rglob("*.ini"))

    return paths
def get_all_intents(ini_paths: typing.List[Path]) -> typing.Dict[str, typing.Any]:
    """Get intents from all .ini files in profile."""
    try:
        with io.StringIO() as combined:
            # Concatenate every file, separated by a blank line
            for ini_path in ini_paths:
                combined.write(ini_path.read_text())
                print("", file=combined)

            return rhasspynlu.parse_ini(combined.getvalue())
    except Exception:
        # Best-effort: log and return no intents on any parse failure
        _LOGGER.exception("Failed to parse %s", ini_paths)
        return {}
# -----------------------------------------------------------------------------
def split_whitespace(s: str, **kwargs):
    """Split a string by whitespace of any type/length."""
    # Same pattern as the module-level WHITESPACE_PATTERN; the re module
    # caches compiled patterns, so behavior and cost are equivalent.
    return re.split(r"\s+", s, **kwargs)
# -----------------------------------------------------------------------------
def get_espeak_wav(word: str, voice: typing.Optional[str] = None) -> bytes:
    """Get eSpeak WAV pronunciation for a word."""
    try:
        command = ["espeak", "--stdout", "-s", "80"]
        if voice:
            # Optional eSpeak voice selection
            command.extend(["-v", str(voice)])

        command.append(word)
        _LOGGER.debug(command)
        return subprocess.check_output(command)
    except Exception:
        # Best-effort: log and fall through to empty audio
        _LOGGER.exception("get_espeak_wav")

    return bytes()
def get_espeak_phonemes(word: str) -> str:
    """Get eSpeak phonemes for a word."""
    try:
        command = ["espeak", "-q", "-x", word]
        _LOGGER.debug(command)
        return subprocess.check_output(command, universal_newlines=True).strip()
    except Exception:
        # Best-effort: log and fall through to empty string
        _LOGGER.exception("get_espeak_phonemes")

    return ""
# Rhasspy Silence
[](https://github.com/rhasspy/rhasspy-silence/actions)
[](https://github.com/rhasspy/rhasspy-silence/blob/master/LICENSE)
Detect speech/silence in voice commands with [webrtcvad](https://github.com/wiseman/py-webrtcvad).
## Requirements
* Python 3.7
* [webrtcvad](https://github.com/wiseman/py-webrtcvad)
## Installation
```bash
$ git clone https://github.com/rhasspy/rhasspy-silence
$ cd rhasspy-silence
$ ./configure
$ make
$ make install
```
## How it Works
`rhasspy-silence` uses a state machine to decide when a voice command has started and stopped. The variables that control this machine are:
* `skip_seconds` - seconds of audio to skip before voice command detection starts
* `speech_seconds` - seconds of speech before voice command has begun
* `before_seconds` - seconds of audio to keep before voice command has begun
* `min_seconds` - minimum length of voice command (seconds)
* `max_seconds` - maximum length of voice command before timeout (seconds, None for no timeout)
* `silence_seconds` - seconds of silence before a voice command has finished
The sensitivity of `webrtcvad` is set with `vad_mode`, which is a value between 1 and 3 with 1 being the most sensitive.
[](docs/img/state_machine.svg)
If there is no timeout, the final voice command audio will consist of:
* `before_seconds` worth of audio before the voice command had started
* At least `min_seconds` of audio during the voice command
### Energy-Based Silence Detection
Besides just `webrtcvad`, silence detection using the denoised energy of the incoming audio is also supported. There are two energy-based methods:
* Threshold - simple threshold where energy above is considered speech and energy below is silence
* Max/Current Ratio - ratio of maximum energy and current energy value is compared to a threshold
* Ratio below threshold is considered speech, ratio above is silence
* Maximum energy value can be provided (static) or set from observed audio (dynamic)
Both of the energy methods can be combined with `webrtcvad`. When combined, audio is considered to be silence unless **both** methods detect speech - i.e., `webrtcvad` classifies the audio chunk as speech and the energy value/ratio is above threshold. You can even combine all three methods using `SilenceMethod.ALL`.
# Command Line Interface
A CLI is included to test out the different parameters and silence detection methods. After installation, pipe raw 16-bit 16Khz mono audio to the `bin/rhasspy-silence` script:
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | bin/rhasspy-silence <ARGS>
```
The characters printed to the console indicate how `rhasspy-silence` is classifying audio frames:
* `.` - silence
* `!` - speech
* `S` - transition from silence to speech
* `-` - transition from speech to silence
* `[` - start of voice command
* `]` - end of voice command
* `T` - timeout
By changing the `--output-type` argument, you can have the current audio energy or max/current ratio printed instead. These values can then be used to set threshold values for further testing.
## CLI Arguments
```
usage: rhasspy-silence [-h]
[--output-type {speech_silence,current_energy,max_current_ratio}]
[--chunk-size CHUNK_SIZE] [--skip-seconds SKIP_SECONDS]
[--max-seconds MAX_SECONDS] [--min-seconds MIN_SECONDS]
[--speech-seconds SPEECH_SECONDS]
[--silence-seconds SILENCE_SECONDS]
[--before-seconds BEFORE_SECONDS]
[--sensitivity {1,2,3}]
[--current-threshold CURRENT_THRESHOLD]
[--max-energy MAX_ENERGY]
[--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD]
[--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}]
[--debug]
optional arguments:
-h, --help show this help message and exit
--output-type {speech_silence,current_energy,max_current_ratio}
Type of printed output
--chunk-size CHUNK_SIZE
Size of audio chunks. Must be 10, 20, or 30 ms for
VAD.
--skip-seconds SKIP_SECONDS
Seconds of audio to skip before a voice command
--max-seconds MAX_SECONDS
Maximum number of seconds for a voice command
--min-seconds MIN_SECONDS
Minimum number of seconds for a voice command
--speech-seconds SPEECH_SECONDS
Consecutive seconds of speech before start
--silence-seconds SILENCE_SECONDS
Consecutive seconds of silence before stop
--before-seconds BEFORE_SECONDS
Seconds to record before start
--sensitivity {1,2,3}
VAD sensitivity (1-3)
--current-threshold CURRENT_THRESHOLD
Debiased energy threshold of current audio frame
--max-energy MAX_ENERGY
Fixed maximum energy for ratio calculation (default:
observed)
--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD
Threshold of ratio between max energy and current
audio frame
--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}
Method for detecting silence
--debug Print DEBUG messages to the console
```
| /rhasspy-silence-0.4.0.tar.gz/rhasspy-silence-0.4.0/README.md | 0.545044 | 0.912981 | README.md | pypi |
import typing
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
class VoiceCommandResult(str, Enum):
    """Success/failure of voice command recognition."""
    SUCCESS = "success"  # a complete voice command was captured
    FAILURE = "failure"  # capture failed (e.g. timed out before silence)
class VoiceCommandEventType(str, Enum):
    """Possible event types during voice command recognition."""
    STARTED = "started"  # start of voice command detected
    SPEECH = "speech"  # transition from silence to speech
    SILENCE = "silence"  # transition from speech to silence
    STOPPED = "stopped"  # end of voice command detected
    TIMEOUT = "timeout"  # maximum command length exceeded
@dataclass
class VoiceCommandEvent:
    """Speech/silence events."""
    type: VoiceCommandEventType  # what happened
    time: float  # seconds of processed audio when the event occurred
@dataclass
class VoiceCommand:
    """Result of voice command recognition."""
    result: VoiceCommandResult  # success or failure
    audio_data: typing.Optional[bytes] = None  # captured audio (None on failure)
    events: typing.List[VoiceCommandEvent] = field(default_factory=list)  # event log
class VoiceCommandRecorder(ABC):
    """Abstract interface for segmenting an audio stream into a voice command."""

    @abstractmethod
    def start(self):
        """Begin a new voice command."""

    @abstractmethod
    def stop(self) -> bytes:
        """Free any resources and return the audio recorded so far."""

    @abstractmethod
    def process_chunk(self, audio_chunk: bytes) -> typing.Optional[VoiceCommand]:
        """Process a single chunk of audio data; return a result when complete."""
class SilenceMethod(str, Enum):
    """Method used to determine if an audio frame contains silence.
    Values
    ------
    VAD_ONLY
        Only use webrtcvad
    RATIO_ONLY
        Only use max/current energy ratio threshold
    CURRENT_ONLY
        Only use current energy threshold
    VAD_AND_RATIO
        Use webrtcvad and max/current energy ratio threshold
    VAD_AND_CURRENT
        Use webrtcvad and current energy threshold
    ALL
        Use webrtcvad, max/current energy ratio, and current energy threshold
    """
    VAD_ONLY = "vad_only"  # webrtcvad only
    RATIO_ONLY = "ratio_only"  # max/current energy ratio only
    CURRENT_ONLY = "current_only"  # current energy threshold only
    VAD_AND_RATIO = "vad_and_ratio"  # webrtcvad + energy ratio
    VAD_AND_CURRENT = "vad_and_current"  # webrtcvad + current energy
    ALL = "all"  # all three methods combined
import audioop
import logging
import math
import typing
from collections import deque
import webrtcvad
from .const import (
SilenceMethod,
VoiceCommand,
VoiceCommandEvent,
VoiceCommandEventType,
VoiceCommandRecorder,
VoiceCommandResult,
)
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class WebRtcVadRecorder(VoiceCommandRecorder):
    """Detect speech/silence using webrtcvad.

    Implements a small state machine over fixed-size audio chunks:
    skip -> waiting for speech -> in phrase -> after phrase -> stopped/timeout.

    Attributes
    ----------
    vad_mode: int = 3
        Sensitivity of webrtcvad (1-3), 1 is most sensitive
    sample_rate: int = 16000
        Sample rate of audio chunks (hertz)
    chunk_size: int = 960
        Size of each processed audio chunk in bytes; when VAD is used it must
        correspond to 10, 20, or 30 ms of 16-bit mono audio
    skip_seconds: float = 0
        Seconds of audio to skip before voice command detection starts
    speech_seconds: float = 0.3
        Seconds of speech before voice command has begun
    before_seconds: float = 0.5
        Seconds of audio to keep before voice command has begun
    min_seconds: float = 1.0
        Minimum length of voice command (seconds)
    max_seconds: Optional[float] = 30.0
        Maximum length of voice command before timeout (seconds, None for no timeout)
    silence_seconds: float = 0.5
        Seconds of silence before a voice command has finished
    max_energy: Optional[float] = None
        Maximum denoise energy value (None for dynamic setting from observed audio)
    max_current_ratio_threshold: Optional[float] = None
        Ratio of max/current energy below which audio is considered speech
    current_energy_threshold: Optional[float] = None
        Energy threshold above which audio is considered speech
    silence_method: SilenceMethod = "vad_only"
        Method for deciding if an audio chunk contains silence or speech
    """
    def __init__(
        self,
        vad_mode: int = 3,
        sample_rate: int = 16000,
        chunk_size: int = 960,
        skip_seconds: float = 0,
        min_seconds: float = 1,
        max_seconds: typing.Optional[float] = 30,
        speech_seconds: float = 0.3,
        silence_seconds: float = 0.5,
        before_seconds: float = 0.5,
        max_energy: typing.Optional[float] = None,
        max_current_ratio_threshold: typing.Optional[float] = None,
        current_energy_threshold: typing.Optional[float] = None,
        silence_method: SilenceMethod = SilenceMethod.VAD_ONLY,
    ):
        self.vad_mode = vad_mode
        self.sample_rate = sample_rate
        self.chunk_size = chunk_size
        self.skip_seconds = skip_seconds
        self.min_seconds = min_seconds
        self.max_seconds = max_seconds
        self.speech_seconds = speech_seconds
        self.silence_seconds = silence_seconds
        self.before_seconds = before_seconds
        self.max_energy = max_energy
        # Track observed maximum energy only when no fixed maximum was given
        self.dynamic_max_energy = max_energy is None
        self.max_current_ratio_threshold = max_current_ratio_threshold
        self.current_energy_threshold = current_energy_threshold
        self.silence_method = silence_method
        # Verify settings
        # Decide which detectors participate, based on silence_method
        if self.silence_method in [
            SilenceMethod.VAD_ONLY,
            SilenceMethod.VAD_AND_RATIO,
            SilenceMethod.VAD_AND_CURRENT,
            SilenceMethod.ALL,
        ]:
            self.use_vad = True
        else:
            self.use_vad = False
        if self.silence_method in [
            SilenceMethod.VAD_AND_RATIO,
            SilenceMethod.RATIO_ONLY,
            SilenceMethod.ALL,
        ]:
            self.use_ratio = True
            assert (
                self.max_current_ratio_threshold is not None
            ), "Max/current ratio threshold is required"
        else:
            self.use_ratio = False
        if self.silence_method in [
            SilenceMethod.VAD_AND_CURRENT,
            SilenceMethod.CURRENT_ONLY,
            SilenceMethod.ALL,
        ]:
            self.use_current = True
            assert (
                self.current_energy_threshold is not None
            ), "Current energy threshold is required"
        else:
            self.use_current = False
        # Voice detector
        self.vad: typing.Optional[webrtcvad.Vad] = None
        if self.use_vad:
            # NOTE(review): webrtcvad itself accepts modes 0-3; this class
            # restricts to 1-3 — confirm excluding 0 is intended.
            assert self.vad_mode in range(
                1, 4
            ), f"VAD mode must be 1-3 (got {vad_mode})"
            # Chunk duration in ms, assuming 16-bit (2 bytes/sample) mono audio
            chunk_ms = 1000 * ((self.chunk_size / 2) / self.sample_rate)
            assert chunk_ms in [10, 20, 30], (
                "Sample rate and chunk size must make for 10, 20, or 30 ms buffer sizes,"
                + f" assuming 16-bit mono audio (got {chunk_ms} ms)"
            )
            self.vad = webrtcvad.Vad()
            self.vad.set_mode(self.vad_mode)
        # Seconds of audio represented by one chunk
        self.seconds_per_buffer = self.chunk_size / self.sample_rate
        # Store some number of seconds of audio data immediately before voice command starts
        self.before_buffers = int(
            math.ceil(self.before_seconds / self.seconds_per_buffer)
        )
        # Pre-compute values
        self.speech_buffers = int(
            math.ceil(self.speech_seconds / self.seconds_per_buffer)
        )
        self.skip_buffers = int(math.ceil(self.skip_seconds / self.seconds_per_buffer))
        # State
        self.events: typing.List[VoiceCommandEvent] = []  # event log for current command
        # Ring buffer of chunks preceding the command start
        self.before_phrase_chunks: typing.Deque[bytes] = deque(
            maxlen=self.before_buffers
        )
        self.phrase_buffer: bytes = bytes()  # audio captured during the command
        self.max_buffers: typing.Optional[int] = None  # countdown to timeout
        self.min_phrase_buffers: int = 0  # countdown to minimum command length
        self.skip_buffers_left: int = 0  # countdown of initial chunks to skip
        self.speech_buffers_left: int = 0  # speech chunks still needed to start
        self.last_speech: bool = False  # previous chunk's speech/silence verdict
        self.in_phrase: bool = False  # currently inside a voice command
        self.after_phrase: bool = False  # past minimum length, waiting for silence
        self.silence_buffers: int = 0  # countdown of silence chunks to stop
        self.current_seconds: float = 0  # seconds of audio processed so far
        self.current_chunk: bytes = bytes()  # partial chunk awaiting more audio
    def start(self):
        """Begin new voice command."""
        # State
        # Reset the full state machine for a fresh command
        self.events.clear()
        self.before_phrase_chunks.clear()
        self.phrase_buffer = bytes()
        if self.max_seconds:
            self.max_buffers = int(
                math.ceil(self.max_seconds / self.seconds_per_buffer)
            )
        else:
            self.max_buffers = None
        self.min_phrase_buffers = int(
            math.ceil(self.min_seconds / self.seconds_per_buffer)
        )
        self.speech_buffers_left = self.speech_buffers
        self.skip_buffers_left = self.skip_buffers
        self.last_speech = False
        self.in_phrase = False
        self.after_phrase = False
        self.silence_buffers = int(
            math.ceil(self.silence_seconds / self.seconds_per_buffer)
        )
        self.current_seconds: float = 0
        self.current_chunk: bytes = bytes()
    def stop(self) -> bytes:
        """Free any resources and return recorded audio."""
        # Concatenate pre-command audio with in-command audio
        before_buffer = bytes()
        for before_chunk in self.before_phrase_chunks:
            before_buffer += before_chunk
        audio_data = before_buffer + self.phrase_buffer
        # Clear state
        self.before_phrase_chunks.clear()
        self.events.clear()
        self.phrase_buffer = bytes()
        self.current_chunk = bytes()
        # Return leftover audio
        return audio_data
    def process_chunk(self, audio_chunk: bytes) -> typing.Optional[VoiceCommand]:
        """Process a single chunk of audio data.

        Returns a VoiceCommand on completion (success) or timeout (failure);
        otherwise None while more audio is needed.
        """
        # Add to overall buffer
        self.current_chunk += audio_chunk
        # Process audio in exact chunk(s)
        # NOTE(review): uses '>' rather than '>=', so a buffered chunk of
        # exactly chunk_size bytes is not processed until more audio arrives —
        # confirm '>=' was not intended.
        while len(self.current_chunk) > self.chunk_size:
            # Extract chunk
            chunk = self.current_chunk[: self.chunk_size]
            self.current_chunk = self.current_chunk[self.chunk_size :]
            if self.skip_buffers_left > 0:
                # Skip audio at beginning
                self.skip_buffers_left -= 1
                continue
            if self.in_phrase:
                self.phrase_buffer += chunk
            else:
                self.before_phrase_chunks.append(chunk)
            self.current_seconds += self.seconds_per_buffer
            # Check maximum number of seconds to record
            if self.max_buffers:
                self.max_buffers -= 1
                if self.max_buffers <= 0:
                    # Timeout
                    self.events.append(
                        VoiceCommandEvent(
                            type=VoiceCommandEventType.TIMEOUT,
                            time=self.current_seconds,
                        )
                    )
                    return VoiceCommand(
                        result=VoiceCommandResult.FAILURE, events=self.events
                    )
            # Detect speech in chunk
            is_speech = not self.is_silence(chunk)
            if is_speech and not self.last_speech:
                # Silence -> speech
                self.events.append(
                    VoiceCommandEvent(
                        type=VoiceCommandEventType.SPEECH, time=self.current_seconds
                    )
                )
            elif not is_speech and self.last_speech:
                # Speech -> silence
                self.events.append(
                    VoiceCommandEvent(
                        type=VoiceCommandEventType.SILENCE, time=self.current_seconds
                    )
                )
            self.last_speech = is_speech
            # Handle state changes
            if is_speech and self.speech_buffers_left > 0:
                # Still accumulating enough consecutive speech to start
                self.speech_buffers_left -= 1
            elif is_speech and not self.in_phrase:
                # Start of phrase
                self.events.append(
                    VoiceCommandEvent(
                        type=VoiceCommandEventType.STARTED, time=self.current_seconds
                    )
                )
                self.in_phrase = True
                self.after_phrase = False
                self.min_phrase_buffers = int(
                    math.ceil(self.min_seconds / self.seconds_per_buffer)
                )
            elif self.in_phrase and (self.min_phrase_buffers > 0):
                # In phrase, before minimum seconds
                self.min_phrase_buffers -= 1
            elif not is_speech:
                # Outside of speech
                if not self.in_phrase:
                    # Reset
                    self.speech_buffers_left = self.speech_buffers
                elif self.after_phrase and (self.silence_buffers > 0):
                    # After phrase, before stop
                    self.silence_buffers -= 1
                elif self.after_phrase and (self.silence_buffers <= 0):
                    # Phrase complete
                    self.events.append(
                        VoiceCommandEvent(
                            type=VoiceCommandEventType.STOPPED,
                            time=self.current_seconds,
                        )
                    )
                    # Merge before/during command audio data
                    before_buffer = bytes()
                    for before_chunk in self.before_phrase_chunks:
                        before_buffer += before_chunk
                    return VoiceCommand(
                        result=VoiceCommandResult.SUCCESS,
                        audio_data=before_buffer + self.phrase_buffer,
                        events=self.events,
                    )
                elif self.in_phrase and (self.min_phrase_buffers <= 0):
                    # Transition to after phrase
                    self.after_phrase = True
                    self.silence_buffers = int(
                        math.ceil(self.silence_seconds / self.seconds_per_buffer)
                    )
        return None
    # -------------------------------------------------------------------------
    def is_silence(self, chunk: bytes) -> bool:
        """True if audio chunk contains silence.

        A chunk counts as speech only if every enabled method says speech;
        any method reporting silence makes the combined verdict silence... i.e.
        all_silence stays True unless every enabled check votes speech.
        """
        all_silence = True
        if self.use_vad:
            # Use VAD to detect speech
            assert self.vad is not None
            all_silence = all_silence and (
                not self.vad.is_speech(chunk, self.sample_rate)
            )
        if self.use_ratio or self.use_current:
            # Compute debiased energy of audio chunk
            energy = WebRtcVadRecorder.get_debiased_energy(chunk)
            if self.use_ratio:
                # Ratio of max/current energy compared to threshold
                if self.dynamic_max_energy:
                    # Overwrite max energy
                    if self.max_energy is None:
                        self.max_energy = energy
                    else:
                        self.max_energy = max(energy, self.max_energy)
                assert self.max_energy is not None
                if energy > 0:
                    ratio = self.max_energy / energy
                else:
                    # Not sure what to do here
                    ratio = 0
                assert self.max_current_ratio_threshold is not None
                all_silence = all_silence and (ratio > self.max_current_ratio_threshold)
            # NOTE(review): because this is an elif, the current-energy check
            # never runs when use_ratio is also enabled (SilenceMethod.ALL),
            # despite SilenceMethod.ALL documenting all three checks — confirm.
            elif self.use_current:
                # Current energy compared to threshold
                assert self.current_energy_threshold is not None
                all_silence = all_silence and (energy < self.current_energy_threshold)
        return all_silence
    # -------------------------------------------------------------------------
    @staticmethod
    def get_debiased_energy(audio_data: bytes) -> float:
        """Compute RMS of debiased audio.

        Subtracts the (negated) RMS as a DC offset from every 16-bit sample,
        then takes the RMS of the result.
        """
        # Thanks to the speech_recognition library!
        # https://github.com/Uberi/speech_recognition/blob/master/speech_recognition/__init__.py
        energy = -audioop.rms(audio_data, 2)
        # Little-endian 16-bit encoding of the offset, repeated per sample
        energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF])
        debiased_energy = audioop.rms(
            audioop.add(audio_data, energy_bytes * (len(audio_data) // 2), 2), 2
        )
        # Probably actually audio if > 30
        return debiased_energy
import logging
import time
import typing
from pathlib import Path
import rhasspysnips_nlu
from snips_nlu import SnipsNLUEngine
from snips_nlu.default_configs import DEFAULT_CONFIGS
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.intent import Intent, Slot, SlotRange
from rhasspyhermes.nlu import (
NluError,
NluIntent,
NluIntentNotRecognized,
NluIntentParsed,
NluQuery,
NluTrain,
NluTrainSuccess,
)
_LOGGER = logging.getLogger("rhasspysnips_nlu_hermes")
# -----------------------------------------------------------------------------
class NluHermesMqtt(HermesClient):
    """Hermes MQTT server for Snips NLU.

    Subscribes to NluQuery (intent recognition requests) and NluTrain
    (training requests) and publishes the corresponding Hermes responses.
    """
    def __init__(
        self,
        client,
        snips_language: str,
        engine_path: typing.Optional[Path] = None,
        dataset_path: typing.Optional[Path] = None,
        word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        no_overwrite_train: bool = False,
        site_ids: typing.Optional[typing.List[str]] = None,
    ):
        """Initialize client and subscribe to NLU query/train messages.

        Args:
            client: MQTT client passed through to HermesClient
            snips_language: language key into snips_nlu DEFAULT_CONFIGS
            engine_path: where a trained Snips engine is loaded from/saved to
            dataset_path: where the generated training dataset is written
            word_transform: optional transform applied to query text (e.g. str.lower)
            no_overwrite_train: stored flag; not used within this class as shown
            site_ids: Hermes site ids to serve
        """
        super().__init__("rhasspysnips_nlu_hermes", client, site_ids=site_ids)
        self.subscribe(NluQuery, NluTrain)
        self.snips_language = snips_language
        self.engine_path = engine_path
        self.dataset_path = dataset_path
        self.word_transform = word_transform
        self.no_overwrite_train = no_overwrite_train
        # Lazily loaded/trained Snips engine (see maybe_load_engine)
        self.engine: typing.Optional[SnipsNLUEngine] = None
    # -------------------------------------------------------------------------
    async def handle_query(
        self, query: NluQuery
    ) -> typing.AsyncIterable[
        typing.Union[
            NluIntentParsed,
            typing.Tuple[NluIntent, TopicArgs],
            NluIntentNotRecognized,
            NluError,
        ]
    ]:
        """Do intent recognition.

        Yields NluIntentParsed + NluIntent on success, NluIntentNotRecognized
        when no intent matches, or NluError on any exception.
        """
        original_input = query.input
        try:
            self.maybe_load_engine()
            assert self.engine, "Snips engine not loaded. You may need to train."
            input_text = query.input
            # Fix casing for output event
            if self.word_transform:
                input_text = self.word_transform(input_text)
            # Do parsing
            result = self.engine.parse(input_text, query.intent_filter)
            intent_name = result.get("intent", {}).get("intentName")
            if intent_name:
                # Convert Snips slot dictionaries to Hermes Slot objects
                slots = [
                    Slot(
                        slot_name=s["slotName"],
                        entity=s["entity"],
                        value=s["value"],
                        raw_value=s["rawValue"],
                        range=SlotRange(
                            start=s["range"]["start"], end=s["range"]["end"]
                        ),
                    )
                    for s in result.get("slots", [])
                ]
                if query.custom_entities:
                    # Copy user-defined entities
                    for entity_name, entity_value in query.custom_entities.items():
                        slots.append(
                            Slot(
                                entity=entity_name,
                                confidence=1.0,
                                value={"value": entity_value},
                            )
                        )
                # intentParsed
                # NOTE(review): confidence_score is hard-coded to 1.0 rather
                # than taken from the Snips parse result — confirm intended.
                yield NluIntentParsed(
                    input=query.input,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    intent=Intent(intent_name=intent_name, confidence_score=1.0),
                    slots=slots,
                )
                # intent
                yield (
                    NluIntent(
                        input=query.input,
                        id=query.id,
                        site_id=query.site_id,
                        session_id=query.session_id,
                        intent=Intent(intent_name=intent_name, confidence_score=1.0),
                        slots=slots,
                        asr_tokens=[NluIntent.make_asr_tokens(query.input.split())],
                        asr_confidence=query.asr_confidence,
                        raw_input=original_input,
                        wakeword_id=query.wakeword_id,
                        lang=query.lang,
                        custom_data=query.custom_data,
                    ),
                    {"intent_name": intent_name},
                )
            else:
                # Not recognized
                yield NluIntentNotRecognized(
                    input=query.input,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    custom_data=query.custom_data,
                )
        except Exception as e:
            _LOGGER.exception("handle_query")
            yield NluError(
                site_id=query.site_id,
                session_id=query.session_id,
                error=str(e),
                context=original_input,
            )
    # -------------------------------------------------------------------------
    async def handle_train(
        self, train: NluTrain, site_id: str = "default"
    ) -> typing.AsyncIterable[
        typing.Union[typing.Tuple[NluTrainSuccess, TopicArgs], NluError]
    ]:
        """Transform sentences/slots into Snips NLU training dataset.

        On success the newly trained engine replaces self.engine and
        NluTrainSuccess is yielded; on failure an NluError is yielded.
        """
        try:
            assert train.sentences, "No training sentences"
            start_time = time.perf_counter()
            new_engine = rhasspysnips_nlu.train(
                sentences_dict=train.sentences,
                language=self.snips_language,
                slots_dict=train.slots,
                engine_path=self.engine_path,
                dataset_path=self.dataset_path,
            )
            end_time = time.perf_counter()
            _LOGGER.debug("Trained Snips engine in %s second(s)", end_time - start_time)
            # Swap in the freshly trained engine for subsequent queries
            self.engine = new_engine
            yield (NluTrainSuccess(id=train.id), {"site_id": site_id})
        except Exception as e:
            _LOGGER.exception("handle_train")
            yield NluError(
                site_id=site_id, session_id=train.id, error=str(e), context=train.id
            )
    def get_empty_engine(self):
        """Load Snips engine configured for specific language."""
        assert (
            self.snips_language in DEFAULT_CONFIGS
        ), f"Snips language not supported: {self.snips_language}"
        _LOGGER.debug("Creating empty Snips engine (language=%s)", self.snips_language)
        return SnipsNLUEngine(config=DEFAULT_CONFIGS[self.snips_language])
    def maybe_load_engine(self):
        """Load Snips engine if not already loaded."""
        if self.engine:
            # Already loaded
            return
        if self.engine_path and self.engine_path.exists():
            _LOGGER.debug("Loading Snips engine from %s", self.engine_path)
            self.engine = SnipsNLUEngine.from_path(self.engine_path)
    # -------------------------------------------------------------------------
    async def on_message(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker.

        Dispatches NluQuery/NluTrain to their handlers and re-yields
        whatever they produce.
        """
        if isinstance(message, NluQuery):
            async for query_result in self.handle_query(message):
                yield query_result
        elif isinstance(message, NluTrain):
            assert site_id, "Missing site_id"
            async for train_result in self.handle_train(message, site_id=site_id):
                yield train_result
        else:
            _LOGGER.warning("Unexpected message: %s", message)
import io
import logging
import shutil
import tempfile
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspynlu.intent import Entity, Intent, Recognition
from snips_nlu import SnipsNLUEngine
from snips_nlu.dataset import Dataset
from snips_nlu.default_configs import DEFAULT_CONFIGS
_LOGGER = logging.getLogger("rhasspysnips_nlu")
# -----------------------------------------------------------------------------
def train(
    sentences_dict: typing.Dict[str, str],
    language: str,
    slots_dict: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    engine_path: typing.Optional[typing.Union[str, Path]] = None,
    dataset_path: typing.Optional[typing.Union[str, Path]] = None,
) -> SnipsNLUEngine:
    """Generate Snips YAML dataset from Rhasspy sentences/slots.

    Parses Rhasspy's ini/JSGF sentences into an intent graph, writes a Snips
    YAML dataset (intents, utterances, entities) from the graph, then fits a
    SnipsNLUEngine on that dataset.

    Args:
        sentences_dict: ini/JSGF text blocks keyed by source name
        language: Snips language code (key of DEFAULT_CONFIGS)
        slots_dict: slot name -> list of value lines; NOTE: mutated in place
            with slot values collected from the sentences
        engine_path: optional directory to persist the trained engine to
        dataset_path: optional path for the generated YAML dataset; a
            temporary file is used when not given

    Returns:
        The fitted SnipsNLUEngine.
    """
    slots_dict = slots_dict or {}

    _LOGGER.debug("Creating Snips engine for language %s", language)
    engine = SnipsNLUEngine(config=DEFAULT_CONFIGS[language])

    # Parse JSGF sentences
    _LOGGER.debug("Parsing sentences")
    with io.StringIO() as ini_file:
        # Join as single ini file
        for lines in sentences_dict.values():
            print(lines, file=ini_file)
            print("", file=ini_file)

        intents = rhasspynlu.parse_ini(ini_file.getvalue())

    # Split into sentences and rule/slot replacements
    sentences, replacements = rhasspynlu.ini_jsgf.split_rules(intents)

    for intent_sentences in sentences.values():
        for sentence in intent_sentences:
            # Expand numeric ranges (e.g. 0..100) into words in place
            rhasspynlu.jsgf.walk_expression(
                sentence, rhasspynlu.number_range_transform, replacements
            )

    # Convert to directed graph *without* expanding slots
    # (e.g., $rhasspy/number)
    _LOGGER.debug("Converting to intent graph")
    intent_graph = rhasspynlu.sentences_to_graph(
        sentences, replacements=replacements, expand_slots=False
    )

    # Get start/end nodes for graph
    start_node, end_node = rhasspynlu.jsgf_graph.get_start_end_nodes(intent_graph)
    assert (start_node is not None) and (
        end_node is not None
    ), "Missing start/end node(s)"

    if dataset_path:
        # Use user file
        dataset_file = open(dataset_path, "w+")
    else:
        # Use temporary file
        dataset_file = typing.cast(
            typing.TextIO, tempfile.NamedTemporaryFile(suffix=".yml", mode="w+")
        )
        dataset_path = dataset_file.name

    with dataset_file:
        _LOGGER.debug("Writing YAML dataset to %s", dataset_path)

        # Walk first layer of edges with intents
        for _, intent_node, edge_data in intent_graph.edges(start_node, data=True):
            # Edge label is "__label__<intent>"; strip the 9-char prefix
            intent_name: str = edge_data["olabel"][9:]

            # New intent
            print("---", file=dataset_file)
            print("type: intent", file=dataset_file)
            print("name:", quote(intent_name), file=dataset_file)
            print("utterances:", file=dataset_file)

            # Get all paths through the graph (utterances)
            used_utterances: typing.Set[str] = set()
            paths = nx.all_simple_paths(intent_graph, intent_node, end_node)
            for path in paths:
                utterance = []
                entity_name = None
                slot_name = None
                slot_value = None

                # Walk utterance edges
                for from_node, to_node in rhasspynlu.utils.pairwise(path):
                    edge_data = intent_graph.edges[(from_node, to_node)]
                    ilabel = edge_data.get("ilabel")
                    olabel = edge_data.get("olabel")

                    if olabel:
                        if olabel.startswith("__begin__"):
                            # Start of a tagged slot ("__begin__" is 9 chars)
                            slot_name = olabel[9:]
                            entity_name = None
                            slot_value = ""
                        elif olabel.startswith("__end__"):
                            if entity_name == "rhasspy/number":
                                # Transform to Snips number
                                entity_name = "snips/number"
                            elif not entity_name:
                                # Collect actual value
                                assert (
                                    slot_name and slot_value
                                ), f"No slot name or value (name={slot_name}, value={slot_value})"

                                entity_name = slot_name
                                slot_values = slots_dict.get(slot_name)
                                if not slot_values:
                                    slot_values = []
                                    slots_dict[slot_name] = slot_values

                                slot_values.append(slot_value.strip())

                            # Reference slot/entity (values will be added later)
                            utterance.append(f"[{slot_name}:{entity_name}]")

                            # Reset current slot/entity
                            entity_name = None
                            slot_name = None
                            slot_value = None
                        elif olabel.startswith("__source__"):
                            # Use Rhasspy slot name as entity
                            # ("__source__" is 10 chars)
                            entity_name = olabel[10:]

                    if ilabel:
                        # Add to current slot/entity value
                        if slot_name and (not entity_name):
                            slot_value += ilabel + " "
                        else:
                            # Add directly to utterance
                            utterance.append(ilabel)
                    elif (
                        olabel
                        and (not olabel.startswith("__"))
                        and slot_name
                        and (not slot_value)
                        and (not entity_name)
                    ):
                        # Substituted output word belongs to the slot value
                        slot_value += olabel + " "

                if utterance:
                    utterance_str = " ".join(utterance)
                    if utterance_str not in used_utterances:
                        # Write utterance
                        print(" -", quote(utterance_str), file=dataset_file)
                        used_utterances.add(utterance_str)

            print("", file=dataset_file)

        # Write entities
        for slot_name, values in slots_dict.items():
            if slot_name.startswith("$"):
                # Remove arguments and $
                slot_name = slot_name.split(",")[0][1:]

            # Skip numbers
            if slot_name in {"rhasspy/number"}:
                # Should have been converted already to snips/number
                continue

            # Keep only unique values
            values_set = set(values)

            print("---", file=dataset_file)
            print("type: entity", file=dataset_file)
            print("name:", quote(slot_name), file=dataset_file)
            print("values:", file=dataset_file)

            # Expand each slot value (itself JSGF) into its word sequences
            slot_graph = rhasspynlu.sentences_to_graph(
                {
                    slot_name: [
                        rhasspynlu.jsgf.Sentence.parse(value) for value in values_set
                    ]
                }
            )

            start_node, end_node = rhasspynlu.jsgf_graph.get_start_end_nodes(slot_graph)
            n_data = slot_graph.nodes(data=True)
            for path in nx.all_simple_paths(slot_graph, start_node, end_node):
                words = []
                for node in path:
                    node_data = n_data[node]
                    word = node_data.get("word")
                    if word:
                        words.append(word)

                if words:
                    print(" -", quote(" ".join(words)), file=dataset_file)

            print("", file=dataset_file)

        # ------------
        # Train engine
        # ------------

        if engine_path:
            # Delete existing engine
            engine_path = Path(engine_path)
            engine_path.parent.mkdir(exist_ok=True)

            if engine_path.is_dir():
                # Snips will fail if the directory exists
                _LOGGER.debug("Removing existing engine at %s", engine_path)
                shutil.rmtree(engine_path)
            elif engine_path.is_file():
                _LOGGER.debug("Removing unexpected file at %s", engine_path)
                engine_path.unlink()

        _LOGGER.debug("Training engine")
        # seek() also flushes pending writes so the parser sees everything
        dataset_file.seek(0)
        dataset = Dataset.from_yaml_files(language, [dataset_file])
        engine = engine.fit(dataset)

    if engine_path:
        # Save engine
        engine.persist(engine_path)
        _LOGGER.debug("Engine saved to %s", engine_path)

    return engine
# -----------------------------------------------------------------------------
def recognize(
    text: str,
    engine: SnipsNLUEngine,
    slots_dict: typing.Optional[typing.Dict[str, typing.List[str]]] = None,
    slot_graphs: typing.Optional[typing.Dict[str, nx.DiGraph]] = None,
    **parse_args,
) -> typing.List[Recognition]:
    """Recognize intent using Snips NLU.

    Returns an empty list on recognition failure, otherwise a single-element
    list with the Recognition. Slot values recognized by Snips are passed
    back through the Rhasspy slot graphs (built lazily from slots_dict and
    cached in slot_graphs) so substituted Rhasspy values replace the raw
    Snips values.

    NOTE(review): intent confidence is hard-coded to 1.0 rather than taken
    from the Snips parse result — confirm this is intentional.
    """
    result = engine.parse(text, **parse_args)
    intent_name = result.get("intent", {}).get("intentName")

    if not intent_name:
        # Recognition failure
        return []

    slots_dict = slots_dict or {}
    slot_graphs = slot_graphs or {}

    recognition = Recognition(
        text=text, raw_text=text, intent=Intent(name=intent_name, confidence=1.0)
    )

    # Replace Snips slot values with Rhasspy slot values (substituted)
    for slot in result.get("slots", []):
        slot_name = slot.get("slotName")
        slot_value_dict = slot.get("value", {})
        slot_value = slot_value_dict.get("value")

        entity = Entity(
            entity=slot_name,
            source=slot.get("entity", ""),
            value=slot_value,
            raw_value=slot.get("rawValue", slot_value),
            start=slot["range"]["start"],
            end=slot["range"]["end"],
        )
        recognition.entities.append(entity)

        # Entity is recorded as-is; substitution below needs both fields
        if (not slot_name) or (not slot_value):
            continue

        slot_graph = slot_graphs.get(slot_name)
        if not slot_graph and (slot_name in slots_dict):
            # Convert slot values to graph (cached for later calls)
            slot_graph = rhasspynlu.sentences_to_graph(
                {
                    slot_name: [
                        rhasspynlu.jsgf.Sentence.parse(slot_line)
                        for slot_line in slots_dict[slot_name]
                        if slot_line.strip()
                    ]
                }
            )
            slot_graphs[slot_name] = slot_graph

        entity.tokens = slot_value.split()
        entity.raw_tokens = list(entity.tokens)

        if slot_graph:
            # Pass Snips value through graph
            slot_recognitions = rhasspynlu.recognize(entity.tokens, slot_graph)
            if slot_recognitions:
                # Pull out substituted value and replace in Rhasspy entity
                new_slot_value = slot_recognitions[0].text
                entity.value = new_slot_value
                entity.tokens = new_slot_value.split()

    return [recognition]
# -----------------------------------------------------------------------------
def quote(s):
    """Surround *s* with double quotes for YAML.

    Backslashes and embedded double quotes are escaped so the result is a
    valid YAML double-quoted scalar even when *s* contains those characters
    (the previous implementation emitted invalid YAML for such values).
    Output is unchanged for strings without quotes or backslashes.
    """
    escaped = str(s).replace("\\", "\\\\").replace('"', '\\"')
    return f'"{escaped}"'
import asyncio
import logging
import queue
import socket
import tempfile
import threading
import typing
from pathlib import Path
import pocketsphinx
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
HotwordDetected,
HotwordError,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
WAV_HEADER_BYTES = 44
_LOGGER = logging.getLogger("rhasspywake_pocketsphinx_hermes")
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy wakeword with pocketsphinx."""

    def __init__(
        self,
        client,
        keyphrase: str,
        acoustic_model: Path,
        dictionary_paths: typing.List[Path],
        wakeword_id: str = "",
        keyphrase_threshold: float = 1e-40,
        mllr_matrix: typing.Optional[Path] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        chunk_size: int = 960,
        udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
        udp_chunk_size: int = 2048,
        debug: bool = False,
        lang: typing.Optional[str] = None,
    ):
        """Set up MQTT subscriptions, state, and background threads.

        Args:
            client: MQTT client (passed through to HermesClient)
            keyphrase: wake phrase to spot
            acoustic_model: path to the pocketsphinx acoustic model (hmm)
            dictionary_paths: pronunciation dictionaries searched for the
                keyphrase words
            keyphrase_threshold: pocketsphinx -kws_threshold value
            mllr_matrix: optional speaker adaptation matrix
            udp_audio: optional (host, port, site_id) tuples; a listener
                thread is started for each
        """
        super().__init__(
            "rhasspywake_pocketsphinx_hermes",
            client,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
            site_ids=site_ids,
        )

        self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff)

        self.keyphrase = keyphrase
        self.keyphrase_threshold = keyphrase_threshold
        self.acoustic_model = acoustic_model
        self.dictionary_paths = dictionary_paths
        self.mllr_matrix = mllr_matrix
        self.wakeword_id = wakeword_id

        self.enabled = enabled
        self.disabled_reasons: typing.Set[str] = set()

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels
        self.chunk_size = chunk_size

        # Queue of WAV audio chunks to process (plus site_id)
        self.wav_queue: queue.Queue = queue.Queue()

        self.first_audio: bool = True
        self.audio_buffer = bytes()

        # FIX: was initialized to an empty list ([]), which matched neither
        # the Optional[Decoder] annotation nor the `is not None` assertion in
        # detection_thread_proc (it only worked because [] is falsy). None is
        # the correct "not loaded yet" sentinel; the decoder is loaded lazily
        # on the first audio chunk.
        self.decoder: typing.Optional[pocketsphinx.Decoder] = None
        self.decoder_started = False

        self.debug = debug
        self.lang = lang

        # Start threads
        threading.Thread(target=self.detection_thread_proc, daemon=True).start()

        # Listen for raw audio on UDP too
        self.udp_chunk_size = udp_chunk_size

        if udp_audio:
            for udp_host, udp_port, udp_site_id in udp_audio:
                threading.Thread(
                    target=self.udp_thread_proc,
                    args=(udp_host, udp_port, udp_site_id),
                    daemon=True,
                ).start()
    # -------------------------------------------------------------------------

    def load_decoder(self):
        """Load Pocketsphinx decoder.

        Builds a temporary pronunciation dictionary containing only the words
        of the keyphrase (taken from the configured dictionaries), then
        creates a keyword-spotting decoder configured with it.
        """
        _LOGGER.debug(
            "Loading decoder with hmm=%s, dicts=%s",
            str(self.acoustic_model),
            self.dictionary_paths,
        )

        # Only the keyphrase words need pronunciations
        words_needed = set(self.keyphrase.split())

        with tempfile.NamedTemporaryFile(mode="w+", suffix=".txt") as dict_file:
            # Combine all dictionaries
            for sub_dict_path in self.dictionary_paths:
                if not sub_dict_path.is_file():
                    _LOGGER.warning("Skipping dictionary %s", str(sub_dict_path))
                    continue

                with open(sub_dict_path, "r") as sub_dict_file:
                    for line in sub_dict_file:
                        line = line.strip()
                        if line:
                            # First whitespace-separated token is the word;
                            # only the first pronunciation found is kept
                            word = line.split(maxsplit=2)[0]
                            if word in words_needed:
                                print(line, file=dict_file)
                                words_needed.remove(word)

            assert (
                len(words_needed) == 0
            ), f"Missing pronunciations for words: {words_needed}"
            # seek() also flushes pending writes so the decoder sees the file
            dict_file.seek(0)

            decoder_config = pocketsphinx.Decoder.default_config()
            decoder_config.set_string("-hmm", str(self.acoustic_model))
            decoder_config.set_string("-dict", str(dict_file.name))
            decoder_config.set_string("-keyphrase", self.keyphrase)
            decoder_config.set_float("-kws_threshold", self.keyphrase_threshold)

            if not self.debug:
                # Silence pocketsphinx's own logging
                decoder_config.set_string("-logfn", "/dev/null")

            if self.mllr_matrix and self.mllr_matrix.is_file():
                # Optional speaker adaptation
                decoder_config.set_string("-mllr", str(self.mllr_matrix))

            self.decoder = pocketsphinx.Decoder(decoder_config)
# -------------------------------------------------------------------------
async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = "default"):
"""Process a single audio frame"""
self.wav_queue.put((wav_bytes, site_id))
async def handle_detection(
self, wakeword_id: str, site_id: str = "default"
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
"""Handle a successful hotword detection"""
try:
yield (
HotwordDetected(
site_id=site_id,
model_id=self.keyphrase,
current_sensitivity=self.keyphrase_threshold,
model_version="",
model_type="personal",
lang=self.lang,
),
{"wakeword_id": wakeword_id},
)
except Exception as e:
_LOGGER.exception("handle_detection")
yield HotwordError(error=str(e), context=self.keyphrase, site_id=site_id)
    def detection_thread_proc(self):
        """Handle WAV audio chunks.

        Runs in a daemon thread: pulls (wav_bytes, site_id) pairs off the
        queue, feeds fixed-size chunks to the pocketsphinx keyword decoder,
        and schedules a detection message on the event loop when the
        keyphrase is hypothesized.
        """
        try:
            while True:
                wav_bytes, site_id = self.wav_queue.get()
                if self.first_audio:
                    _LOGGER.debug("Receiving audio")
                    self.first_audio = False

                # Lazily create the decoder on the first chunk
                if not self.decoder:
                    self.load_decoder()

                assert self.decoder is not None

                # Extract/convert audio data
                audio_data = self.maybe_convert_wav(wav_bytes)

                # Add to persistent buffer
                self.audio_buffer += audio_data

                # Process in chunks.
                # Any remaining audio data will be kept in buffer.
                while len(self.audio_buffer) >= self.chunk_size:
                    chunk = self.audio_buffer[: self.chunk_size]
                    self.audio_buffer = self.audio_buffer[self.chunk_size :]

                    if not self.decoder_started:
                        # Begin utterance
                        self.decoder.start_utt()
                        self.decoder_started = True

                    self.decoder.process_raw(chunk, False, False)
                    hyp = self.decoder.hyp()
                    if hyp:
                        if self.decoder_started:
                            # End utterance
                            self.decoder.end_utt()
                            self.decoder_started = False

                        # Fall back to the keyphrase when no id is configured
                        wakeword_id = self.wakeword_id
                        if not wakeword_id:
                            wakeword_id = self.keyphrase

                        # Publish from this thread onto the asyncio loop
                        asyncio.run_coroutine_threadsafe(
                            self.publish_all(
                                self.handle_detection(wakeword_id, site_id=site_id)
                            ),
                            self.loop,
                        )

                        # Stop and clear buffer to avoid duplicate reports
                        self.audio_buffer = bytes()
                        break
        except Exception:
            _LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
"""Handle WAV chunks from UDP socket."""
try:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind((host, port))
_LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
while True:
wav_bytes, _ = udp_socket.recvfrom(
self.udp_chunk_size + WAV_HEADER_BYTES
)
if self.enabled:
self.wav_queue.put((wav_bytes, site_id))
except Exception:
_LOGGER.exception("udp_thread_proc")
    # -------------------------------------------------------------------------

    async def on_message_blocking(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker.

        Tracks enable/disable state (several disable reasons may be active
        at once) and queues audio frames for detection while enabled.
        """
        # Check enable/disable messages
        if isinstance(message, HotwordToggleOn):
            if message.reason == HotwordToggleReason.UNKNOWN:
                # Always enable on unknown
                self.disabled_reasons.clear()
            else:
                self.disabled_reasons.discard(message.reason)

            if self.disabled_reasons:
                _LOGGER.debug("Still disabled: %s", self.disabled_reasons)
            else:
                self.enabled = True
                self.first_audio = True
                _LOGGER.debug("Enabled")
        elif isinstance(message, HotwordToggleOff):
            self.enabled = False
            self.disabled_reasons.add(message.reason)

            # End utterance
            if self.decoder and self.decoder_started:
                self.decoder.end_utt()
                self.decoder_started = False

            _LOGGER.debug("Disabled")
        elif isinstance(message, AudioFrame):
            if self.enabled:
                assert site_id, "Missing site_id"
                await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
        else:
            _LOGGER.warning("Unexpected message: %s", message)

        # Mark as async generator
        yield None
import asyncio
import logging
import queue
import socket
import struct
import threading
import typing
from pathlib import Path
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
GetHotwords,
Hotword,
HotwordDetected,
HotwordError,
Hotwords,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
WAV_HEADER_BYTES = 44
_LOGGER = logging.getLogger("rhasspywake_porcupine_hermes")
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy wakeword with Porcupine."""

    def __init__(
        self,
        client,
        porcupine: typing.Any,
        model_ids: typing.List[str],
        wakeword_ids: typing.List[str],
        sensitivities: typing.List[float],
        keyword_dirs: typing.Optional[typing.List[Path]] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
        udp_chunk_size: int = 2048,
        lang: typing.Optional[str] = None,
    ):
        """Set up MQTT subscriptions, Porcupine state, and worker threads.

        model_ids, wakeword_ids, and sensitivities are parallel lists
        indexed by Porcupine keyword index.
        """
        super().__init__(
            "rhasspywake_porcupine_hermes",
            client,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
            site_ids=site_ids,
        )

        self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff, GetHotwords)

        self.porcupine = porcupine
        self.wakeword_ids = wakeword_ids
        self.model_ids = model_ids
        self.sensitivities = sensitivities
        self.keyword_dirs = keyword_dirs or []

        self.enabled = enabled
        self.disabled_reasons: typing.Set[str] = set()

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels

        # Queue of WAV audio chunks to process (plus site_id)
        self.wav_queue: queue.Queue = queue.Queue()

        # One Porcupine frame of 16-bit samples (2 bytes per sample)
        self.chunk_size = self.porcupine.frame_length * 2
        self.chunk_format = "h" * self.porcupine.frame_length

        self.audio_buffer = bytes()
        self.first_audio = True
        self.lang = lang

        # Start threads
        threading.Thread(target=self.detection_thread_proc, daemon=True).start()

        # Listen for raw audio on UDP too
        self.udp_chunk_size = udp_chunk_size

        if udp_audio:
            for udp_host, udp_port, udp_site_id in udp_audio:
                threading.Thread(
                    target=self.udp_thread_proc,
                    args=(udp_host, udp_port, udp_site_id),
                    daemon=True,
                ).start()
# -------------------------------------------------------------------------
async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = "default"):
"""Process a single audio frame"""
self.wav_queue.put((wav_bytes, site_id))
async def handle_detection(
self, keyword_index: int, wakeword_id: str, site_id="default"
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
"""Handle a successful hotword detection"""
try:
assert (
len(self.model_ids) > keyword_index
), f"Missing {keyword_index} in models"
yield (
HotwordDetected(
site_id=site_id,
model_id=self.model_ids[keyword_index],
current_sensitivity=self.sensitivities[keyword_index],
model_version="",
model_type="personal",
lang=self.lang,
),
{"wakeword_id": wakeword_id},
)
except Exception as e:
_LOGGER.exception("handle_detection")
yield HotwordError(
error=str(e), context=str(keyword_index), site_id=site_id
)
    async def handle_get_hotwords(
        self, get_hotwords: GetHotwords
    ) -> typing.AsyncIterable[typing.Union[Hotwords, HotwordError]]:
        """Report available hotwords"""
        try:
            if self.keyword_dirs:
                # Add all models from keyword dir
                model_paths = []
                for keyword_dir in self.keyword_dirs:
                    if not keyword_dir.is_dir():
                        _LOGGER.warning("Missing keyword dir: %s", str(keyword_dir))
                        continue

                    for keyword_file in keyword_dir.glob("*.ppn"):
                        model_paths.append(keyword_file)
            else:
                # Add current model(s) only
                model_paths = [Path(model_id) for model_id in self.model_ids]

            models: typing.List[Hotword] = []
            for ppn_file in model_paths:
                # Guess the spoken words from the file name
                words = ppn_file.with_suffix("").name.split("_")
                if len(words) == 1:
                    # porcupine.ppn -> "porcupine"
                    model_words = words[0]
                else:
                    # smart_mirror_linux.ppn -> "smart mirror"
                    # (last component is assumed to be a platform suffix)
                    model_words = " ".join(words[:-1])

                models.append(Hotword(model_id=ppn_file.name, model_words=model_words))

            yield Hotwords(
                models=models, id=get_hotwords.id, site_id=get_hotwords.site_id
            )
        except Exception as e:
            _LOGGER.exception("handle_get_hotwords")
            yield HotwordError(
                error=str(e), context=str(get_hotwords), site_id=get_hotwords.site_id
            )
    def detection_thread_proc(self):
        """Handle WAV audio chunks.

        Runs in a daemon thread: buffers queued audio, feeds Porcupine one
        frame at a time, and schedules a detection message on the event loop
        when a keyword is spotted.
        """
        try:
            while True:
                wav_bytes, site_id = self.wav_queue.get()
                if self.first_audio:
                    _LOGGER.debug("Receiving audio")
                    self.first_audio = False

                # Add to persistent buffer
                audio_data = self.maybe_convert_wav(wav_bytes)
                self.audio_buffer += audio_data

                # Process in chunks.
                # Any remaining audio data will be kept in buffer.
                while len(self.audio_buffer) >= self.chunk_size:
                    chunk = self.audio_buffer[: self.chunk_size]
                    self.audio_buffer = self.audio_buffer[self.chunk_size :]

                    # Porcupine takes a frame of unpacked 16-bit samples
                    unpacked_chunk = struct.unpack_from(self.chunk_format, chunk)
                    keyword_index = self.porcupine.process(unpacked_chunk)

                    # process() returns -1 when nothing was detected
                    if keyword_index >= 0:
                        # Detection
                        if len(self.model_ids) == 1:
                            keyword_index = 0

                        if keyword_index < len(self.wakeword_ids):
                            wakeword_id = self.wakeword_ids[keyword_index]
                        else:
                            wakeword_id = ""

                        if not wakeword_id:
                            # Use file name
                            wakeword_id = Path(self.model_ids[keyword_index]).stem

                        # Publish from this thread onto the asyncio loop
                        asyncio.run_coroutine_threadsafe(
                            self.publish_all(
                                self.handle_detection(
                                    keyword_index, wakeword_id, site_id=site_id
                                )
                            ),
                            self.loop,
                        )
        except Exception:
            _LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
"""Handle WAV chunks from UDP socket."""
try:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind((host, port))
_LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
while True:
wav_bytes, _ = udp_socket.recvfrom(
self.udp_chunk_size + WAV_HEADER_BYTES
)
if self.enabled:
self.wav_queue.put((wav_bytes, site_id))
except Exception:
_LOGGER.exception("udp_thread_proc")
# -------------------------------------------------------------------------
async def on_message_blocking(
self,
message: Message,
site_id: typing.Optional[str] = None,
session_id: typing.Optional[str] = None,
topic: typing.Optional[str] = None,
) -> GeneratorType:
"""Received message from MQTT broker."""
# Check enable/disable messages
if isinstance(message, HotwordToggleOn):
if message.reason == HotwordToggleReason.UNKNOWN:
# Always enable on unknown
self.disabled_reasons.clear()
else:
self.disabled_reasons.discard(message.reason)
if self.disabled_reasons:
_LOGGER.debug("Still disabled: %s", self.disabled_reasons)
else:
self.enabled = True
self.first_audio = True
_LOGGER.debug("Enabled")
elif isinstance(message, HotwordToggleOff):
self.enabled = False
self.disabled_reasons.add(message.reason)
_LOGGER.debug("Disabled")
elif isinstance(message, AudioFrame):
if self.enabled:
assert site_id, "Missing site_id"
await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
elif isinstance(message, GetHotwords):
async for hotword_result in self.handle_get_hotwords(message):
yield hotword_result | /rhasspy-wake-porcupine-hermes-0.3.0.tar.gz/rhasspy-wake-porcupine-hermes-0.3.0/rhasspywake_porcupine_hermes/__init__.py | 0.618089 | 0.154823 | __init__.py | pypi |
import asyncio
import logging
import queue
import socket
import subprocess
import threading
import typing
from pathlib import Path
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
GetHotwords,
Hotword,
HotwordDetected,
HotwordError,
Hotwords,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
from .precise import TriggerDetector
WAV_HEADER_BYTES = 44
_LOGGER = logging.getLogger("rhasspywake_precise_hermes")
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy wakeword with Mycroft Precise."""

    def __init__(
        self,
        client,
        model_path: Path,
        engine_path: Path,
        sensitivity: float = 0.5,
        trigger_level: int = 3,
        wakeword_id: str = "",
        model_dirs: typing.Optional[typing.List[Path]] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        chunk_size: int = 2048,
        udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
        udp_chunk_size: int = 2048,
        log_predictions: bool = False,
        lang: typing.Optional[str] = None,
    ):
        """Set up MQTT subscriptions, Precise state, and worker threads.

        model_path is the Precise model file; engine_path is the
        precise-engine executable run as a subprocess (loaded lazily on the
        first audio chunk).
        """
        super().__init__(
            "rhasspywake_precise_hermes",
            client,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
            site_ids=site_ids,
        )

        self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff, GetHotwords)

        self.model_path = model_path
        self.engine_path = engine_path
        self.sensitivity = sensitivity
        self.trigger_level = trigger_level
        self.wakeword_id = wakeword_id
        self.model_dirs = model_dirs or []

        self.enabled = enabled
        self.disabled_reasons: typing.Set[str] = set()

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels
        self.chunk_size = chunk_size

        # Queue of WAV audio chunks to process (plus site_id)
        self.wav_queue: queue.Queue = queue.Queue()

        self.first_audio: bool = True
        self.audio_buffer = bytes()
        self.lang = lang

        # precise-engine subprocess and its trigger detector (lazy)
        self.engine_proc: typing.Optional[subprocess.Popen] = None
        self.detector: typing.Optional[TriggerDetector] = None

        # Site of the most recently queued audio (used by handle_detection)
        self.last_audio_site_id: str = "default"
        self.model_id = self.model_path.name
        self.log_predictions = log_predictions

        # Start threads
        self.detection_thread = threading.Thread(
            target=self.detection_thread_proc, daemon=True
        )
        self.detection_thread.start()

        # Listen for raw audio on UDP too
        self.udp_chunk_size = udp_chunk_size

        if udp_audio:
            for udp_host, udp_port, udp_site_id in udp_audio:
                threading.Thread(
                    target=self.udp_thread_proc,
                    args=(udp_host, udp_port, udp_site_id),
                    daemon=True,
                ).start()
    # -------------------------------------------------------------------------

    def engine_thread_proc(self):
        """Read predictions from precise-engine.

        Consumes one probability per line from the engine's stdout and
        publishes a detection when the trigger detector activates.

        NOTE(review): this reader does not appear to be started anywhere in
        this module — detection_thread_proc reads predictions inline
        instead; confirm whether this method is still used.
        """
        assert (
            self.engine_proc and self.engine_proc.stdout and self.detector
        ), "Precise engine is not started"

        for line in self.engine_proc.stdout:
            line = line.decode().strip()
            if line:
                if self.log_predictions:
                    _LOGGER.debug("Prediction: %s", line)

                try:
                    # TriggerDetector.update returns True on activation
                    if self.detector.update(float(line)):
                        asyncio.run_coroutine_threadsafe(
                            self.publish_all(self.handle_detection()), self.loop
                        )
                except ValueError:
                    _LOGGER.exception("engine_proc")
    def load_engine(self, block=True):
        """Load Precise engine and model.

        Starts precise-engine as a subprocess with raw audio on stdin and
        one prediction per chunk on stdout, and creates the trigger
        detector.

        if block is True, wait until an empty chunk is predicted before
        returning.
        """
        engine_cmd = [str(self.engine_path), str(self.model_path), str(self.chunk_size)]
        _LOGGER.debug(engine_cmd)

        self.engine_proc = subprocess.Popen(
            engine_cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE
        )

        self.detector = TriggerDetector(
            self.chunk_size,
            sensitivity=self.sensitivity,
            trigger_level=self.trigger_level,
        )

        _LOGGER.debug(
            "Loaded Mycroft Precise (model=%s, sensitivity=%s, trigger_level=%s)",
            self.model_path,
            self.sensitivity,
            self.trigger_level,
        )

        if block:
            # Send empty chunk and wait for a prediction
            _LOGGER.debug("Waiting for Precise to start...")
            empty_chunk = b"\0" * self.chunk_size
            self.engine_proc.stdin.write(empty_chunk)
            self.engine_proc.stdin.flush()
            self.engine_proc.stdout.readline()
def stop_runner(self):
"""Stop Precise runner."""
if self.engine_proc:
self.engine_proc.terminate()
self.engine_proc.wait()
self.engine_proc = None
if self.detection_thread:
self.wav_queue.put((None, None))
self.detection_thread.join()
self.detection_thread = None
self.detector = None
# -------------------------------------------------------------------------
async def handle_audio_frame(
self, wav_bytes: bytes, site_id: str = "default"
) -> None:
"""Process a single audio frame"""
self.wav_queue.put((wav_bytes, site_id))
async def handle_detection(
self,
) -> typing.AsyncIterable[
typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
"""Handle a successful hotword detection"""
try:
wakeword_id = self.wakeword_id
if not wakeword_id:
# Use file name
wakeword_id = self.model_path.stem
yield (
HotwordDetected(
site_id=self.last_audio_site_id,
model_id=self.model_id,
current_sensitivity=self.sensitivity,
model_version="",
model_type="personal",
lang=self.lang,
),
{"wakeword_id": wakeword_id},
)
except Exception as e:
_LOGGER.exception("handle_detection")
yield HotwordError(
error=str(e),
context=str(self.model_path),
site_id=self.last_audio_site_id,
)
    async def handle_get_hotwords(
        self, get_hotwords: GetHotwords
    ) -> typing.AsyncIterable[typing.Union[Hotwords, HotwordError]]:
        """Report available hotwords"""
        try:
            if self.model_dirs:
                # Add all models from model dirs
                model_paths = []
                for model_dir in self.model_dirs:
                    if not model_dir.is_dir():
                        _LOGGER.warning("Model directory missing: %s", str(model_dir))
                        continue

                    for model_file in model_dir.iterdir():
                        if model_file.is_file() and (model_file.suffix == ".pb"):
                            model_paths.append(model_file)
            else:
                # Add current model
                model_paths = [self.model_path]

            hotword_models: typing.List[Hotword] = []
            for model_path in model_paths:
                # "hey_computer.pb" -> "hey computer"
                model_words = " ".join(model_path.with_suffix("").name.split("_"))

                hotword_models.append(
                    Hotword(
                        model_id=model_path.name,
                        model_words=model_words,
                        model_type="personal",
                    )
                )

            yield Hotwords(
                models=hotword_models, id=get_hotwords.id, site_id=get_hotwords.site_id
            )
        except Exception as e:
            _LOGGER.exception("handle_get_hotwords")
            yield HotwordError(
                error=str(e), context=str(get_hotwords), site_id=get_hotwords.site_id
            )
    def detection_thread_proc(self):
        """Handle WAV audio chunks.

        Daemon thread: feeds fixed-size chunks to the precise-engine
        subprocess over stdin, reads one probability per chunk from stdout,
        and schedules a detection when the trigger detector activates.
        A (None, None) queue item is the shutdown sentinel (see stop_runner).
        """
        try:
            while True:
                wav_bytes, site_id = self.wav_queue.get()
                if wav_bytes is None:
                    # Shutdown signal
                    break

                self.last_audio_site_id = site_id

                # Handle audio frames
                if self.first_audio:
                    _LOGGER.debug("Receiving audio")
                    self.first_audio = False

                # Lazily start the engine on the first chunk
                if not self.engine_proc:
                    self.load_engine()

                assert (
                    self.engine_proc and self.engine_proc.stdin
                ), "Precise engine not loaded"

                # Extract/convert audio data
                audio_data = self.maybe_convert_wav(wav_bytes)

                # Add to persistent buffer
                self.audio_buffer += audio_data

                # Process in chunks.
                # Any remaining audio data will be kept in buffer.
                while len(self.audio_buffer) >= self.chunk_size:
                    chunk = self.audio_buffer[: self.chunk_size]
                    self.audio_buffer = self.audio_buffer[self.chunk_size :]

                    if chunk:
                        # Send to precise-engine
                        # NOTE: The flush() is critical to this working.
                        self.engine_proc.stdin.write(chunk)
                        self.engine_proc.stdin.flush()

                        # Get prediction (one line per chunk)
                        line = self.engine_proc.stdout.readline()
                        line = line.decode().strip()
                        if line:
                            if self.log_predictions:
                                _LOGGER.debug("Prediction: %s", line)

                            try:
                                # TriggerDetector.update returns True on
                                # activation
                                if self.detector.update(float(line)):
                                    asyncio.run_coroutine_threadsafe(
                                        self.publish_all(self.handle_detection()),
                                        self.loop,
                                    )
                            except ValueError:
                                _LOGGER.exception("prediction")
        except Exception:
            _LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
"""Handle WAV chunks from UDP socket."""
try:
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.bind((host, port))
_LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
while True:
wav_bytes, _ = udp_socket.recvfrom(
self.udp_chunk_size + WAV_HEADER_BYTES
)
if self.enabled:
self.wav_queue.put((wav_bytes, site_id))
except Exception:
_LOGGER.exception("udp_thread_proc")
# -------------------------------------------------------------------------
    async def on_message_blocking(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker.

        Dispatches on the message type: hotword toggle on/off, incoming
        audio frames, and hotword listing requests.  Yields any reply
        messages to be published back over MQTT.
        """
        # Check enable/disable messages
        if isinstance(message, HotwordToggleOn):
            if message.reason == HotwordToggleReason.UNKNOWN:
                # Always enable on unknown
                self.disabled_reasons.clear()
            else:
                self.disabled_reasons.discard(message.reason)

            if self.disabled_reasons:
                # At least one other component still wants detection off
                _LOGGER.debug("Still disabled: %s", self.disabled_reasons)
            else:
                self.enabled = True
                # Log "Receiving audio" once after re-enabling
                self.first_audio = True
                _LOGGER.debug("Enabled")
        elif isinstance(message, HotwordToggleOff):
            self.enabled = False
            self.disabled_reasons.add(message.reason)
            _LOGGER.debug("Disabled")
        elif isinstance(message, AudioFrame):
            # Audio frames are ignored entirely while disabled
            if self.enabled:
                assert site_id, "Missing site_id"
                await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
        elif isinstance(message, GetHotwords):
            async for hotword_result in self.handle_get_hotwords(message):
                yield hotword_result
        else:
            _LOGGER.warning("Unexpected message: %s", message)
# Rhasspy Wake Raven Hermes
Implements `hermes/hotword` functionality from [Hermes protocol](https://docs.snips.ai/reference/hermes) using [Rhasspy Raven](https://github.com/rhasspy/raven).
## Requirements
* Python 3.7
* [Rhasspy Raven](https://github.com/rhasspy/raven)
## Installation
```bash
$ git clone https://github.com/rhasspy/rhasspy-wake-raven-hermes
$ cd rhasspy-wake-raven-hermes
$ ./configure
$ make
$ make install
```
## WAV Templates
Steps to record a custom wake word:
1. Record yourself saying the wake word at least 3 times
2. Trim silence around the audio and export 3 WAV files to a directory
* WAV format should be 16-bit 16Khz mono
3. Pass `--keyword /path/to/directory` to `rhasspy-wake-raven-hermes` with the path to the directory with your WAV templates
You may pass more than one `--keyword` with different WAV directories. It's recommended that you use `--average-templates` to reduce CPU usage.
## Running
```bash
$ bin/rhasspy-wake-raven-hermes <ARGS>
```
## Command-Line Options
```
usage: rhasspy-wake-raven-hermes [-h] [--keyword KEYWORD [KEYWORD ...]]
[--probability-threshold PROBABILITY_THRESHOLD]
[--distance-threshold DISTANCE_THRESHOLD]
[--minimum-matches MINIMUM_MATCHES]
[--refractory-seconds REFRACTORY_SECONDS]
[--window-shift-seconds WINDOW_SHIFT_SECONDS]
[--dtw-window-size DTW_WINDOW_SIZE]
[--vad-sensitivity {1,2,3}]
[--current-threshold CURRENT_THRESHOLD]
[--max-energy MAX_ENERGY]
[--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD]
[--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}]
[--average-templates]
[--udp-audio UDP_AUDIO UDP_AUDIO UDP_AUDIO]
[--examples-dir EXAMPLES_DIR]
[--examples-format EXAMPLES_FORMAT]
[--log-predictions] [--host HOST]
[--port PORT] [--username USERNAME]
[--password PASSWORD] [--tls]
[--tls-ca-certs TLS_CA_CERTS]
[--tls-certfile TLS_CERTFILE]
[--tls-keyfile TLS_KEYFILE]
[--tls-cert-reqs {CERT_REQUIRED,CERT_OPTIONAL,CERT_NONE}]
[--tls-version TLS_VERSION]
[--tls-ciphers TLS_CIPHERS]
[--site-id SITE_ID] [--debug]
[--log-format LOG_FORMAT]
optional arguments:
-h, --help show this help message and exit
--keyword KEYWORD [KEYWORD ...]
Directory with WAV templates and settings (setting-
name=value)
--probability-threshold PROBABILITY_THRESHOLD
Probability above which detection occurs (default:
0.5)
--distance-threshold DISTANCE_THRESHOLD
Normalized dynamic time warping distance threshold for
template matching (default: 0.22)
--minimum-matches MINIMUM_MATCHES
Number of templates that must match to produce output
(default: 1)
--refractory-seconds REFRACTORY_SECONDS
Seconds before wake word can be activated again
(default: 2)
--window-shift-seconds WINDOW_SHIFT_SECONDS
Seconds to shift sliding time window on audio buffer
(default: 0.02)
--dtw-window-size DTW_WINDOW_SIZE
Size of band around slanted diagonal during dynamic
time warping calculation (default: 5)
--vad-sensitivity {1,2,3}
Webrtcvad VAD sensitivity (1-3)
--current-threshold CURRENT_THRESHOLD
Debiased energy threshold of current audio frame
--max-energy MAX_ENERGY
Fixed maximum energy for ratio calculation (default:
observed)
--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD
Threshold of ratio between max energy and current
audio frame
--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}
Method for detecting silence
--average-templates Average wakeword templates together to reduce number
of calculations
--udp-audio UDP_AUDIO UDP_AUDIO UDP_AUDIO
Host/port/siteId for UDP audio input
--examples-dir EXAMPLES_DIR
Save positive example audio to directory as WAV files
--examples-format EXAMPLES_FORMAT
Format of positive example WAV file names using
strftime (relative to examples-dir)
--log-predictions Log prediction probabilities for each audio chunk
(very verbose)
--host HOST MQTT host (default: localhost)
--port PORT MQTT port (default: 1883)
--username USERNAME MQTT username
--password PASSWORD MQTT password
--tls Enable MQTT TLS
--tls-ca-certs TLS_CA_CERTS
MQTT TLS Certificate Authority certificate files
--tls-certfile TLS_CERTFILE
MQTT TLS client certificate file (PEM)
--tls-keyfile TLS_KEYFILE
MQTT TLS client key file (PEM)
--tls-cert-reqs {CERT_REQUIRED,CERT_OPTIONAL,CERT_NONE}
MQTT TLS certificate requirements for broker (default:
CERT_REQUIRED)
--tls-version TLS_VERSION
MQTT TLS version (default: highest)
--tls-ciphers TLS_CIPHERS
MQTT TLS ciphers to use
--site-id SITE_ID Hermes site id(s) to listen for (default: all)
--debug Print DEBUG messages to the console
--log-format LOG_FORMAT
Python logger format
```
| /rhasspy-wake-raven-hermes-0.6.0.tar.gz/rhasspy-wake-raven-hermes-0.6.0/README.md | 0.546012 | 0.724481 | README.md | pypi |
import argparse
import asyncio
import logging
import typing
from pathlib import Path
import paho.mqtt.client as mqtt
import rhasspyhermes.cli as hermes_cli
from rhasspysilence import WebRtcVadRecorder
from rhasspysilence.const import SilenceMethod
from rhasspywake_raven import Raven, Template
from . import WakeHermesMqtt
_DIR = Path(__file__).parent
_LOGGER = logging.getLogger("rhasspywake_raven_hermes")
# -----------------------------------------------------------------------------
def main():
    """Main method.

    Parses command-line arguments, loads Raven wake word templates (falling
    back to the bundled 'okay rhasspy' templates when none are given),
    connects to the MQTT broker, and runs the Hermes wake word service until
    interrupted.
    """
    parser = argparse.ArgumentParser(prog="rhasspy-wake-raven-hermes")
    parser.add_argument(
        "--keyword",
        action="append",
        nargs="+",
        default=[],
        help="Directory with WAV templates and settings (setting-name=value)",
    )
    parser.add_argument(
        "--probability-threshold",
        type=float,
        default=0.5,
        help="Probability above which detection occurs (default: 0.5)",
    )
    parser.add_argument(
        "--distance-threshold",
        type=float,
        default=0.22,
        help="Normalized dynamic time warping distance threshold for template matching (default: 0.22)",
    )
    parser.add_argument(
        "--minimum-matches",
        type=int,
        default=1,
        help="Number of templates that must match to produce output (default: 1)",
    )
    parser.add_argument(
        "--refractory-seconds",
        type=float,
        default=2.0,
        help="Seconds before wake word can be activated again (default: 2)",
    )
    parser.add_argument(
        "--window-shift-seconds",
        type=float,
        default=Raven.DEFAULT_SHIFT_SECONDS,
        help=f"Seconds to shift sliding time window on audio buffer (default: {Raven.DEFAULT_SHIFT_SECONDS})",
    )
    parser.add_argument(
        "--dtw-window-size",
        type=int,
        default=5,
        help="Size of band around slanted diagonal during dynamic time warping calculation (default: 5)",
    )
    parser.add_argument(
        "--vad-sensitivity",
        type=int,
        choices=[1, 2, 3],
        default=3,
        help="Webrtcvad VAD sensitivity (1-3)",
    )
    parser.add_argument(
        "--current-threshold",
        type=float,
        help="Debiased energy threshold of current audio frame",
    )
    parser.add_argument(
        "--max-energy",
        type=float,
        help="Fixed maximum energy for ratio calculation (default: observed)",
    )
    parser.add_argument(
        "--max-current-ratio-threshold",
        type=float,
        help="Threshold of ratio between max energy and current audio frame",
    )
    parser.add_argument(
        "--silence-method",
        choices=[e.value for e in SilenceMethod],
        default=SilenceMethod.VAD_ONLY,
        help="Method for detecting silence",
    )
    parser.add_argument(
        "--average-templates",
        action="store_true",
        help="Average wakeword templates together to reduce number of calculations",
    )
    parser.add_argument(
        "--udp-audio",
        nargs=3,
        action="append",
        help="Host/port/siteId for UDP audio input",
    )
    parser.add_argument(
        "--examples-dir", help="Save positive example audio to directory as WAV files"
    )
    parser.add_argument(
        "--examples-format",
        default="{keyword}/examples/%Y%m%d-%H%M%S.wav",
        help="Format of positive example WAV file names using strftime (relative to examples-dir)",
    )
    parser.add_argument(
        "--log-predictions",
        action="store_true",
        help="Log prediction probabilities for each audio chunk (very verbose)",
    )
    parser.add_argument("--lang", help="Set lang in hotword detected message")

    hermes_cli.add_hermes_args(parser)
    args = parser.parse_args()

    hermes_cli.setup_logging(args)
    _LOGGER.debug(args)

    hermes: typing.Optional[WakeHermesMqtt] = None

    # -------------------------------------------------------------------------

    if args.examples_dir:
        # Directory to save positive example WAV files
        args.examples_dir = Path(args.examples_dir)
        args.examples_dir.mkdir(parents=True, exist_ok=True)

    # Keywords are "missing" if no given directory contains any WAV file
    if args.keyword:
        missing_keywords = not any(list(Path(k[0]).glob("*.wav")) for k in args.keyword)
    else:
        missing_keywords = True

    if missing_keywords:
        # Fall back to the templates bundled with this package
        args.keyword = [[_DIR / "templates"]]
        _LOGGER.debug("No keywords provided. Use built-in 'okay rhasspy' templates.")

    # Create silence detector
    recorder = WebRtcVadRecorder(
        vad_mode=args.vad_sensitivity,
        silence_method=args.silence_method,
        current_energy_threshold=args.current_threshold,
        max_energy=args.max_energy,
        max_current_ratio_threshold=args.max_current_ratio_threshold,
    )

    # Load audio templates
    ravens: typing.List[Raven] = []

    for keyword_settings in args.keyword:
        # First element is the template directory; remaining elements are
        # per-keyword "setting-name=value" overrides
        template_dir = Path(keyword_settings[0])
        wav_paths = list(template_dir.glob("*.wav"))
        if not wav_paths:
            _LOGGER.warning("No WAV files found in %s", template_dir)
            continue

        keyword_name = template_dir.name if not missing_keywords else "okay-rhasspy"

        # Load audio templates
        keyword_templates = [
            Raven.wav_to_template(p, name=str(p), shift_sec=args.window_shift_seconds)
            for p in wav_paths
        ]

        raven_args = {
            "templates": keyword_templates,
            "keyword_name": keyword_name,
            "recorder": recorder,
            "probability_threshold": args.probability_threshold,
            "minimum_matches": args.minimum_matches,
            "distance_threshold": args.distance_threshold,
            "refractory_sec": args.refractory_seconds,
            "shift_sec": args.window_shift_seconds,
            "debug": args.log_predictions,
        }

        # Apply settings (per-keyword overrides of the global defaults)
        average_templates = args.average_templates
        for setting_str in keyword_settings[1:]:
            setting_name, setting_value = setting_str.strip().split("=", maxsplit=1)
            # Normalize so "probability_threshold" and "probability-threshold"
            # are treated the same
            setting_name = setting_name.lower().replace("_", "-")

            if setting_name == "name":
                raven_args["keyword_name"] = setting_value
            elif setting_name == "probability-threshold":
                raven_args["probability_threshold"] = float(setting_value)
            elif setting_name == "minimum-matches":
                raven_args["minimum_matches"] = int(setting_value)
            elif setting_name == "average-templates":
                average_templates = setting_value.lower().strip() == "true"

        if average_templates:
            _LOGGER.debug(
                "Averaging %s templates for %s", len(keyword_templates), template_dir
            )
            raven_args["templates"] = [Template.average_templates(keyword_templates)]

        # Create instance of Raven in a separate thread for keyword
        ravens.append(Raven(**raven_args))

    udp_audio = []
    if args.udp_audio:
        # Each entry is (host, port, site_id)
        udp_audio = [
            (host, int(port), site_id) for host, port, site_id in args.udp_audio
        ]

    # Listen for messages
    client = mqtt.Client()
    hermes = WakeHermesMqtt(
        client,
        ravens=ravens,
        examples_dir=args.examples_dir,
        examples_format=args.examples_format,
        udp_audio=udp_audio,
        site_ids=args.site_id,
        lang=args.lang,
    )

    _LOGGER.debug("Connecting to %s:%s", args.host, args.port)
    hermes_cli.connect(client, args)
    client.loop_start()

    try:
        # Run event loop
        asyncio.run(hermes.handle_messages_async())
    except KeyboardInterrupt:
        pass
    finally:
        _LOGGER.debug("Shutting down")
        client.loop_stop()
        hermes.stop()
# -----------------------------------------------------------------------------
# Allow running as a script: python -m rhasspywake_raven_hermes
if __name__ == "__main__":
    main()
# Rhasspy Raven Wakeword System
Wakeword detector based on the [Snips Personal Wake Word Detector](https://medium.com/snips-ai/machine-learning-on-voice-a-gentle-introduction-with-snips-personal-wake-word-detector-133bd6fb568e).
The underlying implementation of Raven heavily borrows from [node-personal-wakeword](https://github.com/mathquis/node-personal-wakeword) by [mathquis](https://github.com/mathquis).
## Dependencies
* Python 3.7
* `python-speech-features` for [MFCC](https://python-speech-features.readthedocs.io/en/latest/) computation
* `rhasspy-silence` for [silence detection](https://github.com/rhasspy/rhasspy-silence)
* Scientific libraries
* `sudo apt-get install liblapack3 libatlas-base-dev`
## Installation
```sh
$ git clone https://github.com/rhasspy/rhasspy-wake-raven.git
$ cd rhasspy-wake-raven
$ ./configure
$ make
$ make install
```
## Recording Templates
Record at least 3 WAV templates with your wake word:
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | \
bin/rhasspy-wake-raven --record keyword-dir/
```
Follow the prompts and speak your wake word. When you've recorded at least 3 examples, hit CTRL+C to exit. Your WAV templates will have silence automatically trimmed, and will be saved in the directory `keyword-dir/`. Add a format string after the directory name to control the file names:
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | \
bin/rhasspy-wake-raven --record keyword-dir/ 'keyword-{n:02d}.wav'
```
The format string will receive the 0-based index `n` for each example.
If you want to manually record WAV templates, trim silence off the front and back and make sure to export them as 16-bit 16Khz mono WAV files.
## Running
After recording your WAV templates in a directory, run:
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | \
bin/rhasspy-wake-raven --keyword <WAV_DIR> ...
```
where `<WAV_DIR>` contains the WAV templates. You may add as many keywords as you'd like, though this will use additional CPU. It's recommended you use `--average-templates` to keep CPU usage down.
Some settings can be specified per-keyword:
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | \
bin/rhasspy-wake-raven \
--keyword keyword1/ name=my-keyword1 probability-threshold=0.45 minimum-matches=2 \
--keyword keyword2/ name=my-keyword2 probability-threshold=0.55 average-templates=true
```
If not set, `probability-threshold=`, etc. fall back on the values supplied to `--probability-threshold`, etc.
Add `--debug` to the command line to get more information about the underlying computation on each audio frame.
### Example
Using the example files for "okay rhasspy":
```sh
$ arecord -r 16000 -f S16_LE -c 1 -t raw | \
bin/rhasspy-wake-raven --keyword etc/okay-rhasspy/
```
This requires at least 1 of the 3 WAV templates to match before output like this is printed:
```json
{"keyword": "okay-rhasspy", "template": "etc/okay-rhasspy/okay-rhasspy-00.wav", "detect_seconds": 2.7488508224487305, "detect_timestamp": 1594996988.638912, "raven": {"probability": 0.45637207995699963, "distance": 0.25849045215799454, "probability_threshold": 0.5, "distance_threshold": 0.22, "tick": 1, "matches": 2, "match_seconds": 0.005367016012314707}}
```
Use `--minimum-matches` to change how many templates must match for a detection to occur or `--average-templates` to combine all WAV templates into a single template (reduces CPU usage). Adjust the sensitivity with `--probability-threshold` which sets the lower bound of the detection probability (default is 0.5).
## Output
Raven outputs a line of JSON when the wake word is detected. Fields are:
* `keyword` - name of keyword or directory
* `template` - path to WAV file template
* `detect_seconds` - seconds after start of program when detection occurred
* `detect_timestamp` - timestamp when detection occurred (using `time.time()`)
* `raven`
* `probability` - detection probability
* `probability_threshold` - range of probabilities for detection
* `distance` - normalized dynamic time warping distance
* `distance_threshold` - distance threshold used for comparison
* `matches` - number of WAV templates that matched
* `match_seconds` - seconds taken for dynamic time warping calculations
* `tick` - monotonic counter incremented for each detection
## Testing
You can test how well Raven works on a set of sample WAV files:
```sh
$ PATH=$PWD/bin:$PATH test-raven.py --test-directory /path/to/samples/ --keyword /path/to/templates/
```
This will run up to 10 parallel instances of Raven (change with `--test-workers`) and output a JSON report with detection information and summary statistics like:
```json
{
"positive": [...],
"negative": [...],
"summary": {
"true_positives": 14,
"false_positives": 0,
"true_negatives": 40,
"false_negatives": 7,
"precision": 1.0,
"recall": 0.6666666666666666,
"f1_score": 0.8
  }
}
```
Any additional command-line arguments are passed to Raven (e.g., `--minimum-matches`).
## Command-Line Interface
```
usage: rhasspy-wake-raven [-h] [--keyword KEYWORD [KEYWORD ...]]
[--chunk-size CHUNK_SIZE]
[--record RECORD [RECORD ...]]
[--probability-threshold PROBABILITY_THRESHOLD]
[--distance-threshold DISTANCE_THRESHOLD]
[--minimum-matches MINIMUM_MATCHES]
[--refractory-seconds REFRACTORY_SECONDS]
[--print-all-matches]
[--window-shift-seconds WINDOW_SHIFT_SECONDS]
[--dtw-window-size DTW_WINDOW_SIZE]
[--vad-sensitivity {1,2,3}]
[--current-threshold CURRENT_THRESHOLD]
[--max-energy MAX_ENERGY]
[--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD]
[--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}]
[--average-templates] [--exit-count EXIT_COUNT]
[--read-entire-input]
[--max-chunks-in-queue MAX_CHUNKS_IN_QUEUE]
[--skip-probability-threshold SKIP_PROBABILITY_THRESHOLD]
[--failed-matches-to-refractory FAILED_MATCHES_TO_REFRACTORY]
[--debug]
optional arguments:
-h, --help show this help message and exit
--keyword KEYWORD [KEYWORD ...]
Directory with WAV templates and settings (setting-
name=value)
--chunk-size CHUNK_SIZE
Number of bytes to read at a time from standard in
(default: 1920)
--record RECORD [RECORD ...]
Record example templates to a directory, optionally
with given name format (e.g., 'my-
keyword-{n:02d}.wav')
--probability-threshold PROBABILITY_THRESHOLD
Probability above which detection occurs (default:
0.5)
--distance-threshold DISTANCE_THRESHOLD
Normalized dynamic time warping distance threshold for
template matching (default: 0.22)
--minimum-matches MINIMUM_MATCHES
Number of templates that must match to produce output
(default: 1)
--refractory-seconds REFRACTORY_SECONDS
Seconds before wake word can be activated again
(default: 2)
--print-all-matches Print JSON for all matching templates instead of just
the first one
--window-shift-seconds WINDOW_SHIFT_SECONDS
Seconds to shift sliding time window on audio buffer
(default: 0.02)
--dtw-window-size DTW_WINDOW_SIZE
Size of band around slanted diagonal during dynamic
time warping calculation (default: 5)
--vad-sensitivity {1,2,3}
Webrtcvad VAD sensitivity (1-3)
--current-threshold CURRENT_THRESHOLD
Debiased energy threshold of current audio frame
--max-energy MAX_ENERGY
Fixed maximum energy for ratio calculation (default:
observed)
--max-current-ratio-threshold MAX_CURRENT_RATIO_THRESHOLD
Threshold of ratio between max energy and current
audio frame
--silence-method {vad_only,ratio_only,current_only,vad_and_ratio,vad_and_current,all}
Method for detecting silence
--average-templates Average wakeword templates together to reduce number
of calculations
--exit-count EXIT_COUNT
Exit after some number of detections (default: never)
--read-entire-input Read entire audio input at start and exit after
processing
--max-chunks-in-queue MAX_CHUNKS_IN_QUEUE
Maximum number of audio chunks waiting for processing
before being dropped
--skip-probability-threshold SKIP_PROBABILITY_THRESHOLD
Skip additional template calculations if probability
is below this threshold
--failed-matches-to-refractory FAILED_MATCHES_TO_REFRACTORY
Number of failed template matches before entering
refractory period (default: disabled)
--debug Print DEBUG messages to the console
```
| /rhasspy-wake-raven-0.5.2.tar.gz/rhasspy-wake-raven-0.5.2/README.md | 0.540439 | 0.796292 | README.md | pypi |
import io
import wave
from rhasspysilence import WebRtcVadRecorder
# -----------------------------------------------------------------------------
def buffer_to_wav(
    buffer: bytes,
    sample_rate: int = 16000,
    sample_width: int = 2,
    num_channels: int = 1,
) -> bytes:
    """Wrap a buffer of raw PCM audio data in a WAV container.

    The defaults match the format used throughout this package
    (16-bit, 16Khz, mono), so existing single-argument callers are
    unaffected; the parameters allow other PCM formats to be wrapped.

    Args:
        buffer: raw PCM sample bytes
        sample_rate: samples per second (default: 16000)
        sample_width: bytes per sample (default: 2, i.e. 16-bit)
        num_channels: number of interleaved channels (default: 1, mono)

    Returns:
        Complete WAV file contents (header + frames) as bytes.
    """
    with io.BytesIO() as wav_buffer:
        wav_file: wave.Wave_write = wave.open(wav_buffer, mode="wb")
        with wav_file:
            wav_file.setframerate(sample_rate)
            wav_file.setsampwidth(sample_width)
            wav_file.setnchannels(num_channels)
            wav_file.writeframes(buffer)

        return wav_buffer.getvalue()
# -----------------------------------------------------------------------------
def trim_silence(
    audio_bytes: bytes,
    ratio_threshold: float = 20.0,
    chunk_size: int = 960,
    skip_first_chunk=True,
) -> bytes:
    """Trim silence from start and end of audio using ratio of max/current energy.

    Splits the audio into fixed-size chunks, computes a debiased energy per
    chunk, and keeps the span from the first "loud" chunk through the last,
    padded by one chunk on each side.  A chunk counts as loud when
    max_energy / energy < ratio_threshold.

    Any trailing partial chunk (< chunk_size bytes) is discarded.
    Raises AssertionError if no full chunk was processed.
    """
    first_chunk = False
    energies = []
    max_energy = None

    while len(audio_bytes) >= chunk_size:
        chunk = audio_bytes[:chunk_size]
        audio_bytes = audio_bytes[chunk_size:]

        if skip_first_chunk and (not first_chunk):
            # Drop the very first chunk (often contains recording artifacts)
            first_chunk = True
            continue

        # Clamp to 1 to avoid division by zero in the ratio below
        energy = max(1, WebRtcVadRecorder.get_debiased_energy(chunk))
        energies.append((energy, chunk))

        if (max_energy is None) or (energy > max_energy):
            max_energy = energy

    # Determine chunks below threshold
    assert max_energy is not None, "No maximum energy"
    start_index = None
    end_index = None

    for i, (energy, chunk) in enumerate(energies):
        ratio = max_energy / energy
        if ratio < ratio_threshold:
            # Loud chunk: restart the end marker and note the first loud index
            end_index = None
            if start_index is None:
                start_index = i
        elif end_index is None:
            # First quiet chunk after the (current) loud region
            end_index = i

    if start_index is None:
        # Nothing loud found: keep everything
        start_index = 0

    if end_index is None:
        # Audio ended while still loud: keep through the last chunk
        end_index = len(energies) - 1

    # Pad one chunk on each side so the keyword isn't clipped
    start_index = max(0, start_index - 1)
    end_index = min(len(energies) - 1, end_index + 1)

    keep_bytes = bytes()
    for _, chunk in energies[start_index : end_index + 1]:
        keep_bytes += chunk

    return keep_bytes
import math
import typing
import numpy as np
import scipy.spatial.distance
class DynamicTimeWarping:
    """Computes DTW and holds results.

    Uses cosine distance and sakoe-chiba constraint by default.

    The last computed cost matrix (if requested with keep_matrix=True) and
    distance are kept on the instance so that compute_path() can recover the
    actual alignment afterwards.
    """

    def __init__(self, distance_func: str = "cosine"):
        # Accumulated-cost matrix from the last computation
        # (only populated when keep_matrix=True was passed)
        self.cost_matrix: typing.Optional[np.ndarray] = None
        # Non-normalized DTW distance from the last computation
        self.distance: typing.Optional[float] = None
        # Metric name passed straight to scipy.spatial.distance.cdist
        self.distance_func = distance_func or "cosine"

    def compute_cost(
        self,
        x: np.ndarray,
        y: np.ndarray,
        window: typing.Optional[int] = None,
        **cost_args
    ) -> np.ndarray:
        """Compute non-normalized distance between x and y with an optional window.

        With window=None an unconstrained DTW is computed; otherwise a
        Sakoe-Chiba band of the given size restricts the warping path.
        Extra keyword args (e.g. keep_matrix, step_pattern) are forwarded.
        """
        if window is None:
            return self._compute_optimal_path(x, y, **cost_args)

        return self._compute_optimal_path_with_window(x, y, window, **cost_args)

    def compute_path(self) -> typing.Optional[typing.List[typing.Tuple[int, int]]]:
        """Get actual path if cost matrix is available.

        Backtracks from the bottom-right corner of the cost matrix to (0, 0),
        always stepping to the neighbor with minimum accumulated cost.
        Returns the path in forward order, or None if no cost matrix was kept.
        """
        if self.cost_matrix is None:
            return None

        m, n = self.cost_matrix.shape
        row = m - 1
        col = n - 1
        path = [(row, col)]
        # Tolerance for float comparison when picking the minimum neighbor
        eps = 1e-14

        while (row > 0) or (col > 0):
            if (row > 0) and (col > 0):
                min_cost = min(
                    self.cost_matrix[row - 1][col],  # insertion
                    self.cost_matrix[row][col - 1],  # deletion
                    self.cost_matrix[row - 1][col - 1],  # match
                )

                # Prefer the diagonal (match) step on ties
                if math.isclose(
                    min_cost, self.cost_matrix[row - 1][col - 1], rel_tol=eps
                ):
                    row = row - 1
                    col = col - 1
                elif math.isclose(
                    min_cost, self.cost_matrix[row - 1][col], rel_tol=eps
                ):
                    row = row - 1
                elif math.isclose(
                    min_cost, self.cost_matrix[row][col - 1], rel_tol=eps
                ):
                    col = col - 1
            elif (row > 0) and (col == 0):
                # Left edge: can only move up
                row = row - 1
            elif (row == 0) and (col > 0):
                # Top edge: can only move left
                col = col - 1

            path.append((row, col))

        return list(reversed(path))

    # -------------------------------------------------------------------------

    def _compute_optimal_path(
        self, x: np.ndarray, y: np.ndarray, keep_matrix=False
    ) -> float:
        """Compute the unconstrained DTW distance between x and y.

        Stores the cost matrix on the instance when keep_matrix is True.
        Returns the accumulated (non-normalized) distance.
        """
        m = len(x)
        n = len(y)

        # Need 2-D arrays for distance calculation
        if len(x.shape) == 1:
            x = x.reshape(-1, 1)

        if len(y.shape) == 1:
            y = y.reshape(-1, 1)

        # Pairwise distances between all frames of x and y
        distance_matrix = scipy.spatial.distance.cdist(x, y, metric=self.distance_func)
        cost_matrix = np.full(shape=(m, n), fill_value=math.inf, dtype=float)
        cost_matrix[0][0] = distance_matrix[0][0]

        # First column: only vertical moves possible
        for row in range(1, m):
            cost = distance_matrix[row, 0]
            cost_matrix[row][0] = cost + cost_matrix[row - 1][0]

        # First row: only horizontal moves possible
        for col in range(1, n):
            cost = distance_matrix[0, col]
            cost_matrix[0][col] = cost + cost_matrix[0][col - 1]

        for row in range(1, m):
            for col in range(1, n):
                cost = distance_matrix[row, col]
                cost_matrix[row][col] = cost + min(
                    cost_matrix[row - 1][col],  # insertion
                    cost_matrix[row][col - 1],  # deletion
                    cost_matrix[row - 1][col - 1],  # match
                )

        if keep_matrix:
            self.cost_matrix = cost_matrix

        distance = cost_matrix[m - 1][n - 1]
        self.distance = distance

        return distance

    def _compute_optimal_path_with_window(
        self,
        x: np.ndarray,
        y: np.ndarray,
        window: int,
        step_pattern: float = 1,
        keep_matrix=False,
    ) -> float:
        """Compute the DTW distance between x and y within a Sakoe-Chiba band.

        Only cells within `window` of the (slanted) diagonal are filled in,
        reducing computation.  `step_pattern` multiplies the cost of the
        diagonal (match) step.  Stores the cost matrix (without the padding
        row/column) when keep_matrix is True.
        """
        n = len(x)
        m = len(y)

        # Avoid case where endpoint lies outside band
        window = max(window, abs(m - n))

        # Need 2-D arrays for distance calculation
        if len(x.shape) == 1:
            x = x.reshape(-1, 1)

        if len(y.shape) == 1:
            y = y.reshape(-1, 1)

        # Pre-compute distance between all pairs
        distance_matrix = scipy.spatial.distance.cdist(x, y, metric=self.distance_func)

        # Extra padding row/column holds the 0 starting cost at [0][0]
        cost_matrix = np.full(shape=(n + 1, m + 1), fill_value=math.inf, dtype=float)
        cost_matrix[0][0] = 0

        for row in range(1, n + 1):
            # Restrict columns to the band around the diagonal
            col_start = max(1, row - window)
            col_end = min(m, row + window)
            for col in range(col_start, col_end + 1):
                cost = distance_matrix[row - 1, col - 1]

                # symmetric step pattern
                cost_matrix[row][col] = min(
                    (step_pattern * cost) + cost_matrix[row - 1][col - 1],
                    cost + cost_matrix[row - 1][col],
                    cost + cost_matrix[row][col - 1],
                )

        if keep_matrix:
            # Strip the padding row/column so indices match frame indices
            self.cost_matrix = cost_matrix[1:, 1:]

        distance = cost_matrix[n][m]
        self.distance = distance

        return distance
import logging
import math
import time
import typing
from dataclasses import dataclass
import numpy as np
import python_speech_features
import scipy.io.wavfile
from rhasspysilence import WebRtcVadRecorder
from .dtw import DynamicTimeWarping
_LOGGER = logging.getLogger("rhasspy-wake-raven")
# -----------------------------------------------------------------------------
@dataclass
class Template:
    """Wakeword template.

    Holds the MFCC features extracted from one (or an average of several)
    pre-trimmed WAV recordings of the wake word.
    """

    # Length of the source audio in seconds
    duration_sec: float
    # MFCC feature matrix, one row per audio frame
    mfcc: np.ndarray
    # Human-readable identifier (e.g., source WAV path)
    name: str = ""

    @staticmethod
    def average_templates(
        templates: "typing.List[Template]", name: str = ""
    ) -> "Template":
        """Averages multiple templates piecewise into a single template.

        Each template is DTW-aligned to the longest one, and aligned frame
        features are averaged.  Credit to:
        https://github.com/mathquis/node-personal-wakeword
        """
        assert templates, "No templates"

        if len(templates) == 1:
            # Only one template
            return templates[0]

        # Use longest template as base
        templates = sorted(templates, key=lambda t: len(t.mfcc), reverse=True)
        base_template = templates[0]

        name = name or base_template.name
        base_mfcc: np.ndarray = base_template.mfcc
        rows, cols = base_mfcc.shape

        # averages[row][col] collects all feature values aligned to base
        # frame `row`, feature `col` (seeded with the base's own values)
        averages = [
            [[base_mfcc[row][col]] for col in range(cols)] for row in range(rows)
        ]
        avg_dtw = DynamicTimeWarping()

        # Collect features
        for template in templates[1:]:
            # keep_matrix=True so the alignment path can be recovered
            avg_dtw.compute_cost(template.mfcc, base_mfcc, keep_matrix=True)
            path = avg_dtw.compute_path()
            assert path is not None, "Failed to get DTW path"

            # Map each of this template's frames onto its aligned base frame
            for row, col in path:
                for i, feature in enumerate(template.mfcc[row]):
                    averages[col][i].append(feature)

        # Average features
        avg_mfcc = np.array(
            [
                [np.mean(averages[row][col]) for col in range(cols)]
                for row in range(rows)
            ]
        )

        assert avg_mfcc.shape == base_mfcc.shape, "Wrong MFCC shape"

        return Template(
            duration_sec=base_template.duration_sec, mfcc=avg_mfcc, name=name
        )
# -----------------------------------------------------------------------------
class Raven:
"""
Wakeword detector based on Snips Personal Wake Word Detector.
https://medium.com/snips-ai/machine-learning-on-voice-a-gentle-introduction-with-snips-personal-wake-word-detector-133bd6fb568e
Attributes
----------
templates: List[Template]
Wake word templates created from pre-trimmed WAV files
probability_threshold: float = 0.5
Probability above which which detection occurs
minimum_matches: int = 0
Minimum number of templates that must match for detection.
Use 0 for all templates.
distance_threshold: float = 0.22
Cosine distance reference for probability calculation
template_dtw: Optional[DynamicTimeWarping] = None
DTW calculator (None for default)
dtw_window_size: int = 5
Size of Sakoe-Chiba window in DTW calculation
dtw_step_pattern: float = 2
Replacement cost multipler in DTW calculation
shift_sec: float = DEFAULT_SHIFT_SECONDS
Seconds to shift overlapping window by
refractory_sec: float = 2
Seconds after detection that new detection cannot occur
skip_probability_threshold: float = 0.0
Skip additional template calculations if probability is below this threshold
failed_matches_to_refractory: Optional[int] = None
Number of failed template matches before entering refractory period.
Used to avoid high CPU usage and lag on low end systems.
recorder: Optional[WebRtcVadRecorder] = None
Silence detector (None for default settings).
MFCC/DTW calculations are only done when a non-silent chunk of audio is
detected. Calculations cease if at least N silence chunks are detected
afterwards where N is half the number of chunks needed to span the
average template duration. No calculations are done during refractory
period.
debug: bool = False
If True, template probability calculations are logged
"""
DEFAULT_SHIFT_SECONDS = 0.02
    def __init__(
        self,
        templates: typing.List[Template],
        keyword_name: str = "",
        probability_threshold: float = 0.5,
        minimum_matches: int = 0,
        distance_threshold: float = 0.22,
        template_dtw: typing.Optional[DynamicTimeWarping] = None,
        dtw_window_size: int = 5,
        dtw_step_pattern: float = 2,
        shift_sec: float = DEFAULT_SHIFT_SECONDS,
        refractory_sec: float = 2.0,
        skip_probability_threshold: float = 0.0,
        failed_matches_to_refractory: typing.Optional[int] = None,
        recorder: typing.Optional[WebRtcVadRecorder] = None,
        debug: bool = False,
    ):
        """Initialize detector state from templates and match settings.

        See the class docstring for the meaning of each parameter.
        Raises AssertionError if templates is empty.
        """
        self.templates = templates
        assert self.templates, "No templates"

        self.keyword_name = keyword_name

        # Use or create silence detector
        self.recorder = recorder or WebRtcVadRecorder()
        self.vad_chunk_bytes = self.recorder.chunk_size
        self.sample_rate = self.recorder.sample_rate

        # Assume 16-bit samples
        self.sample_width = 2
        self.bytes_per_second = int(self.sample_rate * self.sample_width)

        # Match settings
        self.probability_threshold = probability_threshold
        self.minimum_matches = minimum_matches
        self.distance_threshold = distance_threshold
        self.skip_probability_threshold = skip_probability_threshold
        self.refractory_sec = refractory_sec
        self.failed_matches_to_refractory = failed_matches_to_refractory

        # Dynamic time warping calculation
        self.dtw = template_dtw or DynamicTimeWarping()
        self.dtw_window_size = dtw_window_size
        self.dtw_step_pattern = dtw_step_pattern

        # Average duration of templates
        template_duration_sec = sum([t.duration_sec for t in templates]) / len(
            templates
        )

        # Seconds to shift template window by during processing
        self.template_shift_sec = shift_sec
        # Number of window positions evaluated per template-sized buffer
        self.shifts_per_template = (
            int(math.floor(template_duration_sec / shift_sec)) - 1
        )

        # Bytes needed for a template
        self.template_chunk_bytes = int(
            math.ceil(template_duration_sec * self.bytes_per_second)
        )

        # Ensure divisible by sample width
        while (self.template_chunk_bytes % self.sample_width) != 0:
            self.template_chunk_bytes += 1

        # Audio buffers (raw bytes accumulated between computations)
        self.vad_audio_buffer = bytes()
        self.template_audio_buffer = bytes()
        self.example_audio_buffer = bytes()
        self.template_mfcc: typing.Optional[np.ndarray] = None
        self.template_chunks_left = 0
        # Half the number of VAD chunks spanning an average template
        self.num_template_chunks = int(
            math.ceil((self.template_chunk_bytes / self.vad_chunk_bytes) / 2)
        )

        # State machine: number of VAD chunks to skip after a detection
        self.num_refractory_chunks = int(
            math.ceil(
                self.sample_rate
                * self.sample_width
                * (refractory_sec / self.vad_chunk_bytes)
            )
        )
        self.refractory_chunks_left = 0
        self.failed_matches = 0
        self.match_seconds: typing.Optional[float] = None

        # If True, log DTW predictions
        self.debug = debug

        # Keep previously-computed distances and probabilities for debugging
        self.last_distances: typing.List[typing.Optional[float]] = [
            None for _ in self.templates
        ]
        self.last_probabilities: typing.List[typing.Optional[float]] = [
            None for _ in self.templates
        ]
def process_chunk(self, chunk: bytes, keep_audio: bool = False) -> typing.List[int]:
    """Process a single chunk of raw audio data.

    Parameters
    ----------
    chunk: bytes
        Raw audio chunk
    keep_audio: bool
        If True, retain audio leading up to a detection

    Returns
    -------
    List of matching template indexes
    """
    self.vad_audio_buffer += chunk
    # Break audio into VAD-sized chunks (typically 30 ms)
    num_vad_chunks = int(
        math.floor(len(self.vad_audio_buffer) / self.vad_chunk_bytes)
    )
    if num_vad_chunks > 0:
        for i in range(num_vad_chunks):
            # Process single VAD-sized chunk
            matching_indexes = self._process_vad_chunk(i, keep_audio=keep_audio)
            if matching_indexes:
                # Detection - reset and return immediately.
                # Any remaining audio in the buffer is discarded.
                self.vad_audio_buffer = bytes()
                return matching_indexes
    # Remove processed audio
    self.vad_audio_buffer = self.vad_audio_buffer[
        (num_vad_chunks * self.vad_chunk_bytes) :
    ]
    # No detection
    return []
def _process_vad_chunk(
    self, chunk_index: int, keep_audio: bool = False
) -> typing.List[int]:
    """Process the ith VAD-sized chunk of raw audio data from vad_audio_buffer.

    Parameters
    ----------
    chunk_index: int
        ith VAD-sized chunk in vad_audio_buffer
    keep_audio: bool
        If True, accumulate speech audio in example_audio_buffer

    Returns
    -------
    List of matching template indexes
    """
    matching_indexes: typing.List[int] = []
    if self.refractory_chunks_left > 0:
        self.refractory_chunks_left -= 1
        if self.refractory_chunks_left <= 0:
            _LOGGER.debug("Exiting refractory period")
            if keep_audio:
                self.example_audio_buffer = bytes()
        # In refractory period after wake word was detected.
        # Ignore any incoming audio.
        return matching_indexes
    # Test chunk for silence/speech
    chunk_start = chunk_index * self.vad_chunk_bytes
    chunk = self.vad_audio_buffer[chunk_start : chunk_start + self.vad_chunk_bytes]
    is_silence = self.recorder.is_silence(chunk)
    if is_silence:
        # Decrement audio chunks left to process before ignoring audio
        self.template_chunks_left = max(0, self.template_chunks_left - 1)
    else:
        # Reset count of audio chunks to process
        self.template_chunks_left = self.num_template_chunks
    if self.template_chunks_left <= 0:
        # No speech recently, so reset and ignore chunk.
        self._reset_state()
        if keep_audio:
            self.example_audio_buffer = bytes()
        return matching_indexes
    self.template_audio_buffer += chunk
    if keep_audio:
        self.example_audio_buffer += chunk
    # Process audio if there's enough for at least one template
    while len(self.template_audio_buffer) >= self.template_chunk_bytes:
        # Compute MFCC features for entire audio buffer (one or more templates)
        buffer_chunk = self.template_audio_buffer[: self.template_chunk_bytes]
        self.template_audio_buffer = self.template_audio_buffer[
            self.template_chunk_bytes :
        ]
        # 16-bit signed PCM (see sample_width assumption in __init__)
        buffer_array = np.frombuffer(buffer_chunk, dtype=np.int16)
        mfcc_start_time = time.perf_counter()
        buffer_mfcc = python_speech_features.mfcc(
            buffer_array, winstep=self.template_shift_sec
        )
        if self.template_mfcc is None:
            # Brand new matrix
            self.template_mfcc = buffer_mfcc
        else:
            # Add to existing MFCC matrix
            self.template_mfcc = np.vstack((self.template_mfcc, buffer_mfcc))
        if self.debug:
            mfcc_end_time = time.perf_counter()
            _LOGGER.debug(
                "MFCC for %s byte(s) in %s seconds",
                len(buffer_chunk),
                mfcc_end_time - mfcc_start_time,
            )
    # Index of the last window start that still leaves a full
    # template's worth of MFCC rows; negative means not enough rows yet.
    last_row = (
        -1
        if (self.template_mfcc is None)
        else (len(self.template_mfcc) - self.shifts_per_template)
    )
    if last_row >= 0:
        assert self.template_mfcc is not None
        # Slide a template-sized window over the accumulated MFCC rows
        for row in range(last_row + 1):
            match_start_time = time.perf_counter()
            window_mfcc = self.template_mfcc[row : row + self.shifts_per_template]
            matching_indexes = self._process_window(window_mfcc)
            if matching_indexes:
                # Clear buffers to avoid multiple detections and entire refractory period
                self._reset_state()
                self._begin_refractory()
                # Record time for debugging
                self.match_seconds = time.perf_counter() - match_start_time
                return matching_indexes
            # Check for failure state
            self.failed_matches += 1
            if (self.failed_matches_to_refractory is not None) and (
                self.failed_matches >= self.failed_matches_to_refractory
            ):
                # Enter refractory period after too many failed template matches in a row
                self._reset_state()
                self._begin_refractory()
                return matching_indexes
        # Drop MFCC rows whose windows have already been evaluated
        self.template_mfcc = self.template_mfcc[last_row + 1 :]
    # No detections
    return matching_indexes
def _process_window(self, window_mfcc: np.ndarray) -> typing.List[int]:
    """Process a single template-sized window of MFCC features.

    Compares the window against every template via DTW and converts the
    normalized distance into a detection probability.

    Returns
    -------
    List of matching template indexes
    """
    matching_indexes: typing.List[int] = []
    for i, template in enumerate(self.templates):
        # Compute optimal distance with a window
        dtw_start_time = time.perf_counter()
        distance = self.dtw.compute_cost(
            template.mfcc,
            window_mfcc,
            self.dtw_window_size,
            step_pattern=self.dtw_step_pattern,
        )
        # Normalize by sum of temporal dimensions
        normalized_distance = distance / (len(window_mfcc) + len(template.mfcc))
        # Compute detection probability
        probability = self.distance_to_probability(normalized_distance)
        if self.debug:
            dtw_end_time = time.perf_counter()
            _LOGGER.debug(
                "%s %s: prob=%s, norm_dist=%s, dist=%s, dtw_time=%s, template_time=%s",
                self.keyword_name,
                i,
                probability,
                normalized_distance,
                distance,
                dtw_end_time - dtw_start_time,
                template.duration_sec,
            )
        # Keep calculation results for debugging
        self.last_distances[i] = normalized_distance
        self.last_probabilities[i] = probability
        if probability >= self.probability_threshold:
            # Detection occurred
            matching_indexes.append(i)
            if (self.minimum_matches > 0) and (
                len(matching_indexes) >= self.minimum_matches
            ):
                # Return immediately once minimum matches are satisfied
                return matching_indexes
        elif probability < self.skip_probability_threshold:
            # Skip other templates if below threshold
            return matching_indexes
    return matching_indexes
def _reset_state(self):
"""Reset VAD state machine."""
self.template_audio_buffer = bytes()
self.template_mfcc = None
self.failed_matches = 0
def _begin_refractory(self):
    """Start the refractory window during which incoming audio is ignored."""
    _LOGGER.debug("Enter refractory for %s second(s)", self.refractory_sec)
    # Countdown is decremented once per VAD-sized chunk in _process_vad_chunk
    self.refractory_chunks_left = self.num_refractory_chunks
# -------------------------------------------------------------------------
def distance_to_probability(self, normalized_distance: float) -> float:
    """Map a normalized DTW distance onto a (0, 1) detection probability.

    Logistic curve centered on the distance threshold: a distance equal
    to the threshold yields 0.5; smaller distances approach 1.
    """
    scaled = (
        normalized_distance - self.distance_threshold
    ) / self.distance_threshold
    return 1.0 / (1.0 + math.exp(scaled))
@staticmethod
def wav_to_template(
    wav_file, name: str = "", shift_sec: float = DEFAULT_SHIFT_SECONDS
) -> Template:
    """Convert pre-trimmed WAV file to wakeword template.

    Parameters
    ----------
    wav_file
        Path or file-like object readable by scipy.io.wavfile.read
    name: str
        Template name
    shift_sec: float
        MFCC window step in seconds

    Returns
    -------
    Template with the file's duration and MFCC matrix
    """
    sample_rate, wav_data = scipy.io.wavfile.read(wav_file)
    # Duration in seconds: number of samples divided by sample rate
    duration_sec = len(wav_data) / sample_rate
    wav_mfcc = python_speech_features.mfcc(wav_data, sample_rate, winstep=shift_sec)
    return Template(name=name, duration_sec=duration_sec, mfcc=wav_mfcc)
import asyncio
import logging
import queue
import socket
import threading
import typing
from dataclasses import dataclass
from pathlib import Path
from rhasspyhermes.audioserver import AudioFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.wake import (
GetHotwords,
Hotword,
HotwordDetected,
HotwordError,
Hotwords,
HotwordToggleOff,
HotwordToggleOn,
HotwordToggleReason,
)
# Size in bytes of a standard RIFF/WAV header; UDP audio datagrams are
# received with this extra allowance on top of the raw chunk size.
WAV_HEADER_BYTES = 44

_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
@dataclass
class SnowboyModel:
    """Settings for a single snowboy model"""

    # Path to the .umdl/.pmdl model file
    model_path: Path
    # Comma-separated sensitivity value(s), e.g. "0.5" or "0.5,0.5"
    sensitivity: str = "0.5"
    audio_gain: float = 1.0
    apply_frontend: bool = False

    def float_sensitivity(self) -> float:
        """Get float of first sensitivity value."""
        # Only the portion before the first comma is used.
        first_value, _, _ = self.sensitivity.partition(",")
        return float(first_value)
# -----------------------------------------------------------------------------
class WakeHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy wakeword with snowboy."""

    def __init__(
        self,
        client,
        models: typing.List[SnowboyModel],
        wakeword_ids: typing.List[str],
        model_dirs: typing.Optional[typing.List[Path]] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        chunk_size: int = 960,
        udp_audio: typing.Optional[typing.List[typing.Tuple[str, int, str]]] = None,
        udp_chunk_size: int = 2048,
        lang: typing.Optional[str] = None,
    ):
        """Initialize MQTT subscriptions, audio settings and worker threads."""
        super().__init__(
            "rhasspywake_snowboy_hermes",
            client,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
            site_ids=site_ids,
        )
        self.subscribe(AudioFrame, HotwordToggleOn, HotwordToggleOff, GetHotwords)
        self.models = models
        self.wakeword_ids = wakeword_ids
        self.model_dirs = model_dirs or []
        self.enabled = enabled
        # Reasons detection is currently disabled; must be empty to re-enable
        self.disabled_reasons: typing.Set[str] = set()
        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels
        self.chunk_size = chunk_size
        # Queue of WAV audio chunks to process (plus site_id)
        self.wav_queue: queue.Queue = queue.Queue()
        self.first_audio: bool = True
        # Raw audio waiting until a full detector-sized chunk is available
        self.audio_buffer = bytes()
        self.lang = lang
        # Detectors are loaded lazily in the detection thread
        # (see load_detectors / detection_thread_proc)
        self.detectors: typing.List[typing.Any] = []
        self.model_ids: typing.List[str] = []
        # Start threads
        threading.Thread(target=self.detection_thread_proc, daemon=True).start()
        # Listen for raw audio on UDP too
        self.udp_chunk_size = udp_chunk_size
        if udp_audio:
            for udp_host, udp_port, udp_site_id in udp_audio:
                threading.Thread(
                    target=self.udp_thread_proc,
                    args=(udp_host, udp_port, udp_site_id),
                    daemon=True,
                ).start()
# -------------------------------------------------------------------------
def load_detectors(self):
    """Load snowboy detectors from models"""
    # Imported lazily so this module can be imported without snowboy installed
    from snowboy import snowboydecoder, snowboydetect

    self.model_ids = []
    self.detectors = []
    for model in self.models:
        assert model.model_path.is_file(), f"Missing {model.model_path}"
        _LOGGER.debug("Loading snowboy model: %s", model)
        detector = snowboydetect.SnowboyDetect(
            snowboydecoder.RESOURCE_FILE.encode(), str(model.model_path).encode()
        )
        # Sensitivity may contain multiple comma-separated values
        detector.SetSensitivity(model.sensitivity.encode())
        detector.SetAudioGain(model.audio_gain)
        detector.ApplyFrontend(model.apply_frontend)
        self.detectors.append(detector)
        # Model id is the file name without its extension
        self.model_ids.append(model.model_path.stem)
# -------------------------------------------------------------------------
async def handle_audio_frame(self, wav_bytes: bytes, site_id: str = "default"):
    """Process a single audio frame"""
    # Just enqueue for the detection thread; no decoding happens here.
    self.wav_queue.put((wav_bytes, site_id))
async def handle_detection(
    self, model_index: int, wakeword_id: str, site_id: str = "default"
) -> typing.AsyncIterable[
    typing.Union[typing.Tuple[HotwordDetected, TopicArgs], HotwordError]
]:
    """Handle a successful hotword detection.

    Yields a HotwordDetected message (plus topic args) on success,
    or a HotwordError if the model index cannot be resolved.
    """
    try:
        assert len(self.model_ids) > model_index, f"Missing {model_index} in models"
        # Fall back to 0.5 if the models list is shorter than expected
        sensitivity = 0.5
        if model_index < len(self.models):
            sensitivity = self.models[model_index].float_sensitivity()
        yield (
            HotwordDetected(
                site_id=site_id,
                model_id=self.model_ids[model_index],
                current_sensitivity=sensitivity,
                model_version="",
                model_type="personal",
                lang=self.lang,
            ),
            {"wakeword_id": wakeword_id},
        )
    except Exception as e:
        _LOGGER.exception("handle_detection")
        yield HotwordError(error=str(e), context=str(model_index), site_id=site_id)
async def handle_get_hotwords(
    self, get_hotwords: GetHotwords
) -> typing.AsyncIterable[typing.Union[Hotwords, HotwordError]]:
    """Report available hotwords"""
    try:
        if self.model_dirs:
            # Add all models from model dirs
            model_paths = []
            for model_dir in self.model_dirs:
                if not model_dir.is_dir():
                    _LOGGER.warning("Model directory missing: %s", str(model_dir))
                    continue
                # Only snowboy model files (.umdl universal, .pmdl personal)
                for model_file in model_dir.iterdir():
                    if model_file.is_file() and (
                        model_file.suffix in [".umdl", ".pmdl"]
                    ):
                        model_paths.append(model_file)
        else:
            # Add current model(s) only
            model_paths = [Path(model.model_path) for model in self.models]
        hotword_models: typing.List[Hotword] = []
        for model_path in model_paths:
            # Model words are underscore-separated in the file name
            model_words = " ".join(model_path.with_suffix("").name.split("_"))
            model_type = "universal" if model_path.suffix == ".umdl" else "personal"
            hotword_models.append(
                Hotword(
                    model_id=model_path.name,
                    model_words=model_words,
                    model_type=model_type,
                )
            )
        yield Hotwords(
            models=hotword_models, id=get_hotwords.id, site_id=get_hotwords.site_id
        )
    except Exception as e:
        _LOGGER.exception("handle_get_hotwords")
        yield HotwordError(
            error=str(e), context=str(get_hotwords), site_id=get_hotwords.site_id
        )
def detection_thread_proc(self):
    """Handle WAV audio chunks."""
    try:
        while True:
            # Blocks until an audio frame arrives (see handle_audio_frame)
            wav_bytes, site_id = self.wav_queue.get()
            # Lazy-load snowboy detectors on the first frame
            if not self.detectors:
                self.load_detectors()
            # Extract/convert audio data
            audio_data = self.maybe_convert_wav(wav_bytes)
            # Add to persistent buffer
            self.audio_buffer += audio_data
            # Process in chunks.
            # Any remaining audio data will be kept in buffer.
            while len(self.audio_buffer) >= self.chunk_size:
                chunk = self.audio_buffer[: self.chunk_size]
                self.audio_buffer = self.audio_buffer[self.chunk_size :]
                for detector_index, detector in enumerate(self.detectors):
                    # Return is:
                    # -2 silence
                    # -1 error
                    # 0 voice
                    # n index n-1
                    result_index = detector.RunDetection(chunk)
                    if result_index > 0:
                        # Detection
                        if detector_index < len(self.wakeword_ids):
                            wakeword_id = self.wakeword_ids[detector_index]
                        else:
                            wakeword_id = ""
                        if not wakeword_id:
                            if detector_index < len(self.models):
                                # Use file name
                                wakeword_id = self.models[
                                    detector_index
                                ].model_path.stem
                            else:
                                # Fall back to default
                                wakeword_id = "default"
                        _LOGGER.debug(
                            "Wake word detected: %s (site_id=%s)",
                            wakeword_id,
                            site_id,
                        )
                        # Publish from the MQTT event loop (we are in a thread)
                        asyncio.run_coroutine_threadsafe(
                            self.publish_all(
                                self.handle_detection(
                                    detector_index, wakeword_id, site_id=site_id
                                )
                            ),
                            self.loop,
                        )
    except Exception:
        _LOGGER.exception("detection_thread_proc")
# -------------------------------------------------------------------------
def udp_thread_proc(self, host: str, port: int, site_id: str):
    """Handle WAV chunks from UDP socket."""
    try:
        udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        udp_socket.bind((host, port))
        _LOGGER.debug("Listening for audio on UDP %s:%s", host, port)
        while True:
            # Each datagram holds one WAV chunk (header + payload)
            wav_bytes, _ = udp_socket.recvfrom(
                self.udp_chunk_size + WAV_HEADER_BYTES
            )
            # Drop audio while detection is disabled
            if self.enabled:
                self.wav_queue.put((wav_bytes, site_id))
    except Exception:
        _LOGGER.exception("udp_thread_proc")
# -------------------------------------------------------------------------
async def on_message_blocking(
    self,
    message: Message,
    site_id: typing.Optional[str] = None,
    session_id: typing.Optional[str] = None,
    topic: typing.Optional[str] = None,
) -> GeneratorType:
    """Received message from MQTT broker.

    Handles enable/disable toggles, incoming audio frames and hotword
    listing requests; yields any response messages to be published.
    (Fix: removed stray non-Python artifact text fused onto the final
    statement, which made the original line invalid.)
    """
    # Check enable/disable messages
    if isinstance(message, HotwordToggleOn):
        if message.reason == HotwordToggleReason.UNKNOWN:
            # Always enable on unknown
            self.disabled_reasons.clear()
        else:
            self.disabled_reasons.discard(message.reason)
        # Re-enable only once no disable reasons remain
        if self.disabled_reasons:
            _LOGGER.debug("Still disabled: %s", self.disabled_reasons)
        else:
            self.enabled = True
            self.first_audio = True
            _LOGGER.debug("Enabled")
    elif isinstance(message, HotwordToggleOff):
        self.enabled = False
        self.disabled_reasons.add(message.reason)
        _LOGGER.debug("Disabled")
    elif isinstance(message, AudioFrame):
        if self.enabled:
            assert site_id, "Missing site_id"
            await self.handle_audio_frame(message.wav_bytes, site_id=site_id)
    elif isinstance(message, GetHotwords):
        async for hotword_result in self.handle_get_hotwords(message):
            yield hotword_result
    else:
        _LOGGER.warning("Unexpected message: %s", message)
import os
import numpy as np
class PostProcessOpt():
    """
    The PostProcessOpt class provides methods to retrieve the fitness and
    population for the generations provided during the optimization.

    Parameters
    ----------
    case : str
        Name of the case to be evaluated.
    eval_type : str
        The type of evaluation performed. Equals 'DET' for deterministic
        optimization and 'ROB' for robust optimization.
    """

    def __init__(self, case, eval_type):
        # Results live in RESULTS/<case>/<eval_type>, relative to the
        # package root (one level above this file).
        path_file = os.path.dirname(os.path.abspath(__file__))
        path_start = os.path.abspath(os.path.join(path_file, os.pardir))
        path = os.path.join(path_start,
                            'RESULTS',
                            case
                            )
        # Population size (design samples per generation)
        self.n_pop = 0
        # Cached lines of the population (x) and fitness (y) files
        self.x_lines = []
        self.y_lines = []
        self.result_path = os.path.join(path,
                                        eval_type,
                                        )
        # File paths are filled in by get_fitness_population
        self.fitness_file = ''
        self.population_file = ''
        # Number of generations found in the fitness file
        self.n_gen = 0
def determine_pop_gen(self):
"""
Determines the number of design samples in the population
and the number of generations performed.
"""
self.n_gen = 0
with open(self.fitness_file, 'r') as file:
self.y_lines = file.readlines()
for string in self.y_lines:
if string == '-\n':
# count the number of generations
self.n_gen += 1
# determine the population size
self.n_pop = int(len(self.y_lines) / self.n_gen - 1)
def get_fitness_values(self, gen):
    """
    Returns the fitness values for the population
    generated in the specified generation.

    Parameters
    ----------
    gen : int
        The generation of interest. With the default gen=0 used by
        get_fitness_population, the negative slice indices below select
        the last generation in the file.

    Returns
    -------
    fit_val : ndarray
        The fitness values for the population of interest.
    """
    # One row per objective, one column per design sample.
    fit_val = np.zeros((len(self.y_lines[0].split(",")), self.n_pop))
    # Each generation occupies n_pop fitness lines plus one '-' separator,
    # so generation `gen` starts at line (gen - 1) * (n_pop + 1).
    for index, line in enumerate(
            self.y_lines[(gen - 1) * self.n_pop + gen - 1:
                         gen * self.n_pop - 1 + gen]):
        # add fitness values for the desired generation
        fit_val[:, index] = [float(i) for i in line.split(",")]
    return fit_val
def get_population_values(self, gen):
    """
    Returns the population (design sample values)
    generated in the specified generation.

    Parameters
    ----------
    gen : int
        The generation of interest. With gen=0 the negative slice
        indices select the last generation in the file.

    Returns
    -------
    pop_val : ndarray
        The population of interest.
    """
    with open(self.population_file, 'r') as file:
        self.x_lines = file.readlines()
    # One row per design variable, one column per design sample.
    pop_val = np.zeros((len(self.x_lines[0].split(",")), self.n_pop))
    # Same slicing scheme as get_fitness_values: each generation block
    # consists of n_pop lines plus one '-' separator line.
    for index, line in enumerate(
            self.x_lines[(gen - 1) * self.n_pop + gen - 1:
                         gen * self.n_pop - 1 + gen]):
        # add population values for the desired generation
        pop_val[:, index] = [float(i) for i in line.split(",")]
    return pop_val
def sorted_result_file(self, y_val, x_val):
"""
Generates the files that include the sorted population
and fitness files. The population and fitness are sorted
based on the first objective.
Parameters
----------
y_val : ndarray
The fitness values.
x_val : ndarray
The set of design samples.
"""
# write sorted fitness values in corresponding file
with open(self.fitness_file[:-4] + "_final_sorted.csv", 'w') as file:
for sample in y_val:
for value in sample:
file.write('%f,' % value)
file.write('\n')
# write sorted population values in corresponding file
with open(self.population_file[:-4] + "_final_sorted.csv", 'w') as file:
for sample in x_val:
for value in sample:
file.write('%f, ' % value)
file.write('\n')
def get_fitness_population(self, result_dir, gen=0):
    """
    Returns the population and corresponding fitness values
    for the generation of interest.

    Parameters
    ----------
    result_dir : str
        The directory where the results are stored.
    gen : int, optional
        The generation of interest. The default is 0,
        i.e. the final generation.

    Returns
    -------
    y_gen : ndarray
        The fitness values.
    x_gen : ndarray
        The set of design samples.
    """
    self.fitness_file = os.path.join(self.result_path,
                                     result_dir,
                                     'fitness.csv',
                                     )
    self.population_file = os.path.join(self.result_path,
                                        result_dir,
                                        'population.csv',
                                        )
    # set the population size and number of generations
    self.determine_pop_gen()
    # get fitness values for desired generation
    y_unsorted = self.get_fitness_values(gen)
    # get population values for desired generation
    x_unsorted = self.get_population_values(gen)
    # a x b: design variables x samples; c x d: objectives x samples
    a, b = x_unsorted.shape
    c, d = y_unsorted.shape
    # get indices when sorting the y_unsorted array,
    # sorting is based on first objective
    indices = np.argsort(y_unsorted[0])
    x_gen = np.zeros((a, b))
    y_gen = np.zeros((c, d))
    # Reorder the columns of both arrays into the sorted order
    for j, k in enumerate(indices):
        for index, y_in in enumerate(y_gen):
            y_gen[index][j] = y_unsorted[index][k]
        for index, x_in in enumerate(x_gen):
            x_gen[index][j] = x_unsorted[index][k]
    # print sorted results in separate files
    self.sorted_result_file(y_gen.transpose(), x_gen.transpose())
    return y_gen, x_gen
class PostProcessUQ():
    """
    The PostProcessUQ class provides methods to retrieve the LOO error,
    plot the Sobol indices, PDF and CDF.

    Parameters
    ----------
    case : str
        Name of the case to be evaluated.
    pol_order : int
        The polynomial order.
    """

    def __init__(self, case, pol_order):
        # Results live in RESULTS/<case>/UQ, relative to the package root
        # (one level above this file).
        path_file = os.path.dirname(os.path.abspath(__file__))
        path_start = os.path.abspath(os.path.join(path_file, os.pardir))
        path = os.path.join(path_start,
                            'RESULTS',
                            case
                            )
        self.result_path = os.path.join(path,
                                        'UQ',
                                        )
        # Polynomial order used in the PCE result file names
        self.pol_order = pol_order
def read_distr_file(self, distr_file):
"""
Reads the file with information on the
cumulative density function or probability
density function.
Parameters
----------
distr_file : str
The name of the distribution file.
Returns
-------
x_val : ndarray
The values from the PDF or CDF on the
quantity of interest.
y_val : ndarray
The probability density (for the PDF)
or cumulative probability (for the CDF).
"""
# read the file with info on the CDF or PDF
with open(distr_file, 'r') as file:
lines = file.readlines()
x_val = np.ones(len(lines) - 1)
y_val = np.ones(len(lines) - 1)
for index, line in enumerate(lines[1:]):
tmp = line.split(",")
x_val[index] = float(tmp[0])
y_val[index] = float(tmp[1])
return x_val, y_val
def get_sobol(self, result_dir, objective):
"""
Retrieves the information on the Sobol' indices from
the corresponding file in the result directory.
Parameters
----------
result_dir : str
The result directory.
objective : str
The name of the quantity of interest.
Returns
-------
names : list
The names of the stochastic parameters.
sobol : list
The total' order Sobol' indices.
"""
sobol_file = os.path.join(self.result_path,
'%s' % result_dir,
'full_pce_order_%i_%s_Sobol_indices.csv' % (
self.pol_order, objective)
)
# retrieve the parameter names and corresponding Sobol indices
res_tmp = []
with open(sobol_file, 'r') as file:
for line in file.readlines()[1:]:
res_tmp.append([i for i in line.split(",")])
names = [row[0] for row in res_tmp]
sobol = [float(row[2]) for row in res_tmp]
return names, sobol
def get_pdf(self, result_dir, objective):
"""
Retrieves the points that define the probability density function.
Parameters
----------
result_dir : str
The result directory.
objective : str
The name of the quantity of interest.
Returns
-------
x_val : ndarray
The values from the PDF on the
quantity of interest.
y_val : ndarray
The probability density.
"""
pdf_file = os.path.join(self.result_path,
'%s' % result_dir,
'data_pdf_%s.csv' % objective
)
# get the x and y values for the PDF
x_val, y_val = self.read_distr_file(pdf_file)
return x_val, y_val
def get_cdf(self, result_dir, objective):
"""
Retrieves the points that define the cumulative density function.
Parameters
----------
result_dir : str
The result directory.
objective : str
The name of the quantity of interest.
Returns
-------
x : ndarray
The values from the CDF on the
quantity of interest.
y : ndarray
The cumulative probability.
"""
cdf_file = os.path.join(self.result_path,
'%s' % result_dir,
'data_cdf_%s.csv' % objective
)
# get the x and y values for the CDF
x_val, y_val = self.read_distr_file(cdf_file)
return x_val, y_val
def get_loo(self, result_dir, objective):
"""
Reads the Leave-One-Out error from the corresponding
file in the result directory.
Parameters
----------
result_dir : str
The result directory.
objective : str
The name of the quantity of interest.
Returns
-------
loo : float
The Leave-One-Out error.
"""
loo_file = os.path.join(self.result_path,
'%s' % (result_dir),
'full_pce_order_%i_%s.txt' % (
self.pol_order, objective)
)
# retrieve the LOO error
with open(loo_file, 'r') as file:
line = file.readlines()[0]
loo = float(line.split()[1])
return loo
def get_mean_std(self, result_dir, objective):
"""
Reads the mean and standard deviation
from the corresponding file in the result directory.
Parameters
----------
result_dir : str
The result directory.
objective : str
The name of the quantity of interest.
Returns
-------
mean : float
The mean.
std : float
The standard deviation.
"""
loo_file = os.path.join(self.result_path,
'%s' % (result_dir),
'full_pce_order_%i_%s.txt' % (
self.pol_order, objective)
)
# retrieve the mean and standard deviation
with open(loo_file, 'r') as file:
line = file.readlines()[2]
mean = float(line.split()[1])
with open(loo_file, 'r') as file:
line = file.readlines()[3]
std = float(line.split()[2])
return mean, std
def get_max_sobol(self, result_dirs, objective, threshold=0.05):
"""
This method gathers the Sobol' indices for each stochastic parameter
for each sample. The highest Sobol' index for each stochastic
parameter is compared with the threshold value. If the highest
Sobol' index is higher than the threshold, the name of the
stochastic parameter is printed under 'significant Sobol indices'.
If not, it is printed under 'negligible Sobol indices'.
Parameters
----------
result_dir : list
The result directories.
objective : str
The name of the quantity of interest.
threshold : float, optional
The threshold that determines if a Sobol' index
is considered significant. The default is 0.05.
"""
# store the dictionary with parameter names and Sobol indices in a
# list for each result directory evaluated
n_samples = len(result_dirs)
res_dict = [{}] * n_samples
for index, result_dir in enumerate(result_dirs):
names, sobol = self.get_sobol(result_dir, objective)
res_dict[index] = dict(zip(names, sobol))
# get the highest Sobol index for each parameter over the different
# dictionaries
max_dict = dict()
for name in names:
sobol_res = np.zeros(n_samples)
for j, dic in enumerate(res_dict):
sobol_res[j] = dic[name]
max_dict[name] = max(sobol_res)
print('significant Sobol indices:')
for k in names:
if max_dict[k] >= threshold:
print('%s: %4f' % (k, max_dict[k]))
print('\nnegligible Sobol indices:')
for k in names:
if max_dict[k] < threshold:
print('%s: %4f' % (k, max_dict[k])) | /rheia_meca2675-1.0.5.tar.gz/rheia_meca2675-1.0.5/src/rheia/POST_PROCESS/post_process.py | 0.827932 | 0.447158 | post_process.py | pypi |
import os
from pyDOE import lhs
from rheia.CASES.determine_stoch_des_space import load_case, check_dictionary
import rheia.UQ.pce as uq
def get_design_variables(case):
    """
    This function loads the design variable names and bounds
    out of the :file:`design_space.csv` file.

    Parameters
    ----------
    case : string
        The name of the case.

    Returns
    -------
    var_dict : dict
        A dictionary which includes the design variables and their bounds.
    """
    var_dict = {}
    # design_space.csv lives in CASES/<case>/, one level above this module
    path = os.path.dirname(os.path.abspath(__file__))
    path_to_read = os.path.join(
        os.path.abspath(
            os.path.join(
                path,
                os.pardir)),
        'CASES',
        case,
        'design_space.csv')
    # read in the design variable bounds
    # Expected comma-separated columns: name, type, lower bound, upper
    # bound; only rows whose type is 'var' are design variables.
    with open(path_to_read, 'r') as file:
        for line in file:
            tmp = line.split(",")
            if tmp[1] == 'var':
                var_dict[tmp[0]] = [float(tmp[2]), float(tmp[3])]
    return var_dict
def set_design_samples(var_dict, n_samples):
    """
    Based on the design variable characteristics,
    a set of design samples is created through
    Latin Hypercube Sampling.

    Parameters
    ----------
    var_dict : dict
        A dictionary which includes the design variables and their bounds.
    n_samples : int
        The number of design samples to be created.

    Returns
    -------
    samples : array
        The generated design samples.
    """
    # Latin Hypercube Sampling returns values scaled to [0, 1] per dimension.
    samples = lhs(len(var_dict), samples=n_samples)
    # Rescale each column from [0, 1] to the corresponding variable bounds.
    for col, (lower, upper) in enumerate(var_dict.values()):
        samples[:, col] *= (upper - lower)
        samples[:, col] += lower
    return samples
def write_design_space(case, iteration, var_dict, sample, ds = 'design_space.csv'):
    """
    A new design space file is created. In this file,
    the model parameters are copied from the original file,
    i.e. file:`design_space`. The design variable names are copied,
    but the bounds are loaded out of the array `sample`.
    This function is of interest when evaluating the LOO error
    or Sobol' indices for several design samples.

    Parameters
    ----------
    case : string
        The name of the case.
    iteration : int
        The index of the design sample
        out of the collection of generated design samples.
    var_dict : dict
        A dictionary which includes the design variables and their bounds.
    sample : array
        The design sample out of the collection of generated design samples.
    ds : string, optional
        The design_space filename. The default is 'design_space.csv'.
    """
    path = os.path.dirname(os.path.abspath(__file__))
    des_var_file = os.path.join(os.path.abspath(os.path.join(path, os.pardir)),
                                'CASES',
                                case,
                                'design_space.csv',
                                )
    # New file name, e.g. design_space_3.csv for iteration 3
    new_des_var_file = os.path.join(
        os.path.abspath(
            os.path.join(
                path,
                os.pardir)),
        'CASES',
        case,
        '%s_%i%s' % (ds[:-4], iteration, ds[-4:])
    )
    # write the new design_space file if it does not exist already
    if not os.path.isfile(new_des_var_file):
        with open(des_var_file, 'r') as file:
            text = []
            for line in file.readlines():
                found = False
                # NOTE(review): split() separates on whitespace, while
                # get_design_variables parses this file with split(',');
                # name matching only works if the file is space-separated.
                # Verify the expected design_space file format.
                tmp = line.split()
                for index, name in enumerate(list(var_dict.keys())):
                    if name == tmp[0]:
                        # Replace the variable line by a fixed-parameter line
                        text.append('%s par %f \n' % (name, sample[index]))
                        found = True
                if not found:
                    text.append(line)
        with open(new_des_var_file, 'w') as file:
            for item in text:
                file.write("%s" % item)
def run_uq(run_dict, design_space='design_space.csv'):
    """
    This function is the main routine to run uncertainty quantification.
    First, the input distributions are created,
    followed by the reading of previously evaluated samples.
    Thereafter, the new samples are created and evaluated
    in the system model when desired. Finally, the PCE is
    constructed, the statistical moments printed and
    the distributions generated (when desired) for the
    quantity of interest.

    (Fix: removed stray non-Python artifact text fused onto the final
    statement, which made the original line invalid.)

    Parameters
    ----------
    run_dict : dict
        The dictionary with information on the uncertainty quantification.
    design_space : string, optional
        The design_space filename. The default is 'design_space.csv'.
    """
    # check if the UQ dictionary is properly characterized
    check_dictionary(run_dict, uq_bool=True)
    objective_position = run_dict['objective names'].index(
        run_dict['objective of interest'])
    # load the object on the design space, the evaluation function
    # and the params provided for each model evaluation
    space_obj, eval_func, params = load_case(
        run_dict, design_space, uq_bool=True,
        create_only_samples=run_dict['create only samples'])
    my_data = uq.Data(run_dict, space_obj)
    # acquire information on stochastic parameters
    my_data.read_stoch_parameters()
    # create result csv file to capture all input-output of the samples
    my_data.create_samples_file()
    # create experiment object
    my_experiment = uq.RandomExperiment(my_data, objective_position)
    # create uniform/gaussian distributions and corresponding orthogonal
    # polynomials
    my_experiment.create_distributions()
    # calculate the number of terms in the PCE according to the
    # truncation scheme
    my_experiment.n_terms()
    # read in the previously generated samples
    my_experiment.read_previous_samples(run_dict['create only samples'])
    # create a design of experiment for the remaining samples
    # to be evaluated
    my_experiment.create_samples(
        size=my_experiment.n_samples - len(my_experiment.x_prev))
    # check if the samples need to be evaluated or not
    my_experiment.create_only_samples(run_dict['create only samples'])
    # when the PCE needs to be constructed
    if not run_dict['create only samples']:
        # evaluate the samples remaining to reach the required
        # number of samples for the PCE
        if my_experiment.n_samples > len(my_experiment.x_prev):
            my_experiment.evaluate(eval_func, params)
        elif my_experiment.n_samples == len(my_experiment.x_prev):
            my_experiment.y = my_experiment.y_prev
        else:
            # more previous samples than needed: keep only the first ones
            my_experiment.y = my_experiment.y_prev[:my_experiment.n_samples]
        # create PCE object
        my_pce = uq.PCE(my_experiment)
        # evaluate the PCE
        my_pce.run()
        # calculate the LOO error
        my_pce.calc_loo()
        # calculate the Sobol' indices
        my_pce.calc_sobol()
        # extract and print results
        my_pce.print_res()
        # generate the pdf and cdf when desired
        if run_dict['draw pdf cdf'][0]:
            my_pce.draw(int(run_dict['draw pdf cdf'][1]))
import os
import rheia.CASES.H2_MOBILITY.h2_mobility as lb
def set_params():
    """Assemble the fixed inputs shared by every model evaluation.

    Returns
    -------
    params : list
        [solar irradiance series, ambient temperature series], read from
        the Brussels climate file shipped with the case data.
    """
    # Resolve <case dir>/../DATA/climate/climate_Brussels.csv
    case_dir = os.path.dirname(os.path.abspath(__file__))
    data_root = os.path.abspath(os.path.join(case_dir, os.pardir))
    filename_climate = os.path.join(data_root, 'DATA', 'climate',
                                    'climate_Brussels.csv')
    # read the solar irradiance and ambient temperature data
    climate_reader = lb.ReadData(filename_climate)
    sol_irr, t_amb = climate_reader.load_climate()
    # fixed data handed to every evaluate() call
    return [sol_irr, t_amb]
def evaluate(x_in, params=[]):
    '''
    Evaluation of the system objectives for one given design.

    Parameters
    ----------
    x_in : tuple
        An enumerate object for the input sample.
        The first element of x
        - the index of the input sample in the list of samples -
        can be used for multiprocessing purposes of executable files
        with input and output text files.
        The second element of x - the input sample -
        is a dictionary with the names and values for the model parameters
        and design variables.
    params : list, optional
        List with fixed data, used during model evaluation. The default
        is []. The list is only read (never mutated), so the mutable
        default is safe here.

    Returns
    -------
    lcom : float
        the levelized cost of driving
    ci : float
        the carbon intensity
    grid_e_bought : float
        the annual grid electricity bought

    '''
    # Doc fix: the docstring previously described the second return value
    # as ``lco2`` while the code returns ``ci``.
    # combine the fixed parameters with the sample-specific inputs
    arguments = params + [x_in[1]]
    # Evaluation object
    my_evaluation = lb.Evaluation(*arguments)
    # evaluate the system model; truthy result means the evaluation
    # completed with the constraints satisfied (typo `succes` fixed)
    success = my_evaluation.evaluation()
    if success:
        # when constraints are satisfied, get values for the objectives
        lcom = my_evaluation.res['lcom']
        ci = my_evaluation.res['ci']
        # /1e6 rescales the summed grid electricity — presumably Wh -> MWh;
        # TODO confirm the unit convention of `grid_e_buy`
        grid_e_bought = sum(my_evaluation.res['grid_e_buy'])/1e6
    else:
        # when constraints are violated, return a large penalty value so
        # the design is effectively discarded
        lcom = 1e8
        ci = 1e8
        grid_e_bought = 1e8
    return lcom, ci, grid_e_bought
import os
import rheia.CASES.H2_POWER.h2_power as pv_h2
def set_params():
    """Collect the fixed climate and demand data used by every evaluation.

    Returns
    -------
    params : list
        [solar irradiance, ambient temperature, electric load], read from
        the Brussels data files shipped with the case data.
    """
    # Resolve <case dir>/../DATA/{climate,demand}/... data files.
    case_dir = os.path.dirname(os.path.abspath(__file__))
    data_root = os.path.abspath(os.path.join(case_dir, os.pardir))
    filename_climate = os.path.join(data_root, 'DATA', 'climate',
                                    'climate_Brussels.csv')
    filename_demand = os.path.join(data_root, 'DATA', 'demand',
                                   'load_Brussels_dwelling.csv')
    # read the solar irradiance, ambient temperature and electric demand
    reader = pv_h2.ReadData(filename_climate, filename_demand)
    sol_irr, t_amb = reader.load_climate()
    load_elec = reader.load_demand()
    # fixed data handed to every evaluate() call
    return [sol_irr, t_amb, load_elec]
def evaluate(x_in, params=[]):
    '''
    Evaluation of the system objectives for one given design.

    Parameters
    ----------
    x_in : tuple
        (index, sample) pair, e.g. as produced by enumerate(). The index
        can be used for multiprocessing bookkeeping of executable files
        with input and output text files; the sample is a dictionary with
        the names and values for the model parameters and design
        variables.
    params : list, optional
        List with fixed data, used during model evaluation. The default
        is []. The list is only read, never mutated.

    Returns
    -------
    lcoe : float
        the levelized cost of electricity
    ssr : float
        the self-sufficiency ratio

    '''
    # fixed inputs followed by the sample dictionary
    model_args = params + [x_in[1]]
    evaluation = pv_h2.Evaluation(*model_args)
    # run the system model
    evaluation.evaluation()
    # pull the objective values out of the result dictionary
    return evaluation.res['lcoe'], evaluation.res['ssr']
# Load rheology data
If you have a google account you can run this documentation notebook [Open in colab](https://colab.research.google.com/github/rheopy/rheofit/blob/master/docs/source/load_data.ipynb)
```
from IPython.display import clear_output
!pip install git+https://github.com/rheopy/rheofit.git --upgrade
clear_output()
import rheofit
import numpy as np
import pandas as pd
import pybroom as pb
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
```
The module rheofit.rheodata provides a class to store structured rheology data.
We initially focus on loading data files exported by the TRIOS software in two possible formats:
* Excel format - as structured by the TRIOS software with the multitab export option
* RheoML format [rheoml link](https://www.tomcoat.com/IUPAC/index.php?Page=XML%20Repository)
## Import data from Multitab excel (assuming format exported by TA Trios)
```
# Download example of xls file format
import requests
url = 'https://github.com/rheopy/rheofit/raw/master/docs/source/_static/Flow_curve_example.xls'
r = requests.get(url, allow_redirects=True)
with open('Flow_curve_example.xls', 'wb') as file:
file.write(r.content)
excel_file=pd.ExcelFile('Flow_curve_example.xls')
excel_file.sheet_names
flow_curve=rheofit.rheodata.rheology_data('Flow_curve_example.xls')
flow_curve.filename
flow_curve.Details
flow_curve[0]
print(flow_curve[0][0])
flow_curve[0][1]
print(flow_curve[1][0])
flow_curve[1][1]
# In summary
flow_curve=rheofit.rheodata.rheology_data('Flow_curve_example.xls')
for (label,data) in flow_curve:
plt.loglog('Shear rate','Stress',data=data,marker='o',label=label)
plt.xlabel('$\dot\gamma$')
plt.ylabel('$\sigma$')
plt.legend()
flow_curve.tidy
import altair as alt
alt.Chart(flow_curve.tidy).mark_point().encode(
alt.X('Shear rate', scale=alt.Scale(type='log')),
alt.Y('Stress', scale=alt.Scale(type='log')),
color='stepname')
#.interactive()
```
## Import data from Rheoml
```
# Download example of the RheoML xml file format
import requests
url = 'https://raw.githubusercontent.com/rheopy/rheofit/master/docs/source/_static/Flow_curve_example.xml'
r = requests.get(url, allow_redirects=True)
with open('Flow_curve_example.xml', 'wb') as file:
file.write(r.content)
# In summary from xml (rheoml schema)
# to do - add to rheology_data class the option to import from xml
# Solve naming problem for shear stress and shear rate
steps_table_list=rheofit.rheodata.dicttopanda(rheofit.rheodata.get_data_dict('Flow_curve_example.xml'))
for data in steps_table_list:
plt.loglog('ShearRate','ShearStress',data=data,marker='o')
plt.xlabel('$\dot\gamma$')
plt.ylabel('$\sigma$')
plt.legend()
```
| /rheofit-0.1.3.tar.gz/rheofit-0.1.3/docs/source/load_data.ipynb | 0.658418 | 0.869659 | load_data.ipynb | pypi |
from mpl_toolkits.axes_grid1 import Divider, Size
from mpl_toolkits.axes_grid1.mpl_axes import Axes
from itertools import cycle
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.latex.preamble'] = r'\usepackage{amsmath}'
from math import log10, floor
import sortednp as snp
import numpy as np
import warnings
def legend_modulus(ax, loc='lower right', second_legend=False, bbox_to_anchor=0):
    """Add a loss/storage-modulus marker legend to *ax*.

    Parameters
    ----------
    ax : matplotlib Axes
        axes receiving the legend.
    loc : str, optional
        legend location.
    second_legend : bool, optional
        when True, add the legend as an extra artist so it can coexist
        with an already-present legend on the axes.
    bbox_to_anchor : tuple or 0, optional
        anchor for the legend box; 0 (the default) means "not supplied".
    """
    handles = [
        mpl.lines.Line2D([], [], c='k', label=r'$\mathrm{loss~modulus}$'),
        mpl.lines.Line2D([], [], c='k', mfc='k',
                         label=r'$\mathrm{storage~modulus}$'),
    ]
    # Only forward bbox_to_anchor when the caller actually supplied one.
    extra = {} if bbox_to_anchor == 0 else {'bbox_to_anchor': bbox_to_anchor}
    if second_legend:
        legend = plt.legend(handles=handles, loc=loc, **extra)
        ax.add_artist(legend)
    else:
        ax.legend(loc=loc, handles=handles, **extra)
def annotate_crit_shear_rates(ax, gammadotc, etas, tauc_TC, position_txt=0.6):
    """Draw and label vertical lines at the two characteristic shear rates.

    Parameters
    ----------
    ax : matplotlib Axes
        axes to annotate.
    gammadotc : float
        critical shear rate (green dashed line).
    etas : float
        viscosity used to compute the second rate (labelled eta_infinity).
    tauc_TC : float
        stress used to compute the second rate (labelled sigma_y).
    position_txt : float, optional
        vertical position of the labels, in axes-fraction coordinates.
    """
    # second characteristic rate: tauc_TC / etas (labelled sigma_y / eta_inf)
    gammadot2 = tauc_TC / etas
    ax.axvline(gammadotc, c='green', ls='--', marker='')
    ax.axvline(gammadot2, c='purple', ls='--', marker='')
    # labels use (data, axes-fraction) coordinates so they stick to the lines
    ax.annotate(r'$\dot{\gamma}_\mathrm{c}$', xy=(gammadotc, position_txt), c='g',
                xycoords=('data', 'axes fraction'), backgroundcolor='w', ha='center')
    ax.annotate(r'$\sigma_{\mathrm{y}} / \eta_\infty$', xy=(gammadot2, position_txt), c='purple',
                xycoords=('data', 'axes fraction'), backgroundcolor='w', ha='center')
def get_crit_shear_rates(df_fit, sample_id, T):
    """Return (gammadotc, tauc_TC, etas) for one sample at temperature T.

    Assumes *df_fit* holds exactly one row per (sample_ID, temperature)
    pair, so ``float()`` on the selected column yields a scalar — TODO
    confirm.
    NOTE(review): ``float()`` on a one-element pandas Series is deprecated
    in recent pandas; consider ``.iloc[0]`` — verify the pandas version.
    """
    # select the single row matching this sample and temperature
    df_fith = df_fit[(df_fit.sample_ID == sample_id) & (df_fit.temperature == T)]
    gammadotc = float(df_fith['gamma_dotc'])
    tauc_TC = float(df_fith['tauc_TC'])
    etas = float(df_fith['etas'])
    return gammadotc, tauc_TC, etas
def title_fig(microgel_type, solvant, T, wp=None):
    """Build a file-name-safe figure title like ``PNIPAM_1p5_in_water_T25``.

    Parameters
    ----------
    microgel_type, solvant :
        sample and solvent identifiers (stringified).
    T :
        temperature.
    wp : optional
        weight percent; when given, it is inserted with '.' replaced by
        'p' so the title stays usable as a file name.
    """
    # identity check is the idiomatic None test (was `== None`); the second
    # branch now also applies str() for consistency with the first
    if wp is None:
        return str(microgel_type) + '_in_' + str(solvant) + '_T' + str(T)
    return (str(microgel_type) + '_' + str(wp).replace('.', 'p')
            + '_in_' + str(solvant) + '_T' + str(T))
def label_sample(microgel_type, solvant, T):
    """Build a tilde-separated sample label like ``PNIPAM~in~water~T25``.

    Bug fix: the parameter was misspelled ``micorgel_type`` while the body
    referenced ``microgel_type``, raising NameError on every call.
    """
    return str(microgel_type) + '~in~' + str(solvant) + '~T' + str(T)
def write_sample_info(ax, microgel_type, T, solvant, w_percent=None):
    """Write a one-line sample description above the axes.

    The text is placed at axes coordinates (0, 1.1); when *w_percent* is
    given it is included as a percentage.

    Parameters
    ----------
    ax : matplotlib Axes
    microgel_type, solvant : str
        sample and solvent names, inserted into the LaTeX string.
    T :
        temperature in degrees Celsius.
    w_percent : optional
        weight percent to display; omitted when None.
    """
    # identity check is the idiomatic None test (was `== None`)
    if w_percent is None:
        ax.text(0, 1.1, r'$\mathrm{T}~' + str(T) + '^\circ \mathrm{C}~~~~\mathrm{' + microgel_type + '}~\mathrm{in~' + solvant + '} $', va='center', transform=ax.transAxes)
    else:
        ax.text(0, 1.1, r'$\mathrm{T}~' + str(T) + '^\circ \mathrm{C}~~~~\mathrm{' + microgel_type + '}~' + str(w_percent) + '~\%~~~~\mathrm{in~' + solvant + '} $', va='center', transform=ax.transAxes)
def merge_physical_data(x1, x2, y1, y2):
    """Merge two (x, y) data sets into one, sorted by x.

    Parameters
    ----------
    x1, x2 : array-like
        abscissa values of the two data sets (each assumed sorted, as
        required by sortednp.merge — TODO confirm callers guarantee this).
    y1, y2 : array-like
        ordinate values; len(y1) == len(x1) and len(y2) == len(x2) is
        assumed — TODO confirm.

    Returns
    -------
    merged_x, merged_y : numpy arrays
        the merged abscissa and the ordinates rearranged to match.
    """
    # The original guarded x1/y1 with try/except NameError, but function
    # parameters are always bound, so that branch was unreachable dead
    # code and has been removed.
    # Coerce to float arrays in case the inputs contain ints or strings.
    x1 = np.asarray(x1, dtype=float)
    x2 = np.asarray(x2, dtype=float)
    y1 = np.asarray(y1, dtype=float)
    y2 = np.asarray(y2, dtype=float)
    # Sorted merge of the x values; ind_1/ind_2 give the position of each
    # element of x1/x2 inside merged_x.
    merged_x, (ind_1, ind_2) = snp.merge(x1, x2, indices=True)
    # Scatter the y values straight to their positions. This replaces the
    # former O(n*m) `if i in ind_1` membership loop with two O(n)
    # assignments; ind_1 and ind_2 partition the indices, so every slot
    # is filled exactly once.
    merged_y = np.empty(len(merged_x), dtype=float)
    merged_y[ind_1] = y1
    merged_y[ind_2] = y2
    return merged_x, merged_y
def round_physical(x, err_x):
    """Round a value and its uncertainty to the error's leading digit."""
    # number of decimal places dictated by the magnitude of the error
    digits = -int(floor(log10(abs(err_x))))
    return round(x, digits), round(err_x, digits)
def round_significative(x):
    """Round *x* to a single significant digit."""
    exponent = floor(log10(abs(x)))
    return round(x, -int(exponent))
def create_plot(two_sided=False, colors = ['#6F4C9B', '#5568B8', '#4D8AC6',
                                           '#60AB9E', '#77B77D', '#A6BE54',
                                           '#D1B541', '#E49C39', '#DF4828', '#990D38'], markers = ['o', 'v', '^', 's', 'D', '*'], figsize=(5, 3.4)):
    """
    Create a figure with a fixed-margin axes plus color and marker cyclers.

    Parameters
    ----------
    two_sided : bool
        when True, use a square 3.4 x 3.4 inch figure and ignore *figsize*.
        (The previous docstring called this parameter ``twosided``; the
        actual name is ``two_sided``.)
    colors : list of strings
        a default list exists but this allows to change it if u want.
        The list is only iterated (via itertools.cycle), never mutated,
        so the mutable default is safe here.
    markers : list of strings
        a default list of markers exists, but u can change it if needed;
        same mutability remark as *colors*.
    figsize : tuple
        figure size in inches, used when *two_sided* is False.

    Returns
    -------
    fig, ax, color, marker : the figure, its axes, and two
        itertools.cycle iterators over *colors* and *markers*.
    """
    color = cycle(colors)
    marker = cycle(markers)
    if two_sided :
        fig = plt.figure(figsize=(3.4, 3.4))
    else :
        fig = plt.figure(figsize=figsize)
    # The first & third items are for padding and the second items are for the
    # axes. Sizes are in inches.
    h = [Size.Fixed(1.0), Size.Scaled(1.), Size.Fixed(.2)]
    v = [Size.Fixed(0.7), Size.Scaled(1.), Size.Fixed(.5)]
    divider = Divider(fig, (0.0, 0.0, 1., 1.), h, v, aspect=False)
    # the width and height of the rectangle is ignored.
    ax = Axes(fig, divider.get_position())
    ax.set_axes_locator(divider.new_locator(nx=1, ny=1))
    fig.add_axes(ax)
    return fig, ax, color, marker
def get_fit_amp_sweep(x, y, graph=True):
    """Fit y(x) with a RANSAC linear regression; return (intercept, slope).

    Parameters
    ----------
    x, y : 1-D numpy arrays
        data to fit; x is reshaped to a column vector for sklearn.
    graph : bool, optional
        when True, also print the fit parameters and plot the
        inliers/outliers with the fitted line on log-log axes.

    Returns
    -------
    y0, slop : float
        intercept and slope of the RANSAC consensus fit.

    NOTE(review): this module never imports sklearn, so this function
    raises NameError as written — add ``import sklearn.linear_model`` at
    the top of the file.
    """
    warnings.simplefilter("ignore")
    x = x[:, np.newaxis]
    # RANSAC fit with a positive-slope linear base model
    ransac = sklearn.linear_model.RANSACRegressor(
        residual_threshold=x.std() / 20,
        base_estimator=sklearn.linear_model.LinearRegression(positive=True))
    ransac.fit(x[:], y[:])
    pente = ransac.estimator_.coef_[0]
    # NOTE(review): this refits until the slope is exactly 0, which looks
    # inverted (retrying while the fit IS degenerate would be
    # ``while pente == 0``) — confirm the intent before changing.
    while pente != 0:
        ransac.fit(x[:], y[:])
        pente = ransac.estimator_.coef_[0]
    inlier_mask = ransac.inlier_mask_
    outlier_mask = np.logical_not(inlier_mask)
    # Prediction of the fitted line over the data range
    line_X = np.linspace(x.min(), x.max(), 100)
    line_X = line_X[:, np.newaxis]
    line_y_ransac = ransac.predict(line_X)
    # Bug fix: y0/slop were only assigned inside ``if graph:``, so calling
    # with graph=False raised NameError at the return statement.
    y0 = ransac.estimator_.intercept_
    slop = ransac.estimator_.coef_[0]
    if graph:
        #print("R²")
        #print(ransac.estimator_.score(line_X, line_y_ransac))
        print("Ordonnée à l'origine", y0)
        print("Pente", slop)
        lw = 1
        plt.scatter(x[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',
                    label='Inliers')
        plt.scatter(x[outlier_mask], y[outlier_mask], color='gold', marker='.',
                    label='Outliers')
        #plt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,
        #         label='RANSAC regressor')
        plt.plot(np.linspace(x[0], x[-1], 20),
                 y0 + slop * np.linspace(x[0], x[-1], 20),
                 lw=1, ls='-', color='teal', label='yo', marker='')
        plt.legend(bbox_to_anchor=(1.1, 1.1))._legend_box.align='left'
        plt.xscale('log')
        plt.yscale('log')
        #plt.xlabel(r'$p~\mathrm{(m)}$')
        #plt.ylabel(r'$F~\mathrm{(N)}$')
        #plt.title('V = ' + str(V))
    return y0, slop
def get_fit_end_recover(x, y, graph=True):
    """Fit y(x) with a RANSAC linear regression; return (intercept, slope).

    NOTE(review): this module never imports sklearn, so this function
    raises NameError as written — add ``import sklearn.linear_model``.
    NOTE(review): *graph*, *pente*, and the line_X/line_y_ransac
    prediction are computed but never used here.
    """
    #warnings.simplefilter("ignore")
    x = x[:, np.newaxis]
    # Fit Ransac
    ransac = sklearn.linear_model.RANSACRegressor(residual_threshold=x.std()/20,
                                 base_estimator = sklearn.linear_model.LinearRegression(positive=True))
    ransac.fit(x[:], y[:])
    pente = ransac.estimator_.coef_[0]
    inlier_mask = ransac.inlier_mask_
    outlier_mask = np.logical_not(inlier_mask)
    # Prediction of the fitted line over the data range
    line_X = np.linspace(x.min(), x.max(), 100)
    line_X = line_X[:, np.newaxis]
    line_y_ransac = ransac.predict(line_X)
    y0 = ransac.estimator_.intercept_
    slop = ransac.estimator_.coef_[0]
    return y0, slop
<a href="https://colab.research.google.com/github/rheopy/rheofit/blob/master/notebooks/index.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Rheofit notebooks index
This index is the landing notebook for the colab badge
* [Quickstart](Quickstart.ipynb)
```
Example of how to start using rheofit to analyze one or more flow curves, explore results and save to file
```
* [load rheology data](load_data.ipynb)
```
Examples of how to load rheology data using function provided by rheodata class
```
* [Fitting single flow curve data](Fitting_single_flow_curve_colab.ipynb)
```
This notebook focuses on the possible analyses performed on a single flow curve.
```
```
```
| /rheofit-0.1.3.tar.gz/rheofit-0.1.3/notebooks/index.ipynb | 0.885037 | 0.9842 | index.ipynb | pypi |
class VariantCandidate:
    """A candidate structural variant built from supporting read pairs.

    Tracks the outer span (``left_pos``..``right_pos``, the union of all
    supporting pairs), the inner span (``inner_left``..``inner_right``,
    the intersection), the span center, and the supporting depth.
    """

    def __init__(self, read_pair, var_type):
        left = read_pair.get_left_pos()
        right = read_pair.get_right_pos()
        self.supporting_reads = [read_pair]
        self.left_pos = left
        self.inner_left = left
        self.right_pos = right
        self.inner_right = right
        self.var_type = var_type
        # A pair whose left end is past its right end is suspicious;
        # report it (diagnostic behavior kept from the original).
        if left > right:
            print(read_pair)
        self.center = read_pair.get_center()
        self.width = read_pair.get_width()
        self.depth = 1

    def add_read_pair(self, read_pair):
        """Fold another supporting read pair into this candidate."""
        self.supporting_reads.append(read_pair)
        self.depth += 1
        left = read_pair.get_left_pos()
        right = read_pair.get_right_pos()
        # Outer span grows to the union, inner span shrinks to the
        # intersection.
        if left < self.left_pos:
            self.left_pos = left
        if left > self.inner_left:
            self.inner_left = left
        if right > self.right_pos:
            self.right_pos = right
        if right < self.inner_right:
            self.inner_right = right
        self.center = (self.left_pos + self.right_pos) / 2

    def get_center(self):
        return self.center

    def get_width(self):
        return self.right_pos - self.left_pos

    def belongs(self, read_pair):
        """True when the pair's center falls inside the outer span."""
        return self.left_pos <= read_pair.get_center() <= self.right_pos

    def overlaps(self, another):
        """True when *another*'s outer span intersects this one's."""
        return (self.left_pos <= another.left_pos <= self.right_pos
                or self.left_pos <= another.right_pos <= self.right_pos)

    def merge_with(self, another):
        """Absorb every supporting pair of *another* into this candidate."""
        for pair in another.supporting_reads:
            self.add_read_pair(pair)

    def inner_span_to_str(self):
        return "[inner-left:{} inner-right:{} span:{} depth:{}]".format(
            self.inner_left, self.inner_right,
            self.inner_right - self.inner_left, self.depth)

    def get_inner_span(self):
        return self.inner_right - self.inner_left

    def __str__(self):
        pieces = ["{} ".format(self.var_type)]
        pieces.extend("[{},{}]\n".format(r.get_left_pos(), r.get_right_pos())
                      for r in self.supporting_reads)
        return "".join(pieces)
import math
from dataclasses import dataclass
from typing import List, Dict, Tuple, Sequence, Any, Set
@dataclass
class PartitionRecord(object):
    """One genomic interval [left, right] on *contig*, tagged with the
    index of the partition it belongs to (coordinates are 1-based,
    inclusive)."""
    contig: str
    left: int
    right: int
    partition: int


class Partitioner(object):
    """Split the GRCh37 reference genome into fixed-width partitions.

    Each contig is cut into consecutive windows of ``partition_width``
    base pairs (the last window truncated to the contig length), and
    every window receives a globally unique partition index.
    """

    # GRCh37 contig lengths in base pairs.
    seq_dict: Dict[str, int] = {
        "1": 249250621,
        "2": 243199373,
        "3": 198022430,
        "4": 191154276,
        "5": 180915260,
        "6": 171115067,
        "7": 159138663,
        "8": 146364022,
        "9": 141213431,
        "10": 135534747,
        "11": 135006516,
        "12": 133851895,
        "13": 115169878,
        "14": 107349540,
        "15": 102531392,
        "16": 90354753,
        "17": 81195210,
        "18": 78077248,
        "19": 59128983,
        "20": 63025520,
        "21": 48129895,
        "22": 51304566,
        "X": 155270560,
        "Y": 59373566,
        "MT": 16569
    }

    # Width of one partition window, in base pairs.
    partition_width: int = 20000000

    def __init__(self):
        # Bug fix: these were class-level mutable attributes, so every
        # Partitioner instance shared (and kept appending to) the same
        # lists/dicts. They are now per-instance. The per-contig debug
        # print in the constructor was also removed.
        self.partition_records: List[PartitionRecord] = []
        self.records_by_partition: Dict[int, PartitionRecord] = {}
        self.records_by_contig: Dict[str, List[PartitionRecord]] = {}
        partition_index = 0
        for contig, length in self.seq_dict.items():
            num_partitions = math.ceil(length / self.partition_width)
            for i in range(num_partitions):
                my_record = PartitionRecord(
                    contig=contig,
                    left=i * self.partition_width + 1,
                    right=min((i + 1) * self.partition_width, length),
                    partition=partition_index)
                self.partition_records.append(my_record)
                self.records_by_partition[partition_index] = my_record
                self.records_by_contig.setdefault(contig, []).append(my_record)
                partition_index += 1

    def get_partition_for_rec(self, contig: str, left: int, right: int):
        """Return the partition indices whose windows overlap [left, right]
        on *contig* (inclusive coordinates).

        The original method was an unfinished stub that always returned
        None; it is implemented here following its evident intent.
        """
        rec_list = self.records_by_contig[contig]
        return [rec.partition
                for rec in rec_list
                if rec.left <= right and rec.right >= left]


my_partitioner = Partitioner()
print(my_partitioner.partition_records)
print(my_partitioner.records_by_partition)
print(my_partitioner.records_by_contig)
from sortedcontainers import SortedDict
from rheos_common.model.variant_candidate import VariantCandidate
class VariantCandidateCollection:
    """Sorted collection of VariantCandidate objects, keyed by center
    position."""

    def __init__(self):
        # center position -> VariantCandidate, kept in sorted key order
        self.variants = SortedDict()

    def add_variant_candidate(self, variant_candidate):
        """Insert (or replace) a candidate, keyed by its center position."""
        self.variants[variant_candidate.get_center()] = variant_candidate

    def add_read_pair(self, read_pair, var_type):
        """Attach *read_pair* to a neighbouring candidate that accepts it,
        or start a new candidate when none does."""
        if len(self.variants) == 0:
            self.add_variant_candidate(VariantCandidate(read_pair, var_type))
            return
        center = read_pair.get_center()
        # index of the first stored key greater than the pair's center
        ind = self.variants.bisect(center)
        # indexable sorted view of the keys (SortedDict.iloc is deprecated)
        keys = self.variants.keys()
        if ind >= 1:
            # candidate immediately to the left of the center
            var_can = self.variants[keys[ind - 1]]
            if var_can.var_type == var_type:
                if var_can.belongs(read_pair):
                    var_can.add_read_pair(read_pair)
                    return
            else:
                print("Multiple variants at same location")
        if ind < len(self.variants):
            # candidate immediately to the right of the center.
            # NOTE(review): unlike the left neighbour, var_type is not
            # checked here — confirm whether that is intentional.
            var_can = self.variants[keys[ind]]
            if var_can.belongs(read_pair):
                var_can.add_read_pair(read_pair)
                return
        self.add_variant_candidate(VariantCandidate(read_pair, var_type))

    def merge(self):
        """Merge overlapping adjacent candidates, scanning left to right.

        Bug fix: the original iterated the SortedDict directly while
        deleting entries from it, which corrupts the iteration; we now
        iterate over a snapshot of the keys.
        """
        cur_var = None
        for key in list(self.variants):
            candidate = self.variants[key]
            if cur_var is None:
                # first candidate seeds the running merge target
                cur_var = candidate
                continue
            if cur_var.overlaps(candidate):
                cur_var.merge_with(candidate)
                del self.variants[key]
            else:
                cur_var = candidate

    def purge(self):
        """Drop candidates with an inverted inner span or depth <= 1."""
        keys_to_purge = [key for key in self.variants
                         if self.variants[key].get_inner_span() < 0
                         or self.variants[key].depth <= 1]
        for key in keys_to_purge:
            del self.variants[key]
**RHESSysCalibrator** {#index}
**Introduction**
RHESSysCalibrator is a system for managing calibration sessions and run of
RHESSys; a calibration session contains one or more runs. Each run represents a
distinct and uniformly random set of sensitivity parameters.
RHESSysCalibrator uses a database (in sqlite3 format) to keep track of each
session and run. RHESSysCalibrator handles launching and management of each
run, and will update its database as run jobs are submitted, begin
running, and finish (either with or without error). RHESSysCalibrator supports parallel execution of model runs using: (1) multiple processes (i.e. on your laptop or workstation); (2) compute clusters running Load Sharing Facility (LSF); (3) compute clusters running PBS/TORQUE; and (4) compute clusters running SLURM.
In addition to creating new calibration sessions, it is also possible
to re-start sessions that have exited before completion. RHESSysCalibrator also handles calibration post processing (i.e. calculating Nash-Sutcliffe efficiency for modeled vs. observed streamflow), as well as uncertainty estimation using Generalized Likelihood Uncertainty Estimation (GLUE; Beven & Binley 1992).
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**
- [Authors](#authors)
- [Source code](#source-code)
- [Installation instructions](#installation-instructions)
- [RHESSys calibration background](#rhessys-calibration-background)
- [Usage instructions](#usage-instructions)
- [Setup a new calibrator project](#setup-a-new-calibrator-project)
- [Configuring a calibration session](#configuring-a-calibration-session)
- [Create a new calibration session](#create-a-new-calibration-session)
- [Using screen to run RHESSysCalibrator on compute clusters](#using-screen-to-run-rhessyscalibrator-on-compute-clusters)
- [Restarting failed model sessions](#restarting-failed-model-sessions)
- [Calculate model fitness statistics for basin-level output](#calculate-model-fitness-statistics-for-basin-level-output)
- [Performing GLUE uncertainty estimation](#performing-glue-uncertainty-estimation)
- [Applying behavioral parameter sets to another RHESSys model](#applying-behavioral-parameter-sets-to-another-rhessys-model)
- [Visualizing behavioral model output](#visualizing-behavioral-model-output)
- [Comparing behavioral simulations](#comparing-behavioral-simulations)
- [Visualizing behavioral model output using other tools](#visualizing-behavioral-model-output-using-other-tools)
- [Appendix](#appendix)
- [Model directory structure](#model-directory-structure)
- [References](#references)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
Authors
-------
Brian Miles - brian_miles@unc.edu
Taehee Hwang - taehee@indiana.edu
Lawrence E. Band - lband@email.unc.edu
For questions or support contact [Brian Miles](brian_miles@unc.edu)
Source code
-----------
Source code can be found at: https://github.com/selimnairb/RHESSysCarlibrator
Documentation can be found at: http://pythonhosted.org/rhessyscalibrator
Installation instructions
-------------------------
RHESSysCalibrator requires [RHESSysWorkflows](https://github.com/selimnairb/RHESSysWorkflows) and [EcohydroLib](https://github.com/selimnairb/EcohydroLib); detailed
installation instructions for OS X and Linux can be found [here](https://github.com/selimnairb/RHESSysWorkflows).
> Though RHESSysCalibrator is written in Python it has only been tested under OS X and
> Linux.
Once you have installed RHESSysWorkflows and EcohydroLib using the instructions above, install RHESSysCalibrator using pip:
sudo pip install rhessyscalibrator
> Note that when installing RHESSysCalibrator and its dependencies on a compute cluster,
> you will likely first need to setup a virtual Python environment using *virtualenv*.
RHESSys calibration background
------------------------------
A discussion of model calibration theory is beyond the scope of this manual. Basic RHESSys model calibration is typically performed by calibrating the model's soil parameters against basin-level streamflow using a Monte Carlo approach. These soil parameters as well as general calibration strategies are described in detail [here](https://github.com/RHESSys/RHESSys/wiki/Calibrating-and-running-RHESSys).
Usage instructions
------------------
## Setup a new calibrator project
The first step to using RHESSysCalibrator is to create empty
RHESSysCalibrator project. First create a new directory:
mkdir MY_CALIBRATION_PROJECT
Then, populate this directory with the [correct subdirectory structure](#model-directory-structure):
rhessys_calibrator.py -b Baisman_10m_calib --create
Next, if the RHESSys model you wish to calibrate was created using RHESSysWorkflows, you can use the provided *rw2rc.py* script to copy your RHESSysWorkflows project into the empty RHESSysCalibrator project:
rw2rc.py -p MY_RHESSYSWORKFLOWS_PROJECT -b MY_CALIBRATION_PROJECT
If you are not working from a RHESSysWorkflows project, you will need to copy the necessary model files into the [correct locations](#model-directory-structure) in your calibration directory.
Lastly, copy observed daily streamflow and precipitation data for your watershed into the *obs* directory of your RHESSysCalibrator project, for example:
cp PATH/TO/MY_OBSERVED_DATA MY_CALIBRATION_PROJECT/obs
Note that the observed data must be a CSV file including the following columns:
datetime,streamflow_mm,precip_mm
1/1/1991,3.7,0
1/2/1991,3.5,0
1/3/1991,3.51,8.4
1/4/1991,3.32,0.5
1/5/1991,3.21,0
...
The datetime column must contain dates in MM/DD/YYYY format. Streamflow and precipitation data must be in units of mm/day; streamflow must be in a column named "streamflow_mm", precipitation in a column named "precip_mm". Make sure your line endings are Unix, not Windows or Mac.
## Configuring a calibration session
RHESSysCalibrator uses a file called *cmd.proto* to control how calibration runs are created. The typical contents of this file is:
$rhessys -st 2003 1 1 1 -ed 2008 10 1 1 -b -t $tecfile -w $worldfile -r $flowtable -pre $output_path -s $s1[0.01, 20.0] $s2[1.0, 150.0] $s3[0.1, 10.0] -sv $sv1[0.01, 20.0] $sv2[1.0, 150.0] -gw $gw1[0.001, 0.3] $gw2[0.01, 0.9]
or for older versions of RHESSysCalibrator (or if you used the *--noparamranges* option when creating your project), the ranges for each parameter will not be included:
$rhessys -st 2003 1 1 1 -ed 2008 10 1 1 -b -t $tecfile -w $worldfile -r $flowtable -pre $output_path -s $s1 $s2 $s3 -sv $sv1 $sv2 -gw $gw1 $gw2
> Use the *--allparams* option when creating your project to include all possible RHESSys commmand line parameters
> in the project's *cmd.proto*.
Here we are setting the start and end dates of the calibration runs, as well as setting the soil parameters to calibrated against. In this case for each model run the following parameters will be varied: decay of saturated hydraulic conductivity for soil drainage (s1 or m in model parlance); saturated hydraulic conductivity for soil drainage (s2 or Ksat0); soil depth (s3); decay of saturated hydraulic conductivity for infiltration (sv1 or m_v); vertical saturated conductivity for infilration (sv2 or Ksat0_v); bypass flow from detention storage directly to hillslope groundwater (gw1); loss from the saturated zone to hillslope groundwater (gw2).
In general, you should only need to change the start and end dates, which calibration parameters to include, and
the range of the uniform distribution from which each parameter will be sampled; parameter ranges are specified by placing the desired closed interval (i.e. two floating point numbers separated by commas and enclosed by square brackets) immediately following the parameter name. Ignore all other parts of cmd.proto (e.g. $worldfile, $output_path) as these are used by RHESSysCalibrator to create the RHESSys command line for each model run created as part of the calibration session.
When running on compute clusters, we recommend that you also make a test cmd.proto for debugging your model:
cp MY_CALIBRATION_PROJECT/cmd.proto MY_CALIBRATION_PROJECT/cmd.proto.test
For example edit cmd.proto.test to run the model for only a single month:
$rhessys -st 2007 1 1 1 -ed 2007 2 1 1 -b -t $tecfile -w $worldfile -r $flowtable -pre $output_path $s1[0.01, 20.0] $s2[1.0, 150.0] $s3[0.1, 10.0] -sv $sv1[0.01, 20.0] $sv2[1.0, 150.0] -gw $gw1[0.001, 0.3] $gw2[0.01, 0.9]
Now save a copy of cmd.proto so that we don't overwrite it when testing our model:
cp MY_CALIBRATION_PROJECT/cmd.proto MY_CALIBRATION_PROJECT/cmd.proto.run
Before running a debug calibration session, copy cmd.proto.test to cmd.proto:
cp MY_CALIBRATION_PROJECT/cmd.proto.test MY_CALIBRATION_PROJECT/cmd.proto
## Create a new calibration session
To run RHESSysCalibrator, we use the *rhessys_calibrator* command:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 1 -j 1 --parallel_mode process
The *-b* option specifies the RHESSyCalibrator project directory to use. We describe the project with the *-p* option, and provide notes for this particular calibration session with the *-n* option. As this is a test calibration session, we specify that we only want to run one iteration using the *-i* option, and that we only want to run at most one model run at a given time using the *-j*. Lastly, we set the parallel mode to *process*, which is appropriate for running on a laptop or workstation. To run the calibration session on a compute cluster running LSF change the command line as follows:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 1 -j 1 --parallel_mode lsf --mem_limit N -q QUEUE_NAME
Where *N* is the amount of memory, in gigabytes (GB) that your model needs, and *QUEUE_NAME* is the name of the LSF queue to which jobs for this session should
be submitted. Note that the queue name will be specific to your compute cluster (ask for administrator for details).
Or to run the session on a compute cluster running PBS/TORQUE:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 1 -j 1 --parallel_mode pbs --mem_limit N --wall_time M
Where *M* is the amount of hours you estimate that your model will need to complete. You may also optionally specify the *-q* option when running under PBS. Similarly, to run the session on a compute cluster running SLURM:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 1 -j 1 --parallel_mode slurm --mem_limit N -q QUEUE_NAME
here, *Q* is the name of the SLURM partition to submit the job to. You may also optionally specify the *--wall_time* option when running under SLURM.
For details about these and other options, run *rhessys_calibrator* with the *--help* option:
rhessys_calibrator.py --help
> While running, RHESSysCalibrator will print lots of ugly though usually informative
> messages to the screen to tell you want it is doing.
Once you have successfully run your calibration project for a single iteration using your test *cmd.proto*, you can launch a real calibration session by first replacing the test *cmd.proto* with the real *cmd.proto*:
cp MY_CALIBRATION_PROJECT/cmd.proto.run MY_CALIBRATION_PROJECT/cmd.proto
Then, you can launch a calibration session as follows. For multiprocessor mode (i.e. on a laptop or workstation computer):
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 500 -j 8 --parallel_mode process
Where *-i* specifcies that this session should consist of 500 iterations or model realizations, with *-j* controlling the maximum number of iterations or simultaneous jobs (8 in this case) to run in parallel. The number of simultaneous jobs possible in multiprocessor mode will depend on the number of processors/cores/virtual cores in your computer, as well as the amount of memory and the amount of memory your RHESSys model requires (which depends on the spatial extent and resolution of your model domain).
> You can see how much memory your model requires to run by using your computer's
> Activity Monitor tool while the model is running.
When running on compute clusters, you will typically be able to running many more simultaneous jobs in parallel. For example on LSF-based clusters:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 5000 -j 1000 --parallel_mode lsf --mem_limit N -q QUEUE_NAME
or for PBS/TORQUE-based clusters:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 5000 -j 1000 --parallel_mode pbs --mem_limit N --wall_time M
or for SLURM-based clusters:
rhessys_calibrator.py -b MY_CALIBRATION_PROJECT -p 'My RHESSys model' -n 'Debug calibration session, one iteration' -i 5000 -j 1000 --parallel_mode slurm --mem_limit N --wall_time M
Here we are telling RHESSysCalibrator to run 5,000 model iterations in this session with at most 1,000 simultaneous jobs. Note that the number of simultaneous jobs possible will depend on the size of the compute cluster (e.g. number of cores) as well as administrative policies. For example, some systems restrict users to using at most a few hundred compute cores at any one time and may impose aggregate memory limits across all of your jobs, for example a few terabytes (TB). Consult the documentation for your cluster before trying to run more than a few simultaneous jobs using RHESSysCalibrator.
### Using screen to run RHESSysCalibrator on compute clusters
When running calibration runs on compute clusters, we recommend running
*rhessys_calibrator* within a *screen* session. *Screen* is a tool that allows you to
run commands in a terminal that will not exit if your connection to the compute
cluster is lost. Assuming *screen* is installed on your cluster (if it is not, ask your administrator to install it), you would launch screen before running any RHESSysCalibrator commands:
screen
This will cause screen to launch and to run a normal login shell, from which you can run RHESSysCalibrator commands. To detach from your screen session hit Control-A then Control-D on your keyboard. To re-attach to a detached screen session (whether you detached yourself or were forcibly disconnected), specify the *-r* option when running screen:
screen -r
## Restarting failed model sessions
On occasion, the *rhessys_calibrator* session may be forceably stopped before all calibration runs have finished (you may even decide to quit a session yourself). You can use the *rhessys_calibrator_restart* command to restart such a session, for LSF-based clusters:
rhessys_calibrator_restart.py -b MY_CALIBRATION_PROJECT -s N -i 5000 -j 1000 --parallel_mode lsf --mem_limit M -q QUEUE_NAME
or for PBS/TORQUE-based clusters:
rhessys_calibrator_restart.py -b MY_CALIBRATION_PROJECT -s N -i 5000 -j 1000 --parallel_mode pbs --mem_limit M --wall_time W
or for SLURM-based clusters:
rhessys_calibrator_restart.py -b MY_CALIBRATION_PROJECT -s N -i 5000 -j 1000 --parallel_mode slurm --mem_limit M --wall_time W
Where the *-s* option is used to specify the ID of the session that you would like to restart. The command will print how many runs have completed, how many will be restarted, and how many new runs will be started, before asking you whether you wish to continue.
## Calculate model fitness statistics for basin-level output
After the calibration session finishes (i.e. once all the model runs have completed), you can use *rhessys_calibrator_postprocess* to calculate model fitness parameters (e.g. Nash-Sutcliffe Efficiency for daily streamflow and daily log(streamflow)):
rhessys_calibrator_postprocess.py -b MY_CALIBRATION_PROJECT -f MY_OBSERVED_DATA -s 2 --enddate 2007 2 1 1 --figureX 8 --figureY 6
The *-s* option specifies the calibration session for which we wish to calculate fitness parameters; typically session *2* is our first real calibration session, as the first session was our testing session. Here we also specify the end date of the time period over which we wish to calculate fitness parameters; this is done using the *--enddate* option. Note that you can also specify the temporal aggregation with which to calculate fitness parameters using the *-p* (a.k.a. *--period*) option. The default is 'daily', but 'weekly' and 'monthly' are also supported. The *--figureX* and *--figureY* options control the X and Y dimensions (in inches) of output plots.
Once post-processing is finished, the following message will be printed:
Fitness results saved to post-process session: N
where "N" is the number of the post-process session just created for your calibration session; remember this number. The sensitivity of each parameter will be illustrated in "dotty plot" figure output as PDF file named *dotty_plots_SESSION_2_POSTPROCESS_1_daily.pdf* stored in the calibration project directory.
You can see the parameters used for each calibration run, as well as the fitness values for each run, by opening the calibration SQLite database stored in the calibration project. We recommend that you use the SQLite Manager add-on in the FireFox web browser to do so, though you can use any tool that can read SQLite version 3 databases. The calibration database for our project can be found here:
MY_CALIBRATION_PROJECT/db/calibration.sqlite
> If you are working with a database originally created in RHESSysCalibrator 1.0, the filename will be *calibration.db* instead.
Using SQLite Manager, you can view calibration parameters and model fitness statistics, and determine the model command line and output location for each model run. The name and purpose of the most important tables are as follows:
Table | Data stored
--- | ---
session | General information, one entry each time *rhessys_calibrator* or *rhessys_calibrator_behavioral* is run.
run | Detailed information about each model run in a session, multiple runs are associated with each session.
postprocess | General post-process information, one entry for each time *rhessys_calibrator_postprocess* or *rhessys_calibrator_behavioral* is run.
runfitness | Detailed run fitness information for a given model run, multiple runfitness entries are associated with each postprocess session.
To export model run information to CSV files suitable for importing into data analysis tools, you can use the *rhessys_calibrator_postprocess_export* tool:
rhessys_calibrator_postprocess_export.py -b MY_CALIBRATION_PROJECT -s N -f mypreferred_filename.csv
where "N" is the number of the post-process session output by *rhessys_calibrator_postprocess*.
## Performing GLUE uncertainty estimation
Once you have a suite of model realizations from a calibration session, RHESSysCalibrator can also facilitate simple uncertainty analysis using the Generalized Likelihood Uncertainty Estimation methodology (GLUE; Beven & Binley 1992).
### Applying behavioral parameter sets to another RHESSys model
The *rhessys_calibrator_behavioral* command will allow you to apply so-called behavioral model parameters from a previous calibration session to a new behavioral session representing a particular model scenario (e.g. a change in land cover from forest to suburban development; climate change scenarios, etc.).
The following invocation of *rhessys_calibrator_behavioral* will apply the top 100 model realizations, sorted in descending order by NSE-log then NSE, from post-process session 2 to a new behavioral RHESSysCalibrator project (make sure to copy the calibration.sqlite from the calibration project into the behavioral project), for LSF-based clusters:
rhessys_calibrator_behavioral.py -b MY_BEHAVIORAL_CALIBRATION_PROJECT -p 'Behavioral runs for My RHESSys model' -s 2 -c cmd.proto -j 100 --parallel_mode lsf --mem_limit M -q QUEUE_NAME -f "postprocess_id=2 order by nse_log desc, nse desc limit 100"
or for PBS/TORQUE-based clusters:
rhessys_calibrator_behavioral.py -b MY_BEHAVIORAL_CALIBRATION_PROJECT -p 'Behavioral runs for My RHESSys model' -s 2 -c cmd.proto -j 100 --parallel_mode pbs --mem_limit M --wall_time W -f "postprocess_id=2 order by nse_log desc, nse desc limit 100"
or for SLURM-based clusters:
rhessys_calibrator_behavioral.py -b MY_BEHAVIORAL_CALIBRATION_PROJECT -p 'Behavioral runs for My RHESSys model' -s 2 -c cmd.proto -j 100 --parallel_mode slurm --mem_limit M --wall_time W -f "postprocess_id=2 order by nse_log desc, nse desc limit 100"
Note that to allow the time period for uncertainty estimation of behavioral simulations to differ from that of the calibration session, we can specify a particular *cmd.proto* to use, which may or may not be the same as the *cmd.proto* used for the calibration session.
> The *-f* option can be any valid SQLite "WHERE" clause.
When the behavioral runs complete *rhessys_calibrator_behavioral* will print out the number of the calibration session and post-process session created, e.g.:
Behavioral results saved to session 3, post-process session 2
Take note of these for future reference in the visualization section below.
### Visualizing behavioral model output
Once the behavioral simulations are complete, you can visualize the uncertainty around estimates of streamflow using the *rhessys_calibrator_postprocess_behavioral* command:
rhessys_calibrator_postprocess_behavioral.py -b MY_BEHAVIORAL_CALIBRATION_PROJECT -s 2 -of PDF --figureX 8 --figureY 3 --supressObs --plotWeightedMean
For a full list of options, run:
rhessys_calibrator_postprocess_behavioral.py --help
*rhessys_calibrator_postprocess_behavioral* will output: (1) the number of modeled observations; (2) the number of observations within the 95% uncertainty bounds; and (3) the Average Relative Interval Length (ARIL; Jin et al. 2010); ARIL represents the width of the prediction interval. Better models are those that have a smaller ARIL and a higher number of observed streamflow values falling within the prediction interval, relative to a worse model.
### Comparing behavioral simulations
You can use the *rhessys_calibrator_postprocess_behavioral_compare* command to compare two sets of behavioral runs:
rhessys_calibrator_postprocess_behavioral_compare.py MY_BEHAVIORAL_CALIBRATION_PROJECT_1 MY_BEHAVIORAL_CALIBRATION_PROJECT_2 3 4 -t "Model scenario 1 vs. scenario 2 - 95% uncertainty bounds" -of PDF --figureX 8 --figureY 3 --plotWeightedMean --legend "Scenario 1" "Scenario 2" --behavioral_filter "nse>0 order by nse_log desc, nse desc limit 100" --supressObs --color
Here we are telling RHESSysCalibrator that we would like to compare post-process sessions 3 and 4, plotting the weighted ensemble mean (Seibert & Beven 2009) with the *--plotWeightedMean* option. Note that *MY_BEHAVIORAL_CALIBRATION_PROJECT_1* and *MY_BEHAVIORAL_CALIBRATION_PROJECT_2* could be the same RHESSysCalibrator project.
*rhessys_calibrator_postprocess_behavioral_compare* will perform a Kolmogorov-Smirnov (K-S) test to determine if the weighted ensemble mean streamflow time series from the behavioral runs are statistically significant. Here is some example output:
```
Critical value for Kolmogorov-Smirnov statistic (D_alpha; alpha=0.0500): 0.0712
Kolmogorov-Smirnov statistic (D): 0.0205
D > D_alpha? False
p-value: 0.9977
p-value < alpha? False
```
Which indicates that there is no statistically significant difference between the weighted ensemble mean streamflow time series of the two behavioral model scenarios.
### Visualizing behavioral model output using other tools
If you would like to visualize behavioral streamflow data using other analysis or visualization tools, you can use the *rhessys_calibrator_postprocess_behavioral_timeseries* command to out output: min, max, median, mean, and weighted ensemble mean time series:
rhessys_calibrator_postprocess_behavioral_timeseries.py -b MY_BEHAVIORAL_CALIBRATION_PROJECT -s 3
where the *-s* or *--postprocess_session* option refers to the post-process session created for your behavioral run by *rhessys_calibrator_behavioral*.
Then, for example, you can use RHESSysWorkflow's *RHESSysPlot* command to make scatter plots of streamflow for two behavioral sessions:
RHESSysPlot.py -p scatter -o MY_BEHAVIORAL_CALIBRATION_PROJECT/obs/MY_OBSERVED_DATA -b MY_BEHAVIORAL_CALIBRATION_PROJECT/behavioral_ts_SESSION_3_weighted_ensmb_mean.csv MY_BEHAVIORAL_CALIBRATION_PROJECT/behavioral_ts_SESSION_4_weighted_ensmb_mean.csv -c streamflow -t "Weighted ensemble mean daily streamflow: Scenario 1 v. Scenario 2" -l "Streamflow - Scenario 1 (mm/day)" "Streamflow - Scenario 2 (mm/day)" --supressObs -f scenario1_v_scenario2 --figureX 8 --figureY 6
See:
RHESSysPlot.py --help
for a complete description of possible options.
## Appendix
### Model directory structure
Before running rhessys_calibrator.py to perform calibrations, it is first
necessary to create the session directory structure. This is done by
issuing the --create argument, along with the always-required
--basedir argument. This will create the following directory
structure within $BASEDIR:
```
db/ Where the session DB will be stored (by
RHESSysCalibrator)
rhessys/src/ Where you should place your RHESSys source code (optional).
rhessys/bin/ Where you will place your RHESSys binary. (if more than one
executable is present, there is no guarantee as to which will
be used.)
rhessys/worldfiles/active/ Where you will place world files for which
calibration should be performed. (All worldfiles
in this directory will be calibrated as part of the
session)
rhessys/flow/ Where you will place flow tables associated with
each worldfile to be calibrated. Flow table file name must be
of the form $(WORLDFILE_NAME)_flow_table.dat
rhessys/tecfiles/active Where you will place the TEC file used in your
calibration runs (only one will be used, with
no guaranty of what will be used if there is
more than one file in this directory)
rhessys/defs/ Where you will place default files referenced in
your worldfiles
rhessys/clim/ Where you will place climate data referenced in your
worldfiles
rhessys/output/ Where output for each run will be stored. Each
run's output will be stored in a directory named
"SESSION_$SESSION_ID$WORLDFILE_ITR_$ITR" where
$SESSION_ID is the session associated with a run,
$ITR is the iteration and $WORLDFILE corresponds
to a worldfile listed in
$BASEDIR/rhessys/worldfiles/active
obs/ Where you will store observed data to be compared to
data from calibration model runs.
```
### References
Beven, K. & Binley, A., 1992. The future of distributed models: Model calibration and uncertainty prediction. Hydrological Processes, 6(3), pp.279–298.
Jin, X., Xu, C.-Y., Zhang, Q., & Singh, V. P., 2010. Parameter and modeling uncertainty simulated by GLUE and a formal Bayesian method for a conceptual hydrological model. Journal of Hydrology, 383(3-4), pp.147–155.
Seibert, J. & Beven, K J, 2009. Gauging the ungauged basin: how many discharge measurements are needed? Hydrology and Earth System Sciences.
| /rhessyscalibrator-2.1.0.tar.gz/rhessyscalibrator-2.1.0/README.md | 0.593845 | 0.721847 | README.md | pypi |
import sys
import argparse
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib
from rhessysworkflows.rhessys import RHESSysOutput
# Column names required in the observed-data CSV (parsed in __main__ below).
OBS_HEADER_STREAMFLOW = 'streamflow_mm'
OBS_HEADER_PRECIP = 'precip_mm'
# Panel types rendered by plotGraph/plotTable: linear time series,
# log-y time series, empirical CDF, and a mass-balance summary table.
PLOT_TYPE_STD = 'standard'
PLOT_TYPE_LOGY = 'logy'
PLOT_TYPE_CDF = 'cdf'
PLOT_TYPE_TABLE = 'table'
PLOT_TYPES = [PLOT_TYPE_STD, PLOT_TYPE_LOGY, PLOT_TYPE_CDF, PLOT_TYPE_TABLE]
PLOT_DEFAULT = PLOT_TYPE_STD
def plotTable(args, col_names, obs, data, ax):
    """Draw a mass-balance summary table (observed vs. modeled) on *ax*.

    Rows: total precipitation, total streamflow, ET, and runoff ratio,
    accumulated over the aligned simulation period. Observed ET is inferred
    by water balance (precip - streamflow); modeled ET is evap + trans.
    """
    period_start = data['streamflow'].index[0]
    period_end = data['streamflow'].index[-1]

    obs_precip = np.sum(obs[OBS_HEADER_PRECIP])
    obs_flow = np.sum(obs[OBS_HEADER_STREAMFLOW])
    mod_precip = np.sum(data['precip'])
    mod_flow = np.sum(data['streamflow'])
    mod_et = np.sum(data['evap']) + np.sum(data['trans'])

    summary = [[obs_precip, mod_precip],
               [obs_flow, mod_flow],
               [obs_precip - obs_flow, mod_et],
               [obs_flow / obs_precip, mod_flow / mod_precip]]
    text = [["%.2f" % cell for cell in row] for row in summary]

    ax.axis('off')
    # Render the summary as a matplotlib table anchored to the right.
    table = ax.table(cellText=text,
                     colWidths=[0.33, 0.33],
                     colLabels=col_names,
                     rowLabels=['Precip ($mm$)', 'Streamflow ($mm$)', 'ET ($mm$)', 'Runoff ratio'],
                     loc='center right')
    table.auto_set_font_size(False)
    table.set_fontsize(9)
    ax.text(0, 0.75,
            "Mass balance: %s - %s" % (period_start.strftime('%Y/%m/%d'),
                                       period_end.strftime('%Y/%m/%d')),
            horizontalalignment='left',
            fontsize=10)
def plotGraph(args, plottype, obs, data, columns, min_x, max_x, ax, secondary=None,
              plotColor=True):
    """Plot observed and modeled streamflow on *ax*.

    plottype selects a linear time series, log-y time series, or empirical
    CDF (via statsmodels ECDF). An optional *secondary* column (e.g. precip)
    is drawn on an inverted twin y-axis for time-series plots.

    Returns the list of plot handles, with the observed-data handle inserted
    at index 0 (None when --supressObs is set -- callers index accordingly).
    """
    if plotColor:
        obs_color = 'black'
        mod_color = 'green'
        second_color = 'blue'
    else:
        obs_color = 'black'
        mod_color = 'grey'
        second_color = 'black'

    if plottype == PLOT_TYPE_STD or \
       plottype == PLOT_TYPE_LOGY:
        x = obs.index
    elif plottype == PLOT_TYPE_CDF:
        x = np.linspace(min_x, max_x, num=1000)

    # Plot observed values (transformed to an ECDF for CDF plots).
    obs_y = obs[OBS_HEADER_STREAMFLOW]
    if plottype == PLOT_TYPE_CDF:
        obs_ecdf = sm.distributions.ECDF(obs_y)
        obs_y = obs_ecdf(x)
    obs_plt = None
    if not args.supressObs:
        (obs_plt,) = ax.plot(x, obs_y, obs_color, linewidth=2)

    # Plot modeled values, one line per requested column.
    data_plt = []
    for c in columns:
        mod_y = data[c]
        if plottype == PLOT_TYPE_CDF:
            mod_ecdf = sm.distributions.ECDF(data[c])
            mod_y = mod_ecdf(x)
        (mod_plt,) = ax.plot(x, mod_y, color=mod_color, linewidth=1)
        data_plt.append(mod_plt)

    # X-axis: date ticks for time series, log scale for CDFs.
    if plottype == PLOT_TYPE_STD or \
       plottype == PLOT_TYPE_LOGY:
        # Choose tick density based on the length of the record.
        num_years = len(x) / 365
        if num_years > 2:
            if num_years > 5:
                ax.xaxis.set_major_locator(matplotlib.dates.YearLocator())
            else:
                ax.xaxis.set_major_locator(matplotlib.dates.MonthLocator(interval=3))
        else:
            ax.xaxis.set_major_locator(matplotlib.dates.MonthLocator())
        ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b-%Y'))
        # Rotate tick labels so adjacent dates don't collide.
        plt.setp(ax.xaxis.get_majorticklabels(), rotation=45)
        plt.setp(ax.xaxis.get_majorticklabels(), fontsize='x-small')
    if plottype == PLOT_TYPE_CDF:
        ax.set_xlim(min_x, max_x)
        ax.set_xscale('log')
        if args.xlabel:
            ax.set_xlabel(args.xlabel)
        else:
            # (dead `pass` statement removed; raw string avoids the invalid
            # '\ ' escape warning in the LaTeX label)
            ax.set_xlabel(r'Streamflow ($mm\ day^{-1}$)')
    elif args.xlabel:
        ax.set_xlabel(args.xlabel)

    # Y-axis
    if plottype == PLOT_TYPE_LOGY:
        ax.set_yscale('log')
    if args.ylabel:
        ax.set_ylabel(args.ylabel)
    elif plottype != PLOT_TYPE_CDF and plottype != PLOT_TYPE_LOGY:
        ax.set_ylabel(r'Streamflow ($mm\ day^{-1}$)')

    data_plt.insert(0, obs_plt)

    # Secondary data on an inverted twin axis (time-series plots only).
    if secondary and \
       (plottype == PLOT_TYPE_STD or plottype == PLOT_TYPE_LOGY):
        ax2 = ax.twinx()
        if args.secondaryPlotType == 'line':
            (sec_plot,) = ax2.plot(x, data[secondary])
        elif args.secondaryPlotType == 'bar':
            sec_plot = ax2.bar(x, data[secondary], facecolor=second_color,
                               edgecolor='none', width=2.0)
        ax2.invert_yaxis()
        # Typo fixed: was 'Precipication'.
        ax2.set_ylabel(r'Precipitation ($mm\ day^{-1}$)')

    return data_plt
if __name__ == "__main__":
    # Handle command line options
    parser = argparse.ArgumentParser(description='Plot CDF of N datasets vs. observed data')
    parser.add_argument('-n', '--outname', required=True,
                        help='Base name of file to output figure to. Only specify base name of file, not extension (PDF and PNG files will be produced)')
    parser.add_argument('-o', '--obs', required=True,
                        help='File containing observed data. Must be a CSV file with the following headers: datetime, streamflow_mm, precip_mm')
    parser.add_argument('-d', '--data', required=True,
                        help='One or more data files')
    parser.add_argument('-t', '--title', required=False,
                        help='Title of figure')
    parser.add_argument('-x', '--xlabel', required=False,
                        help='X-axis label')
    parser.add_argument('-y', '--ylabel', required=False,
                        help='Y-axis label')
    parser.add_argument('--figureX', required=False, type=int, default=8,
                        help='The width of the plot, in inches')
    parser.add_argument('--figureY', required=False, type=int, default=6,
                        help='The height of the plot, in inches')
    parser.add_argument('--supressObs', required=False, action='store_true',
                        help='Do not plot observed data. Observed data will still be used for aligning timeseries')
    parser.add_argument("--color", action="store_true", required=False, default=False,
                        help="Plot in color")
    parser.add_argument('--secondaryPlotType', required=False, choices=['bar', 'line'], default='bar',
                        help='Type of plot to use for secondary data.')
    parser.add_argument('--secondaryLabel', required=False,
                        help='Label to use for seconary Y-axis')
    args = parser.parse_args()

    # Open observed data: CSV indexed by the datetime column.
    obs = pd.read_csv(args.obs, index_col=0, parse_dates=True)

    # Read modeled output; `with` guarantees the file is closed even if the
    # reader raises (the original opened/closed manually).
    cols = ['streamflow', 'evap', 'trans', 'precip']
    max_x = min_x = 0
    with open(args.data, 'r') as mod_file:
        mod_df = RHESSysOutput.readColumnsFromFile(mod_file, cols,
                                                   readHour=False)

    # Align modeled and observed timeseries on their common dates.
    (mod_align, obs_align) = mod_df.align(obs, axis=0, join='inner')
    # Track x-axis bounds for the CDF panel from both series.
    tmp_max_x = max(mod_align['streamflow'].max(), obs_align[OBS_HEADER_STREAMFLOW].max())
    if tmp_max_x > max_x:
        max_x = tmp_max_x
    min_x = max(min_x, mod_align['streamflow'].min())

    # Four panels: (a) linear, (b) log-y, (c) CDF, (d) mass-balance table.
    fig = plt.figure(figsize=(args.figureX, args.figureY), dpi=80, tight_layout=True)
    ax_std = fig.add_subplot(221)
    ax_log = fig.add_subplot(222)
    ax_cdf = fig.add_subplot(223)
    ax_tab = fig.add_subplot(224)
    data_plt = plotGraph(args, PLOT_TYPE_STD, obs_align, mod_align, ['streamflow'],
                         min_x, max_x, ax_std, secondary='precip', plotColor=args.color)
    fig.text(0.0, 1.0, '(a)')
    plotGraph(args, PLOT_TYPE_LOGY, obs_align, mod_align, ['streamflow'],
              min_x, max_x, ax_log, plotColor=args.color)
    fig.text(1.0, 1.0, '(b)')
    plotGraph(args, PLOT_TYPE_CDF, obs_align, mod_align, ['streamflow'],
              min_x, max_x, ax_cdf, plotColor=args.color)
    fig.text(0.0, 0.5, '(c)')
    col_names = ['Observed', 'Modeled']
    plotTable(args, col_names, obs_align, mod_align, ax_tab)
    fig.text(1.0, 0.5, '(d)')

    # Figure annotations
    if args.title:
        fig.suptitle(args.title, y=1.01)
    legend_items = ['Observed', 'Modeled']
    fig.legend(data_plt, legend_items, 'lower right', fontsize=10, ncol=2)

    # Output plot as both PNG and PDF.
    if args.color:
        outname = "%s_color" % (args.outname,)
    else:
        outname = args.outname
    plot_filename_png = "%s.png" % (outname,)
    plot_filename_pdf = "%s.pdf" % (outname,)
    plt.savefig(plot_filename_png, bbox_inches='tight')
    plt.savefig(plot_filename_pdf, bbox_inches='tight')
import sys
import argparse
import math
import numpy as np
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
import matplotlib
from rhessysworkflows.rhessys import RHESSysOutput
# Recognized plot types for the --plottype option.
PLOT_TYPE_STD = 'standard'
PLOT_TYPE_LOGY = 'logy'
PLOT_TYPE_CDF = 'cdf'
PLOT_TYPE_SCATTER = 'scatter'
PLOT_TYPE_SCATTER_LOG = 'scatter-log'
PLOT_TYPES = [PLOT_TYPE_STD, PLOT_TYPE_LOGY, PLOT_TYPE_CDF, PLOT_TYPE_SCATTER, PLOT_TYPE_SCATTER_LOG]
PLOT_DEFAULT = PLOT_TYPE_STD
# Symbolic line-style names accepted on the command line, mapped to
# matplotlib linestyle strings.
# NOTE(review): '.' is a marker spec, not a matplotlib linestyle — confirm
# LINE_TYPE_DOT actually renders as intended when passed as linestyle=.
LINE_TYPE_LINE = 'line'
LINE_TYPE_DASH = 'dash'
LINE_TYPE_DASH_DOT = 'dashdot'
LINE_TYPE_COLON = 'colon'
LINE_TYPE_DOT = 'dot'
LINE_TYPE_DICT = { LINE_TYPE_LINE: '-',
                   LINE_TYPE_DASH: '--',
                   LINE_TYPE_DASH_DOT: '-.',
                   LINE_TYPE_COLON: ':',
                   LINE_TYPE_DOT: '.' }
LINE_TYPES = [LINE_TYPE_LINE, LINE_TYPE_DASH, LINE_TYPE_DASH_DOT, LINE_TYPE_COLON, LINE_TYPE_DOT]
NUM_LINE_TYPES = len(LINE_TYPES)
def plotGraphScatter(args, obs, data, log=False, sizeX=1, sizeY=1, dpi=80):
    """Draw an observed-vs-modeled scatter plot with 1:1 and best-fit lines.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed options; uses scatterwidth, title, legend, titlefontsize,
        axesfontsize, ticklabelfontsize, and fontweight.
    obs : object
        Unused here; kept for signature compatibility with plotGraph().
    data : sequence
        Exactly two series: data[0] is plotted on x, data[1] on y.
    log : bool, optional
        If True, draw on log-log axes.
    sizeX, sizeY : int, optional
        Figure size in inches.
    dpi : int, optional
        Figure resolution.
    """
    assert( len(data) == len(args.legend) == 2 )

    if args.scatterwidth:
        linewidth = args.scatterwidth
    else:
        linewidth = 1

    fig = plt.figure(figsize=(sizeX, sizeY), dpi=dpi, tight_layout=False)
    ax = fig.add_subplot(111)

    x = data[0]
    y = data[1]
    min_val = min( min(x), min(y) )
    max_val = max( max(x), max(y) )
    floor = math.floor(min_val)
    ceil = math.ceil(max_val)
    # Renamed from 'range' to avoid shadowing the builtin.
    fit_range = np.linspace(floor, ceil, 1000)

    # Same drawing calls on linear or log-log axes.
    plot = ax.loglog if log else ax.plot
    plot(x, y, '.', markersize=linewidth*6)
    plot(fit_range, fit_range, 'k-', linewidth=linewidth)  # 1:1 line

    # Fit line.
    # NOTE(review): the intercept returned by polyfit is discarded, so the
    # plotted line is the fitted slope through the origin, matching the
    # "$y = m x$" annotation below — confirm this is intended.
    (m, _intercept) = np.polyfit(x, y, 1)
    fit_y = m * fit_range
    plot(fit_range, fit_y, '--', color='grey', linewidth=linewidth)

    ax.set_xlim(floor, ceil)
    ax.set_ylim(floor, ceil)

    # Plot annotations
    # Annotate fit line, making sure the annotation does not overlap 1:1 line.
    # (The original m<=1 / m>1 branches were identical for log axes, so the
    # log placement is shared here.)
    if log:
        text_x, text_y = 0.3 * ceil, 0.2 * max(fit_y)
    elif m <= 1:
        text_x, text_y = 0.85 * ceil, 0.8 * max(fit_y)
    else:
        text_x, text_y = 0.65 * ceil, 0.8 * max(fit_y)
    ax.text(text_x, text_y, "$y = %.2f x$" % (m,),
            fontsize=args.ticklabelfontsize)

    if args.title:
        title = args.title
    else:
        title = "%s vs. %s" % (args.legend[0], args.legend[1])
    fig.suptitle(title, y=0.98, fontsize=args.titlefontsize, fontweight=args.fontweight)
    # X-axis
    ax.set_xlabel(args.legend[0], fontsize=args.axesfontsize, fontweight=args.fontweight)
    plt.setp(ax.get_xticklabels(), fontsize=args.ticklabelfontsize)
    # Y-axis
    ax.set_ylabel(args.legend[1], fontsize=args.axesfontsize, fontweight=args.fontweight)
    plt.setp(ax.get_yticklabels(), fontsize=args.ticklabelfontsize)
def plotGraph(args, obs, data, sizeX=1, sizeY=1, dpi=80):
    """Plot observed vs. one or more modeled timeseries.

    Renders a standard, log-y, or CDF figure (per args.plottype), with an
    optional secondary data series on a twinned, inverted y-axis.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed options (plottype, column, color, linewidth, linestyle,
        legend, xlabel, ylabel, supressObs, secondaryData, ...).
    obs : pandas.Series
        Observed data, already aligned to the modeled series.
    data : list of pandas.Series
        Modeled series, each aligned to ``obs``.
    sizeX, sizeY : int, optional
        Figure size in inches.
    dpi : int, optional
        Figure resolution.

    NOTE(review): for CDF plots this function reads the module-level globals
    ``min_x`` and ``max_x`` set in the __main__ block; it is not usable as a
    standalone library function without them.
    """
    fig = plt.figure(figsize=(sizeX, sizeY), dpi=dpi, tight_layout=True)
    ax = fig.add_subplot(111)

    if args.plottype == PLOT_TYPE_STD or \
       args.plottype == PLOT_TYPE_LOGY:
        x = obs.index
    elif args.plottype == PLOT_TYPE_CDF:
        x = np.linspace(min_x, max_x, num=len(obs) )

    # Plot observed values: raw values for standard/log, ECDF for CDF plots.
    obs_y = obs
    if args.plottype == PLOT_TYPE_CDF:
        obs_ecdf = sm.distributions.ECDF(obs)
        obs_y = obs_ecdf(x)
    obs_plt = None
    if not args.supressObs:
        (obs_plt,) = ax.plot(x, obs_y, linewidth=2.0, color='black')

    # Plot modeled values
    data_plt = []
    for (i, d) in enumerate(data):
        mod_y = d
        if args.plottype == PLOT_TYPE_CDF:
            mod_ecdf = sm.distributions.ECDF(d)
            mod_y = mod_ecdf(x)

        if args.linewidth:
            linewidth = args.linewidth[i]
        else:
            linewidth = 1.0

        if args.linestyle:
            linestyle = LINE_TYPE_DICT[ args.linestyle[i] ]
        else:
            # Rotate through the available line styles
            styleIdx = ( (i + 1) % NUM_LINE_TYPES ) - 1
            linestyle = LINE_TYPE_DICT[ LINE_TYPES[styleIdx] ]

        if args.color:
            (mod_plt,) = ax.plot(x, mod_y, linewidth=linewidth, linestyle=linestyle,
                                 color=args.color[i])
        else:
            (mod_plt,) = ax.plot(x, mod_y, linewidth=linewidth, linestyle=linestyle)
        data_plt.append(mod_plt)

    # Plot annotations
    columnName = args.column.capitalize()
    if args.title:
        title = args.title
    else:
        if args.plottype == PLOT_TYPE_STD:
            title = columnName
        elif args.plottype == PLOT_TYPE_LOGY:
            title = "log(%s)" % (columnName,)
        elif args.plottype == PLOT_TYPE_CDF:
            title = "Cummulative distribution - %s" % (columnName,)
    fig.suptitle(title, y=0.99)

    # X-axis: date ticks for timeseries plots, density chosen by record length.
    if args.plottype == PLOT_TYPE_STD or \
       args.plottype == PLOT_TYPE_LOGY:
        num_years = len(x) / 365
        if num_years > 4:
            if num_years > 10:
                ax.xaxis.set_major_locator(matplotlib.dates.YearLocator())
            else:
                ax.xaxis.set_major_locator(matplotlib.dates.MonthLocator(interval=3))
        else:
            ax.xaxis.set_major_locator(matplotlib.dates.MonthLocator())
        ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%b-%Y'))
        # Rotate
        plt.setp( ax.xaxis.get_majorticklabels(), rotation=45)
        plt.setp( ax.xaxis.get_majorticklabels(), fontsize='x-small')
    if args.plottype == PLOT_TYPE_CDF:
        ax.set_xlim(min_x, max_x)
        ax.set_xscale('log')
        if args.xlabel:
            ax.set_xlabel(args.xlabel)
        else:
            ax.set_xlabel( columnName )
    elif args.xlabel:
        ax.set_xlabel(args.xlabel)

    # Y-axis
    if args.plottype == PLOT_TYPE_LOGY:
        ax.set_yscale('log')
    if args.ylabel:
        ax.set_ylabel(args.ylabel)
    elif args.plottype != PLOT_TYPE_CDF:
        y_label = columnName
        if args.plottype == PLOT_TYPE_LOGY:
            y_label = "log( %s )" % (columnName,)
        ax.set_ylabel( y_label )

    if args.supressObs:
        legend_items = args.legend
    else:
        data_plt.insert(0, obs_plt)
        legend_items = ['Observed'] + args.legend

    # Plot secondary data (if specified)
    if args.secondaryData and \
       (args.plottype == PLOT_TYPE_STD or args.plottype == PLOT_TYPE_LOGY):
        sec_file = open(args.secondaryData, 'r')
        (sec_datetime, sec_data) = RHESSysOutput.readColumnFromFile(sec_file,
                                                                    args.secondaryColumn,
                                                                    startHour=0)
        sec_file.close()
        sec = pd.Series(sec_data, index=sec_datetime)
        # Align timeseries
        (sec_align, obs_align) = sec.align(obs, join='inner')
        # Plot on a twinned, inverted axis (e.g. precipitation hanging down).
        ax2 = ax.twinx()
        if args.secondaryPlotType == 'line':
            (sec_plot,) = ax2.plot(x, sec_align)
        elif args.secondaryPlotType == 'bar':
            sec_plot = ax2.bar(x, sec_align, facecolor='blue', edgecolor='none', width=2.0)
        secondaryLabel = args.secondaryColumn.capitalize()
        if args.secondaryLabel:
            secondaryLabel = args.secondaryLabel
        ax2.invert_yaxis()
        # BUGFIX: use the computed fallback label; the original passed
        # args.secondaryLabel, which is None when --secondaryLabel is absent.
        ax2.set_ylabel(secondaryLabel)
        #ax.set_zorder(ax2.get_zorder()+1) # put ax in front of ax2
        #ax.patch.set_visible(False) # hide the 'canvas'

    # Plot legend last
    num_cols = len(data)
    if not args.supressObs:
        num_cols += 1
    if args.plottype == PLOT_TYPE_CDF:
        fig.legend( data_plt, legend_items, 'lower center', fontsize='x-small',
                    bbox_to_anchor=(0.5, -0.015), ncol=num_cols, frameon=False )
    else:
        fig.legend( data_plt, legend_items, 'lower center', fontsize='x-small',
                    bbox_to_anchor=(0.5, -0.01), ncol=num_cols, frameon=False )
if __name__ == "__main__":
    # Handle command line options
    parser = argparse.ArgumentParser(description='Plot CDF of N datasets vs. observed data')
    parser.add_argument('-p', '--plottype', required=False,
                        default=PLOT_DEFAULT, choices=PLOT_TYPES,
                        help='Type of plot')
    parser.add_argument('-f', '--outfileSuffix', required=False,
                        help='Suffix to append on to name part of file name (i.e. before extension)')
    parser.add_argument('-o', '--obs', required=True,
                        help='File containing observed data')
    # Exactly one of --data / --behavioralData must be supplied.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-d', '--data', nargs='+',
                       help='One or more RHESSys output data files')
    group.add_argument('-b', '--behavioralData', nargs='+',
                       help='One or more ensemble output files from RHESSys behavioral runs')
    parser.add_argument('--color', required=False, nargs='+',
                        help='Color of symbol to be applied to plots of data. Color must be expressed in form recognized by matplotlib.')
    parser.add_argument('--linewidth', required=False, nargs='+', type=float,
                        help='Width of lines to be applied to plots of data. Value must be float in units of points.')
    parser.add_argument('--linestyle', required=False, nargs='+', choices=LINE_TYPES,
                        help='Style of symbol to be applied to plots of data. Styles correspond to those of matplotlib.')
    parser.add_argument('-c', '--column', required=True,
                        help='Name of column to use from data files')
    parser.add_argument('-t', '--title', required=False,
                        help='Title of figure')
    parser.add_argument('-l', '--legend', required=True, nargs='+',
                        help='Legend item labels')
    parser.add_argument('-x', '--xlabel', required=False,
                        help='X-axis label')
    parser.add_argument('-y', '--ylabel', required=False,
                        help='Y-axis label')
    parser.add_argument('--titlefontsize', required=False, type=float,
                        default=12)
    parser.add_argument('--scatterwidth', required=False, type=float,
                        default=1, help='Width to use for lines and markers in scatter plots. Markers size will be determine by multiplying scatterwidth by 6.')
    parser.add_argument('--fontweight', required=False,
                        choices=['ultralight','light','normal','regular','book','medium','roman','semibold','demibold','demi','bold','heavy','extra bold','black'],
                        default='regular')
    parser.add_argument('--legendfontsize', required=False, type=float,
                        default=6)
    parser.add_argument('--axesfontsize', required=False, type=float,
                        default=12)
    parser.add_argument('--ticklabelfontsize', required=False, type=float,
                        default=12)
    parser.add_argument('--figureX', required=False, type=int, default=4,
                        help='The width of the plot, in inches')
    parser.add_argument('--figureY', required=False, type=int, default=3,
                        help='The height of the plot, in inches')
    parser.add_argument('--supressObs', required=False, action='store_true',
                        help='Do not plot observed data. Observed data will still be used for aligning timeseries. Not applicable to scatter plot output.')
    parser.add_argument('--secondaryData', required=False,
                        help='A data file containing the varaible to plot on a secondary Y-axis')
    parser.add_argument('--secondaryPlotType', required=False, choices=['bar', 'line'], default='bar',
                        help='Type of plot to use for secondary data.')
    parser.add_argument('--secondaryColumn', required=False,
                        help='Name of column to use from secondary data file')
    parser.add_argument('--secondaryLabel', required=False,
                        help='Label to use for seconary Y-axis')
    args = parser.parse_args()

    # Cross-option validation.
    # NOTE(review): the --color/--linewidth/--linestyle checks use
    # len(args.data), which is None when --behavioralData was supplied
    # instead of --data, so they raise TypeError in that combination —
    # presumably they should compare against whichever list was given.
    if args.color:
        if len(args.color) != len(args.data):
            sys.exit('Number of colors must match number of data files')
    if args.linewidth:
        if min(args.linewidth) <= 0.0:
            sys.exit('All line widths must be > 0.0')
        if len(args.linewidth) != len(args.data):
            sys.exit('Number of line widths must match number of data files')
    if args.linestyle:
        if len(args.linestyle) != len(args.data):
            sys.exit('Number of line styles must match number of data files')
    if args.secondaryData and not args.secondaryColumn:
        sys.exit('A secondary data file was specified, but the secondary column to use was not')
    if args.data and ( len(args.data) != len(args.legend) ):
        sys.exit('Number of legend items must equal the number of data files')
    elif args.behavioralData and ( len(args.behavioralData) != len(args.legend) ):
        sys.exit('Number of legend items must equal the number of data files')

    # Open data and align to observed
    obs_align = None
    data = []
    # min_x/max_x track the value range across all aligned series; they are
    # read as globals by plotGraph() for CDF axis limits.
    max_x = min_x = 0
    if args.data:
        # Open observed data
        obs_file = open(args.obs, 'r')
        (obs_datetime, obs_data) = RHESSysOutput.readObservedDataFromFile(obs_file,
                                                                          readHour=False)
        obs_file.close()
        obs = pd.Series(obs_data, index=obs_datetime)
        for d in args.data:
            mod_file = open(d, 'r')
            (tmp_datetime, tmp_data) = RHESSysOutput.readColumnFromFile(mod_file, args.column, startHour=0)
            tmp_mod = pd.Series(tmp_data, index=tmp_datetime)
            # Align timeseries
            (mod_align, obs_align) = tmp_mod.align(obs, join='inner')
            tmp_max_x = max(mod_align.max(), obs_align.max())
            if tmp_max_x > max_x:
                max_x = tmp_max_x
            min_x = max(min_x, mod_align.min())
            mod_file.close()
            data.append( mod_align )
    elif args.behavioralData:
        # Open observed data (behavioral data has hour in it, so we need to read obs. data differently)
        obs_file = open(args.obs, 'r')
        (obs_datetime, obs_data) = RHESSysOutput.readObservedDataFromFile(obs_file,
                                                                          readHour=True)
        obs_file.close()
        obs = pd.Series(obs_data, index=obs_datetime)
        for b in args.behavioralData:
            tmp_mod = pd.read_csv(b, index_col=0, parse_dates=True)
            # Convert df to series
            tmp_mod = pd.Series(tmp_mod[args.column], index=tmp_mod.index)
            # Align timeseries
            (mod_align, obs_align) = tmp_mod.align(obs, join='inner')
            tmp_max_x = max(mod_align.max(), obs_align.max())
            if tmp_max_x > max_x:
                max_x = tmp_max_x
            min_x = max(min_x, mod_align.min())
            data.append( mod_align )

    # Dispatch to the requested plot type.
    if args.plottype == PLOT_TYPE_SCATTER:
        plotGraphScatter(args, obs_align, data, log=False,
                         sizeX=args.figureX, sizeY=args.figureY)
    elif args.plottype == PLOT_TYPE_SCATTER_LOG:
        plotGraphScatter(args, obs_align, data, log=True,
                         sizeX=args.figureX, sizeY=args.figureY)
    else:
        plotGraph(args, obs_align, data,
                  sizeX=args.figureX, sizeY=args.figureY)

    # Output plot: file name is the plot type plus optional suffix.
    filename = args.plottype
    if args.outfileSuffix:
        filename += '_' + args.outfileSuffix
    plot_filename_png = "%s.png" % (filename,)
    plot_filename_pdf = "%s.pdf" % (filename,)
    plt.savefig(plot_filename_png)
    plt.savefig(plot_filename_pdf)
import time
import threading
import logging
class Pool(object):
    """A pool of available powershell objects.

    We keep a few PowerShell instances around as setup is expensive
    (around 4 seconds on my system). This way interfaces on top of this
    API can be more responsive. The algorithm is adaptive so there should
    be little reason to change the tunables below.
    """

    minsize = 2       # minimum instances kept per argument set
    maxival = 300     # max idle seconds before an instance is expired
    maxlife = 3600    # max lifetime in seconds of any instance
    maxcount = 100    # max number of times an instance may be handed out
    fast_delay = 5    # seconds between regular maintenance passes
    slow_delay = 60   # seconds between full (expire/shrink) passes

    def __init__(self, type, constructor):
        """Create a pool producing instances of `type` via `constructor`."""
        self.type = type
        self.constructor = constructor
        self.logger = logging.getLogger('rhevm.pool')
        # Maps key -> (args, [instances, most-recently-used first]).
        self._pool = {}
        self._lock = threading.Lock()
        self._thread = None
        self._threads_to_join = []
        self._last_maintenance = time.time()
        self._last_full_maintenance = self._last_maintenance

    def get(self, args):
        """Return an instance for `args`, reusing a pooled one if available."""
        instance = self._get_instance(args)
        if not instance:
            instance = self._create_instance(args)
        return instance

    def put(self, instance):
        """Put an instance back into the pool."""
        instance.count += 1
        # BUGFIX: refresh `last_used` (the attribute read by the expiry
        # checks); the original set an otherwise-unused `last_time`, so
        # actively reused instances still expired after `maxival` seconds.
        instance.last_used = time.time()
        self._add_instance(instance.args, instance)

    def clear(self):
        """Clear the pool (NOT thread safe)."""
        if self._thread:
            self._thread.join()
        terminate = []
        for key in self._pool:
            terminate += self._pool[key][1]
        self._pool.clear()
        for inst in terminate:
            self._terminate_instance(inst)
        if terminate:
            self.logger.debug('Cleared <%s> pool (%d instances)'
                              % (self._get_type(), len(terminate)))
        self._thread = None

    def size(self):
        """Return the total number of pooled instances."""
        size = 0
        with self._lock:
            for key in self._pool:
                size += len(self._pool[key][1])
        return size

    def maintenance(self):
        """Start a maintenance pass in a background thread if one is due."""
        start = False
        with self._lock:
            # is_alive() replaces the isAlive() alias removed in Python 3.9.
            if self._thread and not self._thread.is_alive():
                self._thread.join()
                self._thread = None
            if self._thread is None and \
                    time.time() - self._last_maintenance > self.fast_delay:
                self._thread = threading.Thread(target=self._maintenance_thread)
                start = True
        if start:
            self._thread.start()

    def _maintenance_thread(self):
        """INTERNAL: body of the maintenance thread."""
        self.logger.debug('Started maintenance thread.')
        if time.time() - self._last_full_maintenance > self.slow_delay:
            self._expire_instances()
            self._decrease_size()
            self._last_full_maintenance = time.time()
        self._increase_size()
        self.logger.debug('Maintenance complete - pool size is now %d'
                          % self.size())
        self._last_maintenance = time.time()

    def _get_key(self, args):
        """INTERNAL: return a stable lookup key for an argument dict."""
        # sorted() works on both Python 2 and 3; the original
        # `args.items().sort()` is Python-2-only (list vs. view).
        return '/'.join('%s=%s' % (key, value)
                        for (key, value) in sorted(args.items()))

    def _get_type(self):
        """INTERNAL: return a type identifier."""
        return self.type.__name__

    def _create_instance(self, args):
        """INTERNAL: create and tag a new instance."""
        instance = self.constructor(**args)
        instance.args = args
        instance.count = 0
        instance.created = time.time()
        instance.last_used = instance.created
        return instance

    def _terminate_instance(self, instance):
        """INTERNAL: best-effort terminate; shutdown errors are ignored."""
        try:
            instance.terminate()
        except Exception:
            pass

    def _get_instance(self, args):
        """INTERNAL: pop and return a usable pooled instance, or None."""
        now = time.time()
        key = self._get_key(args)
        with self._lock:
            if key not in self._pool or not self._pool[key][1]:
                return None
            for ix, inst in enumerate(self._pool[key][1]):
                # Skip instances that are too old, too idle, or used up;
                # they will be reaped by _expire_instances().
                if now - inst.created < self.maxlife \
                        and now - inst.last_used < self.maxival \
                        and inst.count < self.maxcount:
                    del self._pool[key][1][ix]
                    return inst
        return None

    def _add_instance(self, args, instance):
        """INTERNAL: add an instance, keeping the list MRU-first."""
        key = self._get_key(args)
        with self._lock:
            if key not in self._pool:
                self._pool[key] = (args, [])
            self._pool[key][1].append(instance)
            # Key-based sort replaces the Python-2-only cmp() comparator;
            # descending last_used == most recently used first.
            self._pool[key][1].sort(key=lambda inst: inst.last_used,
                                    reverse=True)

    def _increase_size(self):
        """INTERNAL: grow each bucket back toward `minsize` (one per pass)."""
        create = []
        with self._lock:
            for key in self._pool:
                if len(self._pool[key][1]) < self.minsize:
                    create.append(self._pool[key][0])
        for args in create:
            instance = self._create_instance(args)
            self._add_instance(args, instance)
        if create:
            self.logger.debug('Created %d instances of type <%s>'
                              % (len(create), self._get_type()))

    def _expire_instances(self):
        """INTERNAL: terminate instances that are too old, idle, or used up."""
        terminate = []
        now = time.time()
        with self._lock:
            for key in self._pool:
                remove = []
                for inst in self._pool[key][1]:
                    if now - inst.created > self.maxlife \
                            or now - inst.last_used > self.maxival \
                            or inst.count > self.maxcount:
                        remove.append(inst)
                for inst in remove:
                    self._pool[key][1].remove(inst)
                terminate += remove
        for inst in terminate:
            self._terminate_instance(inst)
        if terminate:
            self.logger.debug('Expired %d instances of type <%s> due to age'
                              % (len(terminate), self._get_type()))

    def _decrease_size(self):
        """INTERNAL: trim each bucket down to `minsize`."""
        terminate = []
        with self._lock:
            for key in self._pool:
                if len(self._pool[key][1]) > self.minsize:
                    terminate += self._pool[key][1][self.minsize:]
                    del self._pool[key][1][self.minsize:]
        for inst in terminate:
            self._terminate_instance(inst)
        if terminate:
            self.logger.debug('Removed %d instances of <%s> due to full pool'
                              % (len(terminate), self._get_type()))
import functools
import dask.array
import numpy as np
import pandas as pd
import xarray as xr
from dask import distributed as dd
import rhg_compute_tools.utils
def dataarrays_from_delayed(futures, client=None, **client_kwargs):
    """Turn futures of ``xarray.DataArray`` objects into local DataArrays
    whose data remain remote as dask arrays.

    Only metadata (shape, dtype, dims, coords, attrs, name) is pulled back
    to the local process; the payload of each array stays on the workers
    wrapped in a :py:class:`dask.array.Array`.

    Parameters
    ----------
    futures : list
        Futures resolving to :py:class:`xarray.DataArray` objects.
    client : dask.distributed.Client, optional
        Client used for the metadata round-trips; inferred from context
        when omitted.
    client_kwargs : optional
        Extra keyword arguments forwarded to ``client.map``/``client.gather``
        (e.g. ``priority``).

    Returns
    -------
    list of xarray.DataArray
        One dask-backed DataArray per future.
    """
    client = dd.get_client() if client is None else client

    # Futures pointing at just the raw ``.data`` payload of each array.
    payload_futures = client.map(lambda x: x.data, futures, **client_kwargs)

    # Shape/dtype pairs are needed locally to build the dask wrappers.
    shape_dtype = client.gather(
        client.map(lambda x: (x.data.shape, x.data.dtype), futures, **client_kwargs)
    )

    backends = []
    for payload, (shape, dtype) in zip(payload_futures, shape_dtype):
        backends.append(dask.array.from_delayed(payload, shape, dtype))

    # Gather dims/coords/attrs/name; dict(x.coords) is used because gathering
    # the raw coords objects can blow up memory.
    metadata = client.gather(
        client.map(
            lambda x: {
                "dims": x.dims,
                "coords": dict(x.coords),
                "attrs": x.attrs,
                "name": x.name,
            },
            futures,
            **client_kwargs
        )
    )

    return [
        xr.DataArray(backend, **meta)
        for backend, meta in zip(backends, metadata)
    ]
def dataarray_from_delayed(futures, dim=None, client=None, **client_kwargs):
    """Concatenate futures of ``xarray.DataArray`` objects into one DataArray.

    Metadata is brought local while the data stay on the workers as dask
    arrays; the per-future arrays are then concatenated along ``dim``.

    Parameters
    ----------
    futures : list
        Futures resolving to :py:class:`xarray.DataArray` objects.
    dim : str or pandas.Index, optional
        Dimension along which to concatenate; inferred by default.
    client : dask.distributed.Client, optional
        Client used for gathering metadata; inferred from context if omitted.
    client_kwargs : optional
        Extra keyword arguments forwarded to the client calls
        (e.g. ``priority``).

    Returns
    -------
    xarray.DataArray
        The concatenated, dask-backed array.
    """
    pieces = dataarrays_from_delayed(futures, client=client, **client_kwargs)
    return xr.concat(pieces, dim=dim)
def datasets_from_delayed(futures, client=None, **client_kwargs):
    """
    Returns a list of xarray datasets from a list of futures of datasets

    Only metadata (variable names, shapes, dtypes, dims, coords, attrs) is
    gathered locally; each variable's data stays on the workers wrapped in
    a :py:class:`dask.array.Array`.

    Parameters
    ----------
    futures : list
        list of :py:class:`dask.delayed.Future` objects holding
        :py:class:`xarray.Dataset` objects.
    client : object, optional
        :py:class:`dask.distributed.Client` to use in gathering
        metadata on futures. If not provided, client is inferred
        from context.
    client_kwargs : optional
        kwargs to pass to ``client.map`` and ``client.gather`` commands
        (e.g. ``priority``)

    Returns
    -------
    datasets : list
        list of :py:class:`xarray.Dataset` objects with
        :py:class:`dask.array.Array` backends for each variable.
    """
    if client is None:
        client = dd.get_client()

    data_var_keys = client.gather(
        client.map(lambda x: list(x.data_vars.keys()), futures, **client_kwargs)
    )

    # FIX: every lambda below binds the comprehension variable as a default
    # argument (k=k) so each submitted callable captures the value from its
    # own iteration rather than sharing a single late-bound closure cell.
    delayed_arrays = [
        {
            k: client.submit(lambda x, k=k: x[k].data, futures[i], **client_kwargs)
            for k in data_var_keys[i]
        }
        for i in range(len(futures))
    ]

    dask_array_metadata = client.gather(
        [
            {
                k: (
                    client.submit(
                        lambda x, k=k: (x[k].data.shape, x[k].data.dtype),
                        futures[i],
                        **client_kwargs
                    )
                )
                for k in data_var_keys[i]
            }
            for i in range(len(futures))
        ]
    )

    dask_data_arrays = [
        {
            k: (
                dask.array.from_delayed(
                    delayed_arrays[i][k], *dask_array_metadata[i][k]
                )
            )
            for k in data_var_keys[i]
        }
        for i in range(len(futures))
    ]

    # using dict(x.coords) b/c gathering coords can blow up memory for some reason
    array_metadata = client.gather(
        [
            {
                k: client.submit(
                    lambda x, k=k: {
                        "dims": x[k].dims,
                        "coords": dict(x[k].coords),
                        "attrs": x[k].attrs,
                    },
                    futures[i],
                    **client_kwargs
                )
                for k in data_var_keys[i]
            }
            for i in range(len(futures))
        ]
    )

    data_arrays = [
        {
            k: (xr.DataArray(dask_data_arrays[i][k], **array_metadata[i][k]))
            for k in data_var_keys[i]
        }
        for i in range(len(futures))
    ]

    datasets = [xr.Dataset(arr) for arr in data_arrays]

    # Dataset-level attrs are not carried by the variables; attach them here.
    dataset_metadata = client.gather(
        client.map(lambda x: x.attrs, futures, **client_kwargs)
    )
    for i in range(len(futures)):
        datasets[i].attrs.update(dataset_metadata[i])

    return datasets
def dataset_from_delayed(futures, dim=None, client=None, **client_kwargs):
    """Concatenate futures of ``xarray.Dataset`` objects into one Dataset.

    Metadata is gathered to the local process while each variable's data
    stays on the workers as a dask array; the per-future datasets are then
    concatenated along ``dim``.

    Parameters
    ----------
    futures : list
        Futures resolving to :py:class:`xarray.Dataset` objects.
    dim : str or pandas.Index, optional
        Dimension along which to concatenate; inferred by default.
    client : dask.distributed.Client, optional
        Client used for gathering metadata; inferred from context if omitted.
    client_kwargs : optional
        Extra keyword arguments forwarded to the client calls
        (e.g. ``priority``).

    Returns
    -------
    xarray.Dataset
        The concatenated dataset with dask-backed variables.
    """
    pieces = datasets_from_delayed(futures, client=client, **client_kwargs)
    return xr.concat(pieces, dim=dim)
def choose_along_axis(arr, axis=-1, replace=True, nchoices=1, p=None):
    """Draw random samples along one axis of a multi-dimensional array.

    For every combination of indices over the non-sampled dimensions, draws
    ``nchoices`` values (via :py:func:`np.random.choice`) from the slice of
    ``arr`` along ``axis``, optionally weighted by ``p``.

    Parameters
    ----------
    arr : np.array
        Array with more than one dimension; samples are drawn from slices
        along ``axis``.
    axis : int, optional
        Dimension along which to draw samples. Default -1.
    replace : bool, optional
        Whether to sample with replacement (passed through to
        :py:func:`np.random.choice`). Default True.
    nchoices : int, optional
        Number of samples per slice; must not exceed the number of valid
        options when ``replace`` is False. Default 1.
    p : np.array, optional
        Weights, same shape as ``arr``. Each slice is sampled independently,
        so weights are normalized to 1 along ``axis``. Uniform by default.

    Returns
    -------
    np.array
        Same shape as ``arr`` except length ``nchoices`` along ``axis``.

    See Also
    --------
    :py:func:`np.random.choice` : 1-d version of this function
    """
    if p is None:
        # Uniform weights over the sampled axis when none are supplied.
        p = np.ones_like(arr).astype(float) / arr.shape[axis]

    # Normalize a negative axis to its positive equivalent.
    axis = axis % arr.ndim

    out_shape = list(arr.shape)
    out_shape[axis] = nchoices
    out = np.ndarray(shape=tuple(out_shape), dtype=arr.dtype)

    # Iterate over every index combination of the non-sampled dimensions.
    other_dims = tuple(size for i, size in enumerate(arr.shape) if i != axis)
    for idx in np.ndindex(other_dims):
        # Re-insert a full slice at the sampled axis position.
        selector = tuple(list(idx[:axis]) + [slice(None)] + list(idx[axis:]))
        out[selector] = np.random.choice(
            arr[selector],
            size=nchoices,
            replace=replace,
            p=p[selector],
        )

    return out
def choose_along_dim(da, dim, samples=1, expand=None, new_dim_name=None):
    """
    Sample values from a DataArray along a dimension

    Wraps :py:func:`np.random.choice` to sample a different random index
    (or set of indices) from along dimension ``dim`` for each combination of
    elements along the other dimensions. This is very different from block
    resampling - to block resample along a dimension simply choose a set
    of indices and draw these from the array using :py:meth:`xr.DataArray.sel`.

    Parameters
    ----------
    da : xr.DataArray
        DataArray from which to sample values.
    dim: str
        Dimension along which to sample. Sampling will draw from elements along
        this dimension for all combinations of other dimensions.
    samples : int, optional
        Number of samples to take from the dimension ``dim``. If greater than 1,
        ``expand`` is ignored (and set to True).
    expand : bool, optional
        Whether to expand the array along the sampled dimension.
    new_dim_name : str, optional
        Name for the new dimension. If not provided, will use ``dim``.

    Returns
    -------
    sampled : xr.DataArray
        DataArray with sampled values chosen along dimension ``dim``

    Examples
    --------
    .. code-block:: python

        >>> da = xr.DataArray(
        ...     np.arange(40).reshape(4, 2, 5),
        ...     dims=['x', 'y', 'z'],
        ...     coords=[np.arange(4), np.arange(2), np.arange(5)],
        ... )
        >>> da  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        <xarray.DataArray (x: 4, y: 2, z: 5)>
        array([[[ 0,  1,  2,  3,  4],
                [ 5,  6,  7,  8,  9]],
               [[10, 11, 12, 13, 14],
                [15, 16, 17, 18, 19]],
               [[20, 21, 22, 23, 24],
                [25, 26, 27, 28, 29]],
               [[30, 31, 32, 33, 34],
                [35, 36, 37, 38, 39]]])
        Coordinates:
          * x        (x) int64 0 1 2 3
          * y        (y) int64 0 1
          * z        (z) int64 0 1 2 3 4

    We can take a random value along the ``'z'`` dimension:

    .. code-block:: python

        >>> np.random.seed(1)
        >>> choose_along_dim(da, 'z')
        <xarray.DataArray (x: 4, y: 2)>
        array([[ 2,  8],
               [10, 16],
               [20, 25],
               [30, 36]])
        Coordinates:
          * x        (x) int64 0 1 2 3
          * y        (y) int64 0 1

    If you provide a ``sample`` argument greater than one (or
    set expand=True) the array will be expanded to a new
    dimension:

    .. code-block:: python

        >>> np.random.seed(1)
        >>> choose_along_dim(da, 'z', samples=3)  # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
        <xarray.DataArray (x: 4, y: 2, z: 3)>
        array([[[ 2,  3,  0],
                [ 6,  5,  5]],
               [[10, 11, 11],
                [17, 17, 18]],
               [[21, 24, 20],
                [28, 27, 27]],
               [[30, 30, 34],
                [39, 36, 38]]])
        Coordinates:
          * x        (x) int64 0 1 2 3
          * y        (y) int64 0 1
          * z        (z) int64 0 1 2
    """
    sampled = choose_along_axis(da.values, axis=da.get_axis_num(dim), nchoices=samples)
    if samples > 1:
        # multiple samples always require a new dimension, so force expansion
        expand = True
    if not expand:
        # keep only the single draw, collapsing the sampled axis
        sampled = np.take(sampled, 0, axis=da.get_axis_num(dim))
        return xr.DataArray(
            sampled,
            dims=[d for d in da.dims if d != dim],
            coords=[da.coords[d] for d in da.dims if d != dim],
        )
    else:
        if new_dim_name is None:
            new_dim_name = dim
        # the sampled dimension gets fresh integer coordinates 0..samples-1
        return xr.DataArray(
            sampled,
            dims=[d if d != dim else new_dim_name for d in da.dims],
            coords=[da.coords[d] if d != dim else np.arange(samples) for d in da.dims],
        )
@xr.register_dataarray_accessor("random")
class random:
    """DataArray accessor exposing sampling helpers as ``da.random``."""
    def __init__(self, xarray_obj):
        # keep a reference to the DataArray this accessor is attached to
        self._xarray_obj = xarray_obj
    @functools.wraps(choose_along_dim)
    def choice(self, *args, **kwargs):
        # signature/docstring mirror choose_along_dim via functools.wraps;
        # the wrapped DataArray is passed as the ``da`` argument
        return choose_along_dim(self._xarray_obj, *args, **kwargs)
def document_dataset(
    ds: xr.Dataset,
    repository_root: "str | None" = None,
    tz: str = "UTC",
    inplace: bool = True,
) -> xr.Dataset:
    """
    Add repository state and timestamp to dataset attrs

    Parameters
    ----------
    ds : xr.Dataset
        Dataset to document
    repository_root : str or None, optional
        Path to the root of the repository to document. If ``None`` (default),
        the current directory will be used, and will search parent directories
        for a git repository. If a string is passed, parent directories will
        not be searched - the directory must be a repository root which
        contains a ``.git`` directory.
    tz : str, optional
        time zone string parseable by datetime.datetime (e.g. "US/Pacific").
        Default "UTC".
    inplace : bool, optional
        Whether to update the dataset's attributes in place (default) or to return
        a copy of the dataset.

    Returns
    -------
    ds : xr.Dataset
        Dataset with updated attribute information. A dataset is returned regardless
        of arguments - the inplace argument determines whether the returned dataset
        will be a shallow copy or the original object (default).
    """
    if not inplace:
        # shallow copy: attrs are replaced on the copy, underlying data is shared
        ds = ds.copy(deep=False)
    ds.attrs.update(rhg_compute_tools.utils.get_repo_state(repository_root))
    ds.attrs["updated"] = pd.Timestamp.now(tz=tz).strftime("%c (%Z)")
    return ds
import matplotlib.cm
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
# Python 2/3 compatibility: on Python 2 string-ness must be checked against
# both ``str`` and ``unicode``; on Python 3 the ``unicode`` name raises
# NameError and ``str`` alone suffices.
try:
    _string_types = (str, unicode)
except NameError:
    _string_types = (str,)
def get_color_scheme(values, cmap=None, colors=None, levels=None, how=None):
    """
    Generate a norm and color scheme from data

    Parameters
    ----------
    values : array-like
        data to be plotted, from which to generate cmap and norm.
        This should be an array, DataArray, etc. that we can use
        to find the min/max and/or quantiles of the data.
    cmap : str, optional
        named matplotlib cmap (default inferred from data)
    colors : list-like, optional
        list of colors to use in a discrete colormap, or with which
        to create a custom color map
    levels : list-like, optional
        boundaries of a discrete colormap. If provided (and ``how`` is
        None) a :py:class:`matplotlib.colors.BoundaryNorm` is built from
        these edges directly.
    how : str, optional
        Optional setting from ``{'eq_hist', 'linear', 'log', 'symlog', None}``.
        Used to construct the returned ``norm`` object, which defines
        the way the colors map to values. By default, the method is
        inferred from the ``values``.

    Returns
    -------
    cmap : object
        :py:class:`matplotlib.colors.cmap` color mapping
    norm : object
        :py:class:`matplotlib.colors.Normalize` instance using the provided
        values, levels, color specification, and "how" method
    """
    mini, maxi = float(values.min()), float(values.max())
    amax = max(abs(mini), abs(maxi))
    # choose a colormap: explicit colors > diverging for sign-crossing data
    # > sequential default; string names are resolved to cmap objects
    if (cmap is None) and (colors is not None):
        cmap = LinearSegmentedColormap.from_list("custom_cmap", colors)
    elif cmap is None:
        if (mini < 0) and (maxi > 0):
            cmap = matplotlib.cm.RdBu_r
        else:
            cmap = matplotlib.cm.viridis
    elif isinstance(cmap, _string_types):
        cmap = matplotlib.cm.get_cmap(cmap)
    if how is None and levels is not None:
        norm = matplotlib.colors.BoundaryNorm(levels, cmap.N)
    elif (how is None) or (how == "eq_hist"):
        # histogram-equalized bins: edges at evenly spaced data percentiles
        if levels is None:
            levels = 11
        bin_edges = np.percentile(
            values[~np.isnan(values)], np.linspace(0, 100, levels)
        )
        norm = matplotlib.colors.BoundaryNorm(bin_edges, cmap.N)
    elif how == "log":
        norm = matplotlib.colors.LogNorm(vmin=mini, vmax=maxi)
    elif how == "symlog":
        norm = matplotlib.colors.SymLogNorm(
            vmin=-amax, vmax=amax, linthresh=(amax / 100)
        )
    elif how == "linear":
        norm = matplotlib.colors.Normalize(vmin=mini, vmax=maxi)
    else:
        # NOTE: the literal braces must be doubled ({{ }}) — unescaped they
        # are parsed as a replacement field and .format() raises KeyError
        # instead of this ValueError
        raise ValueError(
            "color scheme `how` argument {} not recognized. "
            "choose from {{eq_hist, log, symlog, linear}} or "
            "provide `levels`".format(how)
        )
    return cmap, norm
def add_colorbar(ax, cmap="viridis", norm=None, orientation="vertical", **kwargs):
    """
    Add a colorbar to a plot, using a pre-defined cmap and norm

    Parameters
    ----------
    ax : object
        matplotlib axis object
    cmap : str or object, optional
        :py:class:`matplotlib.colors.cmap` instance or name of a
        registered cmap (default viridis)
    norm: object, optional
        :py:class:`matplotlib.colors.Normalize` instance. default is a linear
        norm between the min and max of the first plotted object.
    orientation : str, optional
        default 'vertical'
    **kwargs :
        passed to colorbar constructor

    Returns
    -------
    cbar : object
        the created :py:class:`matplotlib.colorbar.Colorbar`
    """
    if norm is None:
        # a Normalize *instance* is required here; the previous code assigned
        # the Normalize class itself, which ScalarMappable cannot use
        norm = matplotlib.colors.Normalize()
    n_cmap = matplotlib.cm.ScalarMappable(norm=norm, cmap=cmap)
    # empty array: the mappable exists only to drive the colorbar
    n_cmap.set_array([])
    cbar = ax.get_figure().colorbar(n_cmap, ax=ax, orientation=orientation, **kwargs)
    return cbar
import fsspec
import os
import re
import yaml
import tempfile
import rasterio
import numpy as np
import pandas as pd
import xarray as xr
import geopandas as gpd
from zarr.errors import GroupNotFoundError
from contextlib import contextmanager
import contextlib
try:
from gcsfs.retry import HttpError as GCSFSHttpError
except ImportError:
from gcsfs.utils import HttpError as GCSFSHttpError
def get_fs(fp) -> fsspec.AbstractFileSystem:
    """Return an fsspec filesystem inferred from a path's protocol prefix.

    Paths without a ``proto://`` prefix map to the local filesystem
    (protocol ``""``); otherwise the text before ``://`` selects the
    protocol. Generous timeouts are configured for remote backends.
    """
    if "://" in fp:
        protocol = fp.split("://")[0]
    else:
        protocol = ""
    return fsspec.filesystem(
        protocol,
        cache_timeout=-1,
        timeout=120,
        requests_timeout=120,
        read_timeout=120,
        conn_timeout=120,
    )
@contextmanager
def report_if_not_found(fp):
    """Context manager translating missing-store errors into FileNotFoundError(fp).

    Catches ``FileNotFoundError``/zarr ``GroupNotFoundError`` directly, and
    ``KeyError`` only when it refers to a missing ``.zmetadata`` key (how a
    missing consolidated zarr store surfaces); other KeyErrors propagate.
    """
    try:
        yield
    except (FileNotFoundError, GroupNotFoundError):
        raise FileNotFoundError(fp)
    except KeyError as err:
        if ".zmetadata" in str(err.args[0]):
            raise FileNotFoundError(fp)
        raise
def read_zarr(
    fp,
    fs=None,
    mapper_kwargs=None,
    isel_dict: dict = None,
    load_data: bool = False,
    **kwargs,
) -> xr.Dataset:
    """Open a zarr store from a local or remote location as an xr.Dataset.

    Args:
        fp: path or URL of the zarr store.
        fs: optional fsspec filesystem; inferred from ``fp`` if None.
        mapper_kwargs: currently unused — accepted for interface
            compatibility. TODO confirm whether these were meant for
            ``fs.get_mapper``.
        isel_dict: optional ``{dim: indexer}`` mapping applied via
            ``ds.isel`` after opening.
        load_data: if True, eagerly load the data into memory.
        **kwargs: forwarded to ``xr.open_zarr``.

    Returns:
        the opened (and optionally subset/loaded) dataset.

    Raises:
        FileNotFoundError: if the store does not exist (via
            ``report_if_not_found``).
    """
    if fs is None:
        fs = get_fs(fp)
    with report_if_not_found(fp):
        mapper = fs.get_mapper(fp)
        ds = xr.open_zarr(mapper, **kwargs)
        if isel_dict is not None:
            # fixed: the message previously referenced a nonexistent
            # `sel_dict_method` parameter
            assert isinstance(isel_dict, dict), "`isel_dict` should be a dict"
            assert all(
                [x in ds.dims for x in isel_dict]
            ), f"all keys of `isel_dict` are not in ds.dims {isel_dict.keys(), list(ds.dims)}"
            ds = ds.isel(isel_dict)
        if load_data:
            ds = ds.load()
        return ds
def load_netcdf(blob, fsspec_kwargs=None, *args, retries=5, **kwargs) -> xr.Dataset:
    """Read a netCDF file from a local or gs:// location into memory.

    Remote files are first downloaded to a temporary local file. Transient
    I/O errors are retried up to ``retries`` times.

    Args:
        blob: local path or ``proto://``-style URL of the netCDF file.
        fsspec_kwargs: passed to ``fsspec.filesystem`` for remote reads.
        *args, **kwargs: forwarded to ``xr.open_dataset``.
        retries: number of additional attempts on IOError/HTTP errors.

    Returns:
        the dataset, fully loaded into memory.

    Raises:
        ValueError: if ``blob`` is neither an existing local path nor a URL.
        OSError: if all retry attempts fail.
    """
    if fsspec_kwargs is None:
        fsspec_kwargs = {}
    for i in range(retries + 1):
        try:
            if os.path.exists(blob):
                with xr.open_dataset(blob, *args, **kwargs) as ds:
                    # load before the file handle closes
                    return ds.load()
            elif "://" in str(blob):
                with tempfile.NamedTemporaryFile(suffix=".nc") as tmp_fp:
                    fp = str(tmp_fp.name)
                    protocol = re.match(r"(\w+)://", blob, re.I).group(1)
                    fs = fsspec.filesystem(protocol, **fsspec_kwargs)
                    fs.get(blob, fp)
                    with xr.open_dataset(fp, *args, **kwargs) as ds:
                        return ds.load()
            else:
                # previously this fell through silently and returned None
                raise ValueError(f"file protocol not recognized: {blob}")
        except (IOError, GCSFSHttpError) as e:
            if i >= retries:
                raise OSError(f"read aborted after {i} retry attempts: {e}")
def read_dataset(fp, engine=None, **kwargs) -> xr.Dataset:
    """Open a dataset with ``read_zarr`` or ``load_netcdf``.

    When ``engine`` is None it is inferred from the filename extension:
    ``.zarr`` -> zarr, ``.nc``/``.nc4`` -> netcdf4.
    """
    if engine is None:
        if fp.endswith(".zarr"):
            engine = "zarr"
        elif fp.endswith((".nc", ".nc4")):
            engine = "netcdf4"
        else:
            raise IOError(f"engine could not be auto-determined from fp: {fp}")
    if engine == "zarr":
        return read_zarr(fp, **kwargs)
    if engine == "netcdf4":
        return load_netcdf(fp, **kwargs)
    raise IOError(
        f"engine not recognized: {engine}. Choose one of {{'zarr', 'netcdf4'}}."
    )
def read_shapefile(fp):
    """Read a shapefile from a local or remote location via fsspec/geopandas."""
    with fsspec.open(fp) as f:
        return gpd.read_file(f)
@contextlib.contextmanager
def read_rasterio(blob, fsspec_kwargs=None, *args, retries=5, **kwargs) -> xr.DataArray:
    """Open a geotiff/raster file from a local or gs:// location.

    Context manager yielding the opened ``xr.DataArray``. Remote files are
    downloaded to a temporary directory first. Transient I/O errors are
    retried up to ``retries`` times.

    Args:
        blob: local path or ``proto://``-style URL of the raster file.
        fsspec_kwargs: accepted for interface symmetry with the other
            readers but currently not forwarded — TODO confirm intent.
        retries: number of additional attempts on IOError/HTTP errors.

    Yields:
        the opened raster as an ``xr.DataArray``.

    Raises:
        ValueError: if ``blob`` is neither an existing local path nor a URL.
        OSError: if all retry attempts fail.
    """
    if fsspec_kwargs is None:
        fsspec_kwargs = {}
    for i in range(retries + 1):
        try:
            if os.path.exists(blob):
                with xr.open_rasterio(blob) as ds:
                    yield ds
                break
            elif "://" in str(blob):
                with tempfile.TemporaryDirectory() as tmpdir:
                    f = os.path.join(tmpdir, os.path.basename(blob))
                    fs = fsspec.filesystem(blob.split("://")[0])
                    fs.get(blob.replace("/gcs/", "gs://"), f)
                    with rasterio.open(f) as data:
                        with xr.open_rasterio(data) as ds:
                            yield ds
                    break
            else:
                # fixed: the message was missing its f-string prefix and
                # printed the literal text "{blob}"
                raise ValueError(f"file protocol not recognized: {blob}")
        except (IOError, GCSFSHttpError) as e:
            if i >= retries:
                raise OSError(f"read aborted after {i} retry attempts: {e}")
@contextlib.contextmanager
def read_netcdf(blob, fsspec_kwargs=None, *args, retries=5, **kwargs) -> xr.Dataset:
    """Open a netCDF file from a local or gs:// location as a context manager.

    Very similar to ``load_netcdf``, but yields the open dataset without
    loading it into memory. Remote files are downloaded to a temporary file
    first; transient I/O errors are retried up to ``retries`` times.

    Args:
        blob: local path or ``proto://``-style URL of the netCDF file.
        fsspec_kwargs: passed to ``fsspec.filesystem`` for remote reads.
        *args, **kwargs: forwarded to ``xr.open_dataset``.
        retries: number of additional attempts on IOError/HTTP errors.

    Yields:
        the opened (lazy) ``xr.Dataset``.

    Raises:
        ValueError: if ``blob`` is neither an existing local path nor a URL.
        OSError: if all retry attempts fail.
    """
    if fsspec_kwargs is None:
        fsspec_kwargs = {}
    for i in range(retries + 1):
        try:
            if os.path.exists(blob):
                with xr.open_dataset(blob, *args, **kwargs) as ds:
                    yield ds
                break
            elif "://" in str(blob):
                with tempfile.NamedTemporaryFile(suffix=".nc") as tmp_fp:
                    fp = str(tmp_fp.name)
                    protocol = re.match(r"(\w+)://", blob, re.I).group(1)
                    fs = fsspec.filesystem(protocol, **fsspec_kwargs)
                    fs.get(blob, fp)
                    with xr.open_dataset(fp, *args, **kwargs) as ds:
                        yield ds
                    break
            else:
                # previously fell through silently, so contextlib raised a
                # confusing "generator didn't yield" RuntimeError instead
                raise ValueError(f"file protocol not recognized: {blob}")
        except (IOError, GCSFSHttpError) as e:
            if i >= retries:
                raise OSError(f"read aborted after {i} retry attempts: {e}")
def read_csv(blob, **fsspec_kwargs) -> pd.DataFrame:
    """
    Read a csv file from a local or gs:// location

    Keyword arguments are forwarded to :func:`pandas.read_csv` only for
    remote (``proto://``-style) paths; for local paths they are dropped.
    """
    remote = "://" in str(blob)
    kwargs = fsspec_kwargs if remote else {}
    return pd.read_csv(blob, **kwargs)
def read_parquet(blob, **fsspec_kwargs) -> pd.DataFrame:
    """
    Read a parquet file from a local or gs:// location

    Keyword arguments are forwarded to :func:`pandas.read_parquet` only for
    remote (``proto://``-style) paths; for local paths they are dropped.
    """
    remote = "://" in str(blob)
    kwargs = fsspec_kwargs if remote else {}
    return pd.read_parquet(blob, **kwargs)
def read_dataframe(blob, **fsspec_kwargs) -> pd.DataFrame:
    """
    Read a CSV or parquet file from a local or gs:// location

    Dispatches on the filename extension: ``.csv``/``.txt`` -> read_csv,
    ``.parquet`` -> read_parquet; anything else raises ValueError.
    """
    if blob.endswith((".csv", ".txt")):
        return read_csv(blob, **fsspec_kwargs)
    if blob.endswith(".parquet"):
        return read_parquet(blob, **fsspec_kwargs)
    parts = os.path.basename(blob).split(".")
    if len(parts) == 1:
        raise ValueError("No extension could be inferred for file: {}".format(blob))
    ext = ".".join(parts[1:])
    raise ValueError(
        "File type could not be inferred from extension: {}".format(ext)
    )
def read_csvv_response(fp) -> xr.Dataset:
    """Parse a CSVV response-function file into an xr.Dataset.

    The file format is a ``---``-delimited YAML header followed by a data
    section terminated by ``...``, containing named blocks (prednames,
    covarnames, gamma, gammavcv, residvcv, ...). Name blocks become string
    arrays; numeric blocks are parsed with ``np.loadtxt``.

    Returns a Dataset with ``gamma`` (coefficients on MultiIndex ``X``),
    ``vcv`` (coefficient covariance on ``X`` x ``Y``) and scalar
    ``residvcv``; header entries become attrs (nested dicts flattened
    as ``"{outer}_{inner}"``).
    """
    with fsspec.open(fp, "r") as f:
        # the file must open with a YAML document marker
        firstline = f.readline().strip()
        assert firstline == "---", firstline
        # "...\n" separates the YAML header from the data section
        header, data = f.read().split("...\n")
    header = yaml.safe_load(header)
    # recognized block labels within the data section
    fields = [
        "observations",
        "prednames",
        "covarnames",
        "obsnames",
        "gamma",
        "gammavcv",
        "residvcv",
    ]
    parsed_data = {}
    current_field = None
    current = []
    # accumulate lines under the most recent field label; flush the previous
    # block whenever a new label is encountered
    for line in data.split("\n"):
        if line.strip() in fields:
            if current_field is not None:
                if "names" in current_field:
                    # name blocks are comma-separated strings
                    parsed_data[current_field] = np.array(
                        [c.strip().rstrip(",").split(",") for c in current]
                    )
                else:
                    # numeric blocks parse directly via loadtxt
                    parsed_data[current_field] = np.loadtxt(current, delimiter=",")
            current = []
            current_field = line.strip()
        else:
            current.append(line.strip().rstrip(",").replace(", ", ","))
    # flush the final block after the loop ends
    if "names" in current_field:
        parsed_data[current_field] = np.array(
            [c.strip().rstrip(",").split(",") for c in current]
        )
    else:
        parsed_data[current_field] = np.loadtxt(current, delimiter=",")
    if "obsnames" not in parsed_data:
        # default outcome name when the file omits the obsnames block
        parsed_data["obsnames"] = np.array(
            ["outcome"] * len(parsed_data["prednames"].flat)
        )
    # X and Y index the same (predictor, covariate, outcome) tuples; Y gets
    # suffixed level names so the covariance matrix has distinct dims
    X = pd.MultiIndex.from_arrays(
        [
            parsed_data["prednames"].flat,
            parsed_data["covarnames"].flat,
            parsed_data["obsnames"].flat,
        ],
        names=["predictor", "covariate", "outcome"],
    )
    Y = pd.MultiIndex.from_arrays(
        [
            parsed_data["prednames"].flat,
            parsed_data["covarnames"].flat,
            parsed_data["obsnames"].flat,
        ],
        names=["predictor_y", "covariate_y", "outcome_y"],
    )
    ds = xr.Dataset(
        {
            "gamma": xr.DataArray(parsed_data["gamma"], [X], ["X"]),
            "vcv": xr.DataArray(parsed_data["gammavcv"], [X, Y], ["X", "Y"]),
            "residvcv": xr.DataArray(parsed_data["residvcv"], [], []),
        },
        attrs={k: v for k, v in header.items() if not isinstance(v, dict)},
    )
    # flatten nested header dicts into "outer_inner" attr keys
    dict_keys = [k for k, v in header.items() if isinstance(v, dict)]
    for dk in dict_keys:
        ds.attrs.update({f"{dk}_{k}": v for k, v in header[dk].items()})
    return ds
import fsspec
import git
import xarray as xr
import pandas as pd
import dask
from pathlib import Path
import os
import zarr
from typing import Union
import rhg_compute_tools.xarray
from rhg_compute_tools.io_tools.readers import get_fs
def _xr_document_repo_state(ds: xr.Dataset, repo_root=".") -> xr.Dataset:
    """Record the enclosing git repository's state in ``ds.attrs``.

    ``repo_root`` may be any path inside the repository; parent directories
    are searched for the ``.git`` directory.
    """
    repository = git.Repo(repo_root, search_parent_directories=True)
    working_tree = os.path.dirname(repository.git_dir)
    rhg_compute_tools.xarray.document_dataset(ds, working_tree)
    return ds
def document_dataset(ds: xr.Dataset, repo_root: str = ".") -> xr.Dataset:
    """Attach git-repo state plus an ``updated`` UTC timestamp to ``ds.attrs``."""
    ds = _xr_document_repo_state(ds, repo_root=repo_root)
    stamp = pd.Timestamp.now(tz="UTC")
    ds.attrs["updated"] = stamp.strftime("%c (%Z)")
    return ds
def get_maximal_chunks_encoding(ds: xr.Dataset, **var_chunks) -> dict:
    """Build ``{"encoding": {...}}`` kwargs requesting maximal zarr chunks.

    Coordinates and variables that are already dask-chunked keep their
    largest existing chunk per dimension; unchunked coordinates get a single
    chunk per dimension (``-1``); unchunked data variables use the per-dim
    override from ``var_chunks`` (falling back to ``-1``).
    """
    encoding = {}
    for name in ds.coords.keys():
        coord = ds.coords[name]
        if coord.chunks is None:
            # unchunked coordinate: one chunk spanning each dimension
            encoding[name] = {"chunks": tuple(-1 for _ in coord.dims)}
        else:
            encoding[name] = {
                "chunks": tuple(max(sizes) for sizes in coord.chunks)
            }
    for name in ds.data_vars.keys():
        var = ds[name]
        if var.chunks is None:
            encoding[name] = {
                "chunks": tuple(var_chunks.get(d, -1) for d in var.dims)
            }
        else:
            encoding[name] = {
                "chunks": tuple(max(sizes) for sizes in var.chunks)
            }
    return {"encoding": encoding}
def write_zarr(
    out_ds: xr.Dataset,
    out_fp: str,
    fs: Union[None, fsspec.filesystem] = None,
    set_maximal_chunks: bool = True,
    writer_kwargs: Union[dict, None] = None,
    encoding_kwargs: Union[dict, None] = None,
) -> None:
    """Write a Dataset to a zarr store at a local or remote path.

    Object-dtype coordinates/variables (except ``time``) are cast to unicode
    in place on ``out_ds`` before writing, since zarr cannot serialize
    arbitrary Python objects. The write is computed with dask using up to
    3 retries per task.

    Raises:
        zarr.errors.ContainsGroupError: if a group already exists at
            ``out_fp`` (re-raised with the path as the message).

    NOTE(review): the ``fs`` annotation uses ``fsspec.filesystem`` (a
    factory function) where a filesystem class is presumably meant —
    confirm intended type.
    """
    if fs is None:
        fs = get_fs(out_fp)
    if writer_kwargs is None:
        writer_kwargs = {}
    assert isinstance(out_ds, xr.Dataset), (
        "Do not write a DataArray. Instead use da.to_dataset(name='variable_name') "
        "to convert to a Dataset, and then assign metadata prior to writing"
    )
    mapper = fs.get_mapper(out_fp)
    if encoding_kwargs is None:
        if set_maximal_chunks:
            # default: request the largest consistent chunking per variable
            encoding_kwargs = get_maximal_chunks_encoding(out_ds)
        else:
            encoding_kwargs = {}
    # cast object dtypes to unicode so zarr can serialize them; this mutates
    # the caller's dataset
    for v in list(out_ds.coords.keys()):
        if out_ds.coords[v].dtype == object and v != "time":
            out_ds.coords[v] = out_ds.coords[v].astype("unicode")
    for v in list(out_ds.variables.keys()):
        if out_ds[v].dtype == object and v != "time":
            out_ds[v] = out_ds[v].astype("unicode")
    try:
        # build the task graph lazily, then compute with per-task retries
        futures = out_ds.to_zarr(
            mapper, compute=False, **writer_kwargs, **encoding_kwargs
        )
        dask.compute(futures, retries=3)
    except zarr.errors.ContainsGroupError:
        # re-raise with the offending path so callers see where it collided
        raise zarr.errors.ContainsGroupError(out_fp)
def write_netcdf(
    out_ds: xr.Dataset,
    out_fp: str,
    fs: Union[None, fsspec.filesystem] = None,
) -> None:
    """Write a Dataset to a netCDF file, mapping gs:// paths to /gcs/ mounts.

    NOTE(review): the ``fs`` parameter is accepted but never used — kept
    for interface symmetry with ``write_zarr``; confirm whether it should
    be honored or removed.
    """
    assert isinstance(out_ds, xr.Dataset), (
        "for consistency, do not write a DataArray. Instead use da.to_dataset(name='variable_name') "
        "to convert to a Dataset, and then assign metadata prior to writing"
    )
    # netcdf requires the parent directory to be created
    parent_dir = "/".join(out_fp.split("/")[:-1]).replace("gs://", "/gcs/")
    os.makedirs(parent_dir, exist_ok=True)
    # write via the /gcs/ fuse mount rather than the gs:// protocol
    futures = out_ds.to_netcdf(out_fp.replace("gs://", "/gcs/"), compute=False)
    dask.compute(futures, retries=3)
from typing import List
import numpy as np
import rhino3dm
from shapely.geometry import LineString, Point
from rhino_shapely_interop.transformations import CoordTransform
class RhCurv:
    """Wrapper around a :class:`rhino3dm.Curve`.

    Converts the curve to NURBS form on construction and exposes helpers
    to refine its Greville parameterization, test linearity/planarity, and
    discretize it into a shapely ``LineString``.
    """
    def __init__(self, curv: rhino3dm.Curve):
        """Wrap a rhino3dm curve.

        Parameters
        ----------
        curv : rhino3dm.Curve
            A rhino3dm curve
        """
        self._curv = curv
        self._nurb = curv.ToNurbsCurve()
        # parameters of the Greville points (one per control point)
        self._greville_points_param = [
            self._nurb.GrevilleParameter(i) for i in range(len(self._nurb.Points))
        ]
        self._degree = self._nurb.Order - 1
        # refined parameter list; starts as the unrefined Greville params
        self._greville_points_param_modif = self._greville_points_param
    def refine(self, num: int) -> None:
        """Insert ``num`` extra parameters into each Greville interval.

        Parameters
        ----------
        num : integer
            Number of refinements per interval
        """
        params = self._greville_points_param
        refined = [params[0]]
        for lo, hi in zip(params[:-1], params[1:]):
            # drop the first linspace sample: it duplicates the previous
            # interval's endpoint
            refined.extend(np.linspace(lo, hi, num + 2)[1:])
        self._greville_points_param_modif = refined
    def get_shapely_line(self, transform: CoordTransform) -> LineString:
        """Discretize the curve into a shapely LineString.

        Parameters
        ----------
        transform : func
            A function mapping a (3, n) ndarray into the target 2d
            coordinate system.

        Returns
        -------
        shapely.geometry.LineString
            The discretized shapely representation of the curve.
        """
        coords = []
        for t in self._greville_points_param_modif:
            location = self._curv.PointAt(t)
            coords.append([location.X, location.Y, location.Z])
        projected = transform(np.array(coords).T).round(decimals=12)
        return LineString(projected.T)
    def is_line(self) -> bool:
        """Whether the wrapped curve is a straight line.

        Returns
        -------
        Boolean
        """
        return self._curv.IsLinear()
    @property
    def get_greville_points(self) -> List[np.ndarray]:
        """Greville points (points on the curve) at rhino-defined parameters.

        Returns
        -------
        List of numpy vectors
            Greville points
        """
        return [
            np.array([p.X, p.Y, p.Z])
            for p in (self._curv.PointAt(t) for t in self._greville_points_param)
        ]
    @property
    def is_planer(self) -> bool:
        """Whether the wrapped curve is planar (rhino ``IsPlanar``).

        Returns
        -------
        Boolean
        """
        return self._curv.IsPlanar()
class RhPnt:
    """Wrapper around a rhino point.

    Normalizes the several rhino point flavors (3d X/Y/Z, 2d X/Y, and point
    objects carrying coordinates on ``.Location``) into a single numpy
    representation, with conversion to shapely.
    """
    def __init__(self, pnt: rhino3dm.Point):
        """Wrap a rhino point.

        Parameters
        ----------
        pnt : RhinoPoint3d or RhinoPoint2d
        """
        self._pnt = pnt
        # try 3d coordinates first, then 2d (z defaults to 0), then the
        # .Location attribute used by rhino Point objects
        try:
            self._pnt_np = np.array([pnt.X, pnt.Y, pnt.Z])
        except AttributeError:
            try:
                self._pnt_np = np.array([pnt.X, pnt.Y, 0])
            except AttributeError:
                self._pnt_np = np.array(
                    [pnt.Location.X, pnt.Location.Y, pnt.Location.Z]
                )
    def get_shapely_point(self, transform: CoordTransform) -> Point:
        """Project the point into 2d and return a shapely Point.

        Parameters
        ----------
        transform : func
            A function mapping a (3, n) ndarray into the target 2d
            coordinate system.

        Returns
        -------
        shapely.geometry.Point
            The shapely representation of the rhino point.
        """
        projected = transform(np.array(self._pnt_np).T).round(decimals=12)
        return Point(projected)
    @property
    def as_numpy(self) -> np.ndarray:
        """The point's (x, y, z) numpy representation.

        Returns
        -------
        ndarray
        """
        return self._pnt_np
import numpy as np
class CoordTransform:
    """Transformations between 3d coordinates (x, y, z) and a 2d plane (x', y').

    The 2d plane is spanned by two input vectors: ``vec1`` is normalised to
    become the first in-plane basis vector e1; the plane normal e3 is the
    cross product of the two vectors; e2 completes the right-handed basis.

    Attributes
    ----------
    T : ndarray
        3x3 matrix mapping world coordinates into the plane basis.
    Tinv : ndarray
        Inverse mapping (columns are e1, e2, e3).
    plane_normal : ndarray
        Unit normal of the plane in (x, y, z).
    """
    def __init__(self, vec1: np.ndarray, vec2: np.ndarray):
        """Build the change-of-basis matrices from two in-plane vectors.

        Parameters
        ----------
        vec1 : ndarray
            A 3d vector (x, y, z) in the preferred 2d plane; normalised and
            used as e1.
        vec2 : ndarray
            Another 3d vector (x, y, z) in the preferred 2d plane.
        """
        self._e1 = vec1 / np.linalg.norm(vec1)
        normal = np.cross(vec1, vec2)
        self._e3 = normal / np.linalg.norm(normal)
        self._e2 = np.cross(self._e3, self._e1)
        # columns of Tinv are the basis vectors; T maps into that basis
        self.Tinv = np.array([self._e1, self._e2, self._e3]).T
        self.T = np.linalg.inv(self.Tinv)
    def transform(self, pnts: np.ndarray) -> np.ndarray:
        """Map (x, y, z) coordinates into the plane's (x', y') coordinates.

        Parameters
        ----------
        pnts : ndarray
            Points in (x, y, z): a single 3-vector or a (3, n) array.

        Returns
        -------
        ndarray
            Points in (x', y'): a 2-vector or a (2, n) array, matching the
            input's dimensionality.
        """
        projected = self.T.dot(pnts)
        if pnts.ndim == 1:
            return projected[0:2]
        return projected[0:2, :]
    @property
    def plane_normal(self) -> np.ndarray:
        """The unit vector normal to ``vec1`` and ``vec2``.

        Returns
        -------
        ndarray
        """
        return self._e3

# pybind11 — Seamless operability between C++11 and Python
[](http://pybind11.readthedocs.org/en/master/?badge=master)
[](http://pybind11.readthedocs.org/en/stable/?badge=stable)
[](https://gitter.im/pybind/Lobby)
[](https://travis-ci.org/pybind/pybind11)
[](https://ci.appveyor.com/project/wjakob/pybind11)
**pybind11** is a lightweight header-only library that exposes C++ types in Python
and vice versa, mainly to create Python bindings of existing C++ code. Its
goals and syntax are similar to the excellent
[Boost.Python](http://www.boost.org/doc/libs/1_58_0/libs/python/doc/) library
by David Abrahams: to minimize boilerplate code in traditional extension
modules by inferring type information using compile-time introspection.
The main issue with Boost.Python—and the reason for creating such a similar
project—is Boost. Boost is an enormously large and complex suite of utility
libraries that works with almost every C++ compiler in existence. This
compatibility has its cost: arcane template tricks and workarounds are
necessary to support the oldest and buggiest of compiler specimens. Now that
C++11-compatible compilers are widely available, this heavy machinery has
become an excessively large and unnecessary dependency.
Think of this library as a tiny self-contained version of Boost.Python with
everything stripped away that isn't relevant for binding generation. Without
comments, the core header files only require ~4K lines of code and depend on
Python (2.7 or 3.x, or PyPy2.7 >= 5.7) and the C++ standard library. This
compact implementation was possible thanks to some of the new C++11 language
features (specifically: tuples, lambda functions and variadic templates). Since
its creation, this library has grown beyond Boost.Python in many ways, leading
to dramatically simpler binding code in many common situations.
Tutorial and reference documentation is provided at
[http://pybind11.readthedocs.org/en/master](http://pybind11.readthedocs.org/en/master).
A PDF version of the manual is available
[here](https://media.readthedocs.org/pdf/pybind11/master/pybind11.pdf).
## Core features
pybind11 can map the following core C++ features to Python
- Functions accepting and returning custom data structures per value, reference, or pointer
- Instance methods and static methods
- Overloaded functions
- Instance attributes and static attributes
- Arbitrary exception types
- Enumerations
- Callbacks
- Iterators and ranges
- Custom operators
- Single and multiple inheritance
- STL data structures
- Smart pointers with reference counting like ``std::shared_ptr``
- Internal references with correct reference counting
- C++ classes with virtual (and pure virtual) methods can be extended in Python
## Goodies
In addition to the core functionality, pybind11 provides some extra goodies:
- Python 2.7, 3.x, and PyPy (PyPy2.7 >= 5.7) are supported with an
implementation-agnostic interface.
- It is possible to bind C++11 lambda functions with captured variables. The
lambda capture data is stored inside the resulting Python function object.
- pybind11 uses C++11 move constructors and move assignment operators whenever
possible to efficiently transfer custom data types.
- It's easy to expose the internal storage of custom data types through
  Python's buffer protocols. This is handy e.g. for fast conversion between
C++ matrix classes like Eigen and NumPy without expensive copy operations.
- pybind11 can automatically vectorize functions so that they are transparently
applied to all entries of one or more NumPy array arguments.
- Python's slice-based access and assignment operations can be supported with
just a few lines of code.
- Everything is contained in just a few header files; there is no need to link
against any additional libraries.
- Binaries are generally smaller by a factor of at least 2 compared to
equivalent bindings generated by Boost.Python. A recent pybind11 conversion
of PyRosetta, an enormous Boost.Python binding project,
[reported](http://graylab.jhu.edu/RosettaCon2016/PyRosetta-4.pdf) a binary
size reduction of **5.4x** and compile time reduction by **5.8x**.
- Function signatures are precomputed at compile time (using ``constexpr``),
leading to smaller binaries.
- With little extra effort, C++ types can be pickled and unpickled similar to
regular Python objects.
## Supported compilers
1. Clang/LLVM 3.3 or newer (for Apple Xcode's clang, this is 5.0.0 or newer)
2. GCC 4.8 or newer
3. Microsoft Visual Studio 2015 Update 3 or newer
4. Intel C++ compiler 17 or newer (16 with pybind11 v2.0 and 15 with pybind11 v2.0 and a [workaround](https://github.com/pybind/pybind11/issues/276))
5. Cygwin/GCC (tested on 2.5.1)
## About
This project was created by [Wenzel Jakob](http://rgl.epfl.ch/people/wjakob).
Significant features and/or improvements to the code were contributed by
Jonas Adler,
Lori A. Burns,
Sylvain Corlay,
Trent Houliston,
Axel Huebl,
@hulucc,
Sergey Lyskov,
Johan Mabille,
Tomasz Miąsko,
Dean Moldovan,
Ben Pritchard,
Jason Rhinelander,
Boris Schäling,
Pim Schellart,
Henry Schreiner,
Ivan Smirnov, and
Patrick Stewart.
### License
pybind11 is provided under a BSD-style license that can be found in the
``LICENSE`` file. By using, distributing, or contributing to this project,
you agree to the terms and conditions of this license.
| /rhino3dm-0.11.0.tar.gz/rhino3dm-0.11.0/src/lib/pybind11/README.md | 0.905513 | 0.834002 | README.md | pypi |
import numpy as np
from scipy import ndimage
class Filters():
    ''' Smoothing and de-striping filters applied in place to spym data.

    Thin wrappers around the module-level filter functions, operating on
    the 2d array stored at ``self._spym._dr.data``.
    '''
    def __init__(self, spym_instance):
        # reference to the owning spym object; filter methods mutate its data
        self._spym = spym_instance
    def gauss(self, **kwargs):
        ''' Apply Gaussian smoothing filter in place.

        Args:
            size: size of the filter in pixels.
        '''
        self._spym._dr.data = gauss(self._spym._dr.data, **kwargs)
    def median(self, **kwargs):
        ''' Apply median smoothing filter in place.

        Args:
            size: size of the filter in pixels.
        '''
        self._spym._dr.data = median(self._spym._dr.data, **kwargs)
    def mean(self, **kwargs):
        ''' Apply mean smoothing filter in place.

        Args:
            size: size of the filter in pixels.
        '''
        self._spym._dr.data = mean(self._spym._dr.data, **kwargs)
    def sharpen(self, **kwargs):
        ''' Apply a sharpening filter in place.

        Args:
            size: size of the filter in pixels.
            alpha: weight.
        '''
        self._spym._dr.data = sharpen(self._spym._dr.data, **kwargs)
    def destripe(self, **kwargs):
        ''' Find and remove scan stripes by averaging neighbourhood lines.

        Operates in place: the filtered image replaces
        ``self._spym._dr.data`` and the stripe mask is stored on
        ``self._spym._mask``. Does nothing (prints a message) if the
        wrapped data is not 2d.

        Args:
            min_length: only scars that are as long or longer than this value (in pixels) will be marked.
            hard_threshold: the minimum difference of the value from the neighbouring upper and lower lines to be considered a defect.
            soft_threshold: values differing at least this much do not form defects themselves, but they are attached to defects obtained from the hard threshold if they touch one.
            sign: whether mark stripes with positive values, negative values or both.
            rel_threshold: the minimum difference of the value from the neighbouring upper and lower lines to be considered a defect (in physical values). Overwrite hard_threshold.
        '''
        if not self._spym._dr.data.ndim == 2:
            print("The DataArray is not an image. Abort.")
            return
        self._spym._dr.data, self._spym._mask = destripe(self._spym._dr.data.astype(float), **kwargs)
def gauss(image,
          size=3):
    ''' Apply Gaussian smoothing filter.

    Args:
        image: numpy array.
        size: size of the filter in pixels (FWHM).

    Returns:
        filtered numpy array.
    '''
    # 0.42466 = 1 / (2 * sqrt(2 * ln 2)): converts FWHM to Gaussian sigma
    sigma = size * 0.42466
    # call ndimage directly: the scipy.ndimage.filters namespace is
    # deprecated (and removed in recent SciPy); matches sharpen() below
    return ndimage.gaussian_filter(image, sigma)
def median(image,
           size=3):
    ''' Apply median smoothing filter.

    Args:
        image: numpy array.
        size: size of the filter in pixels.

    Returns:
        filtered numpy array.
    '''
    # call ndimage directly: the scipy.ndimage.filters namespace is
    # deprecated (and removed in recent SciPy)
    return ndimage.median_filter(image, size=size)
def mean(image,
         size=3):
    ''' Apply mean smoothing filter.

    Args:
        image: numpy array.
        size: size of the filter in pixels.

    Returns:
        filtered numpy array.
    '''
    # call ndimage directly: the scipy.ndimage.filters namespace is
    # deprecated (and removed in recent SciPy)
    return ndimage.uniform_filter(image, size=size)
def sharpen(image,
            size=3,
            alpha=30):
    ''' Apply a sharpening (unsharp-mask style) filter.

    The image is first blurred with a Gaussian of the given size; the
    high-frequency detail of that blurred image (relative to a sigma=1
    re-blur) is then amplified by ``alpha`` and added back.

    Args:
        image: numpy array.
        size: size of the filter in pixels.
        alpha: weight.

    Returns:
        filtered numpy array.
    '''
    smoothed = ndimage.gaussian_filter(image, size)
    detail = smoothed - ndimage.gaussian_filter(smoothed, 1)
    return smoothed + alpha * detail
def destripe(image,
             min_length = 20,
             hard_threshold = 0.4,
             soft_threshold = 0.2,
             sign = 'positive',
             rel_threshold = None):
    ''' Find and remove scan stripes by averaging neighbourhood lines.

    Args:
        image: 2d numpy array.
        min_length: only scars that are as long or longer than this value (in pixels) will be marked.
        hard_threshold: the minimum difference of the value from the neighbouring upper and lower lines to be considered a defect.
        soft_threshold: values differing at least this much do not form defects themselves, but they are attached to defects obtained from the hard threshold if they touch one.
        sign: whether mark stripes with positive values, negative values or both.
        rel_threshold: the minimum difference of the value from the neighbouring upper and lower lines to be considered a defect (in physical values). Overwrite hard_threshold.

    Returns:
        tuple of (destriped 2d array, boolean mask of the corrected pixels).
    '''
    # Normalize image so the thresholds are independent of the data range.
    rng = (image.max() - image.min()) / 2
    n_image = (image - image.mean())/rng
    # Calculate positive line differences; the first row is duplicated so
    # the difference array matches the image shape.
    d_pos = np.diff(n_image.clip(0, None), axis=0)
    np.clip(d_pos, 0, None, out=d_pos)
    diff_pos = np.empty(image.shape)
    diff_pos[0] = d_pos[0]
    diff_pos[1:] = d_pos
    # Calculate negative line differences
    d_neg = np.diff(n_image.clip(None, 0), axis=0)
    np.clip(d_neg, None, 0, out=d_neg)
    diff_neg = np.empty(image.shape)
    diff_neg[0] = d_neg[0]
    diff_neg[1:] = d_neg
    # A threshold given in physical units overrides the normalized one.
    if rel_threshold:
        hard_threshold = rel_threshold*rng
    # Calculate masks for hard and soft thresholds (False scalars act as
    # neutral elements when the corresponding sign is not requested).
    m_hard_pos = False
    m_soft_pos = False
    m_hard_neg = False
    m_soft_neg = False
    if sign in ['positive', 'both']:
        m_hard_pos = diff_pos > hard_threshold
        m_soft_pos = diff_pos > soft_threshold
    if sign in ['negative', 'both']:
        m_hard_neg = diff_neg < -hard_threshold
        m_soft_neg = diff_neg < -soft_threshold
    # Opening (erosion+dilation) of the masks removes runs shorter than
    # min_length (soft defects must be twice as long to survive).
    m_hard = ndimage.binary_opening(m_hard_pos+m_hard_neg, structure=np.ones((1,min_length), dtype=bool))
    m_soft = ndimage.binary_opening(m_soft_pos+m_soft_neg, structure=np.ones((1,2*min_length), dtype=bool))
    # Addition of hard and soft mask
    mask = ndimage.binary_opening(m_soft+m_hard, structure=np.ones((1,min_length), dtype=bool))
    # Filter masked values.
    # np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    image_masked = np.ma.array(image, mask = mask, fill_value=np.nan)
    # NOTE(review): uniform_filter runs on the raw .data, so the masked
    # (defective) pixels still contribute to the 3-line average -- confirm
    # this is the intended behaviour.
    filt = ndimage.uniform_filter(image_masked.data, size=(3, 1))
    filtered = image*np.invert(mask) + filt*mask
    return filtered, mask
import numpy as np
class Level():
    ''' Background leveling methods exposed on a spym instance.

    The methods operate in place on the wrapped DataArray
    (``spym_instance._dr``); ``plane`` and ``align`` also store the
    subtracted background in ``spym_instance._bkg``.
    '''
    def __init__(self, spym_instance):
        # Keep a reference to the owner so results can be written back.
        self._spym = spym_instance
    def fixzero(self, **kwargs):
        ''' Add a constant to all the data to move the minimum (or the mean value) to zero.

        Args:
            to_mean: bool, optional. If true move mean value to zero, if false move minimum to zero (default).
        '''
        self._spym._dr.data = fixzero(self._spym._dr.data, **kwargs)
    def plane(self, **kwargs):
        '''Corrects for sample tilting by subtraction of a plane.
        '''
        # Plane subtraction only makes sense for 2d (image) data.
        if not self._spym._dr.data.ndim == 2:
            print("The DataArray is not an image. Abort.")
            return
        self._spym._dr.data, self._spym._bkg = plane(self._spym._dr.data.astype(float), **kwargs)
    def align(self, **kwargs):
        '''Align rows.

        Args:
            baseline: defines how baselines are estimated; 'mean' (default), 'median', 'poly'.
            axis: axis along which the baselines are calculated.
            poly_degree: polynomial degree if baseline='poly'.
        '''
        # Row alignment only makes sense for 2d (image) data.
        if not self._spym._dr.data.ndim == 2:
            print("The DataArray is not an image. Abort.")
            return
        self._spym._dr.data, self._spym._bkg = align(self._spym._dr.data.astype(float), **kwargs)
def fixzero(image,
            to_mean=False):
    ''' Shift the data by a constant so its minimum (or mean) becomes zero.

    Args:
        image: numpy array.
        to_mean: bool, optional. If true move mean value to zero, if false move minimum to zero (default).

    Returns:
        numpy array.
    '''
    reference = image.mean() if to_mean else image.min()
    return image - reference
def plane(image):
    '''Corrects for image tilting by subtraction of a plane.

    The plane is estimated by first-order polynomial fits of the
    column-wise and row-wise mean profiles, broadcast over the image
    and summed.

    Args:
        image: 2d numpy array.

    Returns:
        tuple of (flattened image, subtracted background), both 2d numpy arrays.
    '''
    # First-order fits of the average profile along each axis.
    bkg_x = _poly_bkg(image.mean(axis=0), 1)
    bkg_y = _poly_bkg(image.mean(axis=1), 1)
    # Broadcast each 1d fit over the full image.
    bkg_xx = np.apply_along_axis(_fill, 1, image, bkg_x)
    bkg_yy = np.apply_along_axis(_fill, 0, image, bkg_y)
    # NOTE(review): both fits include the image mean, so the summed
    # background subtracts the mean twice -- confirm the offset is intended.
    bkg = bkg_xx + bkg_yy
    planned = image - bkg
    return planned, bkg

def align(image, baseline='mean', axis=1, poly_degree=2):
    '''Align rows by subtracting a baseline from each line.

    Args:
        image: 2d numpy array.
        baseline: defines how baselines are estimated; 'mean' (default), 'median', 'poly'.
        axis: axis along which the baselines are calculated.
        poly_degree: polynomial degree if baseline='poly'.

    Returns:
        tuple of (corrected 2d numpy array, subtracted background).

    Raises:
        ValueError: if baseline is not 'mean', 'median' or 'poly'.
    '''
    if baseline == 'mean':
        bkg = np.apply_along_axis(_mean_bkg, axis, image)
    elif baseline == 'median':
        bkg = np.apply_along_axis(_median_bkg, axis, image)
    elif baseline == 'poly':
        bkg = np.apply_along_axis(_poly_bkg, axis, image, poly_degree)
    else:
        # An unknown baseline previously fell through to an obscure
        # NameError on bkg; fail early with an explicit message instead.
        raise ValueError(
            "baseline must be 'mean', 'median' or 'poly', got %r" % (baseline,))
    aligned = image - bkg
    return aligned, bkg

def _mean_bkg(line):
    # Constant baseline at the line mean.
    return np.full(line.shape[0], line.mean())

def _median_bkg(line):
    # Constant baseline at the line median.
    return np.full(line.shape[0], np.median(line))

def _poly_bkg(line, poly_degree):
    # Least-squares polynomial baseline on a normalized [-0.5, 0.5] axis.
    x = np.linspace(-.5, .5, line.shape[0])
    coefs = np.polyfit(x, line, poly_degree)
    return np.polyval(coefs, x)

def _fill(line, value):
    # Helper for apply_along_axis: replace each line with the given profile.
    return value
import os
from . import rhksm4, omicronscala
def load(filename, scaling=True):
    ''' Import data from common SPM file formats.

    Currently supported file formats are:
        * NeXus (.nx, .nxs). Package nxarray is needed.
        * RHK (.sm4).
        * Omicron Scala (.par).

    Args:
        filename: path to the SPM file.
        scaling: if True convert data to physical units (default), if False keep raw data.

    Returns:
        xarray Dataset with data and metadata, or None if the file cannot
        be read or its extension is not recognized.
    '''
    ds = None
    # Case-insensitive extension match (the original chained endswith
    # checks missed mixed-case extensions such as ".Sm4").
    lower = filename.lower()
    if lower.endswith((".nx", ".nxs")):
        try:
            import nxarray
        except ImportError:
            print("Error: nxarray package is needed to open .nx/.nxs files.")
            return None
        try:
            ds = nxarray.load(filename)
        except Exception:
            print("Error: the file does not appear to be valid.")
            return None
    elif lower.endswith(".par"):
        try:
            ds = omicronscala.to_dataset(filename, scaling=scaling)
        except Exception:
            print("Error: the file does not appear to be valid.")
            return None
    elif lower.endswith(".sm4"):
        try:
            ds = rhksm4.to_dataset(filename, scaling=scaling)
        except Exception:
            print("Error: the file does not appear to be valid.")
            return None
    else:
        # Previously an unrecognized extension fell through to a NameError
        # on ds; report it and return None instead.
        print("Error: file format not recognized.")
        return None
    if ds is None:
        return None
    # Record the original file name on every channel of the dataset.
    for dr in ds:
        ds[dr].attrs["filename"] = os.path.basename(filename)
    return ds
def convert(filename, folder=None):
    ''' Convert data from supported SPM file formats to NeXus/HDF5.

    Args:
        filename: path to the SPM file.
        folder: (optional) path for converted files. If not provided, converted files are placed in the same folder of the originals.

    Returns:
        Nothing.
    '''
    ds = load(filename, scaling=False)
    if ds is None:
        # Nothing to convert; load() already reported the problem.
        # (Previously this crashed with AttributeError on None.nxr.)
        return
    try:
        import nxarray
    except ImportError:
        print("Error: nxarray package is needed to convert files.")
        return
    # Strip the extension; NeXus adds its own.
    base = os.path.splitext(filename)[0]
    if folder:
        path = os.path.join(folder, os.path.basename(base))
    else:
        path = base
    ds.nxr.save(path)
from ._scala import OMICRONscala
def load(parFile):
    '''This method load data and metadata associated to an Omicron SCALA .par file.

    Args:
        parFile: the name of the .par file to be loaded

    Returns:
        a container for the channels in the .par file with their data and metadata

    Examples:
        f = omicronscala.load('/path/to/file.par') # load the file

        ch0 = f[0] # assign first channel

        ch0.label # returns channel name label
        ch0.data # returns channel data as a numpy array
        ch0.attrs # returns channel metadata as a dictionary
    '''
    # Thin wrapper: all parsing happens in the OMICRONscala constructor.
    return OMICRONscala(parFile)
def to_dataset(parFile, scaling=True):
'''This method load an Omicron SCALA .par file into an xarray Dataset.
The xarray package is required.
Args:
parFile: the name of the .par file to be loaded
scaling: if True convert data to physical units (default),
if False keep data in ADC units
Returns:
an xarray Dataset
Examples:
ds = omicronscala.to_dataset('/path/to/file.par')
ds
<xarray.Dataset>
ds.Z_Forward
<xarray.DataArray>
'''
try:
import xarray as xr
except:
print("Error: xarray package not found.")
return
f = load(parFile)
ds = xr.Dataset()
for ch in f:
ds[ch.label] = _to_datarr(ch, scaling=scaling)
return ds
def to_nexus(parFile, filename=None, **kwargs):
    '''This method convert an Omicron SCALA .par file into a NeXus file.

    The nxarray package is required.

    Args:
        parFile: the name of the .par file to be converted
        filename: (optional) path of the NeXus file to be saved.
            If not provided, a NeXus file is saved in the same folder
            of the .par file.
        **kwargs: any optional argument accepted by nexus NXdata.save() method

    Returns:
        nothing

    Examples:
        omicronscala.to_nexus('/path/to/file.par')
    '''
    try:
        import nxarray as nxr
    except ImportError:
        # Catch only ImportError: the previous bare except also hid
        # unrelated errors raised during the import.
        print("Error: nxarray package not found.")
        return
    if not filename:
        import os
        # Default output name: input path without its extension.
        filename = os.path.splitext(parFile)[0]
    ds = to_dataset(parFile, scaling=False)
    ds.nxr.save(filename, **kwargs)
def _to_datarr(ch, scaling):
    '''Create an xarray DataArray from an OMICRONchannel

    Args:
        ch: OMICRONchannel providing .data, .coords, .attrs and .label.
        scaling: if True convert raw ADC values to physical units.

    Returns:
        xarray DataArray carrying nexusformat-style attributes.
    '''
    import xarray as xr
    ## Create DataArray
    dr = xr.DataArray(ch.data,
                      coords=ch.coords,
                      attrs=ch.attrs,
                      name=ch.label)
    ## Set xarray/nexusformat attributes
    dr.attrs['long_name'] = ch.label.replace("_", " ")
    dr.attrs['units'] = dr.attrs['PhysicalUnit']
    # assumes scan coordinates are in nanometres -- TODO confirm
    dr.coords['x'].attrs['units'] = 'nm'
    dr.coords['y'].attrs['units'] = 'nm'
    ## Set additional nexusformat attributes
    dr.attrs['scaling_factor'] = dr.attrs['Resolution']
    dr.attrs['offset'] = 0.0
    dr.attrs['start_time'] = dr.attrs['Timestamp']
    dr.attrs['notes'] = dr.attrs['Comment']
    dr.attrs['interpretation'] = 'image'
    ## Set additional NXstm nexusformat attributes
    dr.attrs['bias'] = dr.attrs['GapVoltage']
    dr.attrs['bias_units'] = 'V'
    dr.attrs['setpoint'] = dr.attrs['FeedbackSet']
    dr.attrs['setpoint_units'] = 'nA'
    dr.attrs['scan_angle'] = dr.attrs['ScanAngle']
    dr.attrs['feedback_active'] = True
    dr.attrs['feedback_pgain'] = dr.attrs['LoopGain']
    #dr.attrs['time_per_point'] = dr.attrs['TopographyTimeperPoint']
    dr.coords['x'].attrs['offset'] = dr.attrs['XOffset']
    dr.coords['x'].attrs['long_name'] = 'x'
    dr.coords['y'].attrs['offset'] = dr.attrs['YOffset']
    dr.coords['y'].attrs['long_name'] = 'y'
    # Scale data to physical units; factor and offset are reset afterwards
    # so the conversion cannot be applied twice downstream.
    if scaling:
        dr.data = dr.data.astype(float) * dr.attrs['scaling_factor'] + dr.attrs['offset']
        dr.attrs['scaling_factor'] = 1.0
        dr.attrs['offset'] = 0.0
    return dr
import numpy as np
import os
class OMICRONscala:
    """This is the main class that represents an Omicron SCALA file

    Parsing happens eagerly: metadata is read from the .par file, raw data
    from the per-channel files it references, and the resulting channels
    are available through indexing.

    Args:
        filepath: the name of the .par file to be opened
    """
    def __init__(self,
                 filepath):
        """Load data and metadata relative to the given .par file

        Args:
            filepath: path to the .par file.

        Returns: metadata as dictionary and data as numpy array
        """
        self._filepath = filepath
        # The channel data files live in the same folder as the .par file.
        self._path = os.path.dirname(filepath)
        self._meta = self._loadMeta()
        self._data = self._loadData()
        self._channels = self._addChannels()

    def __getitem__(self, index):
        # Allow f[i] access to the parsed channels.
        return self._channels[index]

    def _loadMeta(self):
        """Load metadata from .par file into a dictionary

        Returns: dictionary of metadata
        """
        # Open .par file and load content
        with open(self._filepath) as f:
            meta = f.readlines()
        # Remove newline character and whitespaces
        meta = [e.strip() for e in meta]
        meta = [e.replace(' ','') for e in meta]
        # Remove final comment part
        meta = [e.split(';',1)[0] for e in meta]
        # Remove empty lines
        meta = [e for e in meta if e not in ('', ';')]
        # Add key to channel parameters, for each channel.
        # The .par format lists channel fields as fixed-position lines
        # following the channel declaration, hence the i+N indexing below.
        self._chlist = list()
        self._imglist = list()
        self._speclist = list()
        self._SpecParameter = ''
        for i,e in enumerate(meta):
            if 'TopographicChannel' in e:
                # Channel prefix from the last three characters of the data
                # file name -- assumed to encode the channel; TODO confirm.
                chName = meta[i+8][-3:].upper()+"_"
                meta[i] = chName+meta[i]
                meta[i+1] = chName+'Direction:'+meta[i+1]
                meta[i+2] = chName+'MinimumRawValue:'+meta[i+2]
                meta[i+3] = chName+'MaximumRawValue:'+meta[i+3]
                meta[i+4] = chName+'MinimumPhysValue:'+meta[i+4]
                meta[i+5] = chName+'MaximumPhysValue:'+meta[i+5]
                meta[i+6] = chName+'Resolution:'+meta[i+6]
                meta[i+7] = chName+'PhysicalUnit:'+meta[i+7]
                meta[i+8] = chName+'Filename:'+meta[i+8]
                meta[i+9] = chName+'DisplayName:'+meta[i+9]
                self._chlist.append(chName)
                self._imglist.append(chName)
            elif 'SpectroscopyChannel' in e:
                chName = meta[i+16][-3:].upper()+"_"
                meta[i] = chName+meta[i]
                self._SpecParameter = meta[i+1]
                meta[i+1] = chName+'Parameter:'+meta[i+1]
                meta[i+2] = chName+'Direction:'+meta[i+2]
                meta[i+3] = chName+'MinimumRawValue:'+meta[i+3]
                meta[i+4] = chName+'MaximumRawValue:'+meta[i+4]
                meta[i+5] = chName+'MinimumPhysValue:'+meta[i+5]
                meta[i+6] = chName+'MaximumPhysValue:'+meta[i+6]
                meta[i+7] = chName+'Resolution:'+meta[i+7]
                meta[i+8] = chName+'PhysicalUnit:'+meta[i+8]
                meta[i+9] = chName+'NumberSpecPoints:'+meta[i+9]
                meta[i+10] = chName+'StartPoint:'+meta[i+10]
                meta[i+11] = chName+'EndPoint:'+meta[i+11]
                meta[i+12] = chName+'Increment:'+meta[i+12]
                meta[i+13] = chName+'AcqTimePerPoint:'+meta[i+13]
                meta[i+14] = chName+'DelayTimePerPoint:'+meta[i+14]
                meta[i+15] = chName+'Feedback:'+meta[i+15]
                meta[i+16] = chName+'Filename:'+meta[i+16]
                meta[i+17] = chName+'DisplayName:'+meta[i+17]
                self._chlist.append(chName)
                self._speclist.append(chName)
            elif self._SpecParameter+'Parameter' in e:
                meta[i] = 'SpecParam:'+self._SpecParameter
                meta[i+1] = 'SpecParamRampSpeedEnabled:'+meta[i+1]
                meta[i+2] = 'SpecParamT1us:'+meta[i+2]
                meta[i+3] = 'SpecParamT2us:'+meta[i+3]
                meta[i+4] = 'SpecParamT3us:'+meta[i+4]
                meta[i+5] = 'SpecParamT4us:'+meta[i+5]
        # Split list into pairs
        meta = [e.split(':',1) for e in meta]
        # Create dictionary for metadata
        meta = {k:v for k,v in meta}
        # Adjust date as YYYY-MM-DD and time as HH:MM.
        # The 'Date' field apparently packs date and time together.
        year = '20'+meta['Date'][6:8]
        month = meta['Date'][3:5]
        day = meta['Date'][0:2]
        hours = meta['Date'][8:10]
        # NOTE(review): slice [11:13] is named 'seconds' but is joined into
        # the minutes position of the HH:MM string -- confirm against the
        # .par date/time layout.
        seconds = meta['Date'][11:13]
        meta['Time'] = hours+':'+seconds
        meta['Date'] = year+'-'+month+'-'+day
        # Calculate timestamp in seconds
        timeStamp = meta['Date']+'T'+meta['Time']+":00"
        meta['Timestamp'] = timeStamp
        return meta

    def _loadData(self):
        """Load data from .par file into a numpy array

        Returns: multidimensional numpy array
        """
        # Initialize data array
        xsize = int(self._meta['ImageSizeinX'])
        ysize = int(self._meta['ImageSizeinY'])
        data = list()
        # Cycle over image channels
        for i, chPrefix in enumerate(self._imglist):
            chFile = self._meta[chPrefix+'Filename']
            # Load data from current channel: big-endian 16-bit integers,
            # resized to the image shape declared in the metadata.
            data.append(np.resize(np.fromfile(os.path.join(self._path, chFile),dtype='>i2'), (xsize, ysize)))
        # Return data
        return data

    def _addChannels(self):
        # Wrap every image channel with its own metadata (plus the global
        # attributes) in an OMICRONchannel container.
        channels = list()
        for i, chName in enumerate(self._imglist):
            data = self._data[i]
            attrs = dict()
            for k,v in self._meta.items():
                if chName in k:
                    key = k.replace(chName, '')
                    # Convert known numeric fields to int/float.
                    if key in ['MinimumRawValue', 'MaximumRawValue', 'NumberSpecPoints']:
                        attrs[key] = int(v)
                    elif key in ['MinimumPhysValue', 'MaximumPhysValue', 'Resolution',
                                 'StartPoint', 'EndPoint', 'Increment', 'AcqTimePerPoint:', 'DelayTimePerPoint']:
                        attrs[key] = float(v)
                    else:
                        attrs[key] = v
            # The on-disk filename is dropped from the channel attributes.
            attrs.pop("Filename")
            channel = OMICRONchannel(data, {**attrs, **self._globAttrs()})
            channels.append(channel)
        return channels

    def _globAttrs(self):
        # Global attributes: everything that is not channel-specific,
        # with known numeric fields converted to float/int.
        attrs = self._meta.copy()
        for i, chName in enumerate(self._chlist):
            for k,v in self._meta.items():
                if chName in k:
                    del attrs[k]
        float_keys = ['FieldXSizeinnm',
                      'FieldYSizeinnm',
                      'IncrementX',
                      'IncrementY',
                      'ScanAngle',
                      'XOffset',
                      'YOffset',
                      'GapVoltage',
                      'FeedbackSet',
                      'LoopGain',
                      'XResolution',
                      'YResolution',
                      'ScanSpeed',
                      'XDrift',
                      'YDrift',
                      'TopographyTimeperPoint',
                      'ZSpeed',
                      'ZOutputGain',
                      'ZInputGain']
        int_keys = ['Format',
                    'ImageSizeinX',
                    'ImageSizeinY',
                    'SpectroscopyGridValueinX',
                    'SpectroscopyGridValueinY',
                    'SpectroscopyPointsinX',
                    'SpectroscopyLinesinY',
                    'SpecParamT1us',
                    'SpecParamT2us',
                    'SpecParamT3us',
                    'SpecParamT4us']
        for k,v in attrs.items():
            if k in float_keys:
                attrs[k] = float(v)
            elif k in int_keys:
                attrs[k] = int(v)
        return attrs
class OMICRONchannel:
    '''Container for a single channel of an Omicron SCALA file.

    Exposes the raw data, the merged metadata dictionary, a descriptive
    label and xarray-style coordinates.
    '''
    def __init__(self, data, attrs):
        '''
        Args:
            data: numpy array with the channel raw data.
            attrs: dictionary of channel plus global metadata.
        '''
        self.data = data
        self.attrs = attrs
        # Map the well-known channel codes to descriptive names.
        if self.attrs['TopographicChannel'] == "Z":
            channel = "Topography"
        elif self.attrs['TopographicChannel'] == "I":
            channel = "Current"
        else:
            channel = self.attrs['TopographicChannel']
        self.label = channel + "_" + self.attrs['Direction']
        xsize = self.attrs['ImageSizeinX']
        xres = self.attrs['IncrementX']
        ysize = self.attrs['ImageSizeinY']
        yres = self.attrs['IncrementY']
        # np.float was removed in NumPy 1.24; the builtin float is the
        # documented equivalent (float64 coordinates).
        self.coords = [('y', yres * np.arange(ysize, dtype=float)),
                       ('x', xres * np.arange(xsize, dtype=float))]
from ._sm4 import RHKsm4
def load(sm4file):
    '''This method load data and metadata from an RHK .sm4 file.

    Args:
        sm4file: the name of the .sm4 file to be loaded

    Returns:
        a container for the pages in the .sm4 file with their data and metadata

    Examples:
        f = rhksm4.load('/path/to/file.sm4') # load the file

        p0 = f[0] # assign first page in the file

        p0.label # returns page label name
        p0.data # returns page data as a numpy array
        p0.attrs # returns page metadata as a dictionary
    '''
    # Thin wrapper: all parsing happens in the RHKsm4 constructor.
    return RHKsm4(sm4file)
def to_dataset(sm4file, scaling=True):
'''This method load an RHK .sm4 file into an xarray Dataset.
The xarray package is required.
Args:
sm4file: the name of the .sm4 file to be loaded
scaling: if True convert data to physical units (default),
if False keep data in ADC units
Returns:
an xarray Dataset
Examples:
ds = rhksm4.to_dataset('/path/to/file.sm4')
ds
<xarray.Dataset>
ds.IDxxxxx
<xarray.DataArray>
'''
try:
import xarray as xr
except:
print("Error: xarray package not found.")
return
f = load(sm4file)
ds = xr.Dataset()
for p in f:
ds[p.label] = _to_datarr(p, scaling=scaling)
return ds
def to_nexus(sm4file, filename=None, **kwargs):
    '''This method convert an RHK .sm4 file into a NeXus file.

    The nxarray package is required.

    Args:
        sm4file: the name of the .sm4 file to be converted
        filename: (optional) path of the NeXus file to be saved.
            If not provided, a NeXus file is saved in the same folder
            of the .sm4 file.
        **kwargs: any optional argument accepted by nexus NXdata.save() method

    Returns:
        nothing

    Examples:
        rhksm4.to_nexus('/path/to/file.sm4')
    '''
    try:
        import nxarray as nxr
    except ImportError:
        # Catch only ImportError: the previous bare except also hid
        # unrelated errors raised during the import.
        print("Error: nxarray package not found.")
        return
    if not filename:
        import os
        # Default output name: input path without its extension.
        # (The previous code read sm4file._file.name, which fails because
        # sm4file is the path string, not an RHKsm4 instance.)
        filename = os.path.splitext(sm4file)[0]
    ds = to_dataset(sm4file, scaling=False)
    ds.nxr.save(filename, **kwargs)
def _to_datarr(p, scaling):
    '''Create an xarray DataArray from an RHKPage

    Args:
        p: RHKPage providing .data, .coords, .attrs and .label.
        scaling: if True convert raw ADC values to physical units.

    Returns:
        xarray DataArray carrying nexusformat-style attributes.
    '''
    import xarray as xr
    ## Create DataArray
    dr = xr.DataArray(p.data,
                      coords=p.coords,
                      attrs=p.attrs,
                      name=p.label)
    ## Define coordinates labels; 1d pages have no second dimension.
    x_label = dr.coords.dims[0]
    try:
        y_label = dr.coords.dims[1]
    except IndexError:
        y_label = ""
    ## Set xarray/nexusformat attributes
    dr.attrs['long_name'] = dr.name.replace("_", " ")
    dr.attrs['units'] = dr.attrs['RHK_Zunits']
    dr.coords[x_label].attrs['units'] = dr.attrs['RHK_Xunits']
    if y_label in dr.coords:
        dr.coords[y_label].attrs['units'] = dr.attrs['RHK_Yunits']
    ## Set additional nexusformat attributes
    dr.attrs['scaling_factor'] = dr.attrs['RHK_Zscale']
    dr.attrs['offset'] = dr.attrs['RHK_Zoffset']
    dr.attrs['start_time'] = dr.attrs['RHK_DateTime']
    dr.attrs['notes'] = dr.attrs['RHK_UserText']
    if dr.attrs['RHK_PageDataType'] == 1: # Line type
        dr.attrs['interpretation'] = 'spectrum'
    elif dr.attrs['RHK_PageDataType'] == 0: # Image type
        dr.attrs['interpretation'] = 'image'
    else:
        dr.attrs['interpretation'] = None
    ## Set additional NXstm nexusformat attributes
    dr.attrs['bias'] = dr.attrs['RHK_Bias']
    dr.attrs['bias_units'] = 'V'
    try:#TODO add support for correspondent PRM metadata
        # ZPI controller attributes are optional; fall back to neutral
        # values when the page carries none.
        dr.attrs['setpoint'] = dr.attrs['RHK_ZPI_SetPoint']
        dr.attrs['setpoint_units'] = dr.attrs['RHK_ZPI_SetPointUnit']
        if dr.attrs['RHK_ZPI_FeedbackType'] == 'Off':
            dr.attrs['feedback_active'] = False
        else:
            dr.attrs['feedback_active'] = True
        dr.attrs['feedback_pgain'] = dr.attrs['RHK_ZPI_ProportionalGain']
    except KeyError:
        dr.attrs['setpoint'] = 0
        dr.attrs['setpoint_units'] = ""
        dr.attrs['feedback_active'] = None
        dr.attrs['feedback_pgain'] = 0
    dr.attrs['scan_angle'] = dr.attrs['RHK_Angle']
    dr.attrs['time_per_point'] = dr.attrs['RHK_Period']
    dr.coords[x_label].attrs['offset'] = dr.attrs['RHK_Xoffset']
    if y_label in dr.coords:
        dr.coords[y_label].attrs['offset'] = dr.attrs['RHK_Yoffset']
    # Set filename (promoted to the standard attribute, RHK key removed)
    dr.attrs['filename'] = dr.attrs['RHK_FileName']
    dr.attrs.pop('RHK_FileName')
    # Set coordinates labels
    if dr.attrs['RHK_Xlabel'] == '':
        dr.coords[x_label].attrs['long_name'] = x_label
    else:
        dr.coords[x_label].attrs['long_name'] = dr.attrs['RHK_Xlabel']
    if y_label in dr.coords:
        if dr.attrs['RHK_PageDataType'] == 1:
            dr.coords[y_label].attrs['long_name'] = 'Trace'
        else:
            dr.coords[y_label].attrs['long_name'] = y_label
    # Scale data to physical units; factor and offset are reset afterwards
    # so the conversion cannot be applied twice downstream.
    if scaling:
        dr.data = dr.data.astype(float) * dr.attrs['scaling_factor'] + dr.attrs['offset']
        dr.attrs['scaling_factor'] = 1.0
        dr.attrs['offset'] = 0.0
    return dr
import numpy as np
from enum import Enum
## Definition of types
## Object type
class object_type(Enum):
    '''Type tags of the objects that make up an SM4 file.'''
    RHK_OBJECT_UNDEFINED = 0
    RHK_OBJECT_PAGE_INDEX_HEADER = 1
    RHK_OBJECT_PAGE_INDEX_ARRAY = 2
    RHK_OBJECT_PAGE_HEADER = 3
    RHK_OBJECT_PAGE_DATA = 4
    RHK_OBJECT_IMAGE_DRIFT_HEADER = 5
    RHK_OBJECT_IMAGE_DRIFT = 6
    RHK_OBJECT_SPEC_DRIFT_HEADER = 7
    RHK_OBJECT_SPEC_DRIFT_DATA = 8
    RHK_OBJECT_COLOR_INFO = 9
    RHK_OBJECT_STRING_DATA = 10
    RHK_OBJECT_TIP_TRACK_HEADER = 11
    RHK_OBJECT_TIP_TRACK_DATA = 12
    RHK_OBJECT_PRM = 13
    RHK_OBJECT_THUMBNAIL = 14
    RHK_OBJECT_PRM_HEADER = 15
    RHK_OBJECT_THUMBNAIL_HEADER = 16
    RHK_OBJECT_API_INFO = 17
    RHK_OBJECT_HISTORY_INFO = 18
    RHK_OBJECT_PIEZO_SENSITIVITY = 19
    RHK_OBJECT_FREQUENCY_SWEEP_DATA = 20
    RHK_OBJECT_SCAN_PROCESSOR_INFO = 21
    RHK_OBJECT_PLL_INFO = 22
    RHK_OBJECT_CH1_DRIVE_INFO = 23
    RHK_OBJECT_CH2_DRIVE_INFO = 24
    RHK_OBJECT_LOCKIN0_INFO = 25
    RHK_OBJECT_LOCKIN1_INFO = 26
    RHK_OBJECT_ZPI_INFO = 27
    RHK_OBJECT_KPI_INFO = 28
    RHK_OBJECT_AUX_PI_INFO = 29
    RHK_OBJECT_LOWPASS_FILTER0_INFO = 30
    RHK_OBJECT_LOWPASS_FILTER1_INFO = 31
## Page Data type
class page_data_type(Enum):
    '''Kinds of data a page can carry (image, line, text, movie, ...).'''
    RHK_DATA_IMAGE = 0
    RHK_DATA_LINE = 1
    RHK_DATA_XY_DATA = 2
    RHK_DATA_ANNOTATED_LINE = 3
    RHK_DATA_TEXT = 4
    RHK_DATA_ANNOTATED_TEXT = 5
    RHK_DATA_SEQUENTIAL = 6
    RHK_DATA_MOVIE = 7
## Page Source type
class page_source_type(Enum):
    '''Origin of a page: raw acquisition, processed, calculated or imported.'''
    RHK_SOURCE_RAW = 0
    RHK_SOURCE_PROCESSED = 1
    RHK_SOURCE_CALCULATED = 2
    RHK_SOURCE_IMPORTED = 3
## Page type
class page_type(Enum):
    '''Measurement type of a page (topography, current, spectra, ...).'''
    RHK_PAGE_UNDEFINED = 0
    RHK_PAGE_TOPOGRAPHIC = 1
    RHK_PAGE_CURRENT = 2
    RHK_PAGE_AUX = 3
    RHK_PAGE_FORCE = 4
    RHK_PAGE_SIGNAL = 5
    RHK_PAGE_FFT_TRANSFORM = 6
    RHK_PAGE_NOISE_POWER_SPECTRUM = 7
    RHK_PAGE_LINE_TEST = 8
    RHK_PAGE_OSCILLOSCOPE = 9
    RHK_PAGE_IV_SPECTRA = 10
    RHK_PAGE_IV_4x4 = 11
    RHK_PAGE_IV_8x8 = 12
    RHK_PAGE_IV_16x16 = 13
    RHK_PAGE_IV_32x32 = 14
    RHK_PAGE_IV_CENTER = 15
    RHK_PAGE_INTERACTIVE_SPECTRA = 16
    RHK_PAGE_AUTOCORRELATION = 17
    RHK_PAGE_IZ_SPECTRA = 18
    RHK_PAGE_4_GAIN_TOPOGRAPHY = 19
    RHK_PAGE_8_GAIN_TOPOGRAPHY = 20
    RHK_PAGE_4_GAIN_CURRENT = 21
    RHK_PAGE_8_GAIN_CURRENT = 22
    RHK_PAGE_IV_64x64 = 23
    RHK_PAGE_AUTOCORRELATION_SPECTRUM = 24
    RHK_PAGE_COUNTER = 25
    RHK_PAGE_MULTICHANNEL_ANALYSER = 26
    RHK_PAGE_AFM_100 = 27
    RHK_PAGE_CITS = 28
    RHK_PAGE_GPIB = 29
    RHK_PAGE_VIDEO_CHANNEL = 30
    RHK_PAGE_IMAGE_OUT_SPECTRA = 31
    RHK_PAGE_I_DATALOG = 32
    RHK_PAGE_I_ECSET = 33
    RHK_PAGE_I_ECDATA = 34
    RHK_PAGE_I_DSP_AD = 35
    RHK_PAGE_DISCRETE_SPECTROSCOPY_PP = 36
    RHK_PAGE_IMAGE_DISCRETE_SPECTROSCOPY = 37
    RHK_PAGE_RAMP_SPECTROSCOPY_RP = 38
    RHK_PAGE_DISCRETE_SPECTROSCOPY_RP = 39
## Line type
class line_type(Enum):
    '''Type of line (1d) data stored in a page.'''
    RHK_LINE_NOT_A_LINE = 0
    RHK_LINE_HISTOGRAM = 1
    RHK_LINE_CROSS_SECTION = 2
    RHK_LINE_LINE_TEST = 3
    RHK_LINE_OSCILLOSCOPE = 4
    RHK_LINE_RESERVED = 5
    RHK_LINE_NOISE_POWER_SPECTRUM = 6
    RHK_LINE_IV_SPECTRUM = 7
    RHK_LINE_IZ_SPECTRUM = 8
    RHK_LINE_IMAGE_X_AVERAGE = 9
    RHK_LINE_IMAGE_Y_AVERAGE = 10
    RHK_LINE_NOISE_AUTOCORRELATION_SPECTRUM = 11
    RHK_LINE_MULTICHANNEL_ANALYSER_DATA = 12
    RHK_LINE_RENORMALIZED_IV = 13
    RHK_LINE_IMAGE_HISTOGRAM_SPECTRA = 14
    RHK_LINE_IMAGE_CROSS_SECTION = 15
    RHK_LINE_IMAGE_AVERAGE = 16
    RHK_LINE_IMAGE_CROSS_SECTION_G = 17
    RHK_LINE_IMAGE_OUT_SPECTRA = 18
    RHK_LINE_DATALOG_SPECTRUM = 19
    RHK_LINE_GXY = 20
    RHK_LINE_ELECTROCHEMISTRY = 21
    RHK_LINE_DISCRETE_SPECTROSCOPY = 22
    RHK_LINE_DATA_LOGGER = 23
    RHK_LINE_TIME_SPECTROSCOPY = 24
    RHK_LINE_ZOOM_FFT = 25
    RHK_LINE_FREQUENCY_SWEEP = 26
    RHK_LINE_PHASE_ROTATE = 27
    RHK_LINE_FIBER_SWEEP = 28
## Image type
class image_type(Enum):
    '''Whether an image page holds normal or autocorrelated data.'''
    RHK_IMAGE_NORMAL = 0
    RHK_IMAGE_AUTOCORRELATED = 1
## Scan direction type
class scan_type(Enum):
    '''Scan direction of an image page.'''
    RHK_SCAN_RIGHT = 0
    RHK_SCAN_LEFT = 1
    RHK_SCAN_UP = 2
    RHK_SCAN_DOWN = 3
## Drift option type
class drift_option_type(Enum):
    '''Drift-compensation mode recorded with drift data.'''
    RHK_DRIFT_DISABLED = 0
    RHK_DRIFT_EACH_SPECTRA = 1
    RHK_DRIFT_EACH_LOCATION = 2
## SM4 class definition
class RHKsm4:
    """This is the main class that represents a RHK SM4 file

    The whole file is parsed eagerly in the constructor and the file
    handle is closed afterwards; pages are available through indexing.

    Args:
        filename: the name of the .sm4 file to be opened
    """
    def __init__(self,
                 filename):
        ## Open the file
        self._file = open(filename, 'rb')
        try:
            ## Read the File Header
            self._header = RHKFileHeader(self)
            ## Read Object list of File Header
            self._header._read_object_list(self)
            ## Read Page Index Header
            self._page_index_header = RHKPageIndexHeader(self)
            ## Read Object list of Page Index Header
            self._page_index_header._read_object_list(self)
            ## Seek to the start position of the Page Index Array
            offset = self._page_index_header._get_offset('RHK_OBJECT_PAGE_INDEX_ARRAY')
            self._seek(offset, 0)
            ## Read Page Index Array
            self._pages = []
            for i in range(self._page_index_header.page_count):
                page = RHKPage(self)
                # Read Page Index
                self._pages.append(page)
                # Read Object list of Page Index
                page._read_object_list(self)
            ## Read Pages content
            for page in self:
                page._read()
        finally:
            ## Close the file even when parsing fails part-way; previously
            ## any exception above leaked the open file handle.
            self._file.close()

    def __getitem__(self, index):
        # Allow f[i] access to the parsed pages.
        return self._pages[index]

    def _readb(self, dtype, count):
        '''Read bytewise a single value of the given numpy dtype from the file
        '''
        return np.fromfile(self._file, dtype=dtype, count=count)[0]

    def _reads(self, count):
        '''Read *count* 16-bit values from the file and join them as a string
        '''
        string = ''.join([chr(i) for i in np.fromfile(self._file, dtype=np.uint16, count=count)])
        return string.rstrip('\x00')

    def _readstr(self):
        '''Read RHK string object

        Each string is written to file by first writing the string length(2 bytes),
        then the string. So when we read, first read a short value, which gives the
        string length, then read that much bytes which represents the string.
        '''
        length = self._readb(np.uint16, 1)  # first 2 bytes is the string length
        string = ''.join([chr(i) for i in np.fromfile(self._file, dtype=np.uint16, count=length)])
        try:
            # Strings containing unpaired surrogates cannot be encoded;
            # return an empty string for those.
            string.encode("utf8")
            return string.rstrip('\x00')
        except UnicodeEncodeError:
            return ""

    def _readtime(self):
        '''Read RHK filetime object

        It is expressed in Windows epoch, a 64-bit value representing
        the number of 100-nanosecond intervals since January 1, 1601 (UTC).
        '''
        return np.fromfile(self._file, dtype=np.uint64, count=1)[0]

    def _seek(self, offset, whence):
        '''Seek the file to the given position
        '''
        self._file.seek(offset, whence)
class RHKObject:
    '''Define an RHK object.

    An Object contains:
        Object ID: (4 bytes) Type of data stored
        Offset: (4 bytes) Data offset
        Size: (4 bytes) size of the data

    Using the data offset and size, we can read the corresponding object data.
    '''
    def __init__(self, sm4):
        '''Read the object properties.

        Args:
            sm4: RHKsm4 instance positioned at the start of the object record.
        '''
        self.id = sm4._readb(np.uint32, 1)
        # Unknown IDs (from newer file versions) get a placeholder name.
        try:
            self.name = object_type(self.id).name
        except ValueError:
            self.name = 'RHK_OBJECT_UNKNOWN'
        self.offset = sm4._readb(np.uint32, 1)
        self.size = sm4._readb(np.uint32, 1)
        ''' Seek to the end position of the current Object
        (for compatibility with future file versions
        in case Object Field Size is no longer 12 bytes)'''
        #sm4._seek(sm4._header.object_field_size - 12, 1)
class RHKObjectContainer:
    '''Represents a class containing RHK Objects

    Mixin providing the object-list bookkeeping shared by headers and pages.
    Subclasses must set self._object_list_count before calling
    _read_object_list.
    '''
    def _read_object_list(self, sm4):
        '''Populate Object list by reading consecutive object records.
        '''
        self._object_list = []
        for i in range(self._object_list_count):
            self._object_list.append(RHKObject(sm4))

    def _get_offset(self, object_name):
        '''Get offset of the given object.

        Returns None implicitly when no object with that name is present.
        '''
        for obj in self._object_list:
            if obj.name == object_name:
                return obj.offset
def _read_object_content(self, obj):
    '''Dispatch the reading of one object's payload by its numeric ID.

    The IDs mirror the object_type enum values; objects with a zero
    offset or size are skipped as invalid.

    Args:
        obj: RHKObject whose content should be parsed.
    '''
    # Check if object position is valid then read it
    if obj.offset != 0 and obj.size != 0:
        if obj.id == 5:
            self._read_ImageDriftHeader(obj.offset)
        elif obj.id == 6:
            self._read_ImageDrift(obj.offset)
        elif obj.id == 7:
            self._read_SpecDriftHeader(obj.offset)
        elif obj.id == 8:
            self._read_SpecDriftData(obj.offset)
        elif obj.id == 9:
            ## Color Info is skipped
            #self._read_ColorInfo(obj.offset)
            pass
        elif obj.id == 10:
            self._read_StringData(obj.offset)
        elif obj.id == 11:
            self._read_TipTrackHeader(obj.offset)
        elif obj.id == 12:
            self._read_TipTrackData(obj.offset)
        elif obj.id == 13:
            # PRMdata is read within _read_PRMHeader()
            #self._read_PRMdata(obj.offset)
            pass
        elif obj.id == 15:
            self._read_PRMHeader(obj.offset)
        elif obj.id == 17:
            self._read_APIInfo(obj.offset)
        elif obj.id == 18:
            self._read_HistoryInfo(obj.offset)
        elif obj.id == 19:
            self._read_PiezoSensitivity(obj.offset)
        elif obj.id == 20:
            self._read_FrequencySweepData(obj.offset)
        elif obj.id == 21:
            self._read_ScanProcessorInfo(obj.offset)
        elif obj.id == 22:
            self._read_PLLInfo(obj.offset)
        elif obj.id == 23:
            self._read_ChannelDriveInfo(obj.offset, 'RHK_CH1Drive')
        elif obj.id == 24:
            self._read_ChannelDriveInfo(obj.offset, 'RHK_CH2Drive')
        elif obj.id == 25:
            self._read_LockinInfo(obj.offset, 'RHK_Lockin0')
        elif obj.id == 26:
            self._read_LockinInfo(obj.offset, 'RHK_Lockin1')
        elif obj.id == 27:
            self._read_PIControllerInfo(obj.offset, 'RHK_ZPI')
        elif obj.id == 28:
            self._read_PIControllerInfo(obj.offset, 'RHK_KPI')
        elif obj.id == 29:
            self._read_PIControllerInfo(obj.offset, 'RHK_AuxPI')
        elif obj.id == 30:
            self._read_LowPassFilterInfo(obj.offset, 'RHK_LowPassFilter0')
        elif obj.id == 31:
            self._read_LowPassFilterInfo(obj.offset, 'RHK_LowPassFilter1')
def _read_StringData(self, offset):
    ''' Read String Data for the current page.

    _string_count gives the number of strings in the current page.
    The strings appear in a fixed order, mapped to the labels below;
    extra (unknown) strings are stored under RHK_UnknownNNN keys.

    Args:
        offset: absolute file offset of the string data object.
    '''
    self._sm4._seek(offset, 0)
    # Create string labels list, adding any additional (at date unknown) label
    strList = ["RHK_Label",
               "RHK_SystemText",
               "RHK_SessionText",
               "RHK_UserText",
               "RHK_FileName",
               "RHK_Date",
               "RHK_Time",
               "RHK_Xunits",
               "RHK_Yunits",
               "RHK_Zunits",
               "RHK_Xlabel",
               "RHK_Ylabel",
               "RHK_StatusChannelText",
               "RHK_CompletedLineCount",
               "RHK_OverSamplingCount",
               "RHK_SlicedVoltage",
               "RHK_PLLProStatus",
               "RHK_SetpointUnit",
               "CHlist"]
    for i in range(self._string_count - 19):
        strList.append('RHK_Unknown'+"{:0>3d}".format(i))
    # Actual read of the strings; a few positions get special handling.
    for k in range(self._string_count):
        if k == 4: #file path
            # Store the on-disk file name instead of the recorded path.
            self._path = self._sm4._readstr()
            self.attrs[strList[k]] = self._sm4._file.name
        elif k in [13, 14]: #conversion to integer
            self.attrs[strList[k]] = int(self._sm4._readstr())
        elif k == 18: # parse CHDriveValues string
            # One "... value units" entry per line; split out the fields.
            CHlist = self._sm4._readstr().split("\n")
            for i, CH in enumerate(CHlist):
                self.attrs["RHK_CH"+str(i+1)+"DriveValue"] = float(CH.split(" ")[3])
                self.attrs["RHK_CH"+str(i+1)+"DriveValueUnits"] = CH.split(" ")[4]
        else:
            self.attrs[strList[k]] = self._sm4._readstr()
    # Create ISO8601 datetime stamp from the MM/DD/YY date string.
    mm, dd, yy = self.attrs['RHK_Date'].split('/')
    datetime = '20' + yy + '-' + mm + '-' + dd + 'T' + self.attrs['RHK_Time'] + '.000'
    self.attrs['RHK_DateTime'] = datetime
    # Add line type units based on line_type enum class
    line_type_xunits = {'RHK_LINE_NOT_A_LINE': '',
                        'RHK_LINE_HISTOGRAM': '',
                        'RHK_LINE_CROSS_SECTION': '',
                        'RHK_LINE_LINE_TEST': '',
                        'RHK_LINE_OSCILLOSCOPE': '',
                        'RHK_LINE_RESERVED': '',
                        'RHK_LINE_NOISE_POWER_SPECTRUM': '',
                        'RHK_LINE_IV_SPECTRUM': 'Bias',
                        'RHK_LINE_IZ_SPECTRUM': 'Z',
                        'RHK_LINE_IMAGE_X_AVERAGE': '',
                        'RHK_LINE_IMAGE_Y_AVERAGE': '',
                        'RHK_LINE_NOISE_AUTOCORRELATION_SPECTRUM': '',
                        'RHK_LINE_MULTICHANNEL_ANALYSER_DATA': '',
                        'RHK_LINE_RENORMALIZED_IV': '',
                        'RHK_LINE_IMAGE_HISTOGRAM_SPECTRA': '',
                        'RHK_LINE_IMAGE_CROSS_SECTION': '',
                        'RHK_LINE_IMAGE_AVERAGE': '',
                        'RHK_LINE_IMAGE_CROSS_SECTION_G': '',
                        'RHK_LINE_IMAGE_OUT_SPECTRA': '',
                        'RHK_LINE_DATALOG_SPECTRUM': '',
                        'RHK_LINE_GXY': '',
                        'RHK_LINE_ELECTROCHEMISTRY': '',
                        'RHK_LINE_DISCRETE_SPECTROSCOPY': '',
                        'RHK_LINE_DATA_LOGGER': '',
                        'RHK_LINE_TIME_SPECTROSCOPY': 'Time',
                        'RHK_LINE_ZOOM_FFT': '',
                        'RHK_LINE_FREQUENCY_SWEEP': '',
                        'RHK_LINE_PHASE_ROTATE': '',
                        'RHK_LINE_FIBER_SWEEP': ''}
    # Fall back to a default x label derived from the line type.
    if self.attrs['RHK_Xlabel'] == '':
        self.attrs['RHK_Xlabel'] = line_type_xunits[self.attrs['RHK_LineTypeName']]
def _read_SpecDriftHeader(self, offset):
''' Read Spec Drift Header for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_SpecDrift_Filetime'] = self._sm4._readtime()
self.attrs['RHK_SpecDrift_DriftOptionType'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_SpecDrift_DriftOptionTypeName'] = drift_option_type(self.attrs['RHK_SpecDrift_DriftOptionType']).name
_ = self._sm4._readb(np.uint32, 1) # SpecDrift StringCount
self.attrs['RHK_SpecDrift_Channel'] = self._sm4._readstr()
def _read_SpecDriftData(self, offset):
''' Read Spec Drift Data for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_SpecDrift_Time'] = []
self.attrs['RHK_SpecDrift_Xcoord'] = []
self.attrs['RHK_SpecDrift_Ycoord'] = []
self.attrs['RHK_SpecDrift_dX'] = []
self.attrs['RHK_SpecDrift_dY'] = []
self.attrs['RHK_SpecDrift_CumulativeX'] = []
self.attrs['RHK_SpecDrift_CumulativeY'] = []
for k in range(self.attrs['RHK_Ysize']):
self.attrs['RHK_SpecDrift_Time'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_Xcoord'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_Ycoord'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_dX'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_dY'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_CumulativeX'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_SpecDrift_CumulativeY'].append(self._sm4._readb(np.float32, 1))
def _read_ImageDriftHeader(self, offset):
''' Read Image Drift Header for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_ImageDrift_Filetime'] = self._sm4._readtime()
self.attrs['RHK_ImageDrift_DriftOptionType'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_ImageDrift_DriftOptionTypeName'] = drift_option_type(self.attrs['RHK_ImageDrift_DriftOptionType']).name
def _read_ImageDrift(self, offset):
''' Read Image Drift for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_ImageDrift_Time'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_dX'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_dY'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_CumulativeX'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_CumulativeY'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_VectorX'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_ImageDrift_VectorY'] = self._sm4._readb(np.float32, 1)
def _read_ColorInfo(self, offset):
''' Read Color Info for the current page.
Color Info is only for use into RHK DAW software.
'''
self._sm4._seek(offset, 0)
## Initialize metadata
self.attrs['RHK_Color_StructSize'] = []
self.attrs['RHK_Color_Reserved'] = []
#HSVColor
self.attrs['RHK_Color_Hstart'] = []
self.attrs['RHK_Color_Sstart'] = []
self.attrs['RHK_Color_Vstart'] = []
self.attrs['RHK_Color_Hstop'] = []
self.attrs['RHK_Color_Sstop'] = []
self.attrs['RHK_Color_Vstop'] = []
self.attrs['RHK_Color_ClrDirection'] = []
self.attrs['RHK_Color_NumEntries'] = []
self.attrs['RHK_Color_StartSlidePos'] = []
self.attrs['RHK_Color_EndSlidePos'] = []
#Color Transform
self.attrs['RHK_Color_Gamma'] = []
self.attrs['RHK_Color_Alpha'] = []
self.attrs['RHK_Color_Xstart'] = []
self.attrs['RHK_Color_Xstop'] = []
self.attrs['RHK_Color_Ystart'] = []
self.attrs['RHK_Color_Ystop'] = []
self.attrs['RHK_Color_MappingMode'] = []
self.attrs['RHK_Color_Invert'] = []
for k in range(self._color_info_count):
self.attrs['RHK_Color_StructSize'].append(self._sm4._readb(np.uint16, 1))
self.attrs['RHK_Color_Reserved'].append(self._sm4._readb(np.uint16, 1))
## HSVColor
self.attrs['RHK_Color_Hstart'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Sstart'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Vstart'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Hstop'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Sstop'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Vstop'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_ClrDirection'].append(self._sm4._readb(np.uint32, 1))
self.attrs['RHK_Color_NumEntries'].append(self._sm4._readb(np.uint32, 1))
self.attrs['RHK_Color_StartSlidePos'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_EndSlidePos'].append(self._sm4._readb(np.float32, 1))
## Color Transform
self.attrs['RHK_Color_Gamma'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Alpha'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Xstart'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Xstop'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Ystart'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_Ystop'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_Color_MappingMode'].append(self._sm4._readb(np.uint32, 1))
self.attrs['RHK_Color_Invert'].append(self._sm4._readb(np.uint32, 1))
def _read_TipTrackHeader(self, offset):
''' Read Tip track Header for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_TipTrack_Filetime'] = self._sm4._readtime()
self.attrs['RHK_TipTrack_FeatureHeight'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_TipTrack_FeatureWidth'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_TipTrack_TimeConstant'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_TipTrack_CycleRate'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_TipTrack_PhaseLag'] = self._sm4._readb(np.float32, 1)
_ = self._sm4._readb(np.uint32, 1) # TipTrack StringCount
self.attrs['RHK_TipTrack_TipTrackInfoCount'] = self._sm4._readb(np.uint32, 1)
self.attrs["RHK_TipTrack_Channel"] = self._sm4._readstr()
def _read_TipTrackData(self, offset):
''' Read Tip Track Data for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_TipTrack_CumulativeTime'] = []
self.attrs['RHK_TipTrack_Time'] = []
self.attrs['RHK_TipTrack_dX'] = []
self.attrs['RHK_TipTrack_dY'] = []
for k in range(self.attrs['RHK_TipTrack_TipTrackInfoCount']):
self.attrs['RHK_TipTrack_CumulativeTime'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_TipTrack_Time'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_TipTrack_dX'].append(self._sm4._readb(np.float32, 1))
self.attrs['RHK_TipTrack_dY'].append(self._sm4._readb(np.float32, 1))
def _read_PRMdata(self, offset):
''' Read PRM Data for the current page.
Valid only for RHK XPMPro generated files.
PRM data could be compressed with Zlib.
'''
import zlib
self._sm4._seek(offset, 0)
if self._PRM_CompressionFlag == 0:
PRMdata = np.fromfile(self._sm4._file, dtype=np.uint32, count=self._PRM_DataSize)
elif self._PRM_CompressionFlag == 1:
comprPRMdata = np.fromfile(self._sm4._file, dtype=np.uint32, count=self._PRM_CompressionSize)
PRMdata = zlib.decompress(comprPRMdata, wbits=0, bufsize=self._PRM_DataSize)
self.attrs['RHK_PRMdata'] = PRMdata.decode('CP437')
def _read_PRMHeader(self, offset):
''' Read PRM Header for the current page.
Valid only for RHK XPMPro generated files.
'''
self._sm4._seek(offset, 0)
self._PRM_CompressionFlag = self._sm4._readb(np.uint32, 1)
self._PRM_DataSize = self._sm4._readb(np.uint32, 1)
self._PRM_CompressionSize = self._sm4._readb(np.uint32, 1)
prm_data_offset = self._sm4._header._get_offset('RHK_OBJECT_PRM')
self._read_PRMdata(prm_data_offset)
def _read_APIInfo(self, offset):
''' Read API Info for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_API_VoltageHigh'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_API_VoltageLow'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_API_Gain'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_API_Offset'] = self._sm4._readb(np.float32, 1)
self.attrs['RHK_API_RampMode'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_RampType'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_Step'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_ImageCount'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_DAC'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_MUX'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_API_STMBias'] = self._sm4._readb(np.uint32, 1)
_ = self._sm4._readb(np.uint32, 1) # API StringCount
self.attrs['RHK_API_Units'] = self._sm4._readstr()
def _read_HistoryInfo(self, offset):
''' Read History Info for the current page.
'''
self._sm4._seek(offset, 0)
_ = self._sm4._readb(np.uint32, 1) # History StringCount
_ = self._sm4._readstr() # History Path
_ = self._sm4._readstr() # History Pixel2timeFile
def _read_PiezoSensitivity(self, offset):
''' Read Piezo Sensitivity for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_PiezoSensitivity_TubeX'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_TubeY'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_TubeZ'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_TubeZOffset'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_ScanX'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_ScanY'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_ScanZ'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PiezoSensitivity_Actuator'] = self._sm4._readb(np.float64, 1)
_ = self._sm4._readb(np.uint32, 1) # PiezoSensitivity StringCount
self.attrs['RHK_PiezoSensitivity_TubeXUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_TubeYUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_TubeZUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_TubeZOffsetUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ScanXUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ScanYUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ScanZUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ActuatorUnit'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_TubeCalibration'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ScanCalibration'] = self._sm4._readstr()
self.attrs['RHK_PiezoSensitivity_ActuatorCalibration'] = self._sm4._readstr()
def _read_FrequencySweepData(self, offset):
''' Read Frequency Sweep Data for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_FrequencySweep_PSDTotalSignal'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_FrequencySweep_PeakFrequency'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_FrequencySweep_PeakAmplitude'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_FrequencySweep_DriveAmplitude'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_FrequencySweep_Signal2DriveRatio'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_FrequencySweep_QFactor'] = self._sm4._readb(np.float64, 1)
_ = self._sm4._readb(np.uint32, 1) # FrequencySweep StringCount
self.attrs['RHK_FrequencySweep_TotalSignalUnit'] = self._sm4._readstr()
self.attrs['RHK_FrequencySweep_PeakFrequencyUnit'] = self._sm4._readstr()
self.attrs['RHK_FrequencySweep_PeakAmplitudeUnit'] = self._sm4._readstr()
self.attrs['RHK_FrequencySweep_DriveAmplitudeUnit'] = self._sm4._readstr()
self.attrs['RHK_FrequencySweep_Signal2DriveRatioUnit'] = self._sm4._readstr()
self.attrs['RHK_FrequencySweep_QFactorUnit'] = self._sm4._readstr()
def _read_ScanProcessorInfo(self, offset):
''' Read Scan Processor Info for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs['RHK_ScanProcessor_XSlopeCompensation'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_ScanProcessor_YSlopeCompensation'] = self._sm4._readb(np.float64, 1)
_ = self._sm4._readb(np.uint32, 1) # ScanProcessor StringCount
self.attrs['RHK_ScanProcessor_XSlopeCompensationUnit'] = self._sm4._readstr()
self.attrs['RHK_ScanProcessor_YSlopeCompensationUnit'] = self._sm4._readstr()
def _read_PLLInfo(self, offset):
''' Read PLL Info for the current page.
'''
self._sm4._seek(offset, 0)
_ = self._sm4._readb(np.uint32, 1) # PLL StringCount
self.attrs['RHK_PLL_AmplitudeControl'] = self._sm4._readb(np.uint32, 1)
self.attrs['RHK_PLL_DriveAmplitude'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_DriveRefFrequency'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_LockinFreqOffset'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_LockinHarmonicFactor'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_LockinPhaseOffset'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_PIGain'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_PIIntCutoffFreq'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_PILowerBound'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_PIUpperBound'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_DissPIGain'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_DissPIIntCutoffFreq'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_DissPILowerBound'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_DissPIUpperBound'] = self._sm4._readb(np.float64, 1)
self.attrs['RHK_PLL_LockinFilterCutoffFreq'] = self._sm4._readstr()
self.attrs['RHK_PLL_DriveAmplitudeUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_DriveFrequencyUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_LockinFreqOffsetUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_LockinPhaseUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_PIGainUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_PIICFUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_PIOutputUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_DissPIGainUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_DissPIICFUnit'] = self._sm4._readstr()
self.attrs['RHK_PLL_DissPIOutputUnit'] = self._sm4._readstr()
def _read_ChannelDriveInfo(self, offset, metaString):
''' Read Channel Drive Info for the current page.
'''
self._sm4._seek(offset, 0)
_ = self._sm4._readb(np.uint32, 1) # ChannelDrive StringCount
self.attrs[metaString + '_MasterOscillator'] = self._sm4._readb(np.uint32, 1)
self.attrs[metaString + '_Amplitude'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_Frequency'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_PhaseOffset'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_HarmonicFactor'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_AmplitudeUnit'] = self._sm4._readstr()
self.attrs[metaString + '_FrequencyUnit'] = self._sm4._readstr()
self.attrs[metaString + '_PhaseOffsetUnit'] = self._sm4._readstr()
self.attrs[metaString + '_ReservedUnit'] = self._sm4._readstr()
def _read_LockinInfo(self, offset, metaString):
''' Read Lockin Info for the current page.
'''
self._sm4._seek(offset, 0)
_ = self._sm4._readb(np.uint32, 1) # LockinInfo StringCount
self.attrs[metaString + '_NonMasterOscillator'] = self._sm4._readb(np.uint32, 1)
self.attrs[metaString + '_Frequency'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_HarmonicFactor'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_PhaseOffset'] = self._sm4._readb(np.float64, 1)
#Try FilterCutoffFrequency reading from some file versions
try:
self.attrs[metaString + '_FilterCutoffFrequency'] = self._sm4._readstr()
self.attrs[metaString + '_FreqUnit'] = self._sm4._readstr()
self.attrs[metaString + '_PhaseUnit'] = self._sm4._readstr()
except:
self.attrs[metaString + '_FilterCutoffFrequency'] = ""
self.attrs[metaString + '_FreqUnit'] = ""
self.attrs[metaString + '_PhaseUnit'] = ""
def _read_PIControllerInfo(self, offset, metaString):
''' Read PI Controller Info for the current page.
'''
self._sm4._seek(offset, 0)
self.attrs[metaString + '_SetPoint'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_ProportionalGain'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_IntegralGain'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_LowerBound'] = self._sm4._readb(np.float64, 1)
self.attrs[metaString + '_UpperBound'] = self._sm4._readb(np.float64, 1)
_ = self._sm4._readb(np.uint32, 1) # PIController StringCount
self.attrs[metaString + '_FeedbackType'] = self._sm4._readstr()
self.attrs[metaString + '_SetPointUnit'] = self._sm4._readstr()
self.attrs[metaString + '_ProportionalGainUnit'] = self._sm4._readstr()
self.attrs[metaString + '_IntegralGainUnit'] = self._sm4._readstr()
self.attrs[metaString + '_OutputUnit'] = self._sm4._readstr()
def _read_LowPassFilterInfo(self, offset, metaString):
''' Read Low-Pass Filter Info for the current page.
'''
self._sm4._seek(offset, 0)
_ = self._sm4._readb(np.uint32, 1) # LowPassFilter StringCount
freq, units = self._sm4._readstr().split(" ")
self.attrs[metaString + '_CutoffFrequency'] = float(freq)
self.attrs[metaString + '_CutoffFrequencyUnits'] = units
class RHKFileHeader(RHKObjectContainer):
    '''File Header of an SM4 file.

    Holds the general information about the file and, through its object
    list, the offsets of the other top-level structures (index header,
    PRM data, ...).
    '''
    def __init__(self, sm4):
        '''Read the File Header.

        A 2-byte header size (currently 56) precedes the header proper:
            signature:         18 UTF-16 characters, e.g. "STiMage 005.006 1"
                               (major.minor version, Unicode flag)
            total page count:  uint32, number of pages in the file
            object list count: uint32, objects following the header (=3)
            object field size: uint32, size of one Object entry (=12 bytes)
            reserved:          2 x uint32 for future use
        '''
        self.header_size = sm4._readb(np.uint16, 1)
        self.signature = sm4._reads(18)
        self.total_page_count = sm4._readb(np.uint32, 1)
        self._object_list_count = sm4._readb(np.uint32, 1)
        self.object_field_size = sm4._readb(np.uint32, 1)
        self.reserved = sm4._readb(np.uint32, 2)
        # Jump to the declared end of the header rather than relying on the
        # fields read above, for compatibility with future file versions in
        # which the header may no longer be 56 bytes.
        sm4._seek(self.header_size + 2, 0)
class RHKPageIndexHeader(RHKObjectContainer):
    '''Page Index Header of an SM4 file.'''
    def __init__(self, sm4):
        '''Read the Page Index Header.

        Layout:
            page count:        uint32, pages in the Page Index Array
            object list count: uint32, Objects following this header
                               (currently =1, the Page Index Array)
            reserved:          2 x uint32 for future use
        '''
        # Seek to the header position recorded in the file header.
        self.offset = sm4._header._get_offset('RHK_OBJECT_PAGE_INDEX_HEADER')
        sm4._seek(self.offset, 0)
        self.page_count = sm4._readb(np.uint32, 1)
        self._object_list_count = sm4._readb(np.uint32, 1)
        self.reserved = sm4._readb(np.uint32, 2)
class RHKPageHeader(RHKObjectContainer):
    ''' Class representing the Page Header.

    Stores the per-page header metadata into the page's attrs dict and is
    followed in the file by its Objects, in the number given by
    _object_list_count.
    '''
    def __init__(self, page, sm4):
        '''Read the Page Header of *page* from *sm4*.

        Dispatches on the page data type: "sequential" pages (type 6) use
        a different, shorter header layout than all other page types.
        '''
        self.sm4 = sm4
        ## Seek for the position of the Page Header
        self.offset = page._get_offset('RHK_OBJECT_PAGE_HEADER')
        self.sm4._seek(self.offset, 0)
        if ( page._page_data_type == 6 ):  # "Sequential" Page Data type
            self.read_sequential_type(page)
        else:
            self.read_default_type(page)  # all other Page Data types
    def read_sequential_type(self, page):
        '''Read the header of a "sequential" (page data type 6) page.'''
        page.attrs['RHK_DataType'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_DataLength'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_ParamCount'] = self.sm4._readb(np.uint32, 1)
        self._object_list_count = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_DataInfoSize'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_DataInfoStringCount'] = self.sm4._readb(np.uint32, 1)
        ## Adding manually these attributes for consistency with subsequent code
        page._page_type = 0
        page._line_type = 0
        page.attrs['RHK_PageType'] = page._page_type
        page.attrs['RHK_PageTypeName'] = page_type(page._page_type).name
        page.attrs['RHK_LineType'] = page._line_type
        page.attrs['RHK_LineTypeName'] = line_type(page._line_type).name
        # ParamCount records of (DataLength + 1) values each.
        # NOTE(review): _read_data() divides _page_data_size by 4 (bytes to
        # 4-byte items); here it is set as an item count -- confirm that
        # sequential pages read the intended amount of data.
        page._page_data_size = page.attrs['RHK_ParamCount']*(page.attrs['RHK_DataLength'] + 1)
    def read_default_type(self, page):
        '''Read the header of any non-sequential page, field by field.'''
        _ = self.sm4._readb(np.uint16, 1) # FieldSize (unused)
        page._string_count = self.sm4._readb(np.uint16, 1)
        page._page_type = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_PageType'] = page._page_type
        # Enum lookups fall back to *_UNKNOWN names for values not present
        # in this library's enum definitions (newer file revisions).
        try:
            page.attrs['RHK_PageTypeName'] = page_type(page._page_type).name
        except ValueError:
            page.attrs['RHK_PageTypeName'] = 'RHK_PAGE_UNKNOWN'
        page.attrs['RHK_DataSubSource'] = self.sm4._readb(np.uint32, 1)
        page._line_type = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_LineType'] = page._line_type
        try:
            page.attrs['RHK_LineTypeName'] = line_type(page._line_type).name
        except ValueError:
            page.attrs['RHK_LineTypeName'] = 'RHK_LINE_UNKNOWN'
        page.attrs['RHK_Xcorner'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_Ycorner'] = self.sm4._readb(np.uint32, 1)
        ''' Xsize is the number of pixels in the X direction for an image page,
        or the number of points per spectra/line for line pages.'''
        page.attrs['RHK_Xsize'] = self.sm4._readb(np.uint32, 1)
        ''' Ysize is the number of pixels in the Y direction for an image page,
        or the number of spectra stored in the page.'''
        page.attrs['RHK_Ysize'] = self.sm4._readb(np.uint32, 1)
        page._image_type = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_ImageType'] = page._image_type
        try:
            page.attrs['RHK_ImageTypeName'] = image_type(page._image_type).name
        except ValueError:
            page.attrs['RHK_ImageTypeName'] = 'RHK_IMAGE_UNKNOWN'
        page._scan_type = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_ScanType'] = page._scan_type
        try:
            page.attrs['RHK_ScanTypeName'] = scan_type(page._scan_type).name
        except ValueError:
            page.attrs['RHK_ScanTypeName'] = 'RHK_SCAN_UNKNOWN'
        page.attrs['RHK_GroupId'] = self.sm4._readb(np.uint32, 1)
        page._page_data_size = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_MinZvalue'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_MaxZvalue'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_Xscale'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Yscale'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Zscale'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_XYscale'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Xoffset'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Yoffset'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Zoffset'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Period'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Bias'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Current'] = self.sm4._readb(np.float32, 1)
        page.attrs['RHK_Angle'] = self.sm4._readb(np.float32, 1)
        page._color_info_count = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_GridXsize'] = self.sm4._readb(np.uint32, 1)
        page.attrs['RHK_GridYsize'] = self.sm4._readb(np.uint32, 1)
        self._object_list_count = self.sm4._readb(np.uint32, 1)
        page._32bit_data_flag = self.sm4._readb(np.uint8, 1)
        page._reserved_flags = self.sm4._readb(np.uint8, 3)  # 3 bytes
        page._reserved = self.sm4._readb(np.uint8, 60)  # 60 bytes
    def read_objects(self, page):
        '''Read the Page Header's object list and each object's content.'''
        # Read Page Header objects
        self._read_object_list(self.sm4)
        # Add Data Info if "Sequential" Page Data type
        if ( page._page_data_type == 6 ):
            ## Initialize metadata
            page.attrs['RHK_Sequential_ParamGain'] = []
            page.attrs['RHK_Sequential_ParamLabel'] = []
            page.attrs['RHK_Sequential_ParamUnit'] = []
            for i in range(page.attrs['RHK_ParamCount']):
                ## Parameter gain
                page.attrs['RHK_Sequential_ParamGain'].append(self.sm4._readb(np.float32, 1))
                ## Name of the parameter
                page.attrs['RHK_Sequential_ParamLabel'].append(self.sm4._readstr())
                ## Unit of the parameter
                page.attrs['RHK_Sequential_ParamUnit'].append(self.sm4._readstr())
        # Read each object and add to Page metadata
        for obj in self._object_list:
            page._read_object_content(obj)
class RHKPage(RHKObjectContainer):
    '''A single Page of an SM4 file: its data array plus metadata.'''
    def __init__(self, sm4):
        '''Read the Page Index entry for this page.

        Layout:
            page ID:           unique GUID (8 x uint16)
            page data type:    type of the data stored in the page
            page source type:  source that produced the page
            object list count: Objects following this index entry
            minor version:     file minor version (2 QP, 4 XPMPro, 6 Rev9)
        The object list currently stores: 1. Page Header, 2. Page Data,
        3. Thumbnail, 4. Thumbnail header.
        '''
        self._sm4 = sm4
        ## Initialize Page Index and Page meta dictionaries
        self.attrs = {}
        self.attrs['RHK_PRMdata'] = ""
        self.attrs['RHK_PageID'] = sm4._readb(np.uint16, 8)
        self._page_data_type = sm4._readb(np.uint32, 1)
        self.attrs['RHK_PageDataType'] = self._page_data_type
        # Enum lookups fall back to *_UNKNOWN for values not covered by
        # this library's enum definitions (newer file revisions).
        try:
            self.attrs['RHK_PageDataTypeName'] = page_data_type(self._page_data_type).name
        except ValueError:
            self.attrs['RHK_PageDataTypeName'] = 'RHK_DATA_UNKNOWN'
        self._page_source_type = sm4._readb(np.uint32, 1)
        self.attrs['RHK_PageSourceType'] = self._page_source_type
        try:
            self.attrs['RHK_PageSourceTypeName'] = page_source_type(self._page_source_type).name
        except ValueError:
            self.attrs['RHK_PageSourceTypeName'] = 'RHK_SOURCE_UNKNOWN'
        self._object_list_count = sm4._readb(np.uint32, 1)
        self.attrs['RHK_MinorVer'] = sm4._readb(np.uint32, 1)
        ## Add signature from File Header
        self.attrs['RHK_Signature'] = sm4._header.signature
    def _read(self):
        '''Read the Page Header and Page Data.
        Thumbnail and Thumbnail Header are discarded.
        '''
        ## Read Page Header
        self._header = RHKPageHeader(self, self._sm4)
        self._header.read_objects(self)
        ## Set page label
        # Fix: default scan_direction first so it is always bound; it was
        # previously assigned only inside conditional branches, which could
        # raise NameError for pages that match none of them.
        scan_direction = ''
        if self.attrs['RHK_PageDataType'] == 0 and self._page_data_type != 6:
            if self.attrs['RHK_ScanType'] == 0:
                scan_direction = '_Forward'
            elif self.attrs['RHK_ScanType'] == 1:
                scan_direction = '_Backward'
        if self.attrs['RHK_Label'] != '':
            # Normalize the stored label into an identifier-like name.
            label = self.attrs['RHK_Label']
            label = label.replace(" ", "_")
            label = label.replace("-", "_")
            if label.startswith("_"):
                label = label[1:]
            self.label = label + scan_direction
        else:
            self.label = "ID" + str(self.attrs['RHK_PageID'])
        ## Read Page Data
        self._read_data()
        ## Read PRM data from file header
        for obj in self._sm4._header._object_list:
            self._read_object_content(obj)
    def _read_data(self):
        '''Read the raw Page Data and reshape it into data + coordinates.'''
        ## Seek for the position of the Page Data
        offset = self._get_offset('RHK_OBJECT_PAGE_DATA')
        self._sm4._seek(offset, 0)
        ## Load data, selecting float or long integer type.
        # Page data is stored as 4-byte items: float32 for spectral line
        # types and sequential pages, int32 otherwise.
        data_size = int(self._page_data_size / 4)
        if ( self._line_type in [1, 6, 9, 10, 11, 13, 18, 19, 21, 22] or self._page_data_type == 6 ):
            raw_data = np.fromfile(self._sm4._file, dtype=np.float32, count=data_size)
            ## For Sequential_data pages the page data is an array of n
            ## elements of m values each, where m is the Param count and n
            ## the Data length stored in the page header; the first float
            ## of each element represents the output values.
        else:
            raw_data = np.fromfile(self._sm4._file, dtype=np.int32, count=data_size)
        # Reshape and store data
        self.data, self.coords = self._reshape_data(raw_data)
    def _reshape_data(self, raw_data):
        '''Reshape the raw page data and build its coordinate arrays.

        Returns (data, coords) where coords is a list of (label, values)
        pairs suitable for building a labelled array.
        '''
        xsize = self.attrs['RHK_Xsize']
        ysize = self.attrs['RHK_Ysize']
        xscale = self.attrs['RHK_Xscale']
        yscale = self.attrs['RHK_Yscale']
        ## Define coordinates labels
        x_label = self.label + "_x"
        y_label = self.label + "_y"
        # Reshape data
        if self._page_data_type == 0: # Image type
            # NOTE(review): reshape(xsize, ysize) assumes rows of length
            # ysize -- confirm against files where Xsize != Ysize.
            data = raw_data.reshape(xsize, ysize)
            coords = [(x_label, abs(xscale) * np.arange(xsize, dtype=np.float64)),
                      (y_label, abs(yscale) * np.arange(ysize, dtype=np.float64))]
            # The sign of the scales encodes the scan direction; flip the
            # array so the orientation is always consistent.
            if xscale < 0:
                data = np.flip(data, axis=1)
            if yscale > 0:
                data = np.flip(data, axis=0)
        elif self._page_data_type == 1: # Line type
            data = raw_data.reshape(ysize, xsize).transpose()
            xoffset = self.attrs['RHK_Xoffset']
            coords = [(x_label, xscale * np.arange(xsize, dtype=np.float64) + xoffset),
                      (y_label, int(yscale) * np.arange(ysize, dtype=np.uint32))]
            if self._line_type == 22: # Discrete spectroscopy has shape xsize*(ysize+1)
                # An extra leading column carries the abscissa values.
                # NOTE(review): coords[1] reuses x_label here -- verify this
                # labelling is intentional.
                tmp = raw_data.reshape(xsize, ysize+1).transpose()
                coords[1] = (x_label, tmp[0])
                data = tmp[1:]
        else:
            # Any other data type is kept flat with an index coordinate.
            data = raw_data
            coords = [(x_label, np.arange(xsize*ysize, dtype=np.uint32))]
        return data, coords
import matplotlib.pyplot as plt
import hvplot.xarray
class Plotting():
    '''Matplotlib and hvplot plotting helpers for spym data.

    Fix: the original final line had dataset-export metadata fused onto it
    (``... | /rhkpy-.../__init__.py | ...``), which broke the file syntax;
    the stray text is removed and documentation expanded.
    '''
    def __init__(self, spym_instance):
        # Keep a reference to the owning spym accessor; its ._dr attribute
        # holds the DataArray being plotted.
        self._spym = spym_instance
    def plot(self, title=None, waterfall=False, waterfall_limit=15, **kwargs):
        ''' Plot data with custom parameters using matplotlib.
        Args:
            title: (optional) title of the figure (string). By default gives some basic information on the data plotted. Pass an empty string to disable it.
            waterfall: (optional) boolean determining if plot spectrum data as waterfall (default is False).
            waterfall_limit: (optional) number of spectra above which spectrum data is plotted as image instead of waterfall (default is 15).
            **kwargs: any argument accepted by xarray.plot() function.
        '''
        dr = self._spym._dr
        attrs = dr.attrs
        # Clear plt
        plt.clf()
        # Set plot properties
        if attrs['interpretation'] == 'spectrum':
            y_coord = dr.coords[dr.coords.dims[1]]
            # Check if plot spectra as waterfall or image
            if len(y_coord.data) <= waterfall_limit or waterfall:
                # plot wraps matplotlib.pyplot.plot()
                plot = dr.plot.line(hue=dr.coords.dims[1], **kwargs)
            else:
                plot = dr.plot(y=dr.coords.dims[1], **kwargs)
        elif attrs['interpretation'] == 'image':
            # plot is an instance of matplotlib.collections.QuadMesh
            plot = dr.plot.pcolormesh(**kwargs)
            fig = plot.get_figure()
            ax = plot.axes
            # Images are stored top-to-bottom; flip the y axis accordingly.
            ax.invert_yaxis()
            # Fit figure pixel size to image
            fig_width, fig_height = self._fit_figure_to_image(fig, dr.data, ax)
            fig.set_size_inches(fig_width, fig_height)
            # Apply colormap
            plot.set_cmap('afmhot')
        else:
            # Create figure
            # xarray plot() wraps:
            #   - matplotlib.pyplot.plot() for 1d arrays
            #   - matplotlib.pyplot.pcolormesh() for 2d arrays
            #   - matplotlib.pyplot.hist() for anything else
            plot = dr.plot(**kwargs)
        # Set figure title
        if title is None:
            title = self._format_title()
        plt.title(title)
        plt.plot()
        return plot
    def hvplot(self, title=None, **kwargs):
        ''' Plot data with custom parameters using hvplot.
        Args:
            title: (optional) title of the figure (string). By default gives some basic information on the data plotted. Pass an empty string to disable it.
            **kwargs: any argument accepted by hvplot() function.
        '''
        dr = self._spym._dr
        attrs = dr.attrs
        # Set figure title
        if title is None:
            title = self._format_title()
        # Set hvplot properties
        if attrs['interpretation'] == 'spectrum':
            hvplot = dr.hvplot(**kwargs).opts(title=title,
                                              invert_axes=True)
        elif attrs['interpretation'] == 'image':
            hvplot = dr.hvplot(**kwargs).opts(title=title,
                                              cmap='afmhot',
                                              frame_width=512,
                                              frame_height=512,
                                              invert_yaxis=True,
                                              data_aspect=1)
        else:
            hvplot = dr.hvplot(**kwargs).opts(title=title)
        return hvplot
    def _format_title(self):
        ''' Provide a title from the metadata of the DataArray.

        Returns "<filename>\\n<bias> <units>, <setpoint> <units>"; the
        filename line is omitted when not present in the metadata.
        '''
        title = ""
        attrs = self._spym._dr.attrs
        if "filename" in attrs:
            title += attrs["filename"] + "\n"
        title += "{:.2f} {}, {:.2f} {}".format(
            attrs["bias"],
            attrs["bias_units"],
            attrs["setpoint"],
            attrs["setpoint_units"])
        return title
    def _fit_figure_to_image(self, figure, image, axis=None):
        ''' Calculate figure size so that plot (matplotlib axis) pixel size is equal to the image size.
        Args:
            figure: matplotlib Figure instance.
            image: 2d numpy array.
            axis: axis of the figure to adapt, if None takes the first (or only) axis.
        Returns:
            adapted width and height of the figure in inches.
        '''
        if axis is None:
            axis = figure.axes[0]
        bounds = axis.bbox.bounds
        # NOTE(review): numpy's shape is (rows, cols) = (height, width);
        # the assignment below treats shape[0] as width -- confirm for
        # non-square images.
        im_width, im_height = image.shape
        width_scale = im_width/bounds[2]
        height_scale = im_height/bounds[3]
        fig_width, fig_height = figure.get_size_inches()
        return fig_width*width_scale, fig_height*height_scale
import sys
import hashlib
import logging
logger = logging.getLogger(__name__)
class Hasher(object):
    """Create consistent SHA1/SHA256 hashes of files or byte strings.

    Note: the hash state is cumulative across calls to hash_from_bytes()
    and hash_from_path(); call reset_hashes() to start a fresh digest.
    """
    def __init__(self):
        super(Hasher, self).__init__()
        # One running hash object per supported algorithm name.
        self.hashes = {
            'SHA1': hashlib.sha1(),
            'SHA256': hashlib.sha256()
        }
    def _supported_algorithm(self, algorithm):
        """Return True if *algorithm* is a supported algorithm name."""
        # Idiom fix: direct membership test instead of if/return True/False.
        return algorithm in self.hashes
    def reset_hashes(self):
        """Reset all hashes to their initial (empty) state."""
        self.__init__()
    def hash_from_path(self, file_path, algorithm='SHA1'):
        """Hash the contents of the file at *file_path*.

        Returns the current hex digest string, or None if the algorithm is
        unsupported, file_path is None, or the file cannot be read.
        """
        if not self._supported_algorithm(algorithm):
            # Lazy %-style args avoid formatting when the level is disabled.
            logger.error("%s is an unsupported algorithm. Options: %s",
                         algorithm, list(self.hashes.keys()))
            return None
        if file_path is None:
            logger.error("No file_path provided ...")
            return None
        try:
            with open(file_path, 'rb') as f:
                data = f.read()
        except Exception as e:
            logger.error("Unable to process file: %s", e)
            return None
        return self.hash_from_bytes(data, algorithm)
    def hash_from_bytes(self, data_bytes, algorithm='SHA1'):
        """Update the running hash with *data_bytes*; return the hex digest.

        Intended usage is to pass the bytes of a file. For a local file the
        process would be similar to what's done in hash_from_path():

            my_hasher = Hasher()
            with open('/path/to/file.ext', 'rb') as f:
                data = f.read()
            hash_val = my_hasher.hash_from_bytes(data)

        Returns None if *algorithm* is unsupported.
        """
        if not self._supported_algorithm(algorithm):
            logger.error("%s is an unsupported algorithm. Options: %s",
                         algorithm, list(self.hashes.keys()))
            return None
        self.hashes[algorithm].update(data_bytes)
        return self.hashes[algorithm].hexdigest()
if __name__ == '__main__':
    # Convenience for testing this on CLI. Not intended for actual use.
    # Usage: python hashing.py <file_path> [algorithm]
    try:
        my_hasher = Hasher()
        file_path = sys.argv[1]
        try:
            algo = sys.argv[2]
        except IndexError:
            # No algorithm supplied on the command line; default to SHA1.
            # (Previously caught Exception, which is broader than needed.)
            algo = 'SHA1'
        hash_val_1 = my_hasher.hash_from_path(file_path, algo)
        print("{0}: {1}".format(algo, hash_val_1))
        my_hasher.reset_hashes()
    except Exception as e:
        logger.exception("Error ...{0}".format(e))
# Rho ML
__Rho ML__ provides a _thin_, _thoughtful_, and _proven_ interface for
putting Data Science to work in production and enterprise-grade
environments. [Rho](https://rho.ai "Rho AI") uses __Rho ML__ for
workloads as varied as _NLP_, to _Computer Vision_ to
_Decision Modeling_ for professional racing. We see __Rho ML__ as
having a few key benefits.
#. __Any Model__ (_we won't dictate your libraries of choice!_)
* Any Model with a Python interface
* [PyTorch](https://pytorch.org/ "PyTorch")
* [Tensorflow](https://www.tensorflow.org/ "Tensorflow")
* [spaCy](https://spacy.io/ "spaCy")
* [Keras](https://keras.io/ "Keras")
* [insert your preferred library here]
* ... or some other custom Python code
#. __Out-of-the-Box Versioning__ (_yet customizable_)
* Versioning is a common blind-spot in data science as compared to the
de facto standard of [Semver](https://semver.org/ "Semver") in much of
software engineering and modern CI/CD workflows.
* __Rho ML__ provides this _out-of-the-box_, no strings attached.
* That said, we get that not all versioning is created equal, and provide
easy access to customizing version patterns.
#. __Default Serialization and Deserialization__ (_yet customizable_)
* Storing models for production workloads is non-trivial.
* Frequently, libraries (including those listed above) provide their
"hello world" and "quickstart" guides expecting you're on a local
development machine with a "save to disk" type interface. __Rho ML__
provides instant-access to easy, production-grade, methods to
store and retrieve models.
* The default option may not work, so __Rho ML__ provides easy
modification as necessary for advanced use cases.
#. __Cloud and Cache__ (_speed versus cost_)
* A "model" is not created equal with respect to production workloads.
Storing and retrieving from the cloud versus locally (cached locally)
makes a tremendous difference in speed and cost when dealing with models
that often exceed 10s of megabytes / gigabytes.
* __Rho ML__ provides a sensible default for managing storage in both
scenarios.
#. __Shameless Plug__ (_enterprise deployments_)
* Every __Rho ML__ model has instant compatibilty with
[Sermos](https://sermos.ai "Sermos") for enterprise-scale deployments
that need 10s to 10s of millions of transactions, scheduled tasks,
models behind public APIs, or complex
[pipelines](https://en.wikipedia.org/wiki/Directed_acyclic_graph "DAGs").
Rho ML is extremely easy to use and has minimal external dependencies,
chief among them [attrs](https://www.attrs.org/en/stable/ "attrs").
## Install
Install this software? Easy:
pip install rho-ml
## Quickstart Guide
Here is a trivial example of a rules-based "model" implemented as a `RhoModel`,
including serialization.
from rho_ml import RhoModel, ValidationFailedError, Version, LocalModelStorage
class MyModel(RhoModel):
def predict_logic(self, prediction_data):
""" Logic for running the model on some prediction data """
return prediction_data * 5
def validate_prediction_input(self, prediction_data):
""" Ensure data has an appropriate type before prediction """
if not (isinstance(prediction_data, int)
or isinstance(prediction_data, float)):
raise ValidationFailedError("Prediction data wasn't numeric!")
def validate_prediction_output(self, data):
""" Ensure the prediction result is between 0 and 5 """
if not 0 <= data <= 5:
    raise ValidationFailedError(
        "Prediction result should always be between 0 and 5!")
some_instance = MyModel(name='some_name',
version=Version.from_string("0.0.1"))
result = some_instance.predict(0.5, run_validation=True) # works!
result_2 = some_instance.predict(10, run_validation=True) # fails!
local_storage = LocalModelStorage(base_path='./some-folder')
local_storage.store(some_instance)
stored_key = local_storage.get_key_from_pattern(model_name='some_name',
version_pattern='0.*.*')
deserialized = local_storage.retrieve(key=stored_key)
## Core Concepts
### Rho Model
The `RhoModel` base class is the central concept in `RhoML`. A `RhoModel`
is a basic wrapper that enforces what we believe are the central tasks a
machine learning model should accomplish, provides a consistent interface
to 'all models', and provides the scaffolding for writing models that have
validated input and output.
TODO: Add additional detail on each component of a RhoModel and provide
several examples.
### Model Locator
A "model locator" in Rho ML is the combination of the _model name_, the
_model version_, and a _delimiter_ between them.
This is important for storage and retrieval of models as they evolve over
time. Using the default settings is highly recommended but each component is
configurable.
By default:
* Model names can be any alphanumeric character
* Delimiter is "_" (the underscore character)
* Model versions must adhere to [semver versioning](https://semver.org/)
e.g. `MyModel_0.1.0`
### Serialization
TODO: Describe concept of serializing/deserializing.
## Testing
To run the tests you need to have `pyenv` running on your system, along with
all python versions listed in `tox.ini` under `envlist`.
* Install the required Python versions noted in `tox.ini`, e.g.
pyenv install 3.7.4
Install the testing requirements locally.
pip install -e .[test]
Now, run the tests:
tox
| /rho-ml-0.12.1.tar.gz/rho-ml-0.12.1/README.md | 0.70202 | 0.752354 | README.md | pypi |
import glob
import logging
import os
import tempfile
import pickle
from typing import Union
import attr
from rho_ml.model_locator import generate_model_locator, \
find_highest_compatible_version
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True, frozen=True)
class StoredModel(object):
    """ Immutable container for the serialized bytes of a RhoModel.

    This is the generic storage format for `RhoModel` instances: the
    model is pickled into `model_bytes`, and the `StoredModel` itself is
    pickled for persistence. When pickle is not appropriate for a given
    model, subclass `StoredModel` and override load_model().

    Attributes:
        model_bytes (bytes): serialized byte string of some `RhoModel`.
    """
    model_bytes: bytes = attr.ib(repr=False)

    @classmethod
    def from_pickle(cls, stored_bytes: bytes) -> 'StoredModel':
        """ Rebuild a StoredModel from its pickled bytes.

        Inverse of :meth:`~StoredModel.to_pickle`.

        Args:
            stored_bytes (bytes): the serialized `StoredModel`.

        Returns:
            StoredModel: the re-instantiated wrapper.
        """
        return pickle.loads(stored_bytes)

    def to_pickle(self) -> bytes:
        """ Pickle this `StoredModel` wrapper (not the underlying
        RhoModel, which was already serialized into `model_bytes`).

        Returns:
            bytes: pickled bytes of the `StoredModel` instance.

        Examples::

            serialized_stored_model = my_stored_model.to_pickle()
        """
        return pickle.dumps(self, protocol=4)

    def load_model(self) -> 'RhoModel':
        """ Deserialize and return the wrapped RhoModel instance.

        Override this method when the underlying RhoModel needs
        specialized deserialization logic.

        Returns:
            `RhoModel`: the deserialized model instance.

        Examples::

            my_stored_model = StoredModel.from_pickle(some_stored_bytes)
            my_rho_model = my_stored_model.load_model()
        """
        return pickle.loads(self.model_bytes)
class PipelineStorageConfig(object):
    """ Abstract base for utilities that persist and fetch RhoModel objects.

    Concrete backends (e.g. `LocalModelStorage`) override store(),
    retrieve() and get_key_from_pattern(); every method here raises
    NotImplementedError.
    """

    def store(self, model: 'RhoModel'):
        """ Persist a RhoModel to the backing data store.

        Implementations build a StoredModel and write its pickled bytes
        to the appropriate store.

        Args:
            model (`RhoModel`): the model instance to persist.

        Returns:
            NoneType
        """
        raise NotImplementedError

    def retrieve(self, *args, **kwargs) -> 'RhoModel':
        """ Fetch the pickled StoredModel bytes from the data store and
        use them to re-instantiate the underlying RhoModel. """
        raise NotImplementedError

    def get_key_from_pattern(self, model_name: str,
                             version_pattern: str) -> str:
        """ Resolve a name plus version pattern ('name-1.2.*', etc.) to
        the key of the highest matching version, or None when nothing
        matches.

        Args:
            model_name (str): name of the `RhoModel` to search for.
            version_pattern (str): version (wildcards allowed) to match.

        Returns:
            str: key of the highest matching artifact in the data store.

        Examples::

            model_key = storage.get_key_from_pattern('my_model_name', '0.*.*')
            assert model_key == 'my_model_name_0.1.2'
        """
        raise NotImplementedError
@attr.s(auto_attribs=True)
class LocalModelStorage(PipelineStorageConfig):
    """ Stores and retrieves `RhoModel` artifacts on the local filesystem.

    Args:
        base_path (str): directory used to store, search, and retrieve
            models. Defaults to the system temp directory when omitted.
    """
    base_path: str = attr.ib(default=None)

    def __attrs_post_init__(self):
        """ Fall back to the system temp directory when no base_path set. """
        if not self.base_path:
            self.base_path = tempfile.gettempdir()

    def store(self, model: 'RhoModel'):
        """ Serialize `model` (via its StoredModel wrapper) and write the
        pickled bytes under `self.base_path`. """
        locator = generate_model_locator(
            model_name=model.name, model_version=model.version_string)
        target_path = os.path.join(self.base_path, locator)
        payload = model.build_stored_model().to_pickle()
        with open(target_path, 'wb') as f:
            f.write(payload)

    def retrieve(self, key: str) -> Union['RhoModel', None]:
        """ Load the model stored under `key`, or None when the file is
        missing. """
        target_path = os.path.join(self.base_path, key)
        try:
            with open(target_path, 'rb') as f:
                raw = f.read()
        except FileNotFoundError:
            return None
        return StoredModel.from_pickle(stored_bytes=raw).load_model()

    def get_key_from_pattern(self, model_name: str,
                             version_pattern: str) -> Union[str, None]:
        """ Search `self.base_path` for the highest-versioned artifact
        matching the model name and version pattern. """
        glob_pattern = generate_model_locator(model_name=model_name,
                                              model_version=version_pattern)
        candidates = glob.glob(os.path.join(self.base_path, glob_pattern))
        return find_highest_compatible_version(
            search_version=version_pattern, search_list=candidates)
import pickle
from typing import Any, Callable, Optional, Union, Dict
import attr
from rho_ml import Version, StoredModel
class ValidationFailedError(Exception):
    """ Raised when validation fails for either training or prediction
    input/output in a RhoModel (see the validate_* hooks and
    get_validated_result). """
    pass
@attr.s(auto_attribs=True)
class RhoModel(object):
    """ The most basic wrapper for an ML or other model.

    Specific models should subclass RhoModel and implement whatever
    appropriate subset of the methods defined here (train_logic,
    predict_logic, and the validate_* hooks).

    Args:
        version (Version): model version. A dict of the form
            {'major': 0, 'minor': 1, 'patch': 0} is also accepted and is
            converted to a Version in __attrs_post_init__.
        name (str, optional): model name, defaults to the `RhoModel`
            subclass name. Used by serialization utils.

    Example::

        model = MyRhoModel(version=Version.from_string("0.0.1"))
        # name is 'MyRhoModel' because no name was passed
    """
    version: Union[Version, Dict[str, int]]
    name: Optional[str] = attr.ib(kw_only=True, default=None)

    def __attrs_post_init__(self):
        """ Convert a dict version (e.g. round-tripped through
        attr.asdict) back into a Version, and default the name to the
        subclass name when none was given. """
        if isinstance(self.version, dict):
            self.version = Version(**self.version)
        if not self.name:
            self.name = str(self.__class__.__name__)

    @property
    def version_string(self) -> str:
        """ Convenience property: the version as a string instead of a
        Version object. """
        return self.version.to_string()

    def validate_training_input(self, data: Any) -> None:
        """ Validate data passed to self.train().

        Should raise a :class:`.ValidationFailedError` if validation
        fails, otherwise do nothing.

        Args:
            data: arbitrary training data.

        Returns:
            None: raises on failure, otherwise does nothing.
        """
        raise NotImplementedError

    def validate_training_output(self, data: Any) -> None:
        """ Validate data returned from self.train().

        Should raise a :class:`.ValidationFailedError` if validation
        fails, otherwise do nothing.

        Args:
            data: arbitrary training output.

        Returns:
            None: raises on failure, otherwise does nothing.
        """
        raise NotImplementedError

    def validate_prediction_input(self, data: Any) -> None:
        """ Validate data passed to self.predict().

        Should raise a :class:`.ValidationFailedError` if validation
        fails, otherwise do nothing.

        Args:
            data: arbitrary prediction input.

        Returns:
            None: raises on failure, otherwise does nothing.
        """
        raise NotImplementedError

    def validate_prediction_output(self, data: Any) -> None:
        """ Validate data returned from self.predict().

        Should raise a :class:`.ValidationFailedError` if validation
        fails, otherwise do nothing.

        Args:
            data: arbitrary prediction output.

        Returns:
            None: raises on failure, otherwise does nothing.
        """
        raise NotImplementedError

    def train_logic(self, training_data: Any, *args, **kwargs) -> Any:
        """ Override with the logic that takes training data, runs
        training, and returns relevant results (e.g. training and
        validation metrics for each epoch).

        Args:
            training_data: arbitrary training data.
        """
        raise NotImplementedError

    def train(self, training_data: Any, run_validation: bool, *args,
              **kwargs) -> Any:
        """ Run training on arbitrary data, optionally validating input
        and output with validate_training_input /
        validate_training_output.

        Extra positional and keyword arguments are forwarded to
        train_logic. (Bug fix: arguments were previously forwarded as
        ``f(keyword=..., *args)``, which raises TypeError for any
        non-empty ``*args`` because the positional arguments collide
        with the keyword-bound parameters.)
        """
        if run_validation:
            result = get_validated_result(self.train_logic,
                                          training_data,
                                          self.validate_training_input,
                                          self.validate_training_output,
                                          *args, **kwargs)
        else:
            result = self.train_logic(training_data, *args, **kwargs)
        return result

    def predict_logic(self, prediction_data: Any, *args, **kwargs) -> Any:
        """ Override with the logic that takes prediction inputs of the
        appropriate types for the model and generates outputs. """
        raise NotImplementedError

    def predict(self, prediction_data: Any, run_validation: bool, *args,
                **kwargs) -> Any:
        """ Run predictions on arbitrary data, optionally validating
        input and output.

        Extra positional and keyword arguments are forwarded to
        predict_logic (same call-form bug fix as in train()).
        """
        if run_validation:
            result = get_validated_result(self.predict_logic,
                                          prediction_data,
                                          self.validate_prediction_input,
                                          self.validate_prediction_output,
                                          *args, **kwargs)
        else:
            result = self.predict_logic(prediction_data, *args, **kwargs)
        return result

    def serialize(self) -> bytes:
        """ Serialize this model to a byte string (pickle protocol 4).

        Override if specialized serialization / deserialization is
        required. """
        return pickle.dumps(self, protocol=4)

    @classmethod
    def deserialize(cls, serialized: bytes):
        """ Instantiate a RhoModel object from a serialized byte string.

        DEPRECATED: use build_stored_model / StoredModel instead. """
        raise NotImplementedError

    def build_stored_model(self) -> StoredModel:
        """ Wrap this model's serialized bytes in a StoredModel, which
        can be used to properly re-instantiate the model later.

        Note: this *must* be implemented to use Sermos serialization
        utils. """
        return StoredModel(model_bytes=self.serialize())

    def save_to_disk(self, path_to_output: str):
        """ Write this model's serialized bytes to `path_to_output`. """
        with open(path_to_output, 'wb') as f:
            f.write(self.serialize())

    @classmethod
    def load_from_disk(cls, path_to_file: str):
        """ Load a model from `path_to_file` via cls.deserialize(). """
        with open(path_to_file, 'rb') as f:
            return cls.deserialize(f.read())
def get_validated_result(model_method: Callable, input_data: Any,
                         input_validator: Callable[[Any], None],
                         output_validator: Callable[[Any], None], *args,
                         **kwargs) -> Any:
    """ Run `model_method` on `input_data` with validation hooks applied
    before and after the call.

    Both validators are expected to raise on invalid data and return
    None otherwise; extra arguments are forwarded to `model_method`.
    """
    input_validator(input_data)
    result = model_method(input_data, *args, **kwargs)
    output_validator(result)
    return result
import logging
import re
from typing import List, Tuple, Union, Optional
import attr
logger = logging.getLogger(__name__)
# Regex building blocks for the default "{name}_{semver}" locator format.
DEFAULT_MODEL_NAME_PATTERN = r"(.*)"  # any characters, single capture group
DEFAULT_DELIMITER_PATTERN = r"\_"  # underscore between name and version
DEFAULT_VERSION_PATTERN = r"([\d\*]+\.[\d\*]+\.[\d\*]+.*)"  # semver, "*" wildcards allowed


def generate_model_locator(
        model_name: str,
        model_version: str,
        delimiter: str = "_",
        model_name_pattern: Union[str, None] = DEFAULT_MODEL_NAME_PATTERN,
        delimiter_pattern: str = DEFAULT_DELIMITER_PATTERN,
        version_pattern: str = DEFAULT_VERSION_PATTERN) -> str:
    """ Combine the model name and version into name_version format, which
    by default uses "_" as the delimiter.

    Each component is validated against the provided (or default)
    pattern; a ValueError naming the offending value is raised on any
    mismatch.
    """
    # Validate model name against pattern
    if not re.match(model_name_pattern, model_name):
        raise ValueError(f"{model_name} does not conform to "
                         f"model name pattern {model_name_pattern}")
    # Validate delimiter against pattern
    if not re.match(delimiter_pattern, delimiter):
        raise ValueError(f"{delimiter} does not conform to "
                         f"delimiter pattern {delimiter_pattern}")
    # Validate model version against pattern.
    # Bug fix: this message previously interpolated model_name, so the
    # error reported the wrong value for an invalid version.
    if not re.match(version_pattern, model_version):
        raise ValueError(f"{model_version} does not conform to "
                         f"model version pattern {version_pattern}")
    # Combine and return if all good
    return model_name + delimiter + model_version
def validate_model_version(
        model_version: str,
        version_pattern: str = DEFAULT_VERSION_PATTERN) -> str:
    """ Check that `model_version` is semver-shaped; wildcards allowed.

    Returns the version unchanged when valid, otherwise raises
    ValueError.
    """
    if re.match(r"^" + version_pattern, model_version) is None:
        raise ValueError("Invalid model_version format ({0}). Must conform "
                         "to semver with wildcards allowed. "
                         "e.g. 1.1.1, 1.*.*".format(model_version))
    return model_version
def split_model_locator(
        model_locator: str,
        model_name_pattern: Union[str, None] = DEFAULT_MODEL_NAME_PATTERN,
        delimiter_pattern: str = DEFAULT_DELIMITER_PATTERN,
        version_pattern: str = DEFAULT_VERSION_PATTERN,
        exclude_paths: bool = False
) -> Tuple[Union[str, None], Union[str, None]]:
    """ Split a full model locator ('Model_0.1.0') into its name and
    version parts, e.g. ('Model', '0.1.0').

    Passing model_name_pattern=None means `model_locator` is treated as
    a version string alone (e.g. '0.1.0'), returning (None, '0.1.0').

    exclude_paths allows the locator to be a full object path; since a
    model name can be "anything" under the default pattern, the locator
    is assumed to live after the final forward slash ('/').

    Either component is None when its pattern does not match.
    """
    version_match = re.search(version_pattern, model_locator)
    this_version = version_match.group(1) if version_match else None

    if model_name_pattern is None:
        return None, this_version

    name_match = re.search(model_name_pattern + delimiter_pattern,
                           model_locator)
    if name_match is None:
        this_model_name = None
    else:
        this_model_name = name_match.group(1)
        # Bug fix: path stripping is now guarded so it only runs when a
        # name actually matched (previously a None name could be split,
        # raising AttributeError).
        if exclude_paths:
            this_model_name = this_model_name.split('/')[-1]
    return this_model_name, this_version
def find_highest_compatible_version(
        search_version: str,
        search_list: List[str],
        model_name_pattern: Union[str, None] = DEFAULT_MODEL_NAME_PATTERN,
        delimiter_pattern: str = DEFAULT_DELIMITER_PATTERN,
        version_pattern: str = DEFAULT_VERSION_PATTERN) -> Optional[str]:
    """ Return the model locator or version of the highest compatible version.

    If search_list is a list of model locators (['model_0.1.1']) it returns
    the full locator of the highest version that matches `search_version`.
    If search_list is a list of versions only (['0.1.1']), then it does
    the same search against `search_version` and returns just the version
    number.

    The default model locator pattern is a combination of the default
    model name pattern (any valid characters ending in "_" as the delimiter
    for the beginning of the version pattern) and the default version
    pattern (semver compatible 0.0.0*). That is::

        {{model name}}_{{model version}}

    e.g.::

        MyModel_0.1.0
        my_model_0.1.0
        prefix/mymodel_0.1.0

    If a different convention is followed, you can update the default
    model_name_pattern and version_pattern. Note: any custom search patterns
    must still adhere to some baked in assumptions:

        #. The model name pattern must have a single capture group OR be
           None, which will result in this assuming it's only searching
           against a version pattern.
        #. The version pattern needs to follow semver, which is what the
           default pattern provides, so it's not recommended to change and
           it is also required to have exactly 1 capture group.

    Args:
        search_version (string): The version to search. This can include
            wildcards. E.g. "1.0.0", "2.*.*", "1.1.*"
        search_list (list): List of full model locators with model name and
            version, e.g. `['model_name_0.0.0', 'model_name_0.1.0']`,
            or a list of just version numbers, e.g. `['0.0.0', '0.1.0']`
        model_name_pattern (r string): Regex pattern describing any name
            that may be prepended to the search version. Pass as None
            to exclude a model name pattern (e.g. if your search_list is a
            list of only version nums ['0.1.1', '0.1.2']). NOTE: This
            pattern must currently consist of exactly one regex group.
        delimiter_pattern (r string): Regex pattern describing delimiter.
            Not recommended to modify this pattern.
        version_pattern (r string): Regex pattern describing version
            numbers. NOT recommended to modify this pattern, as the logic
            assumes semver, which is the default version pattern provided.

    Returns:
        string: the full key (or bare version, when search_list holds bare
        versions) carrying the highest compatible version, or None when no
        candidate matches. e.g. if search version is "0.1.*" and the
        available keys include
        ['my_name_0.1.1', 'my_name_1.0.0', 'my_name_0.1.5'],
        this will return 'my_name_0.1.5'.
    """
    search_version = validate_model_version(search_version)

    def _run_search(search_list, v_index, version_elements) -> Optional[int]:
        """ Loop through all keys to identify the highest compatible value
        of one version component. This is called sequentially for major,
        minor, patch.

        Args:
            search_list (list): List of full model locators with name and
                version, e.g. ['model_name_0.0.0', 'model_name_0.1.0'],
                or a list of just version numbers, e.g. ['0.0.0', '0.1.0']
            v_index (int): The index of the version component to find.
                0 is major, 1 is minor, 2 is patch.
            version_elements (list): Integers for the components already
                fixed by earlier passes (major first, then minor, then
                patch); e.g. [0, 1] means only candidates on the "0.1"
                line are still eligible. A None entry means the earlier
                pass failed, which disqualifies every candidate here.

        Returns:
            int: highest compatible value for this component across all
            candidate keys, or None when no candidate qualifies.
        """
        highest_compatible_num = -1  # Haven't found anything yet
        for available_version in search_list:
            model_name, model_version = split_model_locator(
                available_version,
                model_name_pattern=model_name_pattern,
                delimiter_pattern=delimiter_pattern,
                version_pattern=version_pattern)
            if model_version is None:
                continue
            # Skip this available_version if it has a version element
            # different from something we've already fixed. e.g. if we
            # already found major version 1 as the max and this is major
            # version 0, then the value of this_num doesn't matter because
            # the candidate is already incompatible. Semver means this can
            # only ever be 3 elements, so the bound is version_elements.
            incompatible_previous_version_num = False
            for i in range(len(version_elements)):
                if version_elements[i] is None \
                        or int(model_version.split(".")[i]) \
                        != version_elements[i]:
                    incompatible_previous_version_num = True
            if incompatible_previous_version_num:
                continue
            # We only compare one version num (major/minor/patch) at a time
            this_num = int(model_version.split(".")[v_index])
            # The 'search_num' can include wildcards, which means it
            # can be anything. If it's an int, then this_num must be
            # equal to the search_num.
            search_num = search_version.split(".")[v_index]
            if search_num == "*":
                if this_num > highest_compatible_num:
                    highest_compatible_num = this_num
            else:
                search_num = int(search_num)
                if this_num > highest_compatible_num \
                        and this_num == search_num:
                    highest_compatible_num = this_num
        if highest_compatible_num == -1:
            return None
        return highest_compatible_num

    # Fix major, then minor, then patch; each pass narrows the candidates
    # the next pass may consider via version_elements.
    version_elements = []
    for element_idx in range(3):
        version_elements.append(
            _run_search(search_list, element_idx, version_elements))
    if any(v is None for v in version_elements):
        msg = f"Unable to find compatible version with pattern {search_version}"
        if model_name_pattern is not None:
            msg += f" for model pattern {model_name_pattern}"
        msg += f" from available options: {search_list}"
        logger.warning(msg)
        return None
    highest_version = str(version_elements[0]) + "." \
        + str(version_elements[1]) \
        + "." + str(version_elements[2])
    # Return the whole key that has the discovered version.
    for key in search_list:
        model_name, model_version = split_model_locator(
            key,
            model_name_pattern=model_name_pattern,
            delimiter_pattern=delimiter_pattern,
            version_pattern=version_pattern)
        if model_version == highest_version:
            return key
    return None
def find_matching_model_names(
        search_pattern: str,
        available_model_locators: List[str],
        model_name_pattern: str = DEFAULT_MODEL_NAME_PATTERN,
        delimiter_pattern: str = DEFAULT_DELIMITER_PATTERN,
        version_pattern: str = DEFAULT_VERSION_PATTERN) -> List[str]:
    """ Return the model locators whose model names match search_pattern.

    By default this expects locators of the form
    {{model name}}_{{model version}}, e.g.::

        MyModel_0.1.0
        my_model_0.1.0
        prefix/mymodel_0.1.0

    If a different convention is followed, update model_name_pattern
    (it must have a single capture group).

    Args:
        search_pattern (string): The model name to look for. "*" acts as
            a wildcard consistent with the version search, e.g.
            "MyModel", "*Model", "*". All other characters are matched
            literally. (Fix: literal characters are now regex-escaped,
            so e.g. "." in a name no longer matches any character.)
        available_model_locators (list): List of full model locators with
            name and version, e.g.
            ['model_name_0.0.0', 'model_name_0.1.0', 'MyModel_0.1.0']
        model_name_pattern (r string): Regex pattern describing the name
            portion; must consist of exactly one regex group.
        delimiter_pattern (r string): Regex pattern describing the
            delimiter. Not recommended to modify.
        version_pattern (r string): Regex pattern describing version
            numbers. Not recommended to modify.

    Returns:
        List[str]: locators matching the search, e.g. if search pattern
        is "*Model" and available keys include
        ['my_name_0.1.1', 'my_name_1.0.0', 'MyModel_0.1.5'], this will
        return ['MyModel_0.1.5'].
    """
    # Build the wildcard regex once, outside the loop (previously it was
    # rebuilt identically for every locator).
    if '*' in search_pattern:
        # "*" becomes ".*"; every other character is escaped so it is
        # matched literally.
        wildcard_pattern = r''.join(
            r'.*' if char == '*' else re.escape(char)
            for char in search_pattern)
        # If the wildcard is NOT the first character, anchor the start;
        # if it is NOT the last character, anchor the end.
        if search_pattern[0] != '*':
            wildcard_pattern = r'^' + wildcard_pattern
        if search_pattern[-1] != '*':
            wildcard_pattern = wildcard_pattern + r'$'
        compiled_pattern = re.compile(wildcard_pattern)
    else:
        compiled_pattern = None  # No wildcard: exact-match comparison

    matched_locators = []
    for locator in available_model_locators:
        model_name, _ = split_model_locator(
            locator, model_name_pattern, delimiter_pattern, version_pattern)
        if model_name is None:
            continue
        if compiled_pattern is not None:
            if compiled_pattern.search(model_name):
                matched_locators.append(locator)
        elif model_name == search_pattern:
            matched_locators.append(locator)
    return matched_locators
def convert_empty_str(val: Optional[str]) -> Optional[str]:
    """ Normalize falsy strings for consistency: an empty string (or
    None) becomes None; any other value is returned unchanged. """
    return val if val else None
@attr.s(auto_attribs=True, frozen=True)
class Version(object):
    """ Standard version class for RhoModel instances.

    Immutable (frozen) attrs class; supports comparison (<, >, ==, etc.)
    through the attrs-generated ordering methods, which compare
    (major, minor, patch, label) in field order.
    """
    major: int
    minor: int
    patch: int
    # Empty-string labels are normalized to None by the converter.
    label: Optional[str] = attr.ib(converter=convert_empty_str, default=None)

    # Class-level regex building blocks. These carry no annotations, so
    # auto_attribs does NOT turn them into attrs fields.
    separator_pattern = r'[\-\_\+\.]'  # Separate version from label
    label_pattern = r'((?P<separator>' \
        + separator_pattern \
        + r')(?P<label>.+))?'  # Add grp names to label & separator
    version_pattern = r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)' \
        + label_pattern  # version groups & label groups combined

    def __str__(self):
        return self.to_string()

    @property
    def label_no_separator(self) -> Optional[str]:
        """ Convenience property providing the label without its leading
        separator, e.g.::

            label == '-alpha.1'
            label_no_separator == 'alpha.1'
        """
        if self.label:
            match = re.match(self.label_pattern, self.label)
            if match is not None:
                return match.group('label')
        return None

    @staticmethod
    def _get_to_increment(to_increment: str) -> str:
        """ Validate a user-supplied to_increment value; default to
        'patch' when None. Raises ValueError for anything outside
        ('major', 'minor', 'patch'). """
        if to_increment is None:
            to_increment = 'patch'
        allowable_options = ('major', 'minor', 'patch')
        if to_increment not in allowable_options:
            raise ValueError("Value of `to_increment` must be one of "
                             f"{allowable_options}")
        return to_increment

    @staticmethod
    def _get_new_version(version: 'Version', to_increment: str):
        """ Return the new (major, minor, patch) tuple after bumping the
        component named by to_increment; lower components reset to 0. """
        # Validate the to_increment val
        to_increment = version._get_to_increment(to_increment)
        if to_increment == 'major':
            major = version.major + 1
            minor = 0
            patch = 0
        elif to_increment == 'minor':
            major = version.major
            minor = version.minor + 1
            patch = 0
        elif to_increment == 'patch':
            major = version.major
            minor = version.minor
            patch = version.patch + 1
        else:
            # Defensive branch: unreachable in practice, since
            # _get_to_increment above already rejected invalid values.
            raise ValueError(
                f"{to_increment} is not a valid version parameter "
                f"to bump! Should be one of 'major', 'minor', "
                f"or 'patch'")
        return major, minor, patch

    @classmethod
    def increment_version(cls,
                          current_version: 'Version',
                          to_increment: Union[str, None] = None) -> 'Version':
        """ Given an old Version, increment the value of the version's
        `major`, `minor`, or `patch` value based on `to_increment`
        (defaults to 'patch' when None).

        This always drops any label that may exist on the current
        version.
        """
        major, minor, patch = \
            cls._get_new_version(current_version, to_increment)
        return cls(major=major, minor=minor, patch=patch, label=None)

    @classmethod
    def from_string(cls, val: str) -> 'Version':
        """ Create a Version object from a string of the form
        'major.minor.patch-label', where the label is optional. If it is
        present, it must be separated from the rest of the string by
        either a hyphen (-), underscore (_), plus (+), or period (.).

        semver has specific guidelines around usage of "+_docs_build"
        and "-prerelease", but this level of granularity is not
        currently provided in Rho ML. It is major.minor.patch(optional
        additional of your choosing), where the optional additional part
        must start with one of the valid separators listed above.

        NOTE(review): when `val` does not match version_pattern,
        re.match returns None and the .group() calls below raise
        AttributeError rather than a descriptive error — confirm whether
        callers rely on that behavior before changing it.
        """
        match = re.match(cls.version_pattern, val)
        label = None
        if match.group('separator') is not None \
                and match.group('label') is not None:
            label = match.group('separator') + match.group('label')
        return cls(major=int(match.group('major')),
                   minor=int(match.group('minor')),
                   patch=int(match.group('patch')),
                   label=label)

    def to_string(self) -> str:
        """ Render the version as major.minor.patch followed by the
        label (which already carries its separator) when one exists. """
        version_str = "{0}.{1}.{2}".format(self.major, self.minor, self.patch)
        if self.label:
            version_str += self.label
        return version_str
import re
import string
from typing import List, Set
from rho_pdf_diff.text_diff import DiffFilter, Diff
class CapitalizationFilter(DiffFilter):
    """Mark paired change diffs as irrelevant when the two sides differ
    only by capitalization (and surrounding whitespace)."""

    @staticmethod
    def remove_caps_only_diffs(diffs: List[Diff]) -> List[Diff]:
        """Flag each diff whose text equals its counterpart ignoring case."""
        marked = []
        for diff in diffs:
            caps_only = (diff.text.strip().lower()
                         == diff.corresponding_text.strip().lower())
            if caps_only:
                diff.is_relevant = False
            marked.append(diff)
        return marked

    def run_filter(self, diff_result: 'DiffResult'):
        """Apply the capitalization rule to both sides of paired changes."""
        diff_result.change_insertion_diffs = self.remove_caps_only_diffs(
            diffs=diff_result.change_insertion_diffs)
        diff_result.change_deletion_diffs = self.remove_caps_only_diffs(
            diffs=diff_result.change_deletion_diffs)
class PunctuationFilter(DiffFilter):
    """Mark solo diffs (insertions/deletions without a counterpart) as
    irrelevant when their text contains nothing but punctuation."""

    @staticmethod
    def remove_punct_only_diffs(diffs: List[Diff]) -> List[Diff]:
        """Flag diffs whose stripped text is entirely punctuation
        (an empty stripped text also counts, matching all() on empty)."""
        marked = []
        for diff in diffs:
            punct_only = all(ch in string.punctuation
                             for ch in diff.text.strip())
            if punct_only:
                diff.is_relevant = False
            marked.append(diff)
        return marked

    def run_filter(self, diff_result: 'DiffResult'):
        """Apply the punctuation rule to both solo diff buckets."""
        diff_result.solo_insertion_diffs = self.remove_punct_only_diffs(
            diffs=diff_result.solo_insertion_diffs)
        diff_result.solo_deletion_diffs = self.remove_punct_only_diffs(
            diffs=diff_result.solo_deletion_diffs)
class RegexFilter(DiffFilter):
    """Mark diffs (solo and change) as irrelevant when their text is fully
    matched by ``remove_regex`` — by default, no alphanumeric characters."""
    remove_regex: str = r'[^A-Za-z0-9]+'

    def remove_regex_matches(self, diffs: List[Diff]) -> List[Diff]:
        """Flag diffs whose stripped text fully matches the removal regex."""
        marked = []
        for diff in diffs:
            if re.fullmatch(self.remove_regex, diff.text.strip()):
                diff.is_relevant = False
            marked.append(diff)
        return marked

    def run_filter(self, diff_result: 'DiffResult'):
        """Apply the regex rule to all four diff buckets."""
        for bucket in ('solo_insertion_diffs', 'solo_deletion_diffs',
                       'change_insertion_diffs', 'change_deletion_diffs'):
            setattr(diff_result, bucket,
                    self.remove_regex_matches(
                        diffs=getattr(diff_result, bucket)))
class SameTextFilter(DiffFilter):
    """Mark insertions whose normalized text also appears among the
    deletions anywhere in the document (and vice versa) as irrelevant."""
    # todo: this may be over-aggressive removing any insertion that is also a
    #   deletion anywhere in the doc. It may be more appropriate to only
    #   consider diffs within some context window as being equivalent.
    min_char_length: int = 2  # only dedupe diffs at least this long

    @staticmethod
    def get_diff_text(diff: Diff) -> str:
        """ Override this if removing case sensitivity, stripping whitespace,
        and standardizing non-alphanumeric characters are not desired. """
        def remove_non_alphanumeric_ends(text: str) -> str:
            # Trim leading and trailing runs of non-alphanumeric characters.
            first_removed = re.sub(r'^[^A-Za-z0-9]+', '', text)
            return re.sub(r'[^A-Za-z0-9]+$', '', first_removed)

        def standardize_non_alphanumeric_chars(text: str) -> str:
            """ Replace any non-alphanumeric char w/ aerial tramway """
            # BUG FIX: '\u1f6a1' is a 4-digit escape (U+1F6A) followed by a
            # literal '1' — characters beyond the BMP need the 8-digit \U
            # escape to actually produce the aerial tramway (U+1F6A1).
            return re.sub(r'[^A-Za-z0-9]', '\U0001F6A1', text)

        return standardize_non_alphanumeric_chars(
            remove_non_alphanumeric_ends(diff.text.lower().strip()))

    def mark_duplicate_diffs(self,
                             diffs: List[Diff],
                             compare_texts: Set[str]) -> List[Diff]:
        """Flag diffs whose normalized text appears in ``compare_texts`` and
        is at least ``min_char_length`` characters long."""
        def mark_diff(diff: Diff) -> Diff:
            diff_text = self.get_diff_text(diff)
            if (diff_text in compare_texts) and (
                    len(diff_text) >= self.min_char_length):
                diff.is_relevant = False
            return diff
        return [mark_diff(diff) for diff in diffs]

    def run_filter(self, diff_result: 'DiffResult'):
        """Cross-compare insertions against deletions (and vice versa)."""
        insertion_texts = {self.get_diff_text(diff) for diff
                           in diff_result.insertion_diffs}
        deletion_texts = {self.get_diff_text(diff) for diff
                          in diff_result.deletion_diffs}
        diff_result.solo_insertion_diffs = self.mark_duplicate_diffs(
            diffs=diff_result.solo_insertion_diffs,
            compare_texts=deletion_texts)
        diff_result.change_insertion_diffs = self.mark_duplicate_diffs(
            diffs=diff_result.change_insertion_diffs,
            compare_texts=deletion_texts)
        diff_result.solo_deletion_diffs = self.mark_duplicate_diffs(
            diffs=diff_result.solo_deletion_diffs,
            compare_texts=insertion_texts)
        diff_result.change_deletion_diffs = self.mark_duplicate_diffs(
            diffs=diff_result.change_deletion_diffs,
            compare_texts=insertion_texts)
import collections
import difflib
import itertools
import logging
import random
from typing import List, Tuple, Dict, Optional, Set, Sequence, Counter
import attr
import math
import numpy
from rho_pdf_diff.document import Page
logger = logging.getLogger(__name__)
@attr.s(auto_attribs=True)
class PairSimilarityCache(object):
    """ A class to compute and cache similarity between (old, new) page pairs
    Note: pages are referenced by page number (1-indexed) """
    old_doc_pages: List[Page]
    new_doc_pages: List[Page]
    score_cache: Dict[Tuple[int, int], float] = attr.ib(init=False)

    def __attrs_post_init__(self):
        # Each instance needs its own (mutable) cache.
        self.score_cache = {}

    def _compute_similarity(self, old_page_num: int,
                            new_page_num: int) -> float:
        """ Similarity between two pages.  Dispatches to Jaccard by default;
        override to use a different metric. """
        return self._compute_jaccard(old_page_num, new_page_num)

    def _compute_jaccard(self, old_page_num: int, new_page_num: int) -> float:
        """ Jaccard similarity of the (stripped) word sets of two pages.
        Either page number being None (a virtual pad page) scores 0. """
        if None in (old_page_num, new_page_num):
            return 0.0
        old_page = self.old_doc_pages[old_page_num - 1]
        new_page = self.new_doc_pages[new_page_num - 1]
        old_words = {word.stripped_text for word in old_page.words}
        new_words = {word.stripped_text for word in new_page.words}
        union = old_words.union(new_words)
        if not union:
            return 0.0  # both pages are empty
        return len(old_words.intersection(new_words)) / len(union)

    def _compute_ratio(self, old_page_num: int, new_page_num: int) -> float:
        """ Symmetrized difflib SequenceMatcher ratio over the pages' word
        lists.  Either page number being None scores 0. """
        if None in (old_page_num, new_page_num):
            return 0.0
        old_page = self.old_doc_pages[old_page_num - 1]
        new_page = self.new_doc_pages[new_page_num - 1]
        old_words = [word.stripped_text for word in old_page.words]
        new_words = [word.stripped_text for word in new_page.words]
        # BUG FIX: previously returned `sm.ratio()` where `sm` was never
        # defined (NameError) and the averaged `result` was discarded.
        return (difflib.SequenceMatcher(None, old_words, new_words).ratio()
                + difflib.SequenceMatcher(None, new_words, old_words).ratio()
                ) / 2

    def get_similarity_score(self, old_page_num: int,
                             new_page_num: int) -> float:
        """ Cached similarity lookup; computes and stores on first request.

        BUG FIX: the old ``dict.get(key, self._compute_similarity(...))``
        form evaluated the default eagerly, recomputing the similarity on
        every call and defeating the cache entirely. """
        cache_key = (old_page_num, new_page_num)
        if cache_key not in self.score_cache:
            self.score_cache[cache_key] = self._compute_similarity(
                old_page_num, new_page_num)
        return self.score_cache[cache_key]
def hash_page_pairs_sequence(pairs: Sequence[Tuple[int, int]]) -> int:
    """Stable hash of a sequence of (old, new) page-number pairs."""
    as_tuple = tuple(pairs)
    return hash(as_tuple)
@attr.s(auto_attribs=True, frozen=True)
class PageSequenceScore(object):
    """Immutable candidate pairing of page numbers plus its total
    similarity score.  Pads appear as None in either slot of a pair."""
    page_number_pairs: Sequence[Tuple[int, int]] = attr.ib(converter=tuple)
    similarity_score: float

    @property
    def pad_locations(self) -> Tuple[List[int], List[int]]:
        """ Return indices (0-indexed) of None elements of whichever sequence
        has them """
        old_pads = []
        new_pads = []
        for idx, (old_num, new_num) in enumerate(self.page_number_pairs):
            if old_num is None:
                old_pads.append(idx)
            if new_num is None:
                new_pads.append(idx)
        return (old_pads, new_pads)
@attr.s(auto_attribs=True)
class PageAlignment(object):
    """ Align the pages of an old and a new document.

    Virtual "pad" pages (None entries) are inserted into both page-number
    sequences, and their positions are optimized by simulated annealing so
    that the summed page-pair similarity is maximal.  After calling
    :meth:`set_alignment`, ``old_doc_insert_after`` / ``new_doc_insert_after``
    count how many blank pages to insert after each (1-indexed) page number;
    key 0 means "before the first page". """
    old_pages: List[Page]
    new_pages: List[Page]
    minimum_virtual_pads: int = 1
    pair_scores_cache: PairSimilarityCache = attr.ib(init=False, repr=False)
    sequences_tried_cache: Set[PageSequenceScore] = attr.ib(init=False,
                                                            repr=False)
    old_doc_insert_after: Counter[int] = attr.ib(init=False)
    new_doc_insert_after: Counter[int] = attr.ib(init=False)
    top_candidate: PageSequenceScore = attr.ib(init=False)
    num_virtual_pads: int = attr.ib(init=False)
    # The following are parameters for the optimization of similarity score.
    # The schedule is calibrated so the first step accepts a worse state 98%
    # of the time.  'annealing_iterations' controls how long this runs for;
    # it is set short, and can be raised to ~5000 while keeping the timing
    # under a few minutes.
    first_step_acceptance_probability: float = 0.98
    tmin: float = 1e-2
    annealing_iterations: int = 2500

    def __attrs_post_init__(self):
        self.pair_scores_cache = PairSimilarityCache(
            old_doc_pages=self.old_pages, new_doc_pages=self.new_pages)
        self.sequences_tried_cache = set()
        self.old_doc_insert_after = collections.Counter()
        self.new_doc_insert_after = collections.Counter()

    @property
    def doc_to_pad(self):
        """ Which document is shorter and therefore needs padding:
        'old', 'new', or 'neither'. """
        if len(self.old_pages) < len(self.new_pages):
            return 'old'
        if len(self.new_pages) < len(self.old_pages):
            return 'new'
        return 'neither'

    @property
    def base_sequence(self):
        """ Starting sequence: enough pads prepended to both docs to cover
        the length difference, then pages zipped in order (zip_longest fills
        the shorter column with None).  Sets ``num_virtual_pads`` as a side
        effect. """
        def get_page_numbers(pages: List[Page]) -> List[int]:
            return [page.page_number for page in pages]

        self.num_virtual_pads = max(
            self.minimum_virtual_pads,
            abs(len(self.old_pages) - len(self.new_pages)))
        virtual_pads = [None] * self.num_virtual_pads
        old_page_nums = virtual_pads + get_page_numbers(self.old_pages)
        new_page_nums = virtual_pads + get_page_numbers(self.new_pages)
        return list(itertools.zip_longest(old_page_nums, new_page_nums))

    def score_page_pairs(self, page_pairs: List[Tuple[int, int]]) -> float:
        """ Total similarity of a pairing (pads score 0 against anything). """
        return sum(self.pair_scores_cache.get_similarity_score(old, new)
                   for old, new in page_pairs)

    def generate_modified_candidate(self,
                                    existing_candidate: PageSequenceScore) \
            -> PageSequenceScore:
        """ Produce a neighbouring candidate by swapping one randomly chosen
        pad with one randomly chosen page in either the old or new column.

        BUG FIX: previously nothing was returned when the generated sequence
        had already been seen, so the caller crashed dereferencing None; the
        candidate is now always returned. """
        def generate_token_sequence(which_doc: str) -> List[str]:
            # Encode the chosen document's column as 'pad'/'page' tokens.
            page_pair_column = 1 if which_doc == 'new' else 0
            pad_idxs = set(existing_candidate.pad_locations[page_pair_column])
            return ['pad' if i in pad_idxs else 'page'
                    for i in range(len(existing_candidate.page_number_pairs))]

        def rebuild_page_sequence(tokens: List[str]) -> List[Optional[int]]:
            # Turn tokens back into ascending page numbers; pads become None.
            page_sequence = []
            page_num_counter = 1
            for token in tokens:
                if token == 'page':
                    page_sequence.append(page_num_counter)
                    page_num_counter += 1
                else:
                    page_sequence.append(None)
            return page_sequence

        def rebuild_page_pairs(which_doc: str,
                               moved_pages: List[Optional[int]]) \
                -> List[Tuple[int, int]]:
            # Re-zip the moved column with the untouched one.
            unchanged_column = 1 if which_doc == 'old' else 0
            unchanged_doc_pages = [
                pair[unchanged_column]
                for pair in existing_candidate.page_number_pairs
            ]
            if which_doc == 'old':
                return list(zip(moved_pages, unchanged_doc_pages))
            return list(zip(unchanged_doc_pages, moved_pages))

        def pad_move(tokens: List[str]) -> List[str]:
            """Implements an atomic 'move' for the simulated annealing
            optimization: a single pairwise swap between a pad and a page."""
            # find and select a pad, page pair at random to swap
            pad_idxs = [i for i, tok in enumerate(tokens) if tok == 'pad']
            swapped_pad_idx = random.sample(pad_idxs, 1)[0]
            swapped_page_idx = random.randint(0, len(tokens) - 1)
            while swapped_page_idx in pad_idxs:
                swapped_page_idx = random.randint(0, len(tokens) - 1)
            # BUG FIX: a plain tuple swap.  The previous pop/insert sequence
            # only performed a correct swap when the pad index preceded the
            # page index; otherwise it scrambled neighbouring tokens.
            tokens[swapped_pad_idx], tokens[swapped_page_idx] = \
                tokens[swapped_page_idx], tokens[swapped_pad_idx]
            return tokens

        new_or_old = random.choice(('new', 'old'))
        # Todo: I'm not sure this is the best way to do this. This works ok,
        # but it's fairly nondeterministic on long documents.
        sequence_tokens = generate_token_sequence(new_or_old)
        moved_tokens = pad_move(sequence_tokens)
        moved_page_sequence = rebuild_page_sequence(tokens=moved_tokens)
        page_pairs = rebuild_page_pairs(new_or_old,
                                        moved_pages=moved_page_sequence)
        similarity_score = self.score_page_pairs(page_pairs=page_pairs)
        next_result = PageSequenceScore(page_number_pairs=page_pairs,
                                        similarity_score=similarity_score)
        # Adding an already-seen member to a set is a harmless no-op, so the
        # candidate can be recorded and returned unconditionally.
        self.sequences_tried_cache.add(next_result)
        return next_result

    def initialize_search(self):
        """ Seed the search with the padded base sequence and its score. """
        base_pairs_sequence = self.base_sequence
        score = self.score_page_pairs(page_pairs=base_pairs_sequence)
        first_result = PageSequenceScore(page_number_pairs=base_pairs_sequence,
                                         similarity_score=score)
        self.top_candidate = first_result

    def build_insert_after_counts(self):
        """ Gets the number of pages to insert after each page in each doc,
        for the current maximal scoring sequence in the search cache.
        Drops (None, None) pairs and rescores the cleaned sequence. """
        cleaned_page_number_pairs = [
            x for x in self.top_candidate.page_number_pairs
            if x != (None, None)
        ]
        score = self.score_page_pairs(page_pairs=cleaned_page_number_pairs)
        self.top_candidate = PageSequenceScore(
            page_number_pairs=cleaned_page_number_pairs,
            similarity_score=score)
        current_old_page = 0
        current_new_page = 0
        for old_page_num, new_page_num in cleaned_page_number_pairs:
            if isinstance(old_page_num, int):
                current_old_page = old_page_num
            else:  # old page is None: pad goes after the last real old page
                self.old_doc_insert_after.update([current_old_page])
            if isinstance(new_page_num, int):
                current_new_page = new_page_num
            else:  # new page is None: pad goes after the last real new page
                self.new_doc_insert_after.update([current_new_page])

    def set_alignment(self):
        """ Run the simulated-annealing search and populate the insert-after
        counters from the best pairing found. """
        logger.debug("Beginning page alignment!")
        self.initialize_search()
        current_candidate = self.top_candidate
        page_difference = abs(len(self.old_pages) - len(self.new_pages))
        if page_difference >= 50:
            # Larger length differences mean a much bigger search space, so
            # scale up the iteration budget proportionally.
            self.annealing_iterations = 2 * int(self.annealing_iterations *
                                                (page_difference / 50))
            # BUG FIX: the old message claimed the *cooling rate* changed;
            # it is the iteration count that is scaled here.
            logger.debug(
                "Document length difference exceeds 50 pages, annealing "
                "iterations increased to {}!".format(
                    self.annealing_iterations))
        if self.doc_to_pad != 'neither':
            # Calibrate tmax so that the very first (sampled) move would be
            # accepted with probability first_step_acceptance_probability.
            initial_objective_differential = abs(
                self.generate_modified_candidate(
                    self.top_candidate).similarity_score
                - self.top_candidate.similarity_score)
            tmax = (-0.5 * initial_objective_differential
                    / math.log(self.first_step_acceptance_probability))
            temperature_schedule = numpy.logspace(
                numpy.log10(tmax),
                numpy.log10(self.tmin),
                num=self.annealing_iterations)
            for i, t in enumerate(temperature_schedule):
                test_candidate = self.generate_modified_candidate(
                    current_candidate)
                # Metropolis acceptance: always take improvements, sometimes
                # take regressions (more often at high temperature).
                if boltzmann_weight(current_candidate, test_candidate,
                                    t) > random.uniform(0, 1):
                    current_candidate = test_candidate
                if i % 100 == 0:
                    logger.debug("Current best score: {0}".format(
                        current_candidate.similarity_score))
            self.top_candidate = current_candidate
        self.build_insert_after_counts()
        logger.debug("Finished page alignment!")
def boltzmann_weight(old_candidate, new_candidate, temperature):
    """Metropolis acceptance weight for the annealing search.

    Note that we seek to *maximize* a positive energy function, so there is
    no minus sign in the exponent: improvements give weights > 1.
    """
    delta = new_candidate.similarity_score - old_candidate.similarity_score
    return math.exp(delta / temperature)
import logging
import attr
from reportlab.lib.colors import Color
from reportlab.pdfgen.canvas import Canvas
from typing import List, Iterator, Union, Dict, BinaryIO
from rho_pdf_diff.document import Word, Page
from rho_pdf_diff.text_diff import Diff
logger = logging.getLogger(__name__)
def build_blank_page_image(width: float,
                           height: float,
                           file: Union[str, BinaryIO]) -> Canvas:
    """Return a blank reportlab canvas of the given dimensions.

    The canvas will ultimately save to ``file`` (a filename or writable
    file object).  ``bottomup=False`` puts the origin at the top-left so
    coordinates match the extracted word boxes.
    """
    return Canvas(file, pagesize=(width, height), bottomup=False)
class SpanStyle(object):
    """Base class for determining how to draw a diff on a page (bounding
    rectangle, strikethrough, underline, color, ...).  Typically there is
    one style for additions and one for removals."""

    def mark_one_span(self, words: List[Word], canvas: Canvas) -> Canvas:
        """Draw a single continuous span of words.  Subclasses must
        implement this; the words are guaranteed to lie on one line."""
        raise NotImplementedError

    @staticmethod
    def split_words_to_boxes(words: List[Word], line_height_tolerance: float) \
            -> Iterator[List[Word]]:
        """Yield runs of words that share a line, so a span broken across
        lines gets marked up separately per line.

        A word belongs to the current run when its y_min is within
        ``line_height_tolerance`` word-heights of the run's first word.
        """
        if not words:
            return
        anchor = words[0]  # first word of the current run
        chunk = []
        for word in words:
            same_line = (abs(anchor.y_min - word.y_min)
                         < line_height_tolerance * anchor.height)
            if same_line:
                chunk.append(word)
            else:
                yield chunk
                chunk = [word]
                anchor = word
        if chunk:
            yield chunk  # flush the final run

    def mark_words(self, words: List[Word], canvas: Canvas,
                   line_height_tolerance: float = 0.05) -> Canvas:
        """Split ``words`` into per-line runs and draw each non-empty run."""
        for chunk in self.split_words_to_boxes(
                words=words, line_height_tolerance=line_height_tolerance):
            if chunk:
                canvas = self.mark_one_span(words=chunk, canvas=canvas)
        return canvas
@attr.s(auto_attribs=True)
class HighlightStyle(SpanStyle):
    """Draw a filled rectangle over one continuous span of words."""
    color: Color  # set w/ RGBA, CMYK, etc.

    def mark_one_span(self, words: List[Word], canvas: Canvas) -> Canvas:
        """Fill the bounding box from the first word's left edge to the last
        word's right edge, using the first word's vertical extent."""
        first, last = words[0], words[-1]
        left = first.x_min
        bottom = first.y_min
        box_width = last.x_max - left
        box_height = first.y_max - bottom
        canvas.setFillColor(self.color)
        canvas.setStrokeColor(self.color)
        canvas.rect(x=left, y=bottom, width=box_width, height=box_height,
                    fill=1)
        return canvas
def extract_words_on_page_from_diff(diff: Diff, page_num: int) -> List[Word]:
    """Return only the diff's words on the given page.

    Diffs can span multiple pages, but overlays are drawn one page at a
    time, so filter to the page of interest.
    """
    return [w for w in diff.words if w.page_number == page_num]
def build_page_diff_overlay(page: Page,
                            diffs: List[Diff],
                            style_map: Dict[str, SpanStyle],
                            file: Union[str, BinaryIO]) -> Canvas:
    """Draw the given diffs onto a transparent overlay sized like ``page``.

    The overlay contains no document text; it is stacked on top of the
    actual document.  ``style_map`` chooses a SpanStyle per diff type.
    The file (filename or file object) is required here because reportlab's
    Canvas must know its destination at instantiation.
    """
    logger.debug("Drawing overlay for {0} diffs...".format(len(diffs)))
    canvas = build_blank_page_image(
        width=page.width, height=page.height, file=file)
    for diff in diffs:
        words_on_page = extract_words_on_page_from_diff(
            diff=diff, page_num=page.page_number)
        canvas = style_map[diff.diff_type].mark_words(
            words=words_on_page, canvas=canvas)
    return canvas
"""General utilities."""
import re
from typing import Iterable, Union
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors as mpl_colors
def pd_unique(arr):
    """Return the unique elements of ``arr`` in order of first appearance.

    Mirrors pandas.unique while avoiding the pandas dependency: np.unique
    gives the index of each value's first occurrence; sorting those indices
    restores the original encounter order.
    """
    _, first_positions = np.unique(arr, return_index=True)
    first_positions.sort()
    return arr[first_positions]
def is_curr_dark_bokeh():
    """Determine if the current Bokeh theme is dark.

    Reads the theme's figure background color from HoloViews and applies a
    rough luminance check.  (The 0.2/0.6/0.2 weights are approximate, not
    the standard BT.601/709 luma coefficients.)
    """
    # yadda yadda yadda this is not really correct, no one uses gray as their bgcolor
    import holoviews as hv

    try:
        bgcolor = hv.renderer("bokeh").theme._json["attrs"]["Figure"][
            "background_fill_color"
        ]
    except (AttributeError, KeyError, TypeError):
        # No theme set (theme is None) or no background color configured:
        # fall back to Bokeh's default white background.
        # BUG FIX: this previously caught NameError, which can never be
        # raised here (hv is imported just above), so the fallback was
        # unreachable and a missing theme crashed with AttributeError.
        bgcolor = "#FFFFFF"
    r, g, b = mpl_colors.to_rgb(bgcolor)
    return (0.2 * r + 0.6 * g + 0.2 * b) <= 0.5
def spread(x, dmin):
    """Move the points in ``x`` apart so that each pair of neighbours ends
    up separated by at least the sum of their ``dmin`` values, moving the
    points as little as possible.

    Parameters
    ----------
    x : array-like
        Point positions, in any order.
    dmin : scalar or array-like
        Per-point minimum half-separation; broadcast to ``x``'s shape.

    Returns
    -------
    numpy.ndarray of float64
        Adjusted positions, in the caller's original order.
    """
    # np.asarray replaces the old np.array(x, copy=False), which raises in
    # NumPy 2.x whenever a copy is unavoidable.
    x = np.asarray(x, dtype=np.float64)
    if len(x) <= 1:
        return x
    # we need to work on sorted x, but we also need to remember the indices
    # so we can adjust the right ones
    sort_inds = np.argsort(x)
    x_sort = x.copy()[sort_inds]
    dmin = np.broadcast_to(dmin, x.shape)[sort_inds]
    # Minimizing the Total Movement for Movement to Independence Problem on a Line
    # Ghadiri, Yazdanbod 2016
    # https://www.researchgate.net/publication/304641457_Minimizing_the_Total_Movement_for_Movement_to_Independence_Problem_on_a_Line
    # this function does things a little differently, which I think results in clearer code
    # First pass (rightward-only): push each point right until it clears the
    # previous point.  `adj` records how far each point moved; `slack`
    # records how much room was left over (0 means "touching").
    slack = np.zeros_like(x_sort, dtype=np.float64)
    adj = np.zeros_like(x_sort, dtype=np.float64)
    for i in range(1, len(x_sort)):
        # previous element's new location is x_sort[i-1] + adj[i-1];
        # move to max(x_sort[i], prev + required gap)
        local_dmin = dmin[i] + dmin[i - 1]
        prev = x_sort[i - 1] + adj[i - 1]
        new_x = max(x_sort[i], prev + local_dmin)
        slack[i] = new_x - (prev + local_dmin)
        adj[i] = new_x - x_sort[i]
    new_x = x_sort.copy()
    chains = np.ones_like(new_x)
    for i in range(1, len(new_x)):
        if slack[i] == 0:
            chains[i] = chains[i - 1]
        else:
            chains[i] = chains[i - 1] + 1
    # We break the elements into "chains", groups that are all the minimum distance apart.
    # To start out, we make chains as above, simply checking whether each element is tied
    # to the one before it (because that can be easily computed)
    # This produces incorrect results when some elements move left, thereby entangling more.
    # For example, with dmin = 3 the list [0, 4, 5, 6] is actually all connected, because
    # the chain [4, 5, 6] will become [2, 5, 8] which is within range of 0.
    # To adjust a single chain, we compute the median of the shifts if only rightward shifts
    # are allowed, and subtract that median from each of the shifts.
    # We repeat the following process until convergence:
    # - Adjust each chain independently
    # - If two chains are within the minimum distance of each other, merge them

    def adjust(chains):
        # Re-center each chain: mean-centering the rightward shifts spreads
        # the total movement between leftward and rightward moves.
        new_x = x_sort.copy()
        for c in np.unique(chains):
            chain_adj = adj[chains == c]
            chain_adj -= np.mean(chain_adj)
            new_x[chains == c] += chain_adj
        return new_x

    def detach_chains(chains):
        # Greedily try to split chains back apart wherever doing so does not
        # reintroduce an overlap; recurse on each successful split.
        for i in range(1, len(chains)):
            if chains[i] == chains[i - 1]:
                detached_chains = chains.copy()
                detached_chains[i] = np.max(detached_chains) + 1
                if not chain_overlaps(detached_chains).any():
                    return detach_chains(detached_chains)
        return chains

    def chain_overlaps(chains):
        # Build each chain's padded interval (its extent plus the end
        # points' dmin margins) and flag collisions between neighbours.
        adj_x = adjust(chains)
        intervals = []
        for c in pd_unique(chains):
            inds = np.nonzero(chains == c)[0]
            intervals.append(
                (
                    adj_x[inds[0]] - dmin[inds[0]],
                    adj_x[inds[0]],
                    adj_x[inds[-1]],
                    adj_x[inds[-1]] + dmin[inds[-1]],
                )
            )
        overlaps = [False]
        for i in range(1, len(intervals)):
            _, _, _, hi1 = intervals[i - 1]
            lo2, _, _, _ = intervals[i]
            # Two consecutive chains collide when the first one's padded end
            # passes the second one's padded start.  (BUG FIX: the previous
            # expression "hi1 > lo2 or lo2 < hi1" tested the same condition
            # twice.)
            overlaps.append(hi1 > lo2)
        return np.array(overlaps)

    overlaps = chain_overlaps(chains)
    while overlaps.any():
        # Merge each overlapping chain into its left neighbour, then retest.
        for i in range(1, len(overlaps)):
            if overlaps[i]:
                left, right = np.unique(chains)[i - 1 : i + 1]
                chains[chains == right] = left
        overlaps = chain_overlaps(chains)
    chains = detach_chains(chains)
    # Undo the initial sort so the output lines up with the input order.
    return adjust(chains)[np.argsort(sort_inds)]
def labelcase(text: Union[str, Iterable[str]]):
    """Convert text to title case, inserting spaces at snake_case and
    camelCase boundaries.  Iterables are converted element-wise."""
    if not isinstance(text, str):
        return [labelcase(item) for item in text]
    spaced = text.replace("_", " ")  # snake_case -> words (no-op otherwise)
    return re.sub(r"([^ ])([A-Z])", r"\1 \2", spaced).title().strip()
from .bokeh import rho_light, rho_dark
from .bokeh import setup as bokeh_setup
from .matplotlib import setup as mpl_setup
import param
import json
import panel as pn
import warnings
from .util import is_curr_dark_bokeh
def datagrid(df):
    """Render a DataFrame as an interactive Perspective data grid.

    The dark or light Perspective theme is picked to match the current
    Bokeh theme.
    """
    if is_curr_dark_bokeh():
        persp_theme = "material-dense-dark"
    else:
        persp_theme = "material-dense"
    return pn.pane.Perspective(df, theme=persp_theme,
                               sizing_mode="stretch_width")
def show_json(obj):
    """Render JSON (or a JSON-serializable object) as an interactive tree.

    The dark or light JSON-pane theme is picked to match the current Bokeh
    theme; objects that json can't serialize fall back to their repr().
    """
    dark = is_curr_dark_bokeh()
    serialized = json.dumps(obj, default=lambda o: repr(o))
    json_pane = pn.pane.JSON(
        serialized,
        theme="dark" if dark else "light",
        height=600,
        sizing_mode="stretch_width",
        hover_preview=True,
    )
    return pn.Column(
        json_pane,
        sizing_mode="stretch_width",
        background="#1E1E1E" if dark else "#FFFFFF",
        scroll=True,
    )
def pn_setup_fonts():
    """Sets up Source Sans 3 in the CSS for Panel.

    Appends a Google Fonts @import rule to Panel's global raw CSS list;
    call this before rendering/serving so templates pick the font up.
    """
    pn.config.raw_css.append(
        """
    @import url('https://fonts.googleapis.com/css2?family=Source+Sans+3:ital,wght@0,200..900;1,200..900&display=swap');
    """
    )
def pn_setup(is_dark: bool = True, **kwargs):
    """Set up everything for a notebook using Panel, given the color mode.

    Replaces a call to pn.extension: pass in kwargs to affect that call.
    Any setup failure is reported as a warning rather than raised, so a
    notebook without Panel still imports cleanly.
    """
    try:
        import panel as pn

        pn.extension("perspective", "gridstack", **kwargs)
        pn_setup_fonts()
        bokeh_setup(is_dark)
    except Exception as exc:
        warnings.warn("Could not set up Panel: " + str(exc))
class ThemedPanel(param.Parameterized):
    """Base class for Panel apps that can switch between dark and light
    color schemes via the ``colorMode`` parameter.

    The mode is read from the ``colorMode`` URL query argument when serving;
    otherwise it falls back to a global ``IS_DARK`` flag if one is visible,
    else "dark".
    """
    # Hidden selector (negative precedence keeps it out of auto-generated UIs).
    colorMode = param.ObjectSelector("dark", ["dark", "light"], precedence=-1)

    def __init__(self):
        super().__init__()
        if "colorMode" in pn.state.session_args:
            # Query args arrive as lists of bytes, e.g. {"colorMode": [b"Dark"]}.
            self.colorMode = pn.state.session_args["colorMode"][0].decode().lower()
        else:
            try:
                # NOTE(review): IS_DARK is not defined anywhere in this module,
                # so this always raises NameError here unless a caller injects
                # IS_DARK into this module's globals — confirm intent.
                self.colorMode = "dark" if IS_DARK else "light"
            except NameError:
                self.colorMode = "dark"

    @pn.depends("colorMode")
    def colors_theme(self):
        """Returns a (colors, theme) tuple indicating the categorical colors and Bokeh theme."""
        # mpl_setup(is_dark, setup=False) returns (theme dict, categorical
        # colors) without mutating global Matplotlib state.
        if self.colorMode == "light":
            _theme, colors = mpl_setup(False, False)
        else:
            _theme, colors = mpl_setup(True, False)
        theme = rho_light if self.colorMode == "light" else rho_dark
        return (colors, theme)
"""Plotly theming."""
import json
import matplotlib as mpl
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
import plotly.graph_objects as go
import numpy as np
from .matplotlib import setup as mpl_setup
from .sequential_palettes import SEQUENTIAL
def register_themes():
    """Build and register the "rho_light" and "rho_dark" Plotly templates.

    Each template starts from the corresponding built-in Plotly template and
    is restyled to match the rho Matplotlib theme: colorscales come from the
    shared sequential palettes, and all other colors are read back from
    Matplotlib's rcParams after ``mpl_setup`` applies them.
    """
    for theme_name, is_dark in zip(("rho_light", "rho_dark"), (False, True)):
        # Inherit everything we don't override from the built-in template
        # for the same color mode.
        inherit = "plotly_dark" if is_dark else "plotly"
        default = pio.templates[inherit].layout
        templ: go.Layout = go.Layout(default)
        # Subsample the palettes (strided slicing) down to a handful of
        # stops for Plotly's colorscale format.
        templ.colorscale.sequential = templ.colorscale.sequentialminus = SEQUENTIAL[
            "viridia"
        ].hex_colors()[::17]
        templ.colorscale.diverging = (
            SEQUENTIAL["div_icefire_shift" if is_dark else "div_coolwarm_shift"]
        ).hex_colors()[2:-1:18]
        # mpl_setup(setup=True) applies the rho rcParams globally; rc is
        # then the single source of truth for the remaining colors.
        _theme, cs = mpl_setup(is_dark=is_dark, setup=True)
        rc = mpl.rcParams
        templ.paper_bgcolor = rc["axes.facecolor"]
        templ.plot_bgcolor = rc["axes.facecolor"]
        templ.colorway = cs
        templ.margin = go.layout.Margin(b=20, l=20, r=20, t=30)
        # Style the 2D axes and all three 3D scene axes identically.
        for ax in (
            templ.xaxis,
            templ.yaxis,
            templ.scene.xaxis,
            templ.scene.yaxis,
            templ.scene.zaxis,
        ):
            ax.tickcolor = rc["xtick.color"]
            ax.linecolor = rc["axes.edgecolor"]
            ax.gridcolor = rc["axes.edgecolor"]
            ax.tickfont["color"] = rc["xtick.labelcolor"]
            ax.title.font["color"] = rc["axes.labelcolor"]
            ax.showline = True
            ax.showgrid = False
            ax.mirror = False
        # 3D scenes keep their grid and get a matching pane background.
        for ax in (templ.scene.xaxis, templ.scene.yaxis, templ.scene.zaxis):
            ax.showgrid = True
            ax.backgroundcolor = rc["axes.facecolor"]
        templ.legend.bgcolor = rc["axes.facecolor"]
        templ.legend.bordercolor = rc["legend.edgecolor"]
        templ.legend.borderwidth = 2
        templ.legend.font.color = rc["legend.labelcolor"]
        templ.legend.title.font.color = rc["axes.labelcolor"]
        # I'm tempted to keep Plotly's default of using the color of the mark for the hover color, but ultimately
        # I think this is more minimal without being any harder to understand: if you hover, you're already
        # looking at the data, so the extra color cue isn't especially necessary
        templ.hoverlabel.bgcolor = rc["axes.facecolor"]
        templ.hoverlabel.bordercolor = rc["axes.edgecolor"]
        templ.hoverlabel.font.color = rc["text.color"]
        # Per-trace defaults that can't be expressed on the layout.
        data = go.layout.template.Data(
            # https://github.com/plotly/plotly.py/issues/3404
            # I'd rather have filled box plots by default, but Plotly doesn't support altering the default of half-transparency fill color (God only knows why...)
            # so I can't make the fill color just the line color, so my choices are half transparency or transparent
            box=(
                go.Box(
                    marker_line_color=rc["axes.facecolor"],
                    notchwidth=0,
                    fillcolor="rgba(0,0,0,0)",
                ),
            ),
            # same issue as box plots with violin plots: here, we have the fill because otherwise it looks a little too empty
            # I'd rather have a solid meanline, but that's not an option
            violin=(go.Violin(meanline=dict(visible=True)),),
            scatter=(go.Scatter(marker=dict(line=dict(color=rc["axes.facecolor"]))),),
        )
        pio.templates[theme_name] = go.layout.Template(layout=templ, data=data)
def setup(is_dark):
    """Activate the rho Plotly template matching the requested color mode."""
    if is_dark:
        pio.templates.default = "rho_dark"
    else:
        pio.templates.default = "rho_light"
"""Vega plot themes as config objects."""
import altair as alt
from .colors import LIGHT_SHADES, DARK_SHADES, LIGHT_COLORS, DARK_COLORS
from .matplotlib import rho_dark, rho_light
from .matplotlib import setup as mpl_setup
from .sequential_palettes import SEQUENTIAL
import matplotlib.colors
def color_values(theme):
    """Sample *theme*'s colormap at 16 evenly spaced points as hex strings."""
    # more values is closer to the original LCH space, but more data
    samples = [i / 15 for i in range(16)]
    cmap = theme.as_mpl_cmap()
    return [matplotlib.colors.rgb2hex(rgb) for rgb in cmap(samples)]
# define the theme by returning the dictionary of configurations
def rho(is_dark: bool):
    """Build an Altair theme function for the rho light or dark scheme.

    Returns a zero-argument callable (the shape Altair's theme registry
    expects) that produces the Vega-Lite config dict.
    """
    # mpl_setup returns the matplotlib rc mapping and the categorical colors.
    theme, colors = mpl_setup(is_dark, False)
    # Only `dark` and `darkest` are used below; the rest are unpacked for
    # positional completeness of the shade tuple.
    empty, lightest, light, medium, dark, darkest = (
        DARK_SHADES if is_dark else LIGHT_SHADES
    )
    def r():
        # The actual theme callable registered with alt.themes.
        return {
            "config": {
                # rc colors are bare hex digits; Vega wants a leading '#'.
                "background": "#" + theme["figure.facecolor"],
                "numberFormat": ".5~r",
                "range": {
                    "category": colors,
                    "heatmap": color_values(SEQUENTIAL["gouldia"]),
                    "diverging": color_values(
                        SEQUENTIAL[
                            "div_icefire_shift" if is_dark else "div_coolwarm_shift"
                        ]
                    ),
                    "ramp": color_values(SEQUENTIAL["viridia"]),
                },
                "circle": {"size": 100, "fill": colors[0],},
                "square": {"size": 100, "fill": colors[0],},
                "line": {"strokeWidth": 3,},
                "style": {
                    "guide-label": {"fill": dark, "fontWeight": 400, "fontSize": 16,},
                    "guide-title": {
                        "fill": darkest,
                        "fontSize": 20,
                        "fontWeight": 400,
                    },
                    "group-title": {
                        "fill": darkest,
                        "fontSize": 22,
                        "fontWeight": 700,
                    },
                },
                "axis": {
                    "tickFontWeight": 600,
                    "tickColor": dark,
                    "domainColor": dark,
                },
                "axisY": {"titleAngle": 0, "titleAlign": "right"},
                "axisXBand": {"labelAngle": -45},
                "axisQuantitative": {"grid": False},
                "legend": {
                    "labelColor": darkest,
                    "gradientHorizontalMinLength": 200,
                    "gradientHorizontalMaxLength": 1000,
                    "gradientVerticalMinLength": 200,
                    "gradientVerticalMaxLength": 1000,
                },
                "view": {
                    "background": "#" + theme["axes.facecolor"],
                    "stroke": "transparent",
                },
            }
        }
    return r
# Materialize the two theme config dicts once at import time.
RHO_LIGHT = rho(False)()
RHO_DARK = rho(True)()
def _register_themes():
    """Register both rho theme builders with Altair's theme registry."""
    for name, dark in (("rho_dark", True), ("rho_light", False)):
        alt.themes.register(name, rho(dark))
def setup(is_dark: bool):
    """Sets up Altair according to the given color scheme."""
    theme_name = "rho_dark" if is_dark else "rho_light"
    alt.themes.enable(theme_name)
import textwrap
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from .util import spread as rho_spread
from .color_util import contrast_with
def remove_crowded(ax=None):
    """Given an axis, removes the second and penultimate ticks if
    the distance between the final ticks and those are less than half of the
    average distance."""
    if ax is None:
        ax = plt.gca()
    # check in view coordinates to adjust for log axes and the like
    ticks = [ax.get_xticks(), ax.get_yticks()]
    # Pair each x tick with the first y tick (and vice versa) so full (x, y)
    # points can be pushed through the data->display transform.
    xticklocs = ax.transData.transform(
        np.vstack([ticks[0], np.ones_like(ticks[0]) * ticks[1][0]]).T
    )[:, 0]
    yticklocs = ax.transData.transform(
        np.vstack([np.ones_like(ticks[1]) * ticks[0][0], ticks[1]]).T
    )[:, 1]
    tick_locs_list = [xticklocs, yticklocs]
    for i, tick_locs in enumerate(tick_locs_list):
        # Average pixel gap between consecutive ticks on this axis.
        meandist = np.diff(tick_locs).mean()
        # Drop the second tick if it crowds the first ...
        if tick_locs[1] - tick_locs[0] < 0.5 * meandist:
            ticks[i] = np.array([ticks[i][0], *ticks[i][2:]])
        # ... and the penultimate tick if it crowds the last.
        if tick_locs[-1] - tick_locs[-2] < 0.5 * meandist:
            ticks[i] = np.array([*ticks[i][:-2], ticks[i][-1]])
    ax.set_xticks(ticks[0])
    ax.set_yticks(ticks[1])
def smart_ticks(ax=None, xaxis=True, yaxis=True):
    """Sets the axis limits so they start and end at the minimum and maximum for each axis, a la Tufte."""
    if ax is None:
        ax = plt.gca()
    # Collect the data-space bounding box of every plotted artist.
    bboxes = []
    for child in ax.get_children():
        if isinstance(child, mpl.collections.Collection):
            bbox = child.get_datalim(ax.transData)
        elif isinstance(child, mpl.lines.Line2D):
            xdata, ydata = child.get_data()
            if len(xdata):
                bbox = mpl.transforms.Bbox.from_extents(
                    xdata.min(), ydata.min(), xdata.max(), ydata.max()
                )
            else:
                # Skip data-less lines (e.g. legend placeholder artists).
                continue
        else:
            continue
        bboxes.append(bbox)
    if not len(bboxes):
        # Nothing plotted: leave the axes untouched.
        return None
    bbox = mpl.transforms.Bbox.union(bboxes)
    # Trim the spines so they span exactly the data range.
    ax.spines["bottom"].set_bounds(bbox.xmin, bbox.xmax)
    ax.spines["left"].set_bounds(bbox.ymin, bbox.ymax)
    # ignore fixed locators, which are often not numeric
    if xaxis and not isinstance(ax.xaxis.get_major_locator(), mpl.ticker.FixedLocator):
        # Replace the outermost ticks with the exact data extremes, keeping
        # the interior ticks (and their formatted labels) unchanged.
        xlabels = ax.xaxis.get_major_formatter().format_ticks([bbox.xmin, bbox.xmax])
        xlabels = [
            xlabels[0],
            *ax.xaxis.get_major_formatter().format_ticks(ax.get_xticks()[1:-1]),
            xlabels[-1],
        ]
        xticks = [bbox.xmin, *ax.get_xticks()[1:-1], bbox.xmax]
        ax.set_xticks(xticks, xlabels)
    if yaxis and not isinstance(ax.yaxis.get_major_locator(), mpl.ticker.FixedLocator):
        ylabels = ax.yaxis.get_major_formatter().format_ticks([bbox.ymin, bbox.ymax])
        ylabels = [
            ylabels[0],
            *ax.yaxis.get_major_formatter().format_ticks(ax.get_yticks()[1:-1]),
            ylabels[-1],
        ]
        yticks = [bbox.ymin, *ax.get_yticks()[1:-1], bbox.ymax]
        ax.set_yticks(yticks, ylabels)
    # The new end ticks may sit very close to their neighbors; prune those.
    remove_crowded(ax)
def get_handles_labels(ax):
    """Gets a tuple (handles, labels). Accounts for Seaborn's use of phantom lines, so the lines
    will actually have data on the screen."""
    all_handles, labels = ax.get_legend_handles_labels()
    lines = ax.get_lines()
    if len(lines) == 2 * len(all_handles):
        # Seaborn plots the real data lines first and then an equally long
        # list of phantom (data-less) lines for the legend: keep the real half.
        return lines[: len(lines) // 2], labels
    # No phantom lines: the legend handles are the on-screen artists.
    return all_handles, labels
def height(text, ax, **kwargs):
    """Render *text* temporarily on *ax* and return its height in display pixels."""
    probe = ax.text(0.5, 0.5, text, transform=ax.transAxes, **kwargs)
    # A draw is required before the window extent is valid.
    ax.figure.canvas.draw()
    extent = probe.get_window_extent()
    probe.remove()
    return extent.height
def line_labels(ax=None, remove_legend=True, spread=None, **kwargs):
    """Does automatic line labeling, replacing a legend with side labels.
    Kwargs passed to ax.text: don't pass alignment or color."""
    if ax is None:
        ax = plt.gca()
    if spread is None:
        spread = rho_spread
    handles, labels = get_handles_labels(ax)
    labels = ["\n".join(textwrap.wrap(label, width=15)) for label in labels]
    # Pixel height of each wrapped label: the minimum spacing between labels.
    margin = np.array([height(label, ax, **kwargs) for label in labels])
    # Last data point of each line: where its leader line starts.
    y_ends = np.array([handle.get_xydata()[-1] for handle in handles])
    # Push label y-positions apart in display space so they do not overlap,
    # then map back to data coordinates.
    spreaded_y_ends = ax.transData.transform(y_ends)
    spreaded_y_ends[:, 1] = spread(spreaded_y_ends[:, 1], margin)
    spreaded_y_ends = ax.transData.inverted().transform(spreaded_y_ends)
    # Label x-position: 5% (in display space) past the rightmost line end.
    xmax = ax.transData.inverted().transform(
        (ax.transData.transform((y_ends[:, 0].max(), 0))[0] * 1.05, 0)
    )[0]
    for y_end, spreaded, handle, label in zip(y_ends, spreaded_y_ends, handles, labels):
        color = handle.get_color()
        # Dashed leader from the line end to its (possibly shifted) label.
        ax.plot([y_end[0], xmax], [y_end[1], spreaded[1]], ls="--", lw=1, c=color)
        ax.text(xmax, spreaded[1], label, ha="left", va="center", color=contrast_with(color, ax.get_facecolor()), **kwargs)
    if remove_legend:
        ax.legend().remove()
def ylabel_top(ax=None):
    """Moves the y-axis label to the top, wrapping as necessary."""
    if ax is None:
        ax = plt.gca()
    label = ax.get_ylabel()
    # Locate the Text artist that renders the current y label.
    matches = [
        child
        for child in ax.yaxis.get_children()
        if isinstance(child, mpl.text.Text) and child.get_text() == label
    ]
    source = matches[0]
    source.set_visible(False)
    wrapped = "\n".join(textwrap.wrap(label, width=15))
    # Re-draw the label horizontally above the top-left corner of the axes,
    # reusing the original label's color and font.
    ax.text(
        0,
        1.03,
        wrapped,
        transform=ax.transAxes,
        ha="right",
        va="baseline",
        color=source.get_color(),
        fontproperties=source.get_font_properties(),
    )
"""Bokeh theme."""
import rho_plus
import matplotlib.pyplot as plt
from bokeh.themes import Theme
# Build the dark and light Bokeh themes from the matching matplotlib rc values.
themes = []
for is_dark in (True, False):
    # mpl_setup configures matplotlib for the scheme; the resolved colors are
    # then read back from plt.rcParams below (the returned `_theme`/`cs` are
    # unused here — presumably rcParams reflects the same values; verify).
    _theme, cs = rho_plus.mpl_setup(is_dark)
    theme = plt.rcParams
    font = "'Source Sans 3', sans-serif"
    # NOTE(review): the local name `json` shadows the stdlib module within
    # this file; it is only ever passed to bokeh's Theme(json=...).
    json = {
        "attrs": {
            "Figure": {
                "background_fill_color": theme["figure.facecolor"],
                "border_fill_color": theme["figure.facecolor"],
                # spine of axis
                "outline_line_color": theme["axes.facecolor"],
            },
            "Grid": {"grid_line_alpha": 0, "minor_grid_line_alpha": 0,},
            "Title": {
                "text_color": theme["axes.titlecolor"],
                "text_font": font,
                "align": "center",
                "text_font_size": "1.4em",
                "text_font_style": "normal",
            },
            "Axis": {
                "major_tick_line_color": theme["xtick.color"],
                "major_tick_line_width": 1,
                "major_tick_out": 10,
                "minor_tick_line_color": theme["xtick.color"],
                "major_label_text_color": theme["xtick.labelcolor"],
                "major_label_text_font": font,
                "major_label_text_font_size": "1.025em",
                "minor_tick_out": 0,
                "axis_label_standoff": 10,
                "axis_label_text_color": theme["axes.labelcolor"],
                "axis_label_text_font": font,
                "axis_label_text_font_size": "1.25em",
                "axis_label_text_font_style": "normal",
            },
            "YAxis": {"major_label_orientation": "horizontal"},
            "Legend": {
                "spacing": 8,
                "glyph_width": 15,
                "label_standoff": 8,
                "label_text_color": theme["legend.labelcolor"],
                "label_text_font_size": "1.025em",
                "border_line_alpha": 1,
                "border_line_color": theme["legend.edgecolor"],
                "background_fill_alpha": 0.25,
                "background_fill_color": theme["axes.facecolor"],
            },
            "ColorBar": {
                "title_text_color": theme["axes.titlecolor"],
                "title_text_font": font,
                "title_text_font_size": "1.025em",
                "background_fill_color": theme["axes.facecolor"],
                "bar_line_alpha": 0,
                "major_label_text_color": theme["xtick.labelcolor"],
                "major_label_text_font": font,
                "major_label_text_font_size": "1.025em",
                "major_tick_line_alpha": 0,
            },
            "Line": {"line_width": 4,},
        }
    }
    themes.append(Theme(json=json))
# Loop order above is (True, False), so dark comes first.
rho_dark, rho_light = themes
def setup(is_dark):
    """Apply the matching rho Bokeh theme to HoloViews' bokeh renderer."""
    chosen = rho_dark if is_dark else rho_light
    try:
        import holoviews as hv
        hv.renderer("bokeh").theme = chosen
    except ModuleNotFoundError:
        # HoloViews is optional; silently skip when it is not installed.
        pass
"""Defines a distance search structure"""
from __future__ import absolute_import, division, print_function, unicode_literals
import abc as _abc
import numpy as _np
import scipy.sparse as _sparse
import scipy.spatial.distance as _spatial_distance
class MatrixMetricSearch(object):
    """A matrix representation out of features.

    Abstract base: subclasses supply the matrix format (sparse/dense) and the
    distance function via the abstract methods below.
    """
    # NOTE(review): this is the Python 2 spelling; under Python 3 it has no
    # effect, so the class is not actually enforced as abstract there. Left
    # unchanged to avoid altering instantiation behavior for existing callers.
    __metaclass__ = _abc.ABCMeta
    def __init__(self, features, records_data):
        """
        Args:
            features: A matrix with rows that represent records
                (corresponding to the elements in records_data) and columns
                that describe a point in space for each row.
            records_data: Data to return when a doc is matched. Index of
                corresponds to features.
        """
        self.matrix = features
        self.records_data = _np.array(records_data)
    def get_feature_matrix(self):
        """Return the underlying feature matrix."""
        return self.matrix
    def get_records(self):
        """Return the per-row payload data aligned with the feature matrix."""
        return self.records_data
    @staticmethod
    @_abc.abstractmethod
    def features_to_matrix(features):
        """
        Args:
            features: A list of features to be formatted.
        Returns:
            The transformed matrix.
        """
        return
    @staticmethod
    @_abc.abstractmethod
    def vstack(matrix_list):
        """
        Args:
            matrix_list: A list of matrices to stack vertically.
        Returns:
            The stacked matrix.
        """
        return
    @_abc.abstractmethod
    def _transform_value(self, val):
        """
        Args:
            val: A numeric value to be (potentially transformed).
        Returns:
            The transformed numeric value.
        """
        return
    @_abc.abstractmethod
    def _distance(self, a_matrix):
        """
        Args:
            a_matrix: A matrix with rows that represent records
                to search against.
        Returns:
            A dense array representing distance.
        """
        return
    def nearest_search(self, features):
        """Find the closest item(s) for each set of features in features_list.
        Args:
            features: A matrix with rows that represent records
                (corresponding to the elements in records_data) and columns
                that describe a point in space for each row.
        Returns:
            For each element in features_list, return the k-nearest items
            and their distance scores
            [[(score1_1, item1_1), ..., (score1_k, item1_k)],
             [(score2_1, item2_1), ..., (score2_k, item2_k)], ...]
        """
        dist_matrix = self._distance(features)
        ret = []
        for i in range(dist_matrix.shape[0]):
            # replacing the for loop by matrix ops could speed things up
            scores = dist_matrix[i]
            records = self.records_data
            # Sort ascending: smaller distance means more similar.
            arg_index = _np.argsort(scores)
            curr_ret = list(zip(scores[arg_index], records[arg_index]))
            ret.append(curr_ret)
        return ret
    def remove_near_duplicates(self):
        """If there are 2 or more records with 0 distance from eachother -
        keep only one.
        """
        dist_matrix = self._distance(self.matrix)
        keeps = []
        dupes = set()
        for row_index in range(dist_matrix.shape[0]):
            # Normalize by the row's max so "near zero" is relative to scale.
            # NOTE(review): assumes max_dist > 0 for rows with duplicates;
            # an all-identical index would divide by zero — TODO confirm.
            max_dist = dist_matrix[row_index].max()
            for col_index in range(dist_matrix.shape[0]):
                # Only look above the diagonal so the first occurrence is kept.
                if row_index < col_index:
                    if dist_matrix[row_index, col_index] / max_dist <= 0.001:
                        dupes.add(col_index)
            if not row_index in dupes:
                keeps.append(row_index)
        self.matrix = self.matrix[keeps]
        # Bug fix: this previously assigned to a new attribute `self.records`,
        # leaving `self.records_data` stale so searches kept returning the
        # payloads of removed rows.
        self.records_data = self.records_data[keeps]
class CosineDistance(MatrixMetricSearch):
    """A matrix that implements cosine distance search against it.
    cosine_distance = 1 - cosine_similarity
    Note: We want items that are more similar to be closer to zero so we are
    going to instead return 1 - cosine_similarity. We do this so similarity
    and distance metrics can be treated the same way.
    """
    def __init__(self, features, records_data):
        super(CosineDistance, self).__init__(features, records_data)
        # Precompute each stored row's L2 norm once.
        squared = self.matrix.copy()
        squared.data **= 2
        row_sums = _np.asarray(squared.sum(axis=1)).reshape(-1)
        self.matrix_root_sum_square = _np.sqrt(row_sums)
    @staticmethod
    def features_to_matrix(features):
        """Convert *features* into a CSR sparse matrix."""
        return _sparse.csr_matrix(features)
    @staticmethod
    def vstack(matrix_list):
        """Stack sparse matrices vertically into a single matrix."""
        return _sparse.vstack(matrix_list)
    def _transform_value(self, v):
        # Values are stored as-is; no normalization at insert time.
        return v
    def _distance(self, a_matrix):
        """Vectorised cosine distance"""
        # Dot product of every query row against every stored row.
        dprod = self.matrix.dot(a_matrix.transpose()).transpose() * 1.0
        # L2 norm of each query row, shaped as a column vector.
        squared = a_matrix.copy()
        squared.data **= 2
        query_norms = _np.sqrt(
            _np.asarray(squared.sum(axis=1)).reshape(-1, 1)
        )
        magnitude = 1.0 / (query_norms * self.matrix_root_sum_square)
        return 1 - dprod.multiply(magnitude).toarray()
class UnitCosineDistance(MatrixMetricSearch):
    """A matrix that implements cosine distance search against it.
    cosine_distance = 1 - cosine_similarity
    Note: We want items that are more similar to be closer to zero so we are
    going to instead return 1 - cosine_similarity. We do this so similarity
    and distance metrics can be treated the same way.
    Assumes unit-vectors and takes some shortucts:
      * Uses integers instead of floats
      * 1**2 == 1 so that operation can be skipped
    """
    def __init__(self, features, records_data):
        super(UnitCosineDistance, self).__init__(features, records_data)
        # With 0/1 entries the row sum already equals the sum of squares,
        # so no elementwise squaring is needed.
        row_sums = _np.asarray(self.matrix.sum(axis=1)).reshape(-1)
        self.matrix_root_sum_square = _np.sqrt(row_sums)
    @staticmethod
    def features_to_matrix(features):
        """Convert *features* into a CSR sparse matrix."""
        return _sparse.csr_matrix(features)
    @staticmethod
    def vstack(matrix_list):
        """Stack sparse matrices vertically into a single matrix."""
        return _sparse.vstack(matrix_list)
    def _transform_value(self, v):
        # Binarize: any present feature counts as 1.
        return 1
    def _distance(self, a_matrix):
        """Vectorised cosine distance"""
        # Dot product of every query row against every stored row.
        dprod = self.matrix.dot(a_matrix.transpose()).transpose() * 1.0
        # Row sums double as sums of squares for 0/1 data.
        query_norms = _np.sqrt(
            _np.asarray(a_matrix.sum(axis=1)).reshape(-1, 1)
        )
        magnitude = 1.0 / (query_norms * self.matrix_root_sum_square)
        return 1 - dprod.multiply(magnitude).toarray()
class SlowEuclideanDistance(MatrixMetricSearch):
    """A matrix that implements euclidean distance search against it.
    WARNING: This is not optimized.
    """
    def __init__(self, features, records_data):
        # The base class already stores `features` on `self.matrix`; the
        # former `self.matrix = self.matrix` no-op line was removed.
        super(SlowEuclideanDistance, self).__init__(features, records_data)
    @staticmethod
    def features_to_matrix(features):
        """
        Args:
            features: A list of features to be formatted.
        Returns:
            The features as a dense 2-D numpy array.
        """
        return _np.array(features, ndmin=2)
    @staticmethod
    def vstack(matrix_list):
        """
        Args:
            matrix_list: A list of dense matrices to stack vertically.
        Returns:
            The stacked matrix.
        """
        return _np.vstack(matrix_list)
    def _transform_value(self, v):
        # Values are stored untouched.
        return v
    def _distance(self, a_matrix):
        """Euclidean distance via scipy's pairwise cdist."""
        return _spatial_distance.cdist(a_matrix, self.matrix, 'euclidean')
class DenseCosineDistance(MatrixMetricSearch):
    """A matrix that implements cosine distance search against it.
    cosine_distance = 1 - cosine_similarity
    Note: We want items that are more similar to be closer to zero so we are
    going to instead return 1 - cosine_similarity. We do this so similarity
    and distance metrics can be treated the same way.
    """
    def __init__(self, features, records_data):
        super(DenseCosineDistance, self).__init__(features, records_data)
        # Precompute each stored row's L2 norm once.
        self.matrix_root_sum_square = _np.sqrt(
            (self.matrix ** 2).sum(axis=1).reshape(-1)
        )
    @staticmethod
    def features_to_matrix(features):
        """Convert *features* into a dense 2-D numpy array."""
        return _np.array(features, ndmin=2)
    @staticmethod
    def vstack(matrix_list):
        """Stack dense matrices vertically into a single matrix."""
        return _np.vstack(matrix_list)
    def _transform_value(self, v):
        # Values are stored as-is.
        return v
    def _distance(self, a_matrix):
        """Vectorised cosine distance"""
        # Dot product of every query row against every stored row.
        dprod = self.matrix.dot(a_matrix.transpose()).transpose() * 1.0
        # L2 norm of each query row, shaped as a column vector.
        query_norms = _np.sqrt((a_matrix ** 2).sum(axis=1).reshape(-1, 1))
        magnitude = 1.0 / (query_norms * self.matrix_root_sum_square)
        return 1 - (dprod * magnitude)
class OpenApiException(Exception):
    """Root of the exception hierarchy raised by this OpenAPI client."""
class ApiTypeError(OpenApiException, TypeError):
    def __init__(self, msg, path_to_item=None, valid_classes=None,
                 key_type=None):
        """Raises an exception for TypeErrors
        Args:
            msg (str): the exception message
        Keyword Args:
            path_to_item (list): a list of keys an indices to get to the
                current_item. None if unset
            valid_classes (tuple): the primitive classes that current item
                should be an instance of. None if unset
            key_type (bool): False if our value is a value in a dict
                True if it is a key in a dict
                False if our item is an item in a list
                None if unset
        """
        self.path_to_item = path_to_item
        self.valid_classes = valid_classes
        self.key_type = key_type
        # Append the location in the payload when one is known.
        full_msg = (
            "{0} at {1}".format(msg, render_path(path_to_item))
            if path_to_item
            else msg
        )
        super(ApiTypeError, self).__init__(full_msg)
class ApiValueError(OpenApiException, ValueError):
    def __init__(self, msg, path_to_item=None):
        """
        Args:
            msg (str): the exception message
        Keyword Args:
            path_to_item (list): the path to the exception in the
                received_data dict. None if unset
        """
        self.path_to_item = path_to_item
        # Append the location in the payload when one is known.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiValueError, self).__init__(msg)
class ApiAttributeError(OpenApiException, AttributeError):
    def __init__(self, msg, path_to_item=None):
        """
        Raised when an attribute reference or assignment fails.
        Args:
            msg (str): the exception message
        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the location in the payload when one is known.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiAttributeError, self).__init__(msg)
class ApiKeyError(OpenApiException, KeyError):
    def __init__(self, msg, path_to_item=None):
        """
        Args:
            msg (str): the exception message
        Keyword Args:
            path_to_item (None/list): the path to the exception in the
                received_data dict
        """
        self.path_to_item = path_to_item
        # Append the location in the payload when one is known.
        if path_to_item:
            msg = "{0} at {1}".format(msg, render_path(path_to_item))
        super(ApiKeyError, self).__init__(msg)
class ApiException(OpenApiException):
    def __init__(self, status=None, reason=None, http_resp=None):
        """HTTP-level API error.

        When *http_resp* is given, status/reason/body/headers are taken from
        it; otherwise *status* and *reason* are used and body/headers are None.
        """
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None
    def __str__(self):
        """Custom error messages for exception"""
        parts = [
            "Status Code: {0}\n".format(self.status),
            "Reason: {0}\n".format(self.reason),
        ]
        if self.headers:
            parts.append("HTTP response headers: {0}\n".format(self.headers))
        if self.body:
            parts.append("HTTP response body: {0}\n".format(self.body))
        return "".join(parts)
class NotFoundException(ApiException):
    """Marker subclass of ApiException for "Not Found" responses."""
    # The former __init__ override only forwarded (status, reason, http_resp)
    # to ApiException.__init__ unchanged, so it was removed as redundant; the
    # constructor signature and behavior are identical.
class UnauthorizedException(ApiException):
    """Marker subclass of ApiException for "Unauthorized" responses."""
    # The former __init__ override only forwarded (status, reason, http_resp)
    # to ApiException.__init__ unchanged, so it was removed as redundant; the
    # constructor signature and behavior are identical.
class ForbiddenException(ApiException):
    """Marker subclass of ApiException for "Forbidden" responses."""
    # The former __init__ override only forwarded (status, reason, http_resp)
    # to ApiException.__init__ unchanged, so it was removed as redundant; the
    # constructor signature and behavior are identical.
class ServiceException(ApiException):
    """Marker subclass of ApiException for server-side service errors."""
    # The former __init__ override only forwarded (status, reason, http_resp)
    # to ApiException.__init__ unchanged, so it was removed as redundant; the
    # constructor signature and behavior are identical.
def render_path(path_to_item):
    """Returns a string representation of a path"""
    # Integers render as list indices, everything else as quoted dict keys.
    return "".join(
        "[{0}]".format(part) if isinstance(part, int) else "['{0}']".format(part)
        for part in path_to_item
    )
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Load composed-schema models on first use to break circular imports."""
    from rhoas_kafka_mgmt_sdk.model.kafka_request_list_all_of import KafkaRequestListAllOf
    from rhoas_kafka_mgmt_sdk.model.list import List
    globals().update(
        KafkaRequestListAllOf=KafkaRequestListAllOf,
        List=List,
    )
class KafkaRequestList(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained attributes on this model.
    allowed_values = {
    }
    # No length/range/regex validations on this model.
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
    # This composed model may not be serialized as JSON null.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'kind': (str,), # noqa: E501
            'page': (int,), # noqa: E501
            'size': (int,), # noqa: E501
            'total': (int,), # noqa: E501
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],), # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator field: the schema has no polymorphic children.
        return None
    # Python attribute name -> JSON key in the API payload (identical here).
    attribute_map = {
        'kind': 'kind', # noqa: E501
        'page': 'page', # noqa: E501
        'size': 'size', # noqa: E501
        'total': 'total', # noqa: E501
        'items': 'items', # noqa: E501
    }
    read_only_vars = {
    }
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
        """KafkaRequestList - a model defined in OpenAPI
        Keyword Args:
            kind (str):
            page (int):
            size (int):
            total (int):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ so server data can populate read-only attributes
        # (direct construction via __init__ rejects them below).
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional args are only accepted when they are dicts of
            # keyword values; anything else is a usage error.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Validate kwargs against the allOf composition declared in
        # _composed_schemas and instantiate the component models.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs): # noqa: E501
        """KafkaRequestList - a model defined in OpenAPI
        Keyword Args:
            kind (str):
            page (int):
            size (int):
            total (int):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            # Positional args are only accepted when they are dicts of
            # keyword values; anything else is a usage error.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Validate kwargs against the allOf composition declared in
        # _composed_schemas and instantiate the component models.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Unlike _from_openapi_data, direct construction rejects
            # read-only attributes (empty for this model, so never hit).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              KafkaRequestListAllOf,
              List,
          ],
          'oneOf': [
          ],
        }
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class List(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'kind': (str,),  # noqa: E501
            'page': (int,),  # noqa: E501
            'size': (int,),  # noqa: E501
            'total': (int,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'kind': 'kind',  # noqa: E501
        'page': 'page',  # noqa: E501
        'size': 'size',  # noqa: E501
        'total': 'total',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, kind, page, size, total, *args, **kwargs):  # noqa: E501
        """List - a model defined in OpenAPI

        Args:
            kind (str):
            page (int):
            size (int):
            total (int):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        # Deserialization path: wire-format (serialized) names are expected,
        # hence _spec_property_naming defaults to True here (unlike __init__).
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.kind = kind
        self.page = page
        self.size = size
        self.total = total
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, kind, page, size, total, *args, **kwargs):  # noqa: E501
        """List - a model defined in OpenAPI

        Args:
            kind (str):
            page (int):
            size (int):
            total (int):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.kind = kind
        self.page = page
        self.size = size
        self.total = total
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # writes to read-only properties are only allowed on the
            # _from_openapi_data (deserialization) path, never here
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class Values(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'value': (float,),  # noqa: E501
            'timestamp': (int,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'value': 'value',  # noqa: E501
        'timestamp': 'timestamp',  # noqa: E501
    }

    read_only_vars = {
    }

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, value, *args, **kwargs):  # noqa: E501
        """Values - a model defined in OpenAPI

        Args:
            value (float):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            timestamp (int): [optional]  # noqa: E501
        """

        # Deserialization path: wire-format (serialized) names are expected,
        # hence _spec_property_naming defaults to True here (unlike __init__).
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.value = value
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, value, *args, **kwargs):  # noqa: E501
        """Values - a model defined in OpenAPI

        Args:
            value (float):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            timestamp (int): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.value = value
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # writes to read-only properties are only allowed on the
            # _from_openapi_data (deserialization) path, never here
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Resolve the composed-schema dependency after module load.

    The import is deferred (and the result published into this module's
    globals) so that importing this module does not trigger a circular
    import while the sibling model module is still loading.
    """
    from rhoas_kafka_mgmt_sdk.model.metrics_instant_query_list_all_of import (
        MetricsInstantQueryListAllOf,
    )
    # Make the class reachable by bare name for the cached properties below.
    module_ns = globals()
    module_ns['MetricsInstantQueryListAllOf'] = MetricsInstantQueryListAllOf
class MetricsInstantQueryList(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'kind': (str,),  # noqa: E501
            'id': (str,),  # noqa: E501
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'kind': 'kind',  # noqa: E501
        'id': 'id',  # noqa: E501
        'items': 'items',  # noqa: E501
    }

    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """MetricsInstantQueryList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            kind (str): [optional]  # noqa: E501
            id (str): [optional]  # noqa: E501
            items ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Distribute the incoming kwargs across the composed (allOf) model
        # instances; validate_get_composed_info also reports which kwargs
        # belong to no schema (discarded_args).
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """MetricsInstantQueryList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            kind (str): [optional]  # noqa: E501
            id (str): [optional]  # noqa: E501
            items ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # writes to read-only properties are only allowed on the
            # _from_openapi_data (deserialization) path, never here
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MetricsInstantQueryListAllOf,
          ],
          'oneOf': [
          ],
        }
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Resolve the composed-schema dependency after module load.

    The import is deferred (and the result published into this module's
    globals) so that importing this module does not trigger a circular
    import while the sibling model module is still loading.
    """
    from rhoas_kafka_mgmt_sdk.model.supported_kafka_instance_types_list_all_of import (
        SupportedKafkaInstanceTypesListAllOf,
    )
    # Make the class reachable by bare name for the cached properties below.
    module_ns = globals()
    module_ns['SupportedKafkaInstanceTypesListAllOf'] = SupportedKafkaInstanceTypesListAllOf
class SupportedKafkaInstanceTypesList(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'instance_types': ([bool, date, datetime, dict, float, int, list, str, none_type],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'instance_types': 'instance_types', # noqa: E501
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""SupportedKafkaInstanceTypesList - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
instance_types ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
    """SupportedKafkaInstanceTypesList - a model defined in OpenAPI

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
                            will be type checked and a TypeError will be
                            raised if the wrong type is input.
                            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
                            drill down to the model in received_data
                            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
                            are serialized names, as specified in the OpenAPI document.
                            False if the variable names in the input data
                            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
                            deserializing a file_type parameter.
                            If passed, type conversion is attempted
                            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
                            classes that we have traveled through so that
                            if we see that class again we will not use its
                            discriminator again.
                            When traveling through a discriminator, the
                            composed schema that is
                            is traveled through is added to this set.
                            For example if Animal has a discriminator
                            petType and we pass in "Dog", and the class Dog
                            allOf includes Animal, we move through Animal
                            once using the discriminator, and pick Dog.
                            Then in Dog, we will make an instance of the
                            Animal class but this time we won't travel
                            through its discriminator because we passed in
                            _visited_composed_classes = (Animal,)
        instance_types ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional] # noqa: E501
    """
    # Pop framework-control keywords first so they are not mistaken for
    # model properties in the kwargs loop below.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
    if args:
        # Positional arguments are only accepted when they are dicts of
        # properties; anything else is rejected with a typed error.
        for arg in args:
            if isinstance(arg, dict):
                kwargs.update(arg)
            else:
                raise ApiTypeError(
                    "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                        args,
                        self.__class__.__name__,
                    ),
                    path_to_item=_path_to_item,
                    valid_classes=(self.__class__,),
                )
    # Per-instance bookkeeping (names listed in required_properties).
    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so composed-schema traversal does not re-enter
    # its discriminator.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
    constant_args = {
        '_check_type': _check_type,
        '_path_to_item': _path_to_item,
        '_spec_property_naming': _spec_property_naming,
        '_configuration': _configuration,
        '_visited_composed_classes': self._visited_composed_classes,
    }
    # Resolve the allOf/anyOf/oneOf composition for this composed model;
    # validate_get_composed_info is defined in model_utils.
    composed_info = validate_get_composed_info(
        constant_args, kwargs, self)
    self._composed_instances = composed_info[0]
    self._var_name_to_model_instances = composed_info[1]
    self._additional_properties_model_instances = composed_info[2]
    discarded_args = composed_info[3]
    for var_name, var_value in kwargs.items():
        if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
        if var_name in self.read_only_vars:
            raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                 f"class with read only attributes.")
@cached_property
def _composed_schemas():
# we need this here to make our import statements work
# we must store _composed_schemas in here so the code is only run
# when we invoke this method. If we kept this at the class
# level we would get an error because the class level
# code would be run when this module is imported, and these composed
# classes don't exist yet because their module has not finished
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
SupportedKafkaInstanceTypesListAllOf,
],
'oneOf': [
],
} | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/kafka_mgmt_sdk/rhoas_kafka_mgmt_sdk/model/supported_kafka_instance_types_list.py | 0.531696 | 0.189859 | supported_kafka_instance_types_list.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class KafkaPromoteRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
# Allowed enum values, keyed by attribute-path tuple.  Empty: this model
# defines no enum-constrained properties.
allowed_values = {
}

# Validation constraints, keyed by attribute-path tuple.  Each of the
# three string properties must be non-empty when supplied.
validations = {
    ('desired_kafka_billing_model',): {
        'min_length': 1,
    },
    ('desired_marketplace',): {
        'min_length': 1,
    },
    ('desired_billing_cloud_account_id',): {
        'min_length': 1,
    },
}

@cached_property
def additional_properties_type():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded
    """
    # Any JSON-representable type is accepted for undeclared properties.
    return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501

# Whether an explicit null is a valid value for the model itself
# (consumed by the model_utils validation machinery).
_nullable = False

@cached_property
def openapi_types():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded

    Returns
        openapi_types (dict): The key is attribute name
        and the value is attribute type.
    """
    return {
        'desired_kafka_billing_model': (str,), # noqa: E501
        'desired_marketplace': (str,), # noqa: E501
        'desired_billing_cloud_account_id': (str,), # noqa: E501
    }

@cached_property
def discriminator():
    # No discriminator field: this schema does not take part in
    # polymorphic (oneOf/anyOf) dispatch.
    return None

# Python attribute name -> JSON key in the OpenAPI definition (identical
# here, since the spec already uses snake_case keys).
attribute_map = {
    'desired_kafka_billing_model': 'desired_kafka_billing_model', # noqa: E501
    'desired_marketplace': 'desired_marketplace', # noqa: E501
    'desired_billing_cloud_account_id': 'desired_billing_cloud_account_id', # noqa: E501
}

# No server-populated read-only attributes on this model.
read_only_vars = {
}

# Not a composed (allOf/anyOf/oneOf) schema.
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, desired_kafka_billing_model, *args, **kwargs): # noqa: E501
    """KafkaPromoteRequest - a model defined in OpenAPI

    Args:
        desired_kafka_billing_model (str): The desired Kafka billing model to promote the kafka instance to. Promotion is performed asynchronously. Accepted values: ['marketplace', 'standard']

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
                            will be type checked and a TypeError will be
                            raised if the wrong type is input.
                            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
                            drill down to the model in received_data
                            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
                            are serialized names, as specified in the OpenAPI document.
                            False if the variable names in the input data
                            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
                            deserializing a file_type parameter.
                            If passed, type conversion is attempted
                            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
                            classes that we have traveled through so that
                            if we see that class again we will not use its
                            discriminator again.
                            When traveling through a discriminator, the
                            composed schema that is
                            is traveled through is added to this set.
                            For example if Animal has a discriminator
                            petType and we pass in "Dog", and the class Dog
                            allOf includes Animal, we move through Animal
                            once using the discriminator, and pick Dog.
                            Then in Dog, we will make an instance of the
                            Animal class but this time we won't travel
                            through its discriminator because we passed in
                            _visited_composed_classes = (Animal,)
        desired_marketplace (str): The desired billing marketplace to promote the kafka instance to. Accepted values: ['aws', 'rhm']. Only considered when desired_kafka_billing_model is 'marketplace'. [optional] # noqa: E501
        desired_billing_cloud_account_id (str): The desired Kafka billing cloud account ID to promote the kafka instance to. Only considered when desired_kafka_billing_model is 'marketplace'. [optional] # noqa: E501
    """
    # Pop framework-control keywords so they are not mistaken for model
    # properties below.  NOTE: _spec_property_naming defaults to True here
    # (data deserialized from the server uses serialized names), unlike
    # __init__ where it defaults to False.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', True)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
    # Bypass OpenApiModel.__new__ and allocate this exact class directly.
    self = super(OpenApiModel, cls).__new__(cls)
    if args:
        # Positional arguments are only accepted when they are dicts of
        # properties; anything else is rejected with a typed error.
        for arg in args:
            if isinstance(arg, dict):
                kwargs.update(arg)
            else:
                raise ApiTypeError(
                    "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                        args,
                        self.__class__.__name__,
                    ),
                    path_to_item=_path_to_item,
                    valid_classes=(self.__class__,),
                )
    # Per-instance bookkeeping (names listed in required_properties).
    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so composed-schema traversal does not re-enter
    # its discriminator.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
    # Required property; assignment goes through the model's setattr
    # machinery, so type checking/validation applies.
    self.desired_kafka_billing_model = desired_kafka_billing_model
    for var_name, var_value in kwargs.items():
        if var_name not in self.attribute_map and \
                self._configuration is not None and \
                self._configuration.discard_unknown_keys and \
                self.additional_properties_type is None:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
    return self
# Internal bookkeeping attributes stored directly on the instance rather
# than in the _data_store (presumably consulted by OpenApiModel's
# __setattr__/__getattr__ in model_utils — confirm there).  Written as a
# set literal instead of set([...]) (flake8-comprehensions C405).
required_properties = {
    '_data_store',
    '_check_type',
    '_spec_property_naming',
    '_path_to_item',
    '_configuration',
    '_visited_composed_classes',
}
@convert_js_args_to_python_args
def __init__(self, desired_kafka_billing_model, *args, **kwargs): # noqa: E501
"""KafkaPromoteRequest - a model defined in OpenAPI
Args:
desired_kafka_billing_model (str): The desired Kafka billing model to promote the kafka instance to. Promotion is performed asynchronously. Accepted values: ['marketplace', 'standard']
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
desired_marketplace (str): The desired billing marketplace to promote the kafka instance to. Accepted values: ['aws', 'rhm']. Only considered when desired_kafka_billing_model is 'marketplace'. [optional] # noqa: E501
desired_billing_cloud_account_id (str): The desired Kafka billing cloud account ID to promote the kafka instance to. Only considered when desired_kafka_billing_model is 'marketplace'. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.desired_kafka_billing_model = desired_kafka_billing_model
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/kafka_mgmt_sdk/rhoas_kafka_mgmt_sdk/model/kafka_promote_request.py | 0.519278 | 0.191762 | kafka_promote_request.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class FleetshardParameter(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
# Allowed enum values, keyed by attribute-path tuple.  Empty: this model
# defines no enum-constrained properties.
allowed_values = {
}

# Validation constraints, keyed by attribute-path tuple.  Empty: no
# length/range/regex restrictions on this model.
validations = {
}

@cached_property
def additional_properties_type():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded
    """
    # Any JSON-representable type is accepted for undeclared properties.
    return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501

# Whether an explicit null is a valid value for the model itself
# (consumed by the model_utils validation machinery).
_nullable = False

@cached_property
def openapi_types():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded

    Returns
        openapi_types (dict): The key is attribute name
        and the value is attribute type.
    """
    return {
        'id': (str,), # noqa: E501
        'value': (str,), # noqa: E501
    }

@cached_property
def discriminator():
    # No discriminator field: this schema does not take part in
    # polymorphic (oneOf/anyOf) dispatch.
    return None

# Python attribute name -> JSON key in the OpenAPI definition (identical
# here, since the spec already uses snake_case keys).
attribute_map = {
    'id': 'id', # noqa: E501
    'value': 'value', # noqa: E501
}

# No server-populated read-only attributes on this model.
read_only_vars = {
}

# Not a composed (allOf/anyOf/oneOf) schema.
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
    """FleetshardParameter - a model defined in OpenAPI

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
                            will be type checked and a TypeError will be
                            raised if the wrong type is input.
                            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
                            drill down to the model in received_data
                            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
                            are serialized names, as specified in the OpenAPI document.
                            False if the variable names in the input data
                            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
                            deserializing a file_type parameter.
                            If passed, type conversion is attempted
                            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
                            classes that we have traveled through so that
                            if we see that class again we will not use its
                            discriminator again.
                            When traveling through a discriminator, the
                            composed schema that is
                            is traveled through is added to this set.
                            For example if Animal has a discriminator
                            petType and we pass in "Dog", and the class Dog
                            allOf includes Animal, we move through Animal
                            once using the discriminator, and pick Dog.
                            Then in Dog, we will make an instance of the
                            Animal class but this time we won't travel
                            through its discriminator because we passed in
                            _visited_composed_classes = (Animal,)
        id (str): [optional] # noqa: E501
        value (str): [optional] # noqa: E501
    """
    # Pop framework-control keywords so they are not mistaken for model
    # properties below.  NOTE: _spec_property_naming defaults to True here
    # (data deserialized from the server uses serialized names), unlike
    # __init__ where it defaults to False.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', True)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
    # Bypass OpenApiModel.__new__ and allocate this exact class directly.
    self = super(OpenApiModel, cls).__new__(cls)
    if args:
        # Positional arguments are only accepted when they are dicts of
        # properties; anything else is rejected with a typed error.
        for arg in args:
            if isinstance(arg, dict):
                kwargs.update(arg)
            else:
                raise ApiTypeError(
                    "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                        args,
                        self.__class__.__name__,
                    ),
                    path_to_item=_path_to_item,
                    valid_classes=(self.__class__,),
                )
    # Per-instance bookkeeping (names listed in required_properties).
    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so composed-schema traversal does not re-enter
    # its discriminator.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
    for var_name, var_value in kwargs.items():
        if var_name not in self.attribute_map and \
                self._configuration is not None and \
                self._configuration.discard_unknown_keys and \
                self.additional_properties_type is None:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
    return self
# Internal bookkeeping attributes stored directly on the instance rather
# than in the _data_store (presumably consulted by OpenApiModel's
# __setattr__/__getattr__ in model_utils — confirm there).  Written as a
# set literal instead of set([...]) (flake8-comprehensions C405).
required_properties = {
    '_data_store',
    '_check_type',
    '_spec_property_naming',
    '_path_to_item',
    '_configuration',
    '_visited_composed_classes',
}
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""FleetshardParameter - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
value (str): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/kafka_mgmt_sdk/rhoas_kafka_mgmt_sdk/model/fleetshard_parameter.py | 0.506591 | 0.215433 | fleetshard_parameter.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class KafkaRequestPayload(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
# Allowed enum values, keyed by attribute-path tuple.  Empty: this model
# defines no enum-constrained properties.
allowed_values = {
}

# Validation constraints, keyed by attribute-path tuple.  Empty: no
# length/range/regex restrictions on this model.
validations = {
}

@cached_property
def additional_properties_type():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded
    """
    # Any JSON-representable type is accepted for undeclared properties.
    return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501

# Whether an explicit null is a valid value for the model itself
# (consumed by the model_utils validation machinery).
_nullable = False

@cached_property
def openapi_types():
    """
    This must be a method because a model may have properties that are
    of type self, this must run after the class is loaded

    Returns
        openapi_types (dict): The key is attribute name
        and the value is attribute type.
    """
    return {
        'name': (str,), # noqa: E501
        'cloud_provider': (str,), # noqa: E501
        'region': (str,), # noqa: E501
        'reauthentication_enabled': (bool, none_type,), # noqa: E501
        'plan': (str,), # noqa: E501
        'billing_cloud_account_id': (str, none_type,), # noqa: E501
        'marketplace': (str, none_type,), # noqa: E501
        'billing_model': (str, none_type,), # noqa: E501
        'cluster_id': (str, none_type,), # noqa: E501
    }

@cached_property
def discriminator():
    # No discriminator field: this schema does not take part in
    # polymorphic (oneOf/anyOf) dispatch.
    return None

# Python attribute name -> JSON key in the OpenAPI definition (identical
# here, since the spec already uses snake_case keys).
attribute_map = {
    'name': 'name', # noqa: E501
    'cloud_provider': 'cloud_provider', # noqa: E501
    'region': 'region', # noqa: E501
    'reauthentication_enabled': 'reauthentication_enabled', # noqa: E501
    'plan': 'plan', # noqa: E501
    'billing_cloud_account_id': 'billing_cloud_account_id', # noqa: E501
    'marketplace': 'marketplace', # noqa: E501
    'billing_model': 'billing_model', # noqa: E501
    'cluster_id': 'cluster_id', # noqa: E501
}

# No server-populated read-only attributes on this model.
read_only_vars = {
}

# Not a composed (allOf/anyOf/oneOf) schema.
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, name, *args, **kwargs):  # noqa: E501
    """KafkaRequestPayload - a model defined in OpenAPI

    Args:
        name (str): The name of the Kafka cluster. It must consist of lower-case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character, and can not be longer than 32 characters.

    Keyword Args:
        _check_type (bool): if True, values for parameters in openapi_types
                            will be type checked and a TypeError will be
                            raised if the wrong type is input.
                            Defaults to True
        _path_to_item (tuple/list): This is a list of keys or values to
                            drill down to the model in received_data
                            when deserializing a response
        _spec_property_naming (bool): True if the variable names in the input data
                            are serialized names, as specified in the OpenAPI document.
                            False if the variable names in the input data
                            are pythonic names, e.g. snake case (default)
        _configuration (Configuration): the instance to use when
                            deserializing a file_type parameter.
                            If passed, type conversion is attempted
                            If omitted no type conversion is done.
        _visited_composed_classes (tuple): This stores a tuple of
                            classes that we have traveled through so that
                            if we see that class again we will not use its
                            discriminator again.
                            When traveling through a discriminator, the
                            composed schema that is
                            is traveled through is added to this set.
                            For example if Animal has a discriminator
                            petType and we pass in "Dog", and the class Dog
                            allOf includes Animal, we move through Animal
                            once using the discriminator, and pick Dog.
                            Then in Dog, we will make an instance of the
                            Animal class but this time we won't travel
                            through its discriminator because we passed in
                            _visited_composed_classes = (Animal,)
        cloud_provider (str): The cloud provider where the Kafka cluster will be created in. [optional] # noqa: E501
        region (str): The region where the Kafka cluster will be created in. [optional] # noqa: E501
        reauthentication_enabled (bool, none_type): Whether connection reauthentication is enabled or not. If set to true, connection reauthentication on the Kafka instance will be required every 5 minutes. The default value is true. [optional] # noqa: E501
        plan (str): kafka plan in a format of <instance_type>.<size_id>. [optional] # noqa: E501
        billing_cloud_account_id (str, none_type): cloud account id used to purchase the instance. [optional] # noqa: E501
        marketplace (str, none_type): marketplace where the instance is purchased on. [optional] # noqa: E501
        billing_model (str, none_type): billing model to use. [optional] # noqa: E501
        cluster_id (str, none_type): enterprise OSD cluster ID to be used for kafka creation. [optional] # noqa: E501
    """
    # Pop framework-control keywords so they are not mistaken for model
    # properties below.  NOTE: _spec_property_naming defaults to True here
    # (data deserialized from the server uses serialized names), unlike
    # __init__ where it defaults to False.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', True)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
    # Bypass OpenApiModel.__new__ and allocate this exact class directly.
    self = super(OpenApiModel, cls).__new__(cls)
    if args:
        # Positional arguments are only accepted when they are dicts of
        # properties; anything else is rejected with a typed error.
        for arg in args:
            if isinstance(arg, dict):
                kwargs.update(arg)
            else:
                raise ApiTypeError(
                    "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                        args,
                        self.__class__.__name__,
                    ),
                    path_to_item=_path_to_item,
                    valid_classes=(self.__class__,),
                )
    # Per-instance bookkeeping (names listed in required_properties).
    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so composed-schema traversal does not re-enter
    # its discriminator.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
    # Required property; assignment goes through the model's setattr
    # machinery, so type checking/validation applies.
    self.name = name
    for var_name, var_value in kwargs.items():
        if var_name not in self.attribute_map and \
                self._configuration is not None and \
                self._configuration.discard_unknown_keys and \
                self.additional_properties_type is None:
            # discard variable.
            continue
        setattr(self, var_name, var_value)
    return self
# Internal bookkeeping attributes stored directly on the instance rather
# than in the _data_store (presumably consulted by OpenApiModel's
# __setattr__/__getattr__ in model_utils — confirm there).  Written as a
# set literal instead of set([...]) (flake8-comprehensions C405).
required_properties = {
    '_data_store',
    '_check_type',
    '_spec_property_naming',
    '_path_to_item',
    '_configuration',
    '_visited_composed_classes',
}
@convert_js_args_to_python_args
def __init__(self, name, *args, **kwargs): # noqa: E501
"""KafkaRequestPayload - a model defined in OpenAPI
Args:
name (str): The name of the Kafka cluster. It must consist of lower-case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character, and can not be longer than 32 characters.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
cloud_provider (str): The cloud provider where the Kafka cluster will be created in. [optional] # noqa: E501
region (str): The region where the Kafka cluster will be created in. [optional] # noqa: E501
reauthentication_enabled (bool, none_type): Whether connection reauthentication is enabled or not. If set to true, connection reauthentication on the Kafka instance will be required every 5 minutes. The default value is true. [optional] # noqa: E501
plan (str): kafka plan in a format of <instance_type>.<size_id>. [optional] # noqa: E501
billing_cloud_account_id (str, none_type): cloud account id used to purchase the instance. [optional] # noqa: E501
marketplace (str, none_type): marketplace where the instance is purchased on. [optional] # noqa: E501
billing_model (str, none_type): billing model to use. [optional] # noqa: E501
cluster_id (str, none_type): enterprise OSD cluster ID to be used for kafka creation. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.name = name
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.") | /rhoas_sdks-1.0.5.tar.gz/rhoas_sdks-1.0.5/sdks/kafka_mgmt_sdk/rhoas_kafka_mgmt_sdk/model/kafka_request_payload.py | 0.551332 | 0.161519 | kafka_request_payload.py | pypi |
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Defer importing the composed-schema class until first use.

    Importing at module load time would create a circular import between
    this model and its `allOf` component, so the dependency is injected
    into this module's globals on demand instead.
    """
    import rhoas_kafka_mgmt_sdk.model.metrics_range_query_list_all_of as _all_of_mod
    globals()['MetricsRangeQueryListAllOf'] = _all_of_mod.MetricsRangeQueryListAllOf
class MetricsRangeQueryList(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This schema declares no enum values and no per-property validations.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # lazy_import() resolves the composed-schema class on first access,
        # avoiding a circular import at module load time.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'kind': (str,),  # noqa: E501
            'id': (str,),  # noqa: E501
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator is defined for this schema.
        return None

    attribute_map = {
        'kind': 'kind',  # noqa: E501
        'id': 'id',  # noqa: E501
        'items': 'items',  # noqa: E501
    }

    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """MetricsRangeQueryList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            kind (str): [optional]  # noqa: E501
            id (str): [optional]  # noqa: E501
            items ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so server-provided (possibly read-only) data can be set.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Validate kwargs against the anyOf/allOf/oneOf component schemas and
        # build one model instance per matching component.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always set directly on the object; setattr
    # treats these as plain attributes rather than schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """MetricsRangeQueryList - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            kind (str): [optional]  # noqa: E501
            id (str): [optional]  # noqa: E501
            items ([bool, date, datetime, dict, float, int, list, str, none_type]): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]

        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              MetricsRangeQueryListAllOf,
          ],
          'oneOf': [
          ],
        }
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class CloudRegionListAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This schema declares no enum values and no per-property validations.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator is defined for this schema.
        return None

    attribute_map = {
        'items': 'items',  # noqa: E501
    }

    read_only_vars = {
    }

    # Plain (non-composed) model: no anyOf/allOf/oneOf components.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, items, *args, **kwargs):  # noqa: E501
        """CloudRegionListAllOf - a model defined in OpenAPI

        Args:
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so server-provided (possibly read-only) data can be set.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.items = items
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always set directly on the object; setattr
    # treats these as plain attributes rather than schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, items, *args, **kwargs):  # noqa: E501
        """CloudRegionListAllOf - a model defined in OpenAPI

        Args:
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.items = items
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Defer importing FleetshardParametersArray until first use.

    Importing at module load time would create a circular import between
    this model and the referenced schema, so the dependency is injected
    into this module's globals on demand instead.
    """
    import rhoas_kafka_mgmt_sdk.model.fleetshard_parameters_array as _dep_mod
    globals()['FleetshardParametersArray'] = _dep_mod.FleetshardParametersArray
class EnterpriseClusterFleetshardParameters(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # This schema declares no enum values and no per-property validations.
    allowed_values = {
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # lazy_import() resolves the referenced schema class on first access,
        # avoiding a circular import at module load time.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'fleetshard_parameters': (FleetshardParametersArray,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator is defined for this schema.
        return None

    attribute_map = {
        'fleetshard_parameters': 'fleetshard_parameters',  # noqa: E501
    }

    read_only_vars = {
    }

    # Plain (non-composed) model: no anyOf/allOf/oneOf components.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """EnterpriseClusterFleetshardParameters - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            fleetshard_parameters (FleetshardParametersArray): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so server-provided (possibly read-only) data can be set.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always set directly on the object; setattr
    # treats these as plain attributes rather than schema properties.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """EnterpriseClusterFleetshardParameters - a model defined in OpenAPI

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            fleetshard_parameters (FleetshardParametersArray): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class SupportedKafkaBillingModel(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('id',): {
'min_length': 1,
},
('ams_resource',): {
'min_length': 1,
},
('ams_product',): {
'min_length': 1,
},
('ams_billing_models',): {
'min_items': 1,
},
}
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-representable value is accepted as an additional property.
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # Schema is not nullable: None is rejected for the model value itself.
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'id': (str,),  # noqa: E501
            'ams_resource': (str,),  # noqa: E501
            'ams_product': (str,),  # noqa: E501
            'ams_billing_models': ([str],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator is defined for this schema.
        return None

    # Attribute name -> JSON key; identical here since the spec uses snake_case.
    attribute_map = {
        'id': 'id',  # noqa: E501
        'ams_resource': 'ams_resource',  # noqa: E501
        'ams_product': 'ams_product',  # noqa: E501
        'ams_billing_models': 'ams_billing_models',  # noqa: E501
    }

    read_only_vars = {
    }

    # Plain (non-composed) model: no anyOf/allOf/oneOf components.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, id, ams_resource, ams_product, ams_billing_models, *args, **kwargs):  # noqa: E501
        """SupportedKafkaBillingModel - a model defined in OpenAPI

        Args:
            id (str): Identifier for the Kafka billing model
            ams_resource (str): AMS resource to be used. Accepted values: ['rhosak']
            ams_product (str): AMS product to be used. Accepted values: ['RHOSAK', 'RHOSAKTrial', 'RHOSAKEval', 'RHOSAKCC']
            ams_billing_models ([str]): List of AMS available billing models: Accepted values: ['marketplace', 'marketplace-rhm', 'marketplace-aws']

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so server-provided (possibly read-only) data can be set.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.id = id
        self.ams_resource = ams_resource
        self.ams_product = ams_product
        self.ams_billing_models = ams_billing_models
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, id, ams_resource, ams_product, ams_billing_models, *args, **kwargs):  # noqa: E501
    """SupportedKafkaBillingModel - a model defined in OpenAPI

    Args:
        id (str): Identifier for the Kafka billing model
        ams_resource (str): AMS resource to be used. Accepted values: ['rhosak']
        ams_product (str): AMS product to be used. Accepted values: ['RHOSAK', 'RHOSAKTrial', 'RHOSAKEval', 'RHOSAKCC']
        ams_billing_models ([str]): List of AMS available billing models: Accepted values: ['marketplace', 'marketplace-rhm', 'marketplace-aws']

    Keyword Args:
        _check_type (bool): when True (the default), values are checked
            against openapi_types and a TypeError is raised on mismatch.
        _path_to_item (tuple/list): keys/indices locating this model inside
            the payload being deserialized.
        _spec_property_naming (bool): True if incoming variable names use the
            serialized (spec) names; False (default) if they are pythonic.
        _configuration (Configuration): configuration instance used when
            deserializing; when provided, type conversion is attempted.
        _visited_composed_classes (tuple): composed-schema classes already
            traversed, so a class's discriminator is not followed twice.
    """
    # Pull the framework-internal switches out of kwargs before the
    # remainder is treated as model attributes.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

    # The only positional arguments accepted are dicts of attribute values,
    # which are folded into kwargs; anything else is rejected.
    for positional in args:
        if not isinstance(positional, dict):
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        kwargs.update(positional)

    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    # Record this class so a composed schema does not revisit its
    # discriminator on the way back down.
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

    # Required properties are assigned directly.
    self.id = id
    self.ams_resource = ams_resource
    self.ams_product = ams_product
    self.ams_billing_models = ams_billing_models

    # Remaining kwargs are optional / additional attributes.
    for attr_name, attr_value in kwargs.items():
        discard = (
            attr_name not in self.attribute_map
            and self._configuration is not None
            and self._configuration.discard_unknown_keys
            and self.additional_properties_type is None
        )
        if discard:
            # Unknown key and the configuration says to drop it.
            continue
        setattr(self, attr_name, attr_value)
        if attr_name in self.read_only_vars:
            raise ApiAttributeError(f"`{attr_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                    f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class SupportedKafkaSizeBytesValueItem(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        # NOTE: generated cached_property methods deliberately take no
        # `self`; the custom descriptor in model_utils handles binding.
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-representable value is accepted as an additional property.
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The schema itself is not nullable (JSON null is not a valid value).
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'bytes': (int,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This schema declares no discriminator.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'bytes': 'bytes',  # noqa: E501
    }

    # Attributes that may only be populated from server responses.
    read_only_vars = {
    }

    # Plain schema: no allOf/anyOf/oneOf composition.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """SupportedKafkaSizeBytesValueItem - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            bytes (int): [optional] # noqa: E501
        """
        # Deserialization path: spec (serialized) names are the default here,
        # unlike __init__ which defaults to pythonic names.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass OpenApiModel.__new__ so construction from server data does
        # not re-enter the discriminator machinery.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always present and bypass the OpenAPI
    # attribute machinery in __setattr__/__getattr__.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """SupportedKafkaSizeBytesValueItem - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            bytes (int): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import the composed-schema models on first use.

    Deferred to call time (rather than module import time) so that the
    circular dependency between this module and the composed models does
    not break importing; the imported classes are published into this
    module's globals for later lookup by name.
    """
    from rhoas_kafka_mgmt_sdk.model.cloud_provider_list_all_of import CloudProviderListAllOf
    from rhoas_kafka_mgmt_sdk.model.list import List
    globals().update(
        CloudProviderListAllOf=CloudProviderListAllOf,
        List=List,
    )
class CloudProviderList(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        # NOTE: generated cached_property methods deliberately take no
        # `self`; the custom descriptor in model_utils handles binding.
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Ensure the composed-schema classes are importable before use.
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The schema itself is not nullable (JSON null is not a valid value).
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'kind': (str,),  # noqa: E501
            'page': (int,),  # noqa: E501
            'size': (int,),  # noqa: E501
            'total': (int,),  # noqa: E501
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This schema declares no discriminator.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'kind': 'kind',  # noqa: E501
        'page': 'page',  # noqa: E501
        'size': 'size',  # noqa: E501
        'total': 'total',  # noqa: E501
        'items': 'items',  # noqa: E501
    }

    # Attributes that may only be populated from server responses.
    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """CloudProviderList - a model defined in OpenAPI
        Keyword Args:
            kind (str):
            page (int):
            size (int):
            total (int):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass OpenApiModel.__new__ so construction from server data does
        # not re-enter the discriminator machinery.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Validate the allOf composition and distribute kwargs across the
        # component model instances.
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always present and bypass the OpenAPI
    # attribute machinery in __setattr__/__getattr__ (includes the three
    # composed-schema bookkeeping attributes).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """CloudProviderList - a model defined in OpenAPI
        Keyword Args:
            kind (str):
            page (int):
            size (int):
            total (int):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Validate the allOf composition and distribute kwargs across the
        # component model instances.
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                CloudProviderListAllOf,
                List,
            ],
            'oneOf': [
            ],
        }
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class ServiceAccountAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum-constrained attributes on this model.
    allowed_values = {
    }

    # No length/range/regex validations on this model.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        # NOTE: generated cached_property methods deliberately take no
        # `self`; the custom descriptor in model_utils handles binding.
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        # Any JSON-representable value is accepted as an additional property.
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The schema itself is not nullable (JSON null is not a valid value).
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'id': (str,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
            'client_id': (str,),  # noqa: E501
            'client_secret': (str,),  # noqa: E501
            'owner': (str,),  # noqa: E501
            'created_by': (str,),  # noqa: E501
            'created_at': (datetime,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # This schema declares no discriminator.
        return None

    # Maps pythonic attribute names to the JSON keys used on the wire
    # (identical here, but required by the model machinery).
    attribute_map = {
        'id': 'id',  # noqa: E501
        'name': 'name',  # noqa: E501
        'description': 'description',  # noqa: E501
        'client_id': 'client_id',  # noqa: E501
        'client_secret': 'client_secret',  # noqa: E501
        'owner': 'owner',  # noqa: E501
        'created_by': 'created_by',  # noqa: E501
        'created_at': 'created_at',  # noqa: E501
    }

    # Attributes that may only be populated from server responses.
    read_only_vars = {
    }

    # Plain schema: no allOf/anyOf/oneOf composition (this class is itself
    # an allOf component of ServiceAccount).
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """ServiceAccountAllOf - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            id (str): server generated unique id of the service account. [optional] # noqa: E501
            name (str): . [optional] # noqa: E501
            description (str): . [optional] # noqa: E501
            client_id (str): [optional] # noqa: E501
            client_secret (str): [optional] # noqa: E501
            owner (str): [optional] # noqa: E501
            created_by (str): [optional] # noqa: E501
            created_at (datetime): [optional] # noqa: E501
        """
        # Deserialization path: spec (serialized) names are the default here,
        # unlike __init__ which defaults to pythonic names.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass OpenApiModel.__new__ so construction from server data does
        # not re-enter the discriminator machinery.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that are always present and bypass the OpenAPI
    # attribute machinery in __setattr__/__getattr__.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ServiceAccountAllOf - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            id (str): server generated unique id of the service account. [optional] # noqa: E501
            name (str): . [optional] # noqa: E501
            description (str): . [optional] # noqa: E501
            client_id (str): [optional] # noqa: E501
            client_secret (str): [optional] # noqa: E501
            owner (str): [optional] # noqa: E501
            created_by (str): [optional] # noqa: E501
            created_at (datetime): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is merged into the keyword arguments.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class ServiceAccountListItemAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Holds the service-account-specific fields contributed by an ``allOf``
    schema composition to a service-account list item. All properties are
    optional strings except ``created_at`` (datetime).

    Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
            mapping a capitalized description to an allowed enum value.
            Empty here: this model has no enum-constrained properties.
        attribute_map (dict): The key is the attribute name and the value
            is the json key in the definition.
        discriminator_value_class_map (dict): A dict to go from the
            discriminator variable value to the discriminator class name.
        validations (dict): Per-attribute validation rules (max_length,
            min_length, max_items, min_items, exclusive/inclusive bounds,
            regex). Empty here: this model declares no validations.
        additional_properties_type (tuple): A tuple of classes accepted
            as additional properties values.
    """

    # No enum-constrained properties in this schema.
    allowed_values = {
    }

    # No length/range/regex validations declared in this schema.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """Return the tuple of types accepted for undeclared properties.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The model itself may not be serialized as JSON ``null``.
    _nullable = False

    @cached_property
    def openapi_types():
        """Return the map of attribute name -> tuple of accepted types.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.

        Returns:
            openapi_types (dict): The key is the attribute name and the
                value is the attribute type.
        """
        return {
            'id': (str,),  # noqa: E501
            'client_id': (str,),  # noqa: E501
            'name': (str,),  # noqa: E501
            'owner': (str,),  # noqa: E501
            'created_by': (str,),  # noqa: E501
            'created_at': (datetime,),  # noqa: E501
            'description': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None

    # Pythonic attribute name -> serialized JSON key (identical here).
    attribute_map = {
        'id': 'id',  # noqa: E501
        'client_id': 'client_id',  # noqa: E501
        'name': 'name',  # noqa: E501
        'owner': 'owner',  # noqa: E501
        'created_by': 'created_by',  # noqa: E501
        'created_at': 'created_at',  # noqa: E501
        'description': 'description',  # noqa: E501
    }

    # No properties are marked read-only in the schema, so __init__ never
    # hits its read-only rejection branch for this model.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) model at the Python level.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """ServiceAccountListItemAllOf - a model defined in OpenAPI

        Deserialization constructor. Unlike ``__init__`` it defaults
        ``_spec_property_naming`` to True (input keys are the serialized
        JSON names) and may assign read-only attributes, since it bypasses
        ``__init__`` via ``OpenApiModel.__new__``.

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. Defaults to True here.
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
            id (str): server generated unique id of the service account. [optional]  # noqa: E501
            client_id (str): client id of the service account. [optional]  # noqa: E501
            name (str): name of the service account. [optional]  # noqa: E501
            owner (str): owner of the service account. [optional]  # noqa: E501
            created_by (str): service account created by the user. [optional]  # noqa: E501
            created_at (datetime): service account creation timestamp. [optional]  # noqa: E501
            description (str): description of the service account. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so read-only attributes could be assigned.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Record this class as visited so its discriminator (if any) is not
        # traversed again downstream.
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            # NOTE(review): setattr is presumably intercepted by the
            # ModelNormal base to type-check and store into _data_store --
            # confirm against model_utils.
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that every instance carries.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ServiceAccountListItemAllOf - a model defined in OpenAPI

        User-facing constructor. Rejects read-only attributes (use
        ``_from_openapi_data`` for those) and defaults
        ``_spec_property_naming`` to False (pythonic attribute names).

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. False if they are pythonic names,
                e.g. snake case (default).
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
            id (str): server generated unique id of the service account. [optional]  # noqa: E501
            client_id (str): client id of the service account. [optional]  # noqa: E501
            name (str): name of the service account. [optional]  # noqa: E501
            owner (str): owner of the service account. [optional]  # noqa: E501
            created_by (str): service account created by the user. [optional]  # noqa: E501
            created_at (datetime): service account creation timestamp. [optional]  # noqa: E501
            description (str): description of the service account. [optional]  # noqa: E501
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                # NOTE(review): the attribute was already assigned by the
                # setattr above before this raise -- upstream generator
                # template behavior. Unreachable here since read_only_vars
                # is empty for this model.
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import the Error model lazily and publish it into module globals.

    Deferring the import until first use (from additional_properties_type /
    openapi_types) avoids circular imports between generated model modules.
    """
    from rhoas_kafka_mgmt_sdk.model.error import Error
    globals()['Error'] = Error
class ErrorListAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Holds the ``items`` list contributed by an ``allOf`` composition to an
    error-list response. ``items`` is required and contains Error models.

    Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
            mapping a capitalized description to an allowed enum value.
            Empty here: this model has no enum-constrained properties.
        attribute_map (dict): The key is the attribute name and the value
            is the json key in the definition.
        discriminator_value_class_map (dict): A dict to go from the
            discriminator variable value to the discriminator class name.
        validations (dict): Per-attribute validation rules (max_length,
            min_length, max_items, min_items, exclusive/inclusive bounds,
            regex). Empty here: this model declares no validations.
        additional_properties_type (tuple): A tuple of classes accepted
            as additional properties values.
    """

    # No enum-constrained properties in this schema.
    allowed_values = {
    }

    # No length/range/regex validations declared in this schema.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """Return the tuple of types accepted for undeclared properties.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The model itself may not be serialized as JSON ``null``.
    _nullable = False

    @cached_property
    def openapi_types():
        """Return the map of attribute name -> tuple of accepted types.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.

        Returns:
            openapi_types (dict): The key is the attribute name and the
                value is the attribute type.
        """
        # Resolve the Error model lazily to avoid import cycles.
        lazy_import()
        return {
            'items': ([Error],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None

    # Pythonic attribute name -> serialized JSON key (identical here).
    attribute_map = {
        'items': 'items',  # noqa: E501
    }

    # No properties are marked read-only in the schema.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) model at the Python level.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, items, *args, **kwargs):  # noqa: E501
        """ErrorListAllOf - a model defined in OpenAPI

        Deserialization constructor. Unlike ``__init__`` it defaults
        ``_spec_property_naming`` to True (input keys are the serialized
        JSON names) and may assign read-only attributes, since it bypasses
        ``__init__`` via ``OpenApiModel.__new__``.

        Args:
            items ([Error]): the list of Error models (required).

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. Defaults to True here.
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so read-only attributes could be assigned.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required property; assignment goes through the model's setattr
        # machinery for type checking.
        self.items = items
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that every instance carries.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, items, *args, **kwargs):  # noqa: E501
        """ErrorListAllOf - a model defined in OpenAPI

        User-facing constructor. Rejects read-only attributes (use
        ``_from_openapi_data`` for those) and defaults
        ``_spec_property_naming`` to False (pythonic attribute names).

        Args:
            items ([Error]): the list of Error models (required).

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. False if they are pythonic names,
                e.g. snake case (default).
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required property; assignment goes through the model's setattr
        # machinery for type checking.
        self.items = items
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                # NOTE(review): the attribute was already assigned by the
                # setattr above before this raise -- upstream generator
                # template behavior. Unreachable here since read_only_vars
                # is empty for this model.
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class EnterpriseClusterListAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Holds the ``items`` list contributed by an ``allOf`` composition to an
    enterprise-cluster list response. ``items`` is required; its elements
    are free-form (any JSON value) per the schema.

    Attributes:
        allowed_values (dict): The key is the tuple path to the attribute
            and for var_name this is (var_name,). The value is a dict
            mapping a capitalized description to an allowed enum value.
            Empty here: this model has no enum-constrained properties.
        attribute_map (dict): The key is the attribute name and the value
            is the json key in the definition.
        discriminator_value_class_map (dict): A dict to go from the
            discriminator variable value to the discriminator class name.
        validations (dict): Per-attribute validation rules (max_length,
            min_length, max_items, min_items, exclusive/inclusive bounds,
            regex). Empty here: this model declares no validations.
        additional_properties_type (tuple): A tuple of classes accepted
            as additional properties values.
    """

    # No enum-constrained properties in this schema.
    allowed_values = {
    }

    # No length/range/regex validations declared in this schema.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """Return the tuple of types accepted for undeclared properties.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # The model itself may not be serialized as JSON ``null``.
    _nullable = False

    @cached_property
    def openapi_types():
        """Return the map of attribute name -> tuple of accepted types.

        This must be a method because a model may have properties that are
        of type self; this must run after the class is loaded.

        Returns:
            openapi_types (dict): The key is the attribute name and the
                value is the attribute type.
        """
        return {
            # Free-form list elements: any JSON-representable value.
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator: this schema is not polymorphic.
        return None

    # Pythonic attribute name -> serialized JSON key (identical here).
    attribute_map = {
        'items': 'items',  # noqa: E501
    }

    # No properties are marked read-only in the schema.
    read_only_vars = {
    }

    # Not a composed (allOf/oneOf/anyOf) model at the Python level.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, items, *args, **kwargs):  # noqa: E501
        """EnterpriseClusterListAllOf - a model defined in OpenAPI

        Deserialization constructor. Unlike ``__init__`` it defaults
        ``_spec_property_naming`` to True (input keys are the serialized
        JSON names) and may assign read-only attributes, since it bypasses
        ``__init__`` via ``OpenApiModel.__new__``.

        Args:
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
                the list of cluster entries (required; free-form elements).

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. Defaults to True here.
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        # Bypass __init__ so read-only attributes could be assigned.
        self = super(OpenApiModel, cls).__new__(cls)

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required property; assignment goes through the model's setattr
        # machinery for type checking.
        self.items = items
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes that every instance carries.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, items, *args, **kwargs):  # noqa: E501
        """EnterpriseClusterListAllOf - a model defined in OpenAPI

        User-facing constructor. Rejects read-only attributes (use
        ``_from_openapi_data`` for those) and defaults
        ``_spec_property_naming`` to False (pythonic attribute names).

        Args:
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
                the list of cluster entries (required; free-form elements).

        Keyword Args:
            _check_type (bool): if True, values for parameters in
                openapi_types will be type checked and a TypeError raised
                on a wrong input type. Defaults to True.
            _path_to_item (tuple/list): keys/indexes locating this model
                inside the received data; used for error reporting.
            _spec_property_naming (bool): True if the variable names in the
                input data are serialized names, as specified in the
                OpenAPI document. False if they are pythonic names,
                e.g. snake case (default).
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter. If passed, type
                conversion is attempted; if omitted no conversion is done.
            _visited_composed_classes (tuple): composed classes already
                traversed, so a discriminator is not followed twice when a
                class is revisited during allOf/oneOf traversal.
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            for arg in args:
                if isinstance(arg, dict):
                    # A positional dict is folded into the keyword data.
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        # Required property; assignment goes through the model's setattr
        # machinery for type checking.
        self.items = items
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration asks for it and
            # the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                # NOTE(review): the attribute was already assigned by the
                # setattr above before this raise -- upstream generator
                # template behavior. Unreachable here since read_only_vars
                # is empty for this model.
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import dependent models lazily and publish them into module globals.

    Deferring the imports until first use (from additional_properties_type /
    openapi_types) avoids circular imports between generated model modules.
    """
    from rhoas_kafka_mgmt_sdk.model.supported_kafka_billing_model import SupportedKafkaBillingModel
    from rhoas_kafka_mgmt_sdk.model.supported_kafka_instance_type_sizes_inner import SupportedKafkaInstanceTypeSizesInner
    globals()['SupportedKafkaBillingModel'] = SupportedKafkaBillingModel
    globals()['SupportedKafkaInstanceTypeSizesInner'] = SupportedKafkaInstanceTypeSizesInner
class SupportedKafkaInstanceType(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
('supported_billing_models',): {
'min_items': 1,
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'supported_billing_models': ([SupportedKafkaBillingModel],), # noqa: E501
'sizes': ([SupportedKafkaInstanceTypeSizesInner],), # noqa: E501
'id': (str,), # noqa: E501
'display_name': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'supported_billing_models': 'supported_billing_models', # noqa: E501
'sizes': 'sizes', # noqa: E501
'id': 'id', # noqa: E501
'display_name': 'display_name', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, supported_billing_models, sizes, *args, **kwargs): # noqa: E501
"""SupportedKafkaInstanceType - a model defined in OpenAPI
Args:
supported_billing_models ([SupportedKafkaBillingModel]): A list of available kafka billing models for the instance type. Each kafka billing model item has a unique 'id'
sizes ([SupportedKafkaInstanceTypeSizesInner]): A list of Kafka instance sizes available for this instance type
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): Unique identifier of the Kafka instance type.. [optional] # noqa: E501
display_name (str): Human readable name of the supported Kafka instance type. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.supported_billing_models = supported_billing_models
self.sizes = sizes
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
    @convert_js_args_to_python_args
    def __init__(self, supported_billing_models, sizes, *args, **kwargs):  # noqa: E501
        """SupportedKafkaInstanceType - a model defined in OpenAPI
        Args:
            supported_billing_models ([SupportedKafkaBillingModel]): A list of available kafka billing models for the instance type. Each kafka billing model item has a unique 'id'
            sizes ([SupportedKafkaInstanceTypeSizesInner]): A list of Kafka instance sizes available for this instance type
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            id (str): Unique identifier of the Kafka instance type.. [optional] # noqa: E501
            display_name (str): Human readable name of the supported Kafka instance type. [optional] # noqa: E501
        """
        # Pop the framework-control kwargs first so only model properties are
        # left in `kwargs` for the setattr loop below.
        # `_spec_property_naming` defaults to False here (pythonic input names),
        # unlike `_from_openapi_data`, which deserializes wire-format names.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            # A positional dict is merged into kwargs (JS-interop convenience);
            # any other positional argument is rejected outright.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: these bookkeeping attributes (see `required_properties`) must be
        # assigned before any model property, because the model's custom
        # __setattr__ consults them when routing properties into _data_store.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Required properties of this model.
        self.supported_billing_models = supported_billing_models
        self.sizes = sizes
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only properties may only be populated via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class SsoProviderAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-constrained and no validated properties.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'name': (str,),  # noqa: E501
            'base_url': (str,),  # noqa: E501
            'token_url': (str,),  # noqa: E501
            'jwks': (str,),  # noqa: E501
            'valid_issuer': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is never polymorphically dispatched.
        return None
    # Pythonic attribute name -> JSON key (identical here, but the map is
    # still required by the (de)serialization machinery in model_utils).
    attribute_map = {
        'name': 'name',  # noqa: E501
        'base_url': 'base_url',  # noqa: E501
        'token_url': 'token_url',  # noqa: E501
        'jwks': 'jwks',  # noqa: E501
        'valid_issuer': 'valid_issuer',  # noqa: E501
    }
    read_only_vars = {
    }
    # Not a composed (allOf/oneOf/anyOf) schema, hence the empty dict.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """SsoProviderAllOf - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            name (str): name of the sso provider. [optional] # noqa: E501
            base_url (str): base url. [optional] # noqa: E501
            token_url (str): [optional] # noqa: E501
            jwks (str): [optional] # noqa: E501
            valid_issuer (str): [optional] # noqa: E501
        """
        # Deserialization path: input uses serialized (wire-format) names,
        # so `_spec_property_naming` defaults to True here.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ (and its read-only checks) via OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # A positional dict is merged into kwargs; anything else is invalid.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes (see `required_properties`) must be set
        # before any model property; the custom __setattr__ consults them.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Attribute names stored directly on the instance rather than in
    # `_data_store` (consumed by OpenApiModel.__setattr__/__getattr__).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """SsoProviderAllOf - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            name (str): name of the sso provider. [optional] # noqa: E501
            base_url (str): base url. [optional] # noqa: E501
            token_url (str): [optional] # noqa: E501
            jwks (str): [optional] # noqa: E501
            valid_issuer (str): [optional] # noqa: E501
        """
        # User-construction path: input uses pythonic names by default.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes must precede model-property assignment.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only properties may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import the related model classes on first use and publish them in this
    module's namespace, breaking the circular-import cycle between models."""
    from rhoas_kafka_mgmt_sdk.model.object_reference import ObjectReference
    from rhoas_kafka_mgmt_sdk.model.version_metadata_all_of import VersionMetadataAllOf
    module_ns = globals()
    module_ns['ObjectReference'] = ObjectReference
    module_ns['VersionMetadataAllOf'] = VersionMetadataAllOf
class VersionMetadata(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-constrained and no validated properties.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        # lazy_import resolves ObjectReference before it is referenced below.
        lazy_import()
        return {
            'id': (str,),  # noqa: E501
            'kind': (str,),  # noqa: E501
            'href': (str,),  # noqa: E501
            'server_version': (str,),  # noqa: E501
            'collections': ([ObjectReference],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is never polymorphically dispatched.
        return None
    attribute_map = {
        'id': 'id',  # noqa: E501
        'kind': 'kind',  # noqa: E501
        'href': 'href',  # noqa: E501
        'server_version': 'server_version',  # noqa: E501
        'collections': 'collections',  # noqa: E501
    }
    read_only_vars = {
    }
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """VersionMetadata - a model defined in OpenAPI
        Keyword Args:
            id (str):
            kind (str):
            href (str):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            server_version (str): [optional] # noqa: E501
            collections ([ObjectReference]): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ via OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # A positional dict is merged into kwargs; anything else is invalid.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes (see `required_properties`) must be set
        # before any model property; the custom __setattr__ consults them.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Validate kwargs against the allOf components and instantiate them.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Attribute names stored directly on the instance rather than in
    # `_data_store` (consumed by OpenApiModel.__setattr__/__getattr__).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """VersionMetadata - a model defined in OpenAPI
        Keyword Args:
            id (str):
            kind (str):
            href (str):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            server_version (str): [optional] # noqa: E501
            collections ([ObjectReference]): [optional] # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes must precede model-property assignment.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        # Validate kwargs against the allOf components and instantiate them.
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only properties may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              ObjectReference,
              VersionMetadataAllOf,
          ],
          'oneOf': [
          ],
        }
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class ServiceAccountListAllOf(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # This model declares no enum-constrained and no validated properties.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'kind': (str,),  # noqa: E501
            'items': ([bool, date, datetime, dict, float, int, list, str, none_type],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        # No discriminator: this schema is never polymorphically dispatched.
        return None
    attribute_map = {
        'kind': 'kind',  # noqa: E501
        'items': 'items',  # noqa: E501
    }
    read_only_vars = {
    }
    # Not a composed (allOf/oneOf/anyOf) schema, hence the empty dict.
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, kind, items, *args, **kwargs):  # noqa: E501
        """ServiceAccountListAllOf - a model defined in OpenAPI
        Args:
            kind (str):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # Deserialization path: input uses serialized (wire-format) names,
        # so `_spec_property_naming` defaults to True here.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__ (and its read-only checks) via OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # A positional dict is merged into kwargs; anything else is invalid.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes (see `required_properties`) must be set
        # before any model property; the custom __setattr__ consults them.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Required properties of this model.
        self.kind = kind
        self.items = items
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Attribute names stored directly on the instance rather than in
    # `_data_store` (consumed by OpenApiModel.__setattr__/__getattr__).
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, kind, items, *args, **kwargs):  # noqa: E501
        """ServiceAccountListAllOf - a model defined in OpenAPI
        Args:
            kind (str):
            items ([bool, date, datetime, dict, float, int, list, str, none_type]):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        # User-construction path: input uses pythonic names by default.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        # NOTE: bookkeeping attributes must precede model-property assignment.
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        # Required properties of this model.
        self.kind = kind
        self.items = items
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only properties may only be set via _from_openapi_data.
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class ServiceAccountRequest(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'name': (str,),  # noqa: E501
            'description': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        'name': 'name',  # noqa: E501
        'description': 'description',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, name, *args, **kwargs):  # noqa: E501
        """ServiceAccountRequest - a model defined in OpenAPI
        Args:
            name (str): The name of the service account
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            description (str): A description for the service account. [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        # Deserialization path: input keys default to serialized (wire) names.
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Allocate directly, skipping OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.name = name
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, name, *args, **kwargs):  # noqa: E501
        """ServiceAccountRequest - a model defined in OpenAPI
        Args:
            name (str): The name of the service account
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            description (str): A description for the service account. [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        # Constructor path: callers use pythonic (snake_case) names by default.
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.name = name
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be populated from server data
            # via _from_openapi_data (read_only_vars is empty for this model).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
class ObjectReference(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        return {
            'id': (str,),  # noqa: E501
            'kind': (str,),  # noqa: E501
            'href': (str,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        'id': 'id',  # noqa: E501
        'kind': 'kind',  # noqa: E501
        'href': 'href',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, id, kind, href, *args, **kwargs):  # noqa: E501
        """ObjectReference - a model defined in OpenAPI
        Args:
            id (str):
            kind (str):
            href (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        # Deserialization path: input keys default to serialized (wire) names.
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Allocate directly, skipping OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.id = id
        self.kind = kind
        self.href = href
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, id, kind, href, *args, **kwargs):  # noqa: E501
        """ObjectReference - a model defined in OpenAPI
        Args:
            id (str):
            kind (str):
            href (str):
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        # Constructor path: callers use pythonic (snake_case) names by default.
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.id = id
        self.kind = kind
        self.href = href
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be populated from server data
            # via _from_openapi_data (read_only_vars is empty for this model).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
import re # noqa: F401
import sys # noqa: F401
from rhoas_kafka_mgmt_sdk.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from rhoas_kafka_mgmt_sdk.exceptions import ApiAttributeError
def lazy_import():
    """Import the Values model on demand and publish it into module globals.

    The import is deferred to class-load time (presumably to break an
    import cycle with rhoas_kafka_mgmt_sdk.model.values -- confirm).
    """
    from rhoas_kafka_mgmt_sdk.model.values import Values
    globals().update(Values=Values)
class RangeQuery(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'metric': ({str: (str,)},),  # noqa: E501
            'values': ([Values],),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    attribute_map = {
        'metric': 'metric',  # noqa: E501
        'values': 'values',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """RangeQuery - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            metric ({str: (str,)}): [optional]  # noqa: E501
            values ([Values]): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        # Deserialization path: input keys default to serialized (wire) names.
        _spec_property_naming = kwargs.pop('_spec_property_naming', True)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Allocate directly, skipping OpenApiModel.__new__.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """RangeQuery - a model defined in OpenAPI
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be
                raised if the wrong type is input.
                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                drill down to the model in received_data
                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data
                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                deserializing a file_type parameter.
                If passed, type conversion is attempted
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                classes that we have traveled through so that
                if we see that class again we will not use its
                discriminator again.
                When traveling through a discriminator, the
                composed schema that is
                is traveled through is added to this set.
                For example if Animal has a discriminator
                petType and we pass in "Dog", and the class Dog
                allOf includes Animal, we move through Animal
                once using the discriminator, and pick Dog.
                Then in Dog, we will make an instance of the
                Animal class but this time we won't travel
                through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
            metric ({str: (str,)}): [optional]  # noqa: E501
            values ([Values]): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        # Constructor path: callers use pythonic (snake_case) names by default.
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            # Positional dicts are merged into kwargs; anything else is rejected.
            for arg in args:
                if isinstance(arg, dict):
                    kwargs.update(arg)
                else:
                    raise ApiTypeError(
                        "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                            args,
                            self.__class__.__name__,
                        ),
                        path_to_item=_path_to_item,
                        valid_classes=(self.__class__,),
                    )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            # Drop unknown keys only when the configuration explicitly asks for
            # it and the model accepts no additional properties.
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            # Read-only attributes may only be populated from server data
            # via _from_openapi_data (read_only_vars is empty for this model).
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.