code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import os
from typing import TYPE_CHECKING
from typing import Dict
from typing import List
from typing import cast
from cleo.helpers import argument
from cleo.helpers import option
from poetry.core.utils import toml
from poetry.core.utils.collections import nested_dict_set
from ..init import InitCommand
if TYPE_CHECKING:
from poetry.console.application import Application # noqa
from poetry.console.commands.update import UpdateCommand # noqa
class PluginAddCommand(InitCommand):
    """Install one or more Poetry plugins into the system environment.

    The command builds (or updates) a ``pyproject.toml`` describing Poetry's
    own environment, adds the requested plugins as dependencies of it, and
    then delegates resolution and installation to the ``update`` command.
    """

    name = "plugin add"

    description = "Adds new plugins."

    arguments = [
        argument("plugins", "The names of the plugins to install.", multiple=True),
    ]

    options = [
        option(
            "dry-run",
            None,
            "Output the operations but do not execute anything (implicitly enables --verbose).",
        )
    ]

    help = """
The <c1>plugin add</c1> command installs Poetry plugins globally.
It works similarly to the <c1>add</c1> command:
If you do not specify a version constraint, poetry will choose a suitable one based on the available package versions.
You can specify a package in the following forms:
- A single name (<b>requests</b>)
- A name and a constraint (<b>requests@^2.23.0</b>)
- A git url (<b>git+https://github.com/python-poetry/poetry.git</b>)
- A git url with a revision (<b>git+https://github.com/python-poetry/poetry.git#develop</b>)
- A git SSH url (<b>git+ssh://github.com/python-poetry/poetry.git</b>)
- A git SSH url with a revision (<b>git+ssh://github.com/python-poetry/poetry.git#develop</b>)
- A file path (<b>../my-package/my-package.whl</b>)
- A directory (<b>../my-package/</b>)
- A url (<b>https://example.com/packages/my-package-0.1.0.tar.gz</b>)\
"""

    def handle(self) -> int:
        """Resolve the requested plugins and install them via ``update``.

        Returns the exit code of the delegated ``update`` run, or ``0`` when
        every requested plugin is already present.
        """
        from pathlib import Path

        from cleo.io.inputs.string_input import StringInput
        from cleo.io.io import IO

        from poetry.core.pyproject.project import Project
        from poetry.core.semver.helpers import parse_constraint
        from poetry.factory import Factory
        from poetry.packages.project_package import ProjectPackage
        from poetry.repositories.installed_repository import InstalledRepository
        from poetry.utils.env import EnvManager

        plugins = self.argument("plugins")

        # Plugins should be installed in the system env to be globally available
        system_env = EnvManager.get_system_env(naive=True)
        # POETRY_HOME, when set, overrides where the plugin manifest lives.
        env_dir = Path(
            os.getenv("POETRY_HOME") if os.getenv("POETRY_HOME") else system_env.path
        )

        # We check for the plugins existence first.
        if env_dir.joinpath("pyproject.toml").exists():
            pyproject, _ = toml.loads(
                env_dir.joinpath("pyproject.toml").read_text(encoding="utf-8")
            )
            poetry_content = pyproject["tool"]["poetry"]
            existing_packages = self.get_existing_packages_from_input(
                plugins, poetry_content, "dependencies"
            )

            if existing_packages:
                self.notify_about_existing_packages(existing_packages)

            # Only keep the plugins that still need installing.
            plugins = [plugin for plugin in plugins if plugin not in existing_packages]

        if not plugins:
            return 0

        plugins = self._determine_requirements(plugins)

        # We retrieve the packages installed in the system environment.
        # We assume that this environment will be a self contained virtual environment
        # built by the official installer or by pipx.
        # If not, it might lead to side effects since other installed packages
        # might not be required by Poetry but still taken into account when resolving dependencies.
        installed_repository = InstalledRepository.load(
            system_env, with_dependencies=True
        )

        root_package = None
        for package in installed_repository.packages:
            if package.name == "poetry":
                root_package = ProjectPackage(package.name, package.version)
                for dependency in package.requires:
                    root_package.add_dependency(dependency)

                break

        # NOTE(review): if "poetry" is not among the installed packages,
        # ``root_package`` is still None here and the next line raises
        # AttributeError — confirm this precondition always holds.
        root_package.python_versions = ".".join(
            str(v) for v in system_env.version_info[:3]
        )

        # We create a `pyproject.toml` file based on all the information
        # we have about the current environment.
        if not env_dir.joinpath("pyproject.toml").exists():
            root_package.create_pyproject(env_dir)

        # We add the plugins to the dependencies section of the previously
        # created `pyproject.toml` file
        pyproject = Project.read(env_dir.joinpath("pyproject.toml"), None)
        # poetry_content = pyproject.poetry_config
        new_dependencies = {}
        plugin_names = []
        for plugin in plugins:
            if "version" in plugin:
                # Validate version constraint
                parse_constraint(plugin["version"])

            # Everything but the name becomes the dependency constraint dict.
            constraint = {}
            for name, value in plugin.items():
                if name == "name":
                    continue

                constraint[name] = value

            # A bare version constraint collapses to its short string form.
            if len(constraint) == 1 and "version" in constraint:
                constraint = constraint["version"]

            new_dependencies[plugin["name"]] = constraint
            plugin_names.append(plugin["name"])

        with pyproject.edit() as data:
            nested_dict_set(data, ['tool', 'poetry', 'dependencies'], new_dependencies)

        # From this point forward, all the logic will be deferred to
        # the update command, by using the previously created `pyproject.toml`
        # file.
        from poetry.console.application import Application

        application = cast(Application, self.application)
        update_command: "UpdateCommand" = cast(
            "UpdateCommand", application.find("update")
        )
        # We won't go through the event dispatching done by the application
        # so we need to configure the command manually
        update_command.set_poetry(Factory().create_poetry(env_dir))
        update_command.set_env(system_env)
        application._configure_installer(update_command, self._io)

        argv = ["update"] + plugin_names
        if self.option("dry-run"):
            argv.append("--dry-run")

        return update_command.run(
            IO(
                StringInput(" ".join(argv)),
                self._io.output,
                self._io.error_output,
            )
        )

    def get_existing_packages_from_input(
        self, packages: List[str], poetry_content: Dict, target_section: str
    ) -> List[str]:
        """Return the subset of ``packages`` already listed (case-insensitively)
        in ``poetry_content[target_section]``."""
        existing_packages = []
        for name in packages:
            for key in poetry_content[target_section]:
                if key.lower() == name.lower():
                    existing_packages.append(name)

        return existing_packages

    def notify_about_existing_packages(self, existing_packages: List[str]) -> None:
        """Tell the user which requested plugins were skipped because they are
        already present, and how to update/upgrade them instead."""
        self.line(
            "The following plugins are already present in the "
            "<c2>pyproject.toml</c2> file and will be skipped:\n"
        )
        for name in existing_packages:
            self.line(" • <c1>{name}</c1>".format(name=name))

        self.line(
            "\nIf you want to update it to the latest compatible version, "
            "you can use `<c2>poetry plugin update package</c2>`.\n"
            "If you prefer to upgrade it to the latest available version, "
            "you can use `<c2>poetry plugin add package@latest</c2>`.\n"
        )
import os
from cleo.helpers import argument
from cleo.helpers import option
from ..command import Command
class CacheClearCommand(Command):
    """Clear Poetry's repository cache, wholesale or per package version."""

    name = "cache clear"
    description = "Clears Poetry's cache."

    arguments = [argument("cache", description="The name of the cache to clear.")]
    options = [option("all", description="Clear all entries in the cache.")]

    def handle(self) -> int:
        """Clear the requested cache.

        The ``cache`` argument is either ``<repo>`` (requires ``--all``) to
        flush a whole repository cache, or ``<repo>:<package>:<version>`` to
        forget a single entry.

        Returns:
            ``0`` on success, including no-op cases (nothing cached, or the
            user declined the confirmation prompt).

        Raises:
            ValueError: for an invalid repository name or cache key.
            RuntimeError: when ``--all`` is missing or only a package name
                (without version) is given.
        """
        from cachy import CacheManager

        from poetry.locations import REPOSITORY_CACHE_DIR

        cache = self.argument("cache")

        parts = cache.split(":")
        root = parts[0]
        cache_dir = REPOSITORY_CACHE_DIR / root

        # Reject roots that escape the cache directory (e.g. absolute paths).
        # NOTE(review): PurePath.relative_to does not resolve ".." segments,
        # so this guard is not a full path-traversal check — confirm upstream.
        try:
            cache_dir.relative_to(REPOSITORY_CACHE_DIR)
        except ValueError:
            raise ValueError("{} is not a valid repository cache".format(root))

        cache = CacheManager(
            {
                "default": parts[0],
                "serializer": "json",
                "stores": {parts[0]: {"driver": "file", "path": str(cache_dir)}},
            }
        )

        if len(parts) == 1:
            if not self.option("all"):
                raise RuntimeError(
                    "Add the --all option if you want to clear all "
                    "{} caches".format(parts[0])
                )

            if not os.path.exists(str(cache_dir)):
                self.line("No cache entries for {}".format(parts[0]))
                return 0

            # Calculate number of entries
            entries_count = 0
            for _path, _dirs, files in os.walk(str(cache_dir)):
                entries_count += len(files)

            delete = self.confirm(
                "<question>Delete {} entries?</>".format(entries_count)
            )
            if not delete:
                return 0

            cache.flush()
        elif len(parts) == 2:
            raise RuntimeError(
                "Only specifying the package name is not yet supported. "
                "Add a specific version to clear"
            )
        elif len(parts) == 3:
            package = parts[1]
            version = parts[2]

            if not cache.has("{}:{}".format(package, version)):
                self.line("No cache entries for {}:{}".format(package, version))
                return 0

            delete = self.confirm("Delete cache entry {}:{}".format(package, version))
            if not delete:
                return 0

            cache.forget("{}:{}".format(package, version))
        else:
            raise ValueError("Invalid cache key")

        # BUG FIX: handle() previously fell off the end after a successful
        # flush()/forget() and implicitly returned None despite ``-> int``.
        return 0
import re
from typing import List
from typing import Optional
from typing import Tuple
from packaging.tags import Tag
from poetry.core.packages.package import Package
from poetry.core.packages.utils.link import Link
from poetry.repositories.pool import Pool
from poetry.utils.env import Env
from poetry.utils.patterns import wheel_file_re
class InvalidWheelName(Exception):
    """Raised by :class:`Wheel` when a filename does not parse as a wheel name."""
    pass
class Wheel:
    """Metadata parsed from a wheel filename (PEP 427 naming convention).

    Raises:
        InvalidWheelName: if ``filename`` does not match the wheel filename
            pattern.
    """

    def __init__(self, filename: str) -> None:
        wheel_info = wheel_file_re.match(filename)
        if not wheel_info:
            # BUG FIX: the original f-string interpolated nothing and always
            # read "(unknown)"; report the offending filename instead.
            raise InvalidWheelName(f"{filename} is not a valid wheel filename.")

        self.filename = filename
        self.name = wheel_info.group("name").replace("_", "-")
        self.version = wheel_info.group("ver").replace("_", "-")
        self.build_tag = wheel_info.group("build")
        # Each tag field may contain several dot-separated (compressed) values.
        self.pyversions = wheel_info.group("pyver").split(".")
        self.abis = wheel_info.group("abi").split(".")
        self.plats = wheel_info.group("plat").split(".")
        # Full cross-product of tags this wheel claims to support.
        self.tags = {
            Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
        }

    def get_minimum_supported_index(self, tags: List[Tag]) -> Optional[int]:
        """Return the lowest (best) index of this wheel's tags within ``tags``,
        or ``None`` when no tag is supported."""
        indexes = [tags.index(t) for t in self.tags if t in tags]
        return min(indexes) if indexes else None

    def is_supported_by_environment(self, env: Env) -> bool:
        """True when the wheel shares at least one tag with ``env``."""
        return bool(set(env.supported_tags).intersection(self.tags))
class Chooser:
    """
    A Chooser chooses an appropriate release archive for packages.
    """

    def __init__(self, pool: Pool, env: Env) -> None:
        self._pool = pool
        self._env = env

    def choose_for(self, package: Package) -> Link:
        """
        Return the url of the selected archive for a given package.

        Raises RuntimeError when no installable candidate exists.
        """
        links = []
        for link in self._get_links(package):
            # Skip wheels that cannot run in the target environment.
            if link.is_wheel and not Wheel(link.filename).is_supported_by_environment(
                self._env
            ):
                continue

            # Unsupported legacy/packaging formats are never candidates.
            if link.ext in {".egg", ".exe", ".msi", ".rpm", ".srpm"}:
                continue

            links.append(link)

        if not links:
            raise RuntimeError(f"Unable to find installation candidates for {package}")

        # Get the best link
        chosen = max(links, key=lambda link: self._sort_key(package, link))
        if not chosen:
            raise RuntimeError(f"Unable to find installation candidates for {package}")

        return chosen

    def _get_links(self, package: Package) -> List[Link]:
        """Collect candidate links for ``package``, filtered by the hashes
        recorded in the lock metadata (when any are recorded)."""
        # Pick the repository: the package's own source, else PyPI, else the
        # first configured repository.
        if not package.source_type:
            if not self._pool.has_repository("pypi"):
                repository = self._pool.repositories[0]
            else:
                repository = self._pool.repository("pypi")
        else:
            repository = self._pool.repository(package.source_reference)

        links = repository.find_links_for_package(package)

        hashes = {f["hash"] for f in package.files if 'hash' in f}
        if not hashes:
            # No pinned hashes: every link is acceptable.
            return links

        selected_links = []
        for link in links:
            if not link.hash:
                # Links without a digest cannot be checked; keep them.
                selected_links.append(link)
                continue

            h = link.hash_name + ":" + link.hash
            if h not in hashes:
                continue

            selected_links.append(link)

        if links and not selected_links:
            # Every hashed link was rejected; report the last mismatch.
            raise RuntimeError(
                f"Retrieved digest for link {link.filename}({h}) not in etc/rp/lock.toml metadata {hashes}"
            )

        return selected_links

    def _sort_key(self, package: Package, link: Link) -> Tuple:
        """
        Function to pass as the `key` argument to a call to sorted() to sort
        InstallationCandidates by preference.
        Returns a tuple such that tuples sorting as greater using Python's
        default comparison operator are more preferred.
        The preference is as follows:
        First and foremost, candidates with allowed (matching) hashes are
        always preferred over candidates without matching hashes. This is
        because e.g. if the only candidate with an allowed hash is yanked,
        we still want to use that candidate.
        Second, excepting hash considerations, candidates that have been
        yanked (in the sense of PEP 592) are always less preferred than
        candidates that haven't been yanked. Then:
        If not finding wheels, they are sorted by version only.
        If finding wheels, then the sort order is by version, then:
        1. existing installs
        2. wheels ordered via Wheel.support_index_min(self._supported_tags)
        3. source archives
        If prefer_binary was set, then all wheels are sorted above sources.
        Note: it was considered to embed this logic into the Link
        comparison operators, but then different sdist links
        with the same version, would have to be considered equal
        """
        support_num = len(self._env.supported_tags)
        build_tag = ()
        binary_preference = 0
        if link.is_wheel:
            wheel = Wheel(link.filename)
            if not wheel.is_supported_by_environment(self._env):
                raise RuntimeError(
                    "{} is not a supported wheel for this platform. It "
                    "can't be sorted.".format(wheel.filename)
                )

            # TODO: Binary preference
            # More specific tags sort earlier in supported_tags, so negate the
            # index to make "more specific" compare as "greater".
            pri = -(wheel.get_minimum_supported_index(self._env.supported_tags))
            if wheel.build_tag is not None:
                match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
                build_tag_groups = match.groups()
                build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
        else:  # sdist
            # Any sdist sorts below every supported wheel.
            pri = -support_num

        has_allowed_hash = int(self._is_link_hash_allowed_for_package(link, package))

        # TODO: Proper yank value
        yank_value = 0

        return (
            has_allowed_hash,
            yank_value,
            binary_preference,
            package.version,
            build_tag,
            pri,
        )

    def _is_link_hash_allowed_for_package(self, link: Link, package: Package) -> bool:
        """True when the link has no digest, or its digest matches one of the
        package's recorded file hashes."""
        if not link.hash:
            return True

        h = link.hash_name + ":" + link.hash

        return h in {f["hash"] for f in package.files}
# List of differentiable operations
## Definitely useful with known solution
* **Classification (binning)**:
Assigning an event to a bin in a histogram or classifying it as a particular class label is a non-differentiable operation. Multi-class classification is a classic example in machine learning and statistics, and is typically relaxed with a sigmoid or a softmax.
* This was used in INFERNO and neos
* Alternatively, one could calculate smooth probability assignments using Kernel Density Estimation or some other kernel based approach
* **Differentiable ranking and sorting**:
Sorting is a fundamental operation. For instance, we typically sort particles by $p_T$.
* Differentiable Ranks and Sorting using Optimal Transport [https://arxiv.org/abs/1905.11885](https://arxiv.org/abs/1905.11885)
* O(nlogn) time and O(n) space complexity [https://arxiv.org/abs/2002.08871](https://arxiv.org/abs/2002.08871) and [great slides](https://raw.githubusercontent.com/mblondel/mblondel.github.io/9e103aad534d3e2d51a357c72b2485309131e719/talks/mblondel-CIRM-2020-03.pdf)
* **Differentiable clustering (partitions)**
We have a set of objects and we would like to cluster or partition them. We can think of this in terms of graph where the nodes are the objects and edges indicate two objects are in the same cluster. We want all objects in the same cluster to be connected and no objects in different clusters to be connected.
* This can be imposed if the adjacency matrix is restricted to be of the form $u u^T$, where $u$ is a softmax output. This was used in [Set2Graph: Learning Graphs From Sets](https://arxiv.org/abs/2002.08772) for vertexing and is also described in slide 27 of [this talk](https://indico.cern.ch/event/809820/contributions/3632659/attachments/1971659/3280030/GNN_NYU_3_Jan_2020.pdf).
* note: one might think of using something like this for clustering calorimeter cells to calorimeter clusters.
* **Barlow-Beeston for Monte Carlo Statistical Uncertainty:**
The statistical uncertainty on template histograms from limited statistical uncertainty can be dealth with in a clean way by jointly modelling the statistical fluctuations in the data and the statistical fluctuations in the Monte Carlo samples. This was treated in [Fitting using finite Monte Carlo samples](https://doi.org/10.1016/0010-4655(93)90005-W) (pdf from [at FermiLab](https://lss.fnal.gov/archive/other/man-hep-93-1.pdf)). In a simple one-bin example one would model as $P(n,m|\mu,\lambda) = Pois(n|\mu+\lambda)Pois(m|\tau\lambda)$ where $n$ is count in data in a signal region, $\mu$ is the unknown exepected signal rate, $\lambda$ is the unknown expected background rate (a nuisance parameter), $\tau$ is the ratio of the Monte Carlo luminosity to data luminosity, and $m$ is the count in the Monte Carlo sample. This can easily be extended to multiple bins and multiple background sources per bin, but it introduces a nuisance parameter for each component of each bin. Note in this setup the observed Monte Carlo are treated as data (since it fluctuates and is on the left of the "|"). In HistFactory language, the Monte Carlo observation $m$ would be the `Data` of a new `Channel` and the unknown background $\tau\lambda$ would be modeled with a `ShapeFactor` that would be shared with the `Channel` that has the real observed data $n$. This is typically very heavy and leads to a proliferation of nuisance parameters, which cause problems for Minuit. Thus, typically an approximate approach is used where the different background contributions are combined. In HistFactory this is what is done when using `StatErrorConfig`. This treatment is usually fine, but has corner cases when $m=0$. One interesting aspect of the Barlow-Beeston approach is that optimization on the nuisance parameter $\lambda$ decouples from optimization on $\mu$. In fact, there is a closed form solution for $\hat{\lambda}(n,m,\mu)$ (eq. 
14), so optimizing the full likelihood can be thought of as a nested optimization with $\lambda$ in the inner loop. Moreover, it can be thought of as the implicit minimization used for the profile likelihood fit in neos. Several years ago George Lewis wrote a wrapper for the log-likeihood created in HistFactory so that $\lambda$ was solved exactly and only the profiled likelihood with $\mu$ was exposed to Minuit. While elegant conceptually, the implementation in RooFit did not lead to significant performance gains for the number of nuisance parameters in the models at that time. However, it would be interesting to revisit this in the context of pyhf and grad-hep. References:
* [RooBarlowBeestonLL.cxx](https://root.cern/doc/master/RooBarlowBeestonLL_8cxx_source.html) [RooBarlowBeestonLL.h](https://root.cern/doc/master/RooBarlowBeestonLL_8h_source.html)
* [A RooFit example](https://root.cern/doc/master/rf709__BarlowBeeston_8C.html)
* **ROC AUC:**
While the area under ROC curve (ROC AUC) is not usually our ultimate physics goal, it may be useful or motivated in some cases. The ROC curve is non-differentiable, but can be relaxed into a rank statistic. This was used for example in [Backdrop: Stochastic Backpropagation](https://arxiv.org/abs/1806.01337)
* Herschtal, A. and Raskutti, B. (2004). Optimising area under the roc curve using gradient descent. In Proceedings of the Twenty-first International Conference on Machine Learning, ICML ’04, pages 49–, New York, NY, USA. ACM. [doi/10.1145/1015330.1015366](https://dl.acm.org/doi/10.1145/1015330.1015366)
## Definitely useful seeking solution
* **Differentiable legend placement in plots:**
They are so annoying, aren't they?
* **Differentiable peer review:**
accept/reject is so non-diffable
## Potentially useful
* **Differentiable Feature Selection by Discrete Relaxation**
See [paper](https://www.microsoft.com/en-us/research/publication/differentiable-feature-selection-by-discrete-relaxation/)
* **Gumbel Max Trick & Gumbel Machinery:**
The Gumbel-Max Trick is a method to sample from a categorical distribution $Cat(\alpha_1, \dots, \alpha_K)$, where category $k$ has $\alpha_k$
probability to be sampled among $K$ categories, and relies on the Gumbel distribution defined by the Cumulative Distribution Function.
* [Gumbel Max Trick](https://laurent-dinh.github.io/2016/11/22/gumbel-max.html)
* [Gumbel Machinery](https://cmaddis.github.io/gumbel-machinery)
* **Sparse Structured Prediction:**
See paper [Differentiable Relaxed Optimization for Sparse Structured Prediction](https://arxiv.org/abs/2001.04437)
* **Coreference resolution**:
"Coreference resolution is the task of identifying all mentions which refer to the same entity in a document." "Coreference resolution can be regarded as a clustering problem: each cluster corresponds to a single entity and consists of all its mentions in a given text." From Optimizing Differentiable Relaxations of Coreference Evaluation Metrics [https://arxiv.org/abs/1704.04451](https://arxiv.org/abs/1704.04451)
| /relaxed-0.1.2.tar.gz/relaxed-0.1.2/list_of_operations.md | 0.956533 | 0.989781 | list_of_operations.md | pypi |
relaxed_types
=============
This library provides a DSL to do type check in Python. The following is provided:
* ``typed_return``: Decorator used to verify the type of the return value
* ``check_type``: Checks if a value matches to type and predicate specifications
* ``Any``: A sentinel object that matches any python object used with ``check_type`` or ``typed_returned``
* ``Values``: A predicate function that matches the specified values instead of specifications
* ``Or``: A predicate function that ensures that at least one of the specifications matches
* ``And``: A predicate function that ensures that all of the specifications match
* ``ReturnTypeError``: The exception that ``check_type`` raises if a type check fails
The main goal of this library is to have a simple way to ensure return types dynamically via ``typed_return``.
typed_return
------------
Lists
+++++
The following snippet shows how to perform a type check (list of integers):
.. code:: python
>>> @typed_return([int])
... def func(v):
... return v + [3, 4]
...
>>> func([1, 2])
[1, 2, 3, 4]
>>> func([1, 2.0])
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for 2.0, expected <type 'int'>. Outer value: [1, 2.0, 3, 4]
Tuples
++++++
Different from lists, tuples have a fixed size. The tuple specification length has to match the value length.
.. code:: python
>>> @typed_return( (str, int) )
... def func(v):
... return v
...
>>> func( ('hello', 123) )
('hello', 123)
>>> func( ('hello', 'world') )
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for 'world', expected <type 'int'>. Outer value: ('hello', 'world')
Sets
++++
Sets behave the same as lists:
.. code:: python
>>> @typed_return({str})
... def func(x):
... return x.union({"test"})
...
>>> func({"a", "b"})
set(['a', 'test', 'b'])
>>> func({"a", "b", 1, 2, 3})
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for 1, expected <type 'str'>. Outer value: set(['a', 1, 2, 3, 'test', 'b'])
Dictionaries
++++++++++++
It is possible to specify the expected types for dictionary key values. All keys specified must exist in the dictionary — the value ``Any`` can be specified as a key in order to validate additional keys.
.. code:: python
>>> @typed_return({"name": str, "age": int})
... def func(v):
... v['test'] = 'test'
... return v
...
>>> func({"name": "John Doe", "age": 21})
{'test': 'test', 'age': 21, 'name': 'John Doe'}
>>> func({"name": "Guy", "age": "47"})
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for '47', expected <type 'int'>. Outer value: {'test': 'test', 'age': '47', 'name': 'Guy'}
The following example shows how to specify a dictionary with key ``name`` as ``str`` and any other key as ``int``.
.. code:: python
>>> from relaxed_types import *
>>> @typed_return({"name": str, Any: int})
... def func(x):
... return x
...
>>> func({"name": "John Doe", "b": 2, "c": 3})
{"name": "John Doe", "b": 2, "c": 3}
Predicates
++++++++++
Predicates allow you to create custom type checks.
A predicate is a function that expects an object and returns a boolean: ``True`` means the object passed in matches the expectations and ``False`` means it does not.
The following snippet ensures `func` only returns odd numbers:
.. code:: python
>>> def odd(x):
... return x % 2 != 0
...
>>> @typed_return(odd)
... def func(v):
... return v * 3
...
>>> func(1)
3
>>> func(2)
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for 6, expected <function odd at ...>. Outer value: 6
Because of predicate support, you can integrate ``relaxed_types`` with other libraries, such as voluptuous_:
.. code:: python
>>> from voluptuous import Length
>>> @typed_return([int], Length(min=10, max=100))
... def func(l):
... return l * 2
...
>>> func(range(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> func(range(3))
Traceback (most recent call last):
...
voluptuous.LengthInvalid: length of value must be at least 10
The only issue with this integration is that it might either raise ``ReturnTypeError`` or
an exception that inherits from ``voluptuous.errors.Invalid``.
Values
++++++
Predicate function that matches the specified values (not specifications). This is useful to test for literals:
.. code:: python
>>> @typed_return(Values(0, 1))
... def func(x):
...     return x
...
>>> func(0)
0
>>> func(1)
1
>>> func(2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "relaxed_types/__init__.py", line 16, in newfn
check_type(result, expected_type, outer_value=result, extra=extra)
File "relaxed_types/checks.py", line 22, in check_type
_check_predicate(value, expected_type, outer_value)
File "relaxed_types/checks.py", line 35, in _check_predicate
_fail(value, expected_type, outer_value, msg=expected_type.__doc__)
File "relaxed_types/checks.py", line 85, in _fail
raise ReturnTypeError(msg, value)
relaxed_types.exceptions.ReturnTypeError: Expected "2" to be in (0, 1)
Or
++
Predicate function that matches at least one specification:
.. code:: python
>>> @typed_return(Or(int, float))
... def func(x):
... return x
...
>>> func(1)
1
>>> func(1.0)
1.0
>>> func("1")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "relaxed_types/__init__.py", line 16, in newfn
check_type(result, expected_type, outer_value=result, extra=extra)
File "relaxed_types/checks.py", line 22, in check_type
_check_predicate(value, expected_type, outer_value)
File "relaxed_types/checks.py", line 35, in _check_predicate
_fail(value, expected_type, outer_value, msg=expected_type.__doc__)
File "relaxed_types/checks.py", line 85, in _fail
raise ReturnTypeError(msg, value)
relaxed_types.exceptions.ReturnTypeError: '1' did not match Or(<type 'int'>, <type 'float'>).
More details about the last check: Type mismatch for '1', expected <type 'float'>. Outer value: '1'
And
+++
Predicate function that matches all specifications:
.. code:: python
>>> from relaxed_types import *
>>> @typed_return({"i": And(int, lambda x: x > 0)})
... def func(x):
... return {"i": x}
...
>>> func(1)
{'i': 1}
>>> func(1.0)
Traceback (most recent call last):
...
relaxed_types.exceptions.ReturnTypeError: 1.0 did not match And(<type 'int'>, <function <lambda> at 0x105f7a848>).
More details about the last check: Type mismatch for 1.0, expected <type 'int'>. Outer value: 1.0
>>> func(-1)
Traceback (most recent call last):
...
relaxed_types.exceptions.ReturnTypeError: -1 did not match And(<type 'int'>, <function <lambda> at 0x105f7a848>).
More details about the last check: Type mismatch for -1, expected <function <lambda> at 0x105f7a848>. Outer value: -1
Combining all together
++++++++++++++++++++++
It's possible to combine lists, tuples, dictionaries, predicates, and any Python type.
.. code:: python
>>> @typed_return(int, lambda x: x > 0)
... def func1(x):
... return x + 10
...
>>>
>>> func1(10)
20
>>> func1(-100)
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for -90, expected <type 'int'>. Outer value: -90
>>> @typed_return([int], lambda x: len(x) > 0)
... def func1(x):
... return x
...
>>>
>>> func1([1, 2])
[1, 2]
>>> func1([])
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for [], expected [<type 'int'>]. Outer value: []
>>> @typed_return([ {"name": lambda x: x.upper() == x} ])
... def func2(x):
... return x
...
>>>
>>> func2([{"name": "JOHN DOE"}])
[{'name': 'JOHN DOE'}]
>>> func2([{"name": "test"}])
Traceback (most recent call last):
...
relaxed_types.ReturnTypeError: Type mismatch for 'test', expected <function <lambda> at 0x10e325758>. Outer value: [{'name': 'test'}]
>>> @typed_return([{"data": Any, "id": And(int, lambda x: x > 0)}])
... def func3(x):
... return x
...
>>> func3([{"data": "price=10", "id": 1}])
[{'data': 'price=10', 'id': 1}]
>>> func3([{"data": 10, "id": 2}])
[{'data': 10, 'id': 2}]
>>> func3([{"data": {"price": 10}, "id": 2}])
[{'data': {'price': 10}, 'id': 2}]
.. _voluptuous: https://github.com/alecthomas/voluptuous | /relaxed_types-1.0.1.tar.gz/relaxed_types-1.0.1/README.rst | 0.927601 | 0.82925 | README.rst | pypi |
from __future__ import annotations
import asyncio
import base64
import dataclasses
import datetime
import functools
import inspect
import json
import signal
import weakref
from typing import (Any, Awaitable, Callable, Iterable, Mapping, Optional,
Protocol, Union)
def json_object_hook(dct: Mapping[str, Any]) -> Any:
    """``json.loads`` object hook reversing :class:`JSONEncoder`'s encoding.

    A mapping of the form ``{"$encoding": "base64", "data": ...}`` is decoded
    back to ``bytes``; ``{"$encoding": "", "data": ...}`` yields ``data``
    unchanged. Any other mapping (missing ``data`` or with an unknown
    encoding) is returned as-is.
    """
    if '$encoding' in dct:
        try:
            # BUG FIX (annotation): base64 decoding yields ``bytes`` while the
            # identity decoder returns ``str``; the old ``Callable[[str], str]``
            # annotation was wrong.
            decoder: Callable[[str], Union[str, bytes]] = {
                'base64': base64.standard_b64decode,
                '': lambda data: data,
            }[dct['$encoding']]
            return decoder(dct['data'])
        except KeyError:
            # Either dct does not contain data or has an encoding that we can't
            # handle.
            pass

    return dct
class JSONEncoder(json.JSONEncoder):
    """JSON encoder with extra conveniences for SDK payloads.

    Beyond the stock ``json.JSONEncoder`` it serializes:

    * dataclass instances (as their field dict),
    * arbitrary iterables (as lists),
    * ``datetime`` values (as ISO-8601 strings),
    * ``bytes`` (as UTF-8 text when decodable, otherwise as a
      ``{"$encoding": "base64", "data": ...}`` envelope that
      :func:`json_object_hook` can reverse).
    """

    @functools.singledispatchmethod
    def default(self, obj: Any) -> Any:
        # Fallback for types without a registered handler below.
        if dataclasses.is_dataclass(obj):
            return dataclasses.asdict(obj)

        try:
            it = iter(obj)
        except TypeError:
            pass
        else:
            return list(it)

        # Let the base class raise the standard TypeError.
        return super(JSONEncoder, self).default(obj)

    @default.register
    def _datetime(self, obj: datetime.datetime) -> str:
        return obj.isoformat()

    @default.register
    def _bytes(self, obj: bytes) -> Union[str, Mapping[str, Any]]:
        try:
            return obj.decode('utf-8')
        except UnicodeDecodeError:
            # BUG FIX: decode the base64 payload to ``str`` here; previously
            # the raw ``bytes`` from standard_b64encode leaked into the
            # envelope and only serialized correctly via a second, accidental
            # pass through default().
            return {
                '$encoding': 'base64',
                'data': base64.standard_b64encode(obj).decode('ascii'),
            }
def is_async_callable(obj: Any) -> bool:
    """Return True when calling ``obj`` would produce a coroutine."""
    if not callable(obj):
        return False

    if inspect.iscoroutinefunction(obj):
        return True

    # Callable instances count when their __call__ is a coroutine function.
    return inspect.iscoroutinefunction(obj.__call__)
# An awaitable that completes once termination has been requested for the
# task it was handed to.
TerminationEvent = Callable[[], Awaitable[None]]


class TerminationPolicy(Protocol):
    """Structural interface for termination strategies.

    ``attach`` registers the current task and returns an event to await on,
    or ``None`` when the policy never terminates anything.
    """

    async def attach(self) -> Optional[TerminationEvent]:
        ...


class NoTerminationPolicy(TerminationPolicy):
    """Policy that never requests termination."""

    async def attach(self) -> Optional[TerminationEvent]:
        # Nothing to wait on: callers receive no termination event.
        return None
class SoftTerminationPolicy(TerminationPolicy):
    """Cooperative termination: each attached task gets an
    :class:`asyncio.Event` that is set when termination is requested.

    When ``timeout_sec`` is given, a task that has not finished that many
    seconds after being asked to stop is hard-cancelled via ``Task.cancel()``.
    """

    # Maps each attached task to its termination event; weak keys let
    # completed tasks (and their events) be garbage-collected.
    _tasks: weakref.WeakKeyDictionary[asyncio.Task[Any], asyncio.Event]
    _timeout_sec: Optional[float]

    def __init__(self, *, timeout_sec: Optional[float] = None):
        self._tasks = weakref.WeakKeyDictionary()
        self._timeout_sec = timeout_sec

    async def _terminate_task(self, task: asyncio.Task[Any]) -> None:
        # Runs on the task's own event loop (scheduled by terminate_task).
        event = self._tasks.get(task)
        if event is not None:
            event.set()

        if task.done():
            return

        if self._timeout_sec is not None:
            # Escalate to cancellation if the task ignores the soft signal.
            loop = asyncio.get_running_loop()
            loop.call_later(self._timeout_sec, task.cancel)

    def terminate_task(self, task: asyncio.Task[Any]) -> None:
        """Request termination of ``task``; safe to call from any thread."""
        asyncio.run_coroutine_threadsafe(
            self._terminate_task(task),
            task.get_loop(),
        )

    def terminate_all(self) -> None:
        """Request termination of every task attached to this policy."""
        for task in self._tasks:
            self.terminate_task(task)

    async def attach(self) -> Optional[TerminationEvent]:
        """Register the current task and return an awaitable that completes
        when termination is requested for it."""
        task = asyncio.current_task()
        assert task is not None

        try:
            event = self._tasks[task]
        except KeyError:
            # First attach for this task: create and remember its event.
            event = asyncio.Event()
            self._tasks[task] = event

        async def wait() -> None:
            await event.wait()

        return wait
class SignalTerminationPolicy(TerminationPolicy):
    """Soft termination triggered by OS signals (SIGINT/SIGTERM by default),
    delegating the actual bookkeeping to :class:`SoftTerminationPolicy`."""

    _signals: Iterable[signal.Signals]
    _delegate: SoftTerminationPolicy

    def __init__(self, *,
                 signals: Optional[Iterable[signal.Signals]] = None,
                 timeout_sec: Optional[float] = None):
        if signals is None:
            signals = [signal.SIGINT, signal.SIGTERM]

        self._signals = signals
        self._delegate = SoftTerminationPolicy(timeout_sec=timeout_sec)

    async def attach(self) -> Optional[TerminationEvent]:
        """Register the current task and wire the configured signals to
        terminate it; returns the delegate's termination event."""
        loop = asyncio.get_running_loop()
        task = asyncio.current_task()
        assert task is not None

        # NOTE: the delegate's attach() coroutine is created here but only
        # awaited below, after the signal handlers have been registered.
        event = self._delegate.attach()
        for sig in self._signals:
            loop.add_signal_handler(sig, self._delegate.terminate_task, task)

        return await event
"The main class for a client to connect to the Relay service API"
from __future__ import annotations
import json
import logging
import os
from typing import Any, Optional, Union
from .client import new_session
from .decorators import Decorators
from .events import Events
from .outputs import Outputs
from .util import json_object_hook
from .workflows import Workflows
class UnresolvableException(Exception):
    """Raised when the metadata service reports a queried value as not yet
    complete (see Interface.get)."""
    pass
class DynamicMetaclass(type):
    """Metaclass turning attribute access on the Dynamic *class* itself into
    the root node of a query chain (e.g. ``Dynamic.foo``)."""
    def __getattr__(cls, attr: str) -> Dynamic:
        return Dynamic(attr)
class Dynamic(metaclass=DynamicMetaclass):
    """A query interface for inspecting a spec.

    Supports arbitrary attribute traversal that renders as a metadata-API
    query string, e.g. ``str(Dynamic.foo.bar) == 'foo["bar"]'``.
    """
    def __init__(self, name: str, parent: Optional[Dynamic] = None) -> None:
        self._name = name
        self._parent = parent
    def __getattr__(self, attr: str) -> Dynamic:
        # Each attribute access extends the chain with a new child node.
        return Dynamic(attr, parent=self)
    def __str__(self) -> str:
        if self._parent is not None:
            # Render as parent[<json-quoted child name>].
            return '{0}[{1}]'.format(self._parent, json.dumps(self._name))
        return self._name
class Interface:
    """An Interface object connects client code to the metadata service."""
    def __init__(self, api_url: Optional[str] = None,
                 configure_logging: Optional[bool] = None):
        # HTTP session pointed at the metadata API.
        self._client = new_session(api_url=api_url)
        if configure_logging is None:
            # Default: only configure logging when running inside Relay
            # (the RELAY env var is set to 'true' there).
            configure_logging = os.environ.get('RELAY') == 'true'
        if configure_logging:
            logging.basicConfig(
                format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                level=logging.INFO,
            )
    def get(self, q: Optional[Union[Dynamic, str]] = None) -> Any:
        """Retrieve values from the metadata service
        Args:
            q: A particular parameter to query the value of.
        Returns:
            The value of the queried parameter as a string.
            If no query was provided, all available parameters will be
            returned in a json map
        Raises:
            UnresolvableException: if the service reports the value as not
                yet complete.
        """
        params = {}
        if q is not None:
            params['q'] = str(q)
        # NOTE(review): the 'http+api://' scheme is presumably resolved by a
        # transport adapter registered in new_session — confirm.
        r = self._client.get('http+api://api/spec', params=params)
        r.raise_for_status()
        data = json.loads(r.text, object_hook=json_object_hook)
        if not data['complete']:
            raise UnresolvableException()
        return data['value']
    @property
    def decorators(self) -> Decorators:
        """Manipulate UI decorators for this action."""
        return Decorators(self._client)
    @property
    def events(self) -> Events:
        """Accessor for Events methods"""
        return Events(self._client)
    @property
    def outputs(self) -> Outputs:
        """Accessor for Outputs methods"""
        return Outputs(self._client)
    @property
    def workflows(self) -> Workflows:
        """Accessor for Workflows methods"""
        return Workflows(self._client)
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from local_attention import LocalAttention
from relay_transformer.reversible import ReversibleSequence, SequentialSequence
# helper fns
def default(val, d):
    """Return *val*, or the fallback *d* when *val* is None."""
    if val is None:
        return d
    return val
# classes
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to the wrapped module."""
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn
    def forward(self, x, **kwargs):
        # normalize first, then run the wrapped module
        return self.fn(self.norm(x), **kwargs)
class FeedForward(nn.Module):
    """Position-wise two-layer MLP with LeakyReLU and a width multiplier."""
    def __init__(self, dim, mult = 4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Linear(dim, hidden),
            nn.LeakyReLU(inplace = True),
            nn.Linear(hidden, dim)
        )
    def forward(self, x, **kwargs):
        # kwargs accepted (and ignored) for call-compatibility with attention layers
        return self.net(x)
class FullAttention(nn.Module):
    """Standard scaled dot-product attention, optionally causal."""
    def __init__(self, causal = False):
        super().__init__()
        self.causal = causal
    def forward(self, q, k, v):
        seq_len, dim_head = q.shape[-2], q.shape[-1]
        # (b, h, i, j) similarity scores, scaled by 1/sqrt(d)
        scores = torch.einsum('bhid,bhjd->bhij', q, k) * (dim_head ** -0.5)
        if self.causal:
            # mask out positions j > i so tokens cannot attend to the future
            causal_mask = torch.ones((seq_len, seq_len), device=q.device).triu_(1).bool()
            scores.masked_fill_(causal_mask, float('-inf'))
            del causal_mask
        attn = scores.softmax(dim=-1)
        return torch.einsum('bhjd,bhij->bhid', v, attn)
class SelfAttention(nn.Module):
    """Multi-head self-attention over either full or windowed (local) context."""
    def __init__(self, dim, heads = 8, causal = False, window_size = 256, full_attention = True):
        super().__init__()
        self.heads = heads
        self.to_qkv = nn.Linear(dim, dim * 3, bias = False)
        self.to_out = nn.Linear(dim, dim)
        if full_attention:
            self.attn = FullAttention(causal = causal)
        else:
            self.attn = LocalAttention(window_size, causal = causal, rel_pos_emb_config = (dim // heads, heads))
    def forward(self, x):
        b, t, d = x.shape
        h = self.heads
        # project to q, k, v and split the feature dim into heads
        qkv = self.to_qkv(x).chunk(3, dim=-1)
        q, k, v = (part.reshape(b, t, h, -1).transpose(1, 2) for part in qkv)
        attended = self.attn(q, k, v)
        # merge heads back into the feature dimension
        attended = attended.transpose(1, 2).reshape_as(x)
        return self.to_out(attended)
class AttentionLayer(nn.Module):
    """Local windowed attention over the relay-token-augmented sequence,
    optionally preceded by global attention among the relay tokens only."""
    def __init__(self, dim, heads = 8, causal = False, window_size = 256, relay_tokens_per_window = 2, global_attention = False):
        super().__init__()
        self.heads = heads
        self.global_attn = SelfAttention(dim, causal = causal, heads = heads, full_attention = True) if global_attention else None
        # widen the local window to account for the interspersed relay tokens
        self.ws_with_relays = window_size + relay_tokens_per_window
        # number of ordinary tokens per relay token
        self.seq_per_relay = window_size // relay_tokens_per_window
        self.local_attn = SelfAttention(dim, causal = causal, heads = heads, window_size = self.ws_with_relays)
    def forward(self, x, **kwargs):
        # x: (batch, seq_with_relays, dim); each group of (seq_per_relay + 1)
        # positions starts with one relay token.
        b, _, d = x.shape
        if self.global_attn is not None:
            # slice out relay tokens
            x = x.reshape(b, -1, self.seq_per_relay + 1, d)
            relay_tokens = x[:, :, 0]
            # have relay tokens attend to each other, passing information from afar
            # (residual connection around the global attention)
            relay_tokens = self.global_attn(relay_tokens) + relay_tokens
            relay_tokens = relay_tokens.unsqueeze(2)
            # concat relay tokens back to sequence for local attention to extract
            # (clone() detaches the slice from the reshaped view before cat)
            x = torch.cat((relay_tokens, x[:, :, 1:].clone()), dim=2)
            x = x.reshape(b, -1, d)
        x = self.local_attn(x)
        return x
class RelayTransformer(nn.Module):
    """Transformer mixing local windowed attention with global attention over
    evenly interspersed, learned relay tokens."""
    def __init__(self, dim, max_seq_len, depth, causal = False, heads = 8, window_size = 256, relay_tokens_per_window = 2, depth_start_relay_attn = None, reversible = False):
        super().__init__()
        # relay (global) attention only activates from this layer depth onward
        depth_start_relay_attn = default(depth_start_relay_attn, depth // 2)
        assert depth_start_relay_attn > 1 and depth_start_relay_attn <= depth, 'invalid depth for which to start relay attention'
        # single learned embedding shared by every relay token position
        self.relay_token_emb = nn.Parameter(torch.zeros(1, dim))
        self.window_size = window_size
        self.relay_tokens_per_window = relay_tokens_per_window
        layers = nn.ModuleList([])
        for ind in range(depth):
            layer_num = ind + 1
            relay_attends = layer_num >= depth_start_relay_attn
            wrapper_fn = partial(PreNorm, dim)
            attn = AttentionLayer(dim, heads = heads, causal = causal, window_size = window_size, relay_tokens_per_window = relay_tokens_per_window, global_attention = relay_attends)
            feedforward = FeedForward(dim)
            layers.append(nn.ModuleList([
                wrapper_fn(attn),
                wrapper_fn(feedforward)
            ]))
        execute_type = ReversibleSequence if reversible else SequentialSequence
        self.layers = execute_type(layers)
        self.pad_to_multiple = window_size
    def forward(self, x, **kwargs):
        # NOTE(review): the reshape below requires the sequence length to be a
        # multiple of (window_size // relay_tokens_per_window) — confirm
        # padding is handled upstream.
        b, t, d = x.shape
        relay_token_every = self.window_size // self.relay_tokens_per_window
        # concat relay tokens to input, interspersed evenly throughout sequence
        x = x.reshape(b, -1, relay_token_every, d)
        relay_tokens = self.relay_token_emb[None, None, ...].expand(b, x.shape[1], -1, -1)
        x = torch.cat((relay_tokens, x), dim = 2)
        inp_with_relay_shape = x.shape
        x = x.reshape(b, -1, d)
        # attention and feedforward
        x = self.layers(x)
        # remove relay tokens
        x = x.reshape(*inp_with_relay_shape)
        out = x[:, :, 1:].reshape(b, -1, d)
        return out
class RelayTransformerLM(nn.Module):
    """Language model head: token embedding + RelayTransformer + projection
    back to vocabulary logits."""
    def __init__(self, num_tokens, dim, max_seq_len, depth, causal = False, heads = 8, window_size = 256, relay_tokens_per_window = 2, depth_start_relay_attn = None, reversible = False):
        super().__init__()
        assert (window_size % relay_tokens_per_window) == 0, 'window size must be divisible by the relay tokens to be interspersed in it'
        self.max_seq_len = max_seq_len
        self.token_emb = nn.Embedding(num_tokens, dim)
        self.transformer = RelayTransformer(dim, max_seq_len, depth, causal = causal, heads = heads, window_size = window_size, relay_tokens_per_window = relay_tokens_per_window, depth_start_relay_attn = depth_start_relay_attn, reversible = reversible)
        self.to_logits = nn.Linear(dim, num_tokens)
    def forward(self, x, **kwargs):
        # x: integer token ids -> (batch, seq, num_tokens) logits
        x = self.token_emb(x)
        x = self.transformer(x, **kwargs)
        return self.to_logits(x)
Relay Web UI:

Mesos Web UI:

Relay.Mesos: Run Relay and Mesos
==========
In short, Relay.Mesos runs Relay as a Mesos framework. By combining
both of these tools, we can solve control loop problems that arise in
distributed systems. An example problem Relay.Mesos might solve is to
spin up queue consumers to maintain or minimize a queue size. You could also
use Relay.Mesos to set a target CPU usage over time for all instances of
a particular task running on your mesos cluster.
What is Relay?
----------
Relay is "a thermostat for distributed systems." It is a tool that
attempts to make a metric timeseries as similar to a target
as possible, and it works like thermostat does for temperature.
[Details on Relay's Github page.](
https://github.com/sailthru/relay/blob/master/README.md)
What is Mesos?
----------
Apache Mesos is "a distributed systems kernel." It pools resources from
networked machines and then provides a platform that executes code over
those resources. It's basically a bin-packing scheduler and resource
manager that identifies which resources are available and then provides
ways to use those resources.
[Details on Mesos's landing page.](http://mesos.apache.org/)
[White paper about Mesos (this is good
reading)](http://mesos.berkeley.edu/mesos_tech_report.pdf)
What is Relay.Mesos?
----------
Relay.Mesos will iteratively ask Mesos to run tasks on the cluster.
These tasks will either eventually increase or eventually decrease some
measured metric. Relay.Mesos will quickly learn how the metric changes
over time and tune its requests to Mesos so it can minimize the difference
between the metric and a desired target value for that metric.
Quickstart
==========
1. Install Docker
- https://docs.docker.com/installation
- (if on a mac, you may need boot2docker and don't forget to add env vars to your .profile)
- (if on ubuntu, you may need 3.16 kernel or later)
1. Identify docker in /etc/hosts
# I added this to my /etc/hosts file:
# 192.168.59.103 localdocker
# If you use boot2docker, this should work:
# $ echo "$(boot2docker ip) localdocker" | sudo tee -a /etc/hosts
1. Run the demo script.
- When you run this for the first time, docker may need to download a
lot of the required images to get mesos running on your computer
# ./bin/demo.sh # run the demo
Background
==========
Relay.Mesos is made up of two primary components: a Mesos framework and
a Relay event loop. Relay continuously requests that the mesos
framework run a number of tasks. The framework receives resource
offers from mesos and, if the most recent Relay request can be fulfilled,
it will attempt to fulfill it by spinning up "warmer" or "cooler" tasks.
If Relay requests can't be fulfilled because the
Mesos cluster is at capacity, then Relay will continue to ask to spin up
tasks, but nothing will happen.
If no mesos resource offers are available for a long time, Relay.Mesos
will become starved for resources. This can result in Relay.Mesos
building up a history of error between the target and the metric. If
Relay.Mesos has been starved for Mesos resources for a while, when
resources become available again, Relay might initially ask for too many
resources because it's learned that asking for a lot of tasks to spin up
results in very little or no difference in the metric. In any case, it
will quickly re-learn the right thing to do.
In Relay.Mesos, as with Relay generally, there are 4 main components:
metric, target, warmer and cooler.
The ```metric``` and ```target``` are both python generator functions
(ie timeseries), that, when called, each yield a number. The
```metric``` is a signal that we're monitoring and manipulating. The
```target``` represents a desired value that Relay attempts to make the
```metric``` mirror as closely as possible.
The ```warmer``` and ```cooler``` are expected to (eventually) modify
the metric. Executing a ```warmer``` will increase the metric.
Executing a ```cooler``` will decrease the metric. In Relay.Mesos, the
```warmer``` and ```cooler``` are bash commands. These may be executed in
your docker containers, if you wish.
Examples:
----------
(See QuickStart for a demo using Docker containers)
#### Autoscaling processes that run, complete, and then exit:
Relay.Mesos can ensure that the number of jobs running at any given
time is enough to consume a queue.
Metric = queue size
Target = 0
Warmer = "./start-worker.sh"
(Cooler would not be defined)
Relay.Mesos can schedule the number of consumers or servers running at a
particular time of day
Metric = number of consumers
Target = max_consumers * is_night # this could work too: sin(time_of_day) * max_consumers
Warmer = "./start-consumer.sh"
(Cooler would not be defined)
Relay.Mesos can attempt to maintain a desired amount of cpu usage
Metric = cpu_used - expected_cpu_used
Target = 0
Cooler = "run a bash command that uses the cpu"
(Warmer not defined)
#### Autoscaling long-running processes that never die.
Relay.Mesos can auto-scale the number of web-servers running:
Metric = desired number of web servers (as function of current load)
Target = number of webserver instances currently running
Warmer = Marathon API call to increase # webserver instances by 1
Cooler = Marathon API call to decrease # webserver instances by 1
Relay.Mesos can guarantee a minimum number of running redis instances
Metric = max(min_instances, desired num of redis instances)
Target = current number of redis instances
Warmer = API call to increase # redis instances by 1
Cooler = API call to decrease # redis instances by 1
##### Math side-note if you need help calculating a Metric function
A Metric function that might ensure that the number of instances is
between some bounds could use the following equation:
```
(Qsize - Qminsize) / (Qmaxsize - Qminsize) * (Imax - Imin) + Imin
```
where
```
    Qsize = current queue size
    Qmaxsize = maximum expected queue size
    Qminsize = minimum expected queue size (ie 0)
    Imax = Max desired num of instances
    Imin = Min desired num of instances
```
To get you thinking in the right direction, consider this scenario:
Perhaps you have a real-valued metric that is much larger than the
number of tasks/instances you may be auto scaling. Perhaps you also don't know
the max and min values of the metric, but you have a mean and standard
deviation. You can experiment with a metric function that bounces
between -1 and 1, with occasional numbers beyond the range. For
instance, you could try the below function, and also perhaps have the
mean and standard deviation iteratively update over time:
Metric = `(Qsize - Qmean) // Qstdev` # the // means integer division
# rather than floating point division
                           # 1 / 2 == .5 VS 1 // 2 == 0
Target = 0
Warmer = "cmd to add more servers"
Cooler = "cmd to remove some servers"
More complex metrics might use other scaling functions, a logistic
function, probabilistic expressions or regression functions.
When auto-scaling long-running processes, you may need to set the
```--relay_delay``` (ie. min num seconds between warmer / cooler calls)
to a number larger than the default value of 1 second. Also, if you
find that the long-running process is already mesos-aware (ie running
via Marathon), it might make it more sense for you to use
[Relay](http://www.github.com/sailthru/relay) rather than Relay.Mesos.
Configuration Options:
----------
All configuration options specific to Relay.Mesos are visible when you
run one of the following commands:
```
$ docker-compose run relay relay.mesos -h
# or, if you have relay.mesos installed locally
$ relay.mesos -h
```
Configuration options can also be passed in via environment variables
Relay.Mesos options are prefixed with `RELAY_MESOS`. For instance:
RELAY_MESOS_MASTER=zk://zookeeper:2181/mesos
Relay-only options (ie those that start with "RELAY_"):
RELAY_DELAY=60
| /relay.mesos-2.0.zip/relay.mesos-2.0/README.md | 0.581303 | 0.808105 | README.md | pypi |
from __future__ import division
from collections import deque
import numpy as np
import os
from os.path import abspath, dirname, join
import subprocess
import sys
import time
import threading
from relay import log, configure_logging, add_zmq_log_handler
from relay import util
from relay import argparse_shared as at
def start_webui():
    """Launch the bundled node.js web UI as a detached subshell process."""
    # web/src lives two directories above this module
    cwd = join(dirname(dirname(abspath(__file__))), 'web/src')
    log.info("Starting node.js webui in a subshell")
    # NOTE(review): os.setsid puts the child in its own session, which
    # detaches it rather than tying its lifetime to this process — the
    # original comment below may be inverted; confirm intended behavior.
    subprocess.Popen(
        'cd %s ; node index.js' % cwd, shell=True,
        preexec_fn=os.setsid)  # guarantee that the child process exits with me
@util.coroutine
def window(n, initial_data=()):
    """Sliding-window coroutine: each value sent in is appended and the deque
    of the most recent *n* values (seeded with *initial_data*) is yielded."""
    buf = deque(initial_data, n)
    while True:
        buf.append((yield buf))
def calc_weight(errdata):
    """Derive a correction weight from the oscillation structure of the
    recent error history using an FFT.

    Returns 1 when the spectrum sums to zero (no variation in the signal).

    Fix: the original shrank ``kth`` (``kth = kth[idxs]``) while ``p_k`` and
    the other arrays stayed full-length, so the next iteration's boolean
    masks no longer matched their targets (IndexError on modern numpy).
    All arrays are now kept full-length and finished entries are zeroed,
    which is behaviorally what the zeroing of ``p_k`` already intended
    (sin(0) == 0 contributes nothing).
    """
    # positive-frequency half of the spectrum, excluding the DC component
    sp = np.fft.fft(errdata)[1: len(errdata) // 2]
    if sp.sum() == 0:  # there is no variation in the signal
        log.warn('no variation in the signal. fft cannot continue')
        return 1
    # get the phase in radians # -np.pi < phase <= +np.pi
    phase = np.angle(sp)  # radians
    # find the amplitude integral of neighboring samples.
    # search <360 degrees to left of most recent sample's phase
    # p_k = phase - degrees_between_samples * k # kth phase
    amplitude_integrals = np.abs(np.sin(phase))  # iteratively updated
    # samples per cycle
    kth = len(errdata) / np.arange(1, len(errdata) // 2)
    num_degrees_between_samples = 2 * np.pi / kth
    p_k = phase.copy()
    while (kth > 0).any():
        # find amplitude of a sine wave at specific phase
        p_k -= num_degrees_between_samples
        amplitude_integrals += np.abs(np.sin(p_k))
        kth -= 1
        finished = ~(kth > 0)
        # keep all arrays full-length; finished frequencies contribute
        # sin(0) == 0 from here on
        kth[finished] = 0
        p_k[finished] = 0
        num_degrees_between_samples[finished] = 0
    # get the amplitude of each frequency in the fft spectrum
    amplitude = np.abs(sp)
    return (
        # phase contribution normalized by its own accumulated amplitude,
        # weighted by each frequency's share of the total spectrum amplitude
        (np.sin(phase) / amplitude_integrals)
        * (amplitude / amplitude.sum())
    ).sum()
def create_ramp_plan(err, ramp):
    """
    Formulate and execute on a plan to slowly add heat or cooling to the system

    `err` initial error (PV - SP)
    `ramp` the size of the ramp

    A ramp plan might yield MVs in this order at every timestep:
        [5, 0, 4, 0, 3, 0, 2, 0, 1]
    where err == 5 + 4 + 3 + 2 + 1

    Yields integer MV adjustments whose total equals `err`, spread across
    roughly the first half of `ramp` timesteps, then yields 0 forever.
    """
    if ramp == 1:  # basecase: apply the full correction immediately
        yield int(err)
        while True:
            yield 0
    # Choose n such that n + (n-1) + ... + 1 == |err|, ie the positive root of
    #     0.5 * n**2 + 0.5 * n - |err| == 0
    # Fix: the original solved np.roots([.5, -.5, 0]), which does not involve
    # `err` at all (its roots are always 0 and 1), so every plan degenerated
    # to a single step.
    n = np.abs(np.roots([.5, .5, -np.abs(err)]).max())
    # spend the whole budget within the first half of the ramp; at least one
    # sub-step per level so the inner loop below is well defined
    niter = max(1, int(ramp // (2 * n))) if n > 0 else 1
    MV = n
    log.info('Initializing a ramp plan', extra=dict(
        ramp_size=ramp, err=err, niter=niter))
    for x in range(int(n)):
        budget = MV
        for x in range(niter):
            # emit equal-sized sub-steps, tracking the remainder in `budget`
            budget -= MV // niter
            yield int(np.sign(err) * (MV // niter))
        yield int(budget * np.sign(err))
        MV -= 1
    while True:
        yield 0
def validate_ns_or_sysexit(ns):
    """Sanity-check the parsed CLI namespace; on any missing required option,
    log the problem, print usage and exit(1)."""
    invalid = False
    if None in (ns.target, ns.metric):
        log.error("you must define a --metric and --target!")
        invalid = True
    if ns.warmer is None and ns.cooler is None:
        log.error("you must define either a --warmer or a --cooler!")
        invalid = True
    if invalid:
        build_arg_parser().print_usage()
        sys.exit(1)
def evaluate_stop_condition(errdata, stop_condition):
    """
    Call the user-defined function: stop_condition(errdata)

    If the function returns -1, do nothing.  Otherwise, log and sys.exit
    with the returned code.
    """
    if not stop_condition:
        return
    rc = stop_condition(list(errdata))
    if rc == -1:
        return
    log.info(
        'Stop condition triggered! Relay is terminating.',
        extra=dict(return_code=rc))
    sys.exit(rc)
def main(ns):
    """Relay's control loop: each tick, read the target (SP) and metric (PV),
    compute a manipulated variable (MV) and fire the warmer/cooler in a
    background thread.  Runs until a stop condition triggers."""
    validate_ns_or_sysexit(ns)
    configure_logging(True)
    if ns.sendstats:
        if ns.sendstats == 'webui':
            # stream log events over ZMQ to the bundled web UI
            add_zmq_log_handler('ipc:///tmp/relaylog')
            start_webui()
        else:
            add_zmq_log_handler(ns.sendstats)
    log.info(
        "Starting relay!", extra={k: str(v) for k, v in ns.__dict__.items()})
    metric = ns.metric()  # generator yielding the process variable (PV)
    target = ns.target()  # generator yielding the set point (SP)
    errhist = window(ns.lookback)  # sliding window of recent errors
    ramp_index = 0
    while True:
        SP = next(target)  # set point
        PV = next(metric)  # process variable
        err = (SP - PV)
        log.debug('got metric value', extra=dict(PV=PV, SP=SP))
        if ramp_index < ns.ramp:
            # during the initial ramp, follow a precomputed plan instead of
            # the feedback controller; errors are recorded as 0
            if ramp_index == 0:
                plan = create_ramp_plan(err, ns.ramp)
            ramp_index += 1
            MV = next(plan)
            errdata = errhist.send(0)
        else:
            errdata = errhist.send(err)
            weight = calc_weight(errdata)
            # manipulated variable: current error minus the weighted mean of
            # recent errors
            MV = int(round(err - weight * sum(errdata) / len(errdata)))
            log.info('data', extra=dict(data=[
                err, weight,
                sum(errdata) / len(errdata)]))
        if MV > 0:
            if ns.warmer:
                log.debug('adding heat', extra=dict(MV=MV, err=err))
                threading.Thread(target=ns.warmer, args=(MV,)).start()
            else:
                log.warn('too cold')
        elif MV < 0:
            if ns.cooler:
                log.debug('removing heat', extra=dict(MV=MV, err=err))
                threading.Thread(target=ns.cooler, args=(MV,)).start()
            else:
                log.warn('too hot')
        else:
            log.debug(
                'stabilized PV at setpoint', extra=dict(MV=MV, PV=PV, SP=SP))
        time.sleep(ns.delay)
        evaluate_stop_condition(list(errdata), ns.stop_condition)
# Module-level CLI parser factory grouping Relay's options into three
# documented sections; also used by validate_ns_or_sysexit() to print usage.
build_arg_parser = at.build_arg_parser([
    at.group(
        "What is Relay optimizing?",
        at.metric, at.target),
    at.group(
        "Instruct Relay how to heat or cool your metric",
        at.warmer, at.cooler),
    at.group(
        "Some optional Relay parameters",
        at.delay, at.lookback, at.ramp, at.sendstats, at.stop_condition),
])
import json
import redis
DEFAULT_REDIS_PORT = 6379


# NOTE: class name is misspelled ("Conention") but kept for API compatibility.
class RedisConention():
    """Redis connection descriptor.

    :param host: hostname for redis
    :param port: port for redis
    """

    def __init__(self, host, port):
        self.host = host
        self.port = port


class RedisWrapper():
    """Relay Specific Redis Wrapper.

    :param host: redis hostname
    :param port: redis port
    :param logger: logger instance used for update messages
    :param projectKey: LaunchDarkly project key
    :param environmentKey: LaunchDarkly environment key
    """

    def __init__(self, host, port, logger, projectKey, environmentKey):
        self.logger = logger
        self.projectKey = projectKey
        self.environmentKey = environmentKey
        self.redis = redis.Redis(host=host, port=port)

    def _formatKeyName(self):
        """Return formatted redis key name."""
        return 'ld:{0}:{1}:features'.format(self.projectKey, self.environmentKey)

    @staticmethod
    def connectionStringParser(uri):
        """Parse a connection string into host/port pairs.

        :param uri: comma-separated ``host`` or ``host:port`` entries
        :returns: list of RedisConention objects
        """
        parsed = []
        for chunk in uri.split(','):
            if not chunk:
                continue
            pieces = chunk.split(':')
            if len(pieces) == 1:
                # bare hostname: fall back to the default redis port
                parsed.append(RedisConention(pieces[0].strip(), DEFAULT_REDIS_PORT))
            elif len(pieces) == 2:
                parsed.append(RedisConention(pieces[0].strip(), int(pieces[1])))
            else:
                raise Exception("unable to parse redis connection string.")
        return parsed

    def getFlagRecord(self, featureKey):
        """Get feature flag record from redis.

        :param featureKey: key for feature flag
        :raises Exception: when the hash field does not exist
        """
        flag = self.redis.hget(self._formatKeyName(), featureKey)
        if flag is None:
            raise Exception('redis key not found.')
        return flag

    def updateFlagRecord(self, state, featureKey):
        """Update redis record with new state (bumping its version).

        :param state: state for feature flag
        :param featureKey: key for feature flag
        """
        keyName = self._formatKeyName()
        flagDoc = json.loads(self.getFlagRecord(featureKey).decode('utf-8'))
        flagDoc['on'] = state
        flagDoc['version'] += 1
        self.logger.info('updating {0} to {1}'.format(featureKey, state))
        self.redis.hset(keyName, featureKey, json.dumps(flagDoc).encode('utf-8'))
import launchdarkly_api
class LaunchDarklyApi():
    """Wrapper for the LaunchDarkly API"""
    def __init__(self, apiKey, projectKey=None, environmentKey=None, logger=None):
        """Instantiate a new LaunchDarklyApi instance.
        :param apiKey: API Access Key for LaunchDarkly
        :param projectKey: Key for project
        :param environmentKey: Environment in which to pull state from
        :param logger: optional logger (currently stored but unused here)
        """
        self.apiKey = apiKey
        self.projectKey = projectKey
        self.environmentKey = environmentKey
        self.logger = logger
        # get new LD client
        configuration = launchdarkly_api.Configuration()
        configuration.api_key['Authorization'] = apiKey
        # separate API clients for project-level and flag-level operations
        self.client = launchdarkly_api.ProjectsApi(
            launchdarkly_api.ApiClient(configuration))
        self.feature = launchdarkly_api.FeatureFlagsApi(
            launchdarkly_api.ApiClient(configuration))
    def formatHostname(self, key):
        """Returns formatted hostname for an environment.

        Currently returns *key* unchanged; kept as an extension point.
        :param key: environment key
        """
        return "{0}".format(key)
    def getEnvironments(self, projectKey):
        """Returns List of Environments for a Project.
        Includes name, key, and mobile key, and formatted hostname.
        :param projectKey: Key for project
        :returns: Collection of Environments
        """
        resp = self.client.get_project(projectKey)
        envs = []
        for env in resp.environments:
            # flatten the SDK model into a plain dict for callers
            env = dict(
                key=env.key,
                api_key=env.api_key,
                client_id=env.id,
                hostname=self.formatHostname(env.key)
            )
            envs.append(env)
        return envs
    def updateFlag(self, state, featureKey):
        """Update the flag status for the specified feature flag
        :param state: New feature flag state
        :param featureKey: Feature flag key
        :returns: the patched feature flag resource from the API
        """
        # JSON-Patch replacing the per-environment 'on' attribute
        buildEnv = "/environments/" + self.environmentKey + "/on"
        patchComment = [{"op": "replace", "path": buildEnv, "value": state}]
        return self.feature.patch_feature_flag(
            self.projectKey,
            featureKey,
            patchComment
        )
import torch
import torch.nn as nn
from relaynet_pytorch.net_api import sub_module as sm
class ReLayNet(nn.Module):
    """
    A PyTorch implementation of ReLayNet
    Coded by Shayan and Abhijit

    Encoder-decoder segmentation network: three encoder blocks with skip
    connections and pooling indices, a bottleneck, three decoder blocks, and
    a per-pixel classifier.

    param ={
        'num_channels':1,
        'num_filters':64,
        'num_channels':64,
        'kernel_h':7,
        'kernel_w':3,
        'stride_conv':1,
        'pool':2,
        'stride_pool':2,
        'num_classes':10
    }
    """
    def __init__(self, params):
        super(ReLayNet, self).__init__()
        self.encode1 = sm.EncoderBlock(params)
        # subsequent encoders consume the 64-filter output of the previous one
        params['num_channels'] = 64
        self.encode2 = sm.EncoderBlock(params)
        # params['num_channels'] = 64  # This can be used to change the numchannels for each block
        self.encode3 = sm.EncoderBlock(params)
        self.bottleneck = sm.BasicBlock(params)
        # decoders see skip features concatenated with unpooled features
        params['num_channels'] = 128
        self.decode1 = sm.DecoderBlock(params)
        self.decode2 = sm.DecoderBlock(params)
        self.decode3 = sm.DecoderBlock(params)
        params['num_channels'] = 64
        self.classifier = sm.ClassifierBlock(params)
    def forward(self, input):
        # each encoder returns (pooled, pre-pool skip, pooling indices)
        e1, out1, ind1 = self.encode1.forward(input)
        e2, out2, ind2 = self.encode2.forward(e1)
        e3, out3, ind3 = self.encode3.forward(e2)
        bn = self.bottleneck.forward(e3)
        # decoders unpool with the matching indices and merge the skip features
        d3 = self.decode1.forward(bn, out3, ind3)
        d2 = self.decode2.forward(d3, out2, ind2)
        d1 = self.decode3.forward(d2, out1, ind1)
        prob = self.classifier.forward(d1)
        return prob
    @property
    def is_cuda(self):
        """
        Check if model parameters are allocated on the GPU.
        """
        return next(self.parameters()).is_cuda
    def save(self, path):
        """
        Save model with its parameters to the given path. Conventionally the
        path should end with "*.model".
        Inputs:
        - path: path string
        """
        print('Saving model... %s' % path)
        torch.save(self, path)
import torch
import numpy as np
from torch.nn.modules.loss import _Loss
from torch.autograd import Function, Variable
import torch.nn as nn
import torch
import numpy as np
from torch.nn.modules.loss import _Loss
from torch.autograd import Function, Variable
import torch.nn as nn
import torch.nn.functional as F
class DiceCoeff(nn.Module):
    """Dice coefficient for a single example.

    Fix/generalization: the original used ``torch.dot``, which only accepts
    1-D tensors; the element-wise product sum below is identical for 1-D
    inputs and also supports multi-dimensional predictions/targets.
    """
    def __init__(self):
        super(DiceCoeff, self).__init__()

    def forward(self, input, target):
        # small smoothing constant avoids 0/0 when both tensors are all-zero
        inter = (input * target).sum() + 0.0001
        union = torch.sum(input ** 2) + torch.sum(target ** 2) + 0.0001
        t = 2 * inter.float() / union.float()
        return t
def dice_coeff(input, target):
    """Dice coeff for batches

    Averages the per-sample Dice coefficient over the batch dimension.
    NOTE(review): `i` is unbound if the batch is empty — confirm callers
    never pass zero-length batches.
    """
    if input.is_cuda:
        s = Variable(torch.FloatTensor(1).cuda().zero_())
    else:
        s = Variable(torch.FloatTensor(1).zero_())
    for i, c in enumerate(zip(input, target)):
        s = s + DiceCoeff().forward(c[0], c[1])
    return s / (i + 1)
class DiceLoss(_Loss):
    def forward(self, output, target, weights=None, ignore_index=None):
        """
        output : NxCxHxW Variable (log-probabilities; exponentiated below)
        target : NxHxW LongTensor
        weights : C FloatTensor
        ignore_index : int index to ignore from loss

        Returns the mean over channels of weighted (1 - Dice) terms.
        """
        eps = 0.0001
        # convert log-probabilities back to probabilities
        output = output.exp()
        # one-hot encode the target on the channel axis
        encoded_target = output.detach() * 0
        if ignore_index is not None:
            # temporarily map ignored pixels to class 0, then zero them out
            mask = target == ignore_index
            target = target.clone()
            target[mask] = 0
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
            mask = mask.unsqueeze(1).expand_as(encoded_target)
            encoded_target[mask] = 0
        else:
            encoded_target.scatter_(1, target.unsqueeze(1), 1)
        if weights is None:
            weights = 1
        intersection = output * encoded_target
        # per-channel Dice numerator: 2 * sum over batch and spatial dims
        numerator = 2 * intersection.sum(0).sum(1).sum(1)
        denominator = output + encoded_target
        if ignore_index is not None:
            denominator[mask] = 0
        denominator = denominator.sum(0).sum(1).sum(1) + eps
        loss_per_channel = weights * (1 - (numerator / denominator))
        return loss_per_channel.sum() / output.size(1)
class CrossEntropyLoss2d(nn.Module):
    """Thin wrapper around nn.CrossEntropyLoss for NxCxHxW logits."""
    def __init__(self, weight=None, size_average=True):
        super(CrossEntropyLoss2d, self).__init__()
        # NOTE(review): `size_average` is deprecated in newer torch releases
        # in favour of `reduction` — confirm the targeted torch version.
        self.nll_loss = nn.CrossEntropyLoss(weight, size_average)
    def forward(self, inputs, targets):
        return self.nll_loss(inputs, targets)
class CombinedLoss(nn.Module):
    """Weighted cross-entropy plus Dice loss."""
    def __init__(self):
        super(CombinedLoss, self).__init__()
        self.cross_entropy_loss = CrossEntropyLoss2d()
        self.dice_loss = DiceLoss()
    def forward(self, input, target, weight):
        # TODO: why?
        # NOTE(review): hard-codes CUDA, so this fails on CPU-only machines —
        # confirm whether device should follow the input tensor instead.
        target = target.type(torch.LongTensor).cuda()
        # DiceLoss expects probabilities; cross-entropy consumes raw logits
        input_soft = F.softmax(input,dim=1)
        y2 = torch.mean(self.dice_loss(input_soft, target))
        # per-pixel weighting applied to the cross-entropy term only
        y1 = torch.mean(torch.mul(self.cross_entropy_loss.forward(input, target), weight))
        y = y1 + y2
        return y
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    '''
    Conv -> BatchNorm -> PReLU building block.

    param ={
        'num_channels':1,
        'num_filters':64,
        'kernel_h':7,
        'kernel_w':3,
        'stride_conv':1,
        'pool':2,
        'stride_pool':2,
        'num_classes':10
    }
    '''
    def __init__(self, params):
        super(BasicBlock, self).__init__()
        # "same"-style padding so odd kernels preserve spatial dimensions
        pad_h = (params['kernel_h'] - 1) // 2
        pad_w = (params['kernel_w'] - 1) // 2
        self.conv = nn.Conv2d(
            in_channels=params['num_channels'],
            out_channels=params['num_filters'],
            kernel_size=(params['kernel_h'], params['kernel_w']),
            padding=(pad_h, pad_w),
            stride=params['stride_conv'])
        self.batchnorm = nn.BatchNorm2d(num_features=params['num_filters'])
        self.prelu = nn.PReLU()
    def forward(self, input):
        return self.prelu(self.batchnorm(self.conv(input)))
class EncoderBlock(BasicBlock):
    """BasicBlock followed by max-pooling; also returns the pre-pool
    activations and pooling indices for the matching DecoderBlock."""
    def __init__(self, params):
        super(EncoderBlock, self).__init__(params)
        # return_indices=True so the decoder can unpool with MaxUnpool2d
        self.maxpool = nn.MaxPool2d(kernel_size=params['pool'], stride=params['stride_pool'], return_indices=True)
    def forward(self, input):
        out_block = super(EncoderBlock, self).forward(input)
        out_encoder, indices = self.maxpool(out_block)
        return out_encoder, out_block, indices
class DecoderBlock(BasicBlock):
    """Unpools the input with the encoder's indices, concatenates the
    encoder's skip features on the channel axis, then applies BasicBlock."""
    def __init__(self, params):
        super(DecoderBlock, self).__init__(params)
        self.unpool = nn.MaxUnpool2d(kernel_size=params['pool'], stride=params['stride_pool'])
    def forward(self, input, out_block, indices):
        unpool = self.unpool(input, indices)
        # channel-wise concat of skip connection and unpooled features
        concat = torch.cat((out_block, unpool), dim=1)
        out_block = super(DecoderBlock, self).forward(concat)
        return out_block
class ClassifierBlock(nn.Module):
    """Final convolution projecting features to per-class score maps.

    forward() returns raw logits (the softmax below is unused).
    """
    def __init__(self, params):
        super(ClassifierBlock, self).__init__()
        # NOTE(review): key is 'num_class' here but docstrings elsewhere use
        # 'num_classes' — confirm which key callers actually provide.
        self.conv = nn.Conv2d(params['num_channels'], params['num_class'], params['kernel_c'], params['stride_conv'])
        # kept for API compatibility; not applied in forward()
        self.softmax = nn.Softmax2d()
    def forward(self, input):
        out_conv = self.conv(input)
        #out_logit = self.softmax(out_conv)
        return out_conv
from __future__ import annotations
from pydantic import BaseModel, Field, field_validator
from time import time
from typing import (Any, Callable, Generic, NamedTuple,
Optional, TYPE_CHECKING, TypeVar)
from .consts import DEFAULT_CHANNEL, DEFAULT_EVENT_TYPE, FORBIDDEN_CHARACTERS
from .utils import truncate, validate_forbidden_characters
if TYPE_CHECKING:
from .relay import Relay
else:
Relay = Any # this is a hack but BaseModel won't validate anymore...
class SourceInfo(BaseModel):
    """Attribution for an Event: the emitting Relay instance and/or the
    bound emitter callable.  Both are optional so events can be emitted
    without a source; listeners may filter on either field."""
    relay: Optional["Relay"] = None
    emitter: Optional[Callable] = None
T = TypeVar('T', bound=Any)
class Event(Generic[T], BaseModel):
    """A generic event carrying a payload of type ``T``.

    Events are the unit of communication between relays: each one bundles a
    payload, a broadcast ``channel`` and ``event_type``, optional source
    attribution, and a creation timestamp.

    Attributes:
    ----------
    - `data (T)`: The main payload or data for the event.
    - `channel (str)`: Communication channel for broadcasting.
    - `event_type (str)`: Type of the event for broadcasting.
    - `source (SourceInfo)`: Origin or source of the event (optional).
    - `time (float)`: Timestamp when the event was created.

    Example:
    -------
    ```python
    event = Event(data={"message": "Hello!"},
                  event_type="GREETING",
                  channel="MAIN")
    ```
    """
    data: T = ...
    channel: str = DEFAULT_CHANNEL
    event_type: str = DEFAULT_EVENT_TYPE
    source: Optional[SourceInfo] = None
    time: float = Field(default_factory=time)

    @field_validator('channel', 'event_type', mode="before")
    def check_forbidden_characters(cls, v: str) -> str:
        """
        Validate that the given value contains no forbidden characters.

        Raises:
        ------
        ValueError: If forbidden characters are found in the value.

        Returns:
        -------
        The original value if no forbidden characters are found.
        """
        return validate_forbidden_characters(v, FORBIDDEN_CHARACTERS)

    def __str__(self) -> str:
        """
        Return a user-friendly string representation of the Event instance.

        The payload repr is truncated to 50 characters so large payloads do
        not flood logs.
        """
        data_repr = repr(self.data)
        channel_repr = repr(self.channel)
        event_type_repr = repr(self.event_type)
        source_repr = repr(self.source)
        time_repr = repr(self.time)
        return (f"Event(data={truncate(data_repr, 50)}, "
                f"channel={channel_repr}, "
                f"event_type={event_type_repr}, "
                f"source={source_repr}, "
                f"time={time_repr})")
import asyncio
import inspect
import functools
import logging
from pydantic import BaseModel
from typing import Any, get_args, get_origin
from .bindings import Bindings, Listener, Emitter, Binding
from .event import Event, SourceInfo
from .utils import type_check, truncate
logging.basicConfig(level=logging.DEBUG,
format=('[%(levelname)s] [%(asctime)s] '
'[%(module)s:%(lineno)d] %(message)s'),
datefmt='%Y-%m-%d %H-%M-%S')
logger = logging.getLogger(__name__)
RED = "\033[1;31m"; RST = "\033[0;0m"
class Relay:
    class NoEmit(BaseModel):
        """Wrapper return type: tells the @emits wrapper not to emit the
        event and to return just the wrapped ``data`` instead."""
        data: Any
def __init__(self, bindings_config:list[Binding]=None):
"""
Initializes a Relay instance.
This constructor accepts `bindings_config` - a list of bindings that
represent the relationship between an emitter method and a listener
method. These methods don't have to be bound to an instance when passed
to the constructor; they are automatically bound to the Relay instance.
Passing the configuration when creating the Relay instance allows
you to set up complex event emitting and listening configurations
without needing to manually bind each method.
The method raises `ValueError` if any of the provided bindings are
of incorrect type.
Parameters:
----------
- `bindings_config` (`list[Binding]`, optional): A list of bindings
defining the `Relay` instance's behavior. Each binding consists of a
method and its channel and event type. It can either be an `Emitter`
or `Listener` instance. Default is None which means no bindings.
Usage:
-----
```python
class SampleRelay(Relay):
@Relay.listens
def listener(self, event: Event): ...
@Relay.emits
def emitter(self) -> Event: ...
# Create bindings
emitter_binding = Emitter(
method=SampleRelay.emitter,
channel="channel1",
event_type="event1"
)
listener_binding = Listener(
method=SampleRelay.listener,
channel="channel1",
event_type="event1"
)
# Use bindings in the Relay initialization
relay = SampleRelay(bindings_config=[emitter_binding, listener_binding])
```
In this example, when `emitter()` is called, it will emit an event
whose `channel` and `event_type` are `"channel1"` and `"event1"`,
respectively. When this event is emitted, `listener` will be invoked
with the corresponding event instance.
Returns:
-------
The method doesn't return anything, but initializes a `Relay`
instance with the provided bindings configuration.
Raises:
------
- `ValueError`: If a binding in the bindings_config list isn't
instance of `Emitter` or `Listener`.
"""
if bindings_config:
for binding in bindings_config:
method = getattr(self, binding.method.__name__)
if isinstance(binding, Emitter):
_binding = Emitter(method=method,
event_type=binding.event_type,
channel=binding.channel)
elif isinstance(binding, Listener):
_binding = Listener(method=method,
event_type=binding.event_type,
channel=binding.channel,
source=binding.source)
else:
raise ValueError(f"Invalid binding type: {type(binding)}")
self.add_binding(_binding)
@classmethod
async def emit(cls, event:Event):
"""
IMPORTANT:
---------
You should `await` this method to ensure that the
asynchronous tasks it spawns (for notifying listeners) are scheduled
properly.
Asynchronously emits a given event to all compatible listeners
registered with the `Bindings` class.
This method propagates the provided event to all the listener methods
which are registered for the event's `channel` and `event_type`.
However, if some listeners expect events from a particular source,
the event source will be checked before delivering to them.
Listeners will receive the event asynchronously, and any exceptions
raised by the listeners are caught and logged, ensuring that one
listener's exception will not halt the distribution of the event to
other listeners.
Parameters:
----------
- `event (Event[Any])`: The event instance containing the data to be
emitted. This should include information like `channel`, `event_type`,
and the source of the event optionally.
Usage:
-----
```python
# Example to emit an event
event = Event(data="Hello, World!",
channel="greetings",
event_type="hello")
await Relay.emit(event)
```
Note:
----
- It's essential that the `Bindings` class has been populated with the
necessary listeners for the event to be effectively delivered.
- If listeners have source restrictions specified, it's crucial that the
`event` parameter contains accurate source information.
- For best practices, always `await` this method, even though not doing
so might work in some scenarios.
Returns:
-------
None. However, side effects include calling all the compatible listener
methods with the provided event.
"""
def source_compatible(s_event:SourceInfo,
s_listener:SourceInfo) -> bool:
""" returns True if event source if compatible with listener
source (that is, if listener is expecting an event only
from a specific source)
"""
listn_relay = None if s_listener is None else s_listener.relay
listn_emitter = None if s_listener is None else s_listener.emitter
event_relay = None if s_event is None else s_event.relay
event_emitter = None if s_event is None else s_event.emitter
if listn_relay != None and event_relay != listn_relay:
return False
if listn_emitter != None and event_emitter != listn_emitter:
return False
return True
async def safe_method(event, method):
""" async call the bound method, catch any exceptions """
try:
result = await method(event)
except Exception as e:
logger.exception(f"{RED}Exception in executing emission: {e}. "
f"Event: <{event}>, Method: <{method}>{RST}")
listeners:list[Listener] = Bindings.get_by_event(event.channel,
event.event_type,
filter_=Listener)
for listener in listeners:
if not source_compatible(event.source, listener.source):
continue
method = listener.method
asyncio.create_task(safe_method(event, method))
@classmethod
def emits(cls, func):
"""
A class method decorator that allows methods within a `Relay` or its
child class to emit events with data validation against the provided
type hints. It ensures that methods are asynchronous and their return
type matches the expected event's payload type.
This decorator performs two types of checks:
1. **Static Checks** - Checks that:
- a. The decorated method is asynchronous.
- b. The method has a return type hint.
2. **Dynamic Checks** - Checks that:
- a. The method belongs to a class inheriting from `Relay`.
- b. The actual returned data matches the type hint.
If a method returns an instance of `NoEmit`, the event emission is
bypassed, and only the data contained within is returned.
Parameters:
----------
- `func (Callable)`: The method to be decorated.
Returns:
-------
- `Callable`: The wrapped function that includes event emission logic.
Raises:
------
- `TypeError`: If the method does not meet the criteria specified above.
Example:
--------
```python
class CustomRelay(Relay):
@emits
async def some_method(self, arg1) -> SomeDataType:
# some logic...
return some_data # This data will be emitted as event payload
```
Note:
----
- The decorated method must belong to a class that either is or
inherits from `Relay`.
- For conditional event emission, the method can return an instance
of `NoEmit` to skip the emission but still validate the data type.
- Bindings to this method are found in `Bindings` class.
"""
# STATIC CHECK
# make sure that the decorated method is a coroutine
if not asyncio.iscoroutinefunction(func):
raise TypeError(
f"The method '{func.__name__}' must be asynchronous. The "
f"'@emits' decorator can only be applied to async methods.")
# get the return type hint
signature = inspect.signature(func)
ret_annotation = signature.return_annotation
# Ensure an explicit return type is provided
if ret_annotation is inspect.Signature.empty:
raise TypeError(
f"The method '{func.__name__}' that is decorated by "
"@Relay.emits, must have an explicit return type "
"hint. For example, 'def method_name() -> str:'. If "
"the method does not return anything, use '-> None'. "
"If the method can return anything, use '-> typing.Any'.")
@functools.wraps(func) # preserve func metadata
async def wrapper(self, *args, **kwargs):
# DYNAMIC CHECK
# make sure that self is an instance of Relay or its children
if not isinstance(self, Relay):
raise TypeError(
f"The method '{func.__name__}' that is decorated by "
"@Relay.emits, must be a method of a class that "
"inherits from Relay.")
result = await func(self, *args, **kwargs)
# If the return type hint is `NoEmit`, don't emit the event
no_emit = isinstance(result, cls.NoEmit)
data = result.data if no_emit else result
if not type_check(data, ret_annotation):
data_truncated = truncate(data, 50)
raise TypeError(
f"Return value: -> {data_truncated} <- of type "
f"{type(data)} does not match the inferred type "
f"{ret_annotation} hinted to the decorated method "
f"'{func.__name__}(self, ...)'.")
if no_emit:
return result.data
# emit
method = getattr(self, func.__name__)
emitters = Bindings.get_by_method(method, filter_=Emitter)
for emitter in emitters:
await cls.emit(
Event(data=result, channel=emitter.channel,
event_type=emitter.event_type,
source=SourceInfo(relay=self, emitter=method)))
return result
return wrapper
@classmethod
def listens(cls, func):
"""
Decorator that validates the data of an `Event` parameter passed
to the decorated method.
1. Statically - Makes sure the decorated method has `event:Event[T]`
or `event:Event` as a parameter.
2. Dynamically - Validates the data of the received event against
the type hint in the method signature.
The `@receives` decorator is intended for methods that have a
parameter named 'event', which should be an instance of the
`Event` class. This decorator will validate the data contained
within the `Event` against the specified type hint.
The schema or type is inferred from the type hint of the 'event'
parameter. For instance, if the method signature is
`def method_name(event: Event[SomeModel])` or `Event[int|str]` or
`Event[Union[str, int]]`, etc. the data inside `event` will be
validated against the respective type hint.
If the `event` parameter's type hint doesn't include a specific
type (e.g., `Event` without `[SomeType]`), the event data won't
be validated.
If the data does not match the expected type or schema, a
`TypeError` is raised.
Parameters:
----------
- `func (Callable)`: The method to be decorated. Must have a
parameter named 'event'.
Returns:
-------
- `Callable`: The wrapped function that includes data validation.
Raises:
------
- `TypeError`: If the 'event' parameter is missing from the method
or if the method's type hint for 'event' is not of type `Event`.
- Other exceptions may be raised if internal validation crashes.
Example:
--------
```python
class SomeRelay(Relay):
@receives
def some_method(self, event: Event[SomeModel]):
# NOTE: event.data will be validated against SomeModel
# some logic here
@receives
def some_other_method(self, event: Event):
# NOTE: same as Event[Any] - event.data will not be validated
# some logic here
```
Note:
----
- The decorated method must belong to a class that either is or
inherits from `Relay`.
- The decorated method must have an 'event' parameter, and the type hint
for this parameter should be `Event` with an appropriate type or schema.
"""
# STATIC CHECK
# make sure that the decorated method is a coroutine
if not asyncio.iscoroutinefunction(func):
raise TypeError(
f"The method '{func.__name__}' must be asynchronous. The "
f"'@listens' decorator can only be applied to async methods.")
params = inspect.signature(func).parameters
if 'event' not in params:
raise TypeError(f"The method '{func.__name__}' must have an 'event'"
" parameter for the '@receives' decorator to work.")
annotation = params['event'].annotation
# If the annotation is just `Event` without a type hint, set it to Any
if annotation is Event:
annotation = Event[Any]
origin = get_origin(annotation)
if origin is not Event:
raise TypeError("The @receives decorator can only be applied to "
"methods with `Event` as their parameter type.")
# Get the actual data schema from the annotation
event_args = get_args(annotation)
event_schema:BaseModel = None
if event_args: # assumes first annotated argument is the event schema
event_schema = event_args[0]
@functools.wraps(func) # preserve func metadata
async def wrapper(self, event: Event[Any], *args, **kwargs):
# DYNAMIC CHECK
# make sure that self is an instance of Relay or its children
if not isinstance(self, Relay):
raise TypeError(
f"The method '{func.__name__}' that is decorated by "
"@Relay.receives, must be a method of a class that "
"inherits from Relay.")
if not type_check(event.data, event_schema):
data_truncated = truncate(event.data, 50)
raise TypeError(
f"Event data: -> {data_truncated} <- of type "
f"{type(event.data)} does not match the inferred type "
f"{event_schema} hinted to the decorated method "
f"'{func.__name__}(self, event:Event[T])'.")
return await func(self, event, *args, **kwargs)
return wrapper
    # Binding methods - these methods are used to add/remove bindings

    def remove_binding_relay(self):
        """
        Removes all bindings associated with a specific relay.

        Parameters:
        ----------
        - `self (Relay)`: The relay whose bindings are to be removed.
        """
        # Delegates to the global Bindings registry.
        Bindings.remove_relay(self)
    @classmethod
    def add_binding(cls, binding: Emitter | Listener):
        """
        Adds a binding to the `Bindings` class.

        Parameters:
        ----------
        - `binding (Binding)`: The binding to be added. (Listener or Emitter)

        Raises:
        ------
        - `ValueError`: If the binding's method does not belong to a class
        inheriting from `Relay`.
        """
        # Registration is global: Bindings is shared across all relays.
        Bindings.add(binding)
    @classmethod
    def remove_binding(cls, binding: Emitter | Listener):
        """
        Removes a binding from the `Bindings` class.

        Parameters:
        ----------
        - `binding (Binding)`: The binding to be removed. (Listener or Emitter)
        """
        # Removal is global: Bindings is shared across all relays.
        Bindings.remove(binding)
@classmethod
def clear_bindings(cls):
"""
Removes all bindings from the `Bindings` class.
"""
Bindings.clear() | /relaypy-async-0.1.1.tar.gz/relaypy-async-0.1.1/relay/relay.py | 0.868116 | 0.450903 | relay.py | pypi |
[](https://github.com/asahi417/relbert/blob/master/LICENSE)
[](https://badge.fury.io/py/relbert)
[](https://pypi.python.org/pypi/relbert/)
[](https://pypi.python.org/pypi/relbert/)
# RelBERT
We release the package `relbert` that includes the official implementation of
***Distilling Relation Embeddings from Pre-trained Language Models*** ([https://aclanthology.org/2021.emnlp-main.712/](https://aclanthology.org/2021.emnlp-main.712/))
that has been accepted by the [**EMNLP 2021 main conference**](https://2021.emnlp.org/)
### What's RelBERT?
RelBERT is a state-of-the-art lexical relation embedding model (i.e. model representing any word pair such as "Paris-France" as a fixed-length vector) based on large-scale pretrained masked language models. RelBERT also establishes a very strong baseline to solve analogies in a zero-shot transfer fashion and even outperform strong few-shot models such as [GPT-3](https://arxiv.org/abs/2005.14165) and [Analogical Proportion (AP)](https://aclanthology.org/2021.acl-long.280/).
| | SAT (full) | SAT | U2 | U4 | Google | BATS |
|:-------------------|-------------:|------:|-----:|-----:|---------:|-------:|
| [GloVe](https://nlp.stanford.edu/projects/glove/) | 48.9 | 47.8 | 46.5 | 39.8 | 96 | 68.7 |
| [FastText](https://fasttext.cc/) | 49.7 | 47.8 | 43 | 40.7 | 96.6 | 72 |
| [RELATIVE](http://josecamachocollados.com/papers/relative_ijcai2019.pdf) | 24.9 | 24.6 | 32.5 | 27.1 | 62 | 39 |
| [pair2vec](https://arxiv.org/abs/1810.08854) | 33.7 | 34.1 | 25.4 | 28.2 | 66.6 | 53.8 |
| [GPT-2 (AP)](https://aclanthology.org/2021.acl-long.280/) | 41.4 | 35.9 | 41.2 | 44.9 | 80.4 | 63.5 |
| [RoBERTa (AP)](https://aclanthology.org/2021.acl-long.280/) | 49.6 | 42.4 | 49.1 | 49.1 | 90.8 | 69.7 |
| [GPT-2 (tuned AP)](https://aclanthology.org/2021.acl-long.280/) | 57.8 | 56.7 | 50.9 | 49.5 | 95.2 | 81.2 |
| [RoBERTa (tuned AP)](https://aclanthology.org/2021.acl-long.280/) | 55.8 | 53.4 | 58.3 | 57.4 | 93.6 | 78.4 |
| [GPT3 (zeroshot)](https://arxiv.org/abs/2005.14165) | 53.7 | - | - | - | - | - |
| [GPT3 (fewshot)](https://arxiv.org/abs/2005.14165) | 65.2 | - | - | - | - | - |
| ***RelBERT*** | ***72.2*** | ***72.7*** | ***65.8*** | ***65.3*** | ***94.2*** | ***79.3*** |
[comment]: <> (| ***RelBERT (triplet)*** | ***67.9*** | ***67.7*** | ***68.0*** | ***63.2*** | ***94.2*** | ***78.9*** |)
[comment]: <> (| ***RelBERT (nce)*** | ***72.2*** | ***72.7*** | ***65.8*** | ***65.3*** | ***94.2*** | ***79.3*** |)
We also report the performance of RelBERT universal relation embeddings on lexical relation classification datasets, which reinforces the capability of RelBERT to model relations.
All datasets are public and available in the following links: [analogy questions](https://github.com/asahi417/AnalogyTools/releases/download/0.0.0/analogy_test_dataset.zip), [lexical relation classification](https://github.com/asahi417/AnalogyTools/releases/download/0.0.0/lexical_relation_dataset.zip).
Please have a look at our paper to learn more about RelBERT, and see [AnalogyTool](https://github.com/asahi417/AnalogyTools) or the [AP paper](https://aclanthology.org/2021.acl-long.280/) for more information about the datasets.
### What can we do with `relbert`?
In this repository, we release a Python package `relbert` for working with RelBERT and its checkpoints via the [huggingface modelhub](https://huggingface.co/models) and [gensim](https://radimrehurek.com/gensim/).
In brief, what you can do with the `relbert` is summarized as below:
- **Get a high quality embedding vector** given a pair of word
- **Get similar word pairs (nearest neighbors)**
- **Reproduce the results** of our EMNLP 2021 paper.
## Get Started
```shell
pip install relbert
```
## Play with RelBERT
RelBERT can give you a high-quality relation embedding vector of a word pair. First, you need to define the model class with a RelBERT checkpoint.
```python
from relbert import RelBERT
model = RelBERT()
```
Then you give a word pair to the model to get the embedding.
```python
# the vector has (1024,)
v_tokyo_japan = model.get_embedding(['Tokyo', 'Japan'])
```
Let's run a quick experiment to check the embedding quality. Given candidate lists `['Paris', 'France']`, `['music', 'pizza']`, and `['London', 'Tokyo']`, the pair which shares
the same relation with the `['Tokyo', 'Japan']` is `['Paris', 'France']`. Would the RelBERT embedding be possible to retain it with simple cosine similarity?
```python
from relbert import cosine_similarity
v_paris_france, v_music_pizza, v_london_tokyo = model.get_embedding([['Paris', 'France'], ['music', 'pizza'], ['London', 'Tokyo']])
cosine_similarity(v_tokyo_japan, v_paris_france)
>>> 0.999
cosine_similarity(v_tokyo_japan, v_music_pizza)
>>> 0.991
cosine_similarity(v_tokyo_japan, v_london_tokyo)
>>> 0.996
```
Bravo! The distance between `['Tokyo', 'Japan']` and `['Paris', 'France']` is the closest among the candidates.
In fact, this pipeline is how we evaluate the RelBERT on the analogy question.
### Nearest Neighbours of RelBERT
To get the similar word pairs in terms of the RelBERT embedding, we convert the RelBERT embedding to a gensim model file with a fixed vocabulary.
Specifically, we take the vocabulary of the [RELATIVE embedding](http://josecamachocollados.com/papers/relative_ijcai2019.pdf) that is released as a part of
[Analogy Tool](https://github.com/asahi417/AnalogyTools#relative-embedding), and generate the embedding for all the word pairs with RelBERT (`asahi417/relbert-roberta-large`).
Following the original vocabulary representation, words are joined by `__` and multiple token should be combined by `_` such as `New_york__Tokyo`.
The RelBERT embedding gensim file can be found [here](https://drive.google.com/file/d/1z3UeWALwf6EkujI3oYUCwkrIhMuJFdRA/view?usp=sharing). For example, you can get the nearest neighbours as below.
```python
from gensim.models import KeyedVectors
model = KeyedVectors.load_word2vec_format('gensim_model.bin', binary=True)
model.most_similar('Tokyo__Japan')
>>> [('Moscow__Russia', 0.9997282028198242),
('Cairo__Egypt', 0.9997045993804932),
('Baghdad__Iraq', 0.9997043013572693),
('Helsinki__Finland', 0.9996970891952515),
('Paris__France', 0.999695897102356),
('Damascus__Syria', 0.9996891617774963),
('Bangkok__Thailand', 0.9996803998947144),
('Madrid__Spain', 0.9996673464775085),
('Budapest__Hungary', 0.9996543526649475),
('Beijing__China', 0.9996539354324341)]
```
## Citation
If you use any of these resources, please cite the following [paper](https://arxiv.org/abs/2110.15705):
```
@inproceedings{ushio-etal-2021-distilling,
title = "Distilling Relation Embeddings from Pretrained Language Models",
author = "Ushio, Asahi and
Camacho-Collados, Jose and
Schockaert, Steven",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.emnlp-main.712",
doi = "10.18653/v1/2021.emnlp-main.712",
pages = "9044--9062",
abstract = "Pre-trained language models have been found to capture a surprisingly rich amount of lexical knowledge, ranging from commonsense properties of everyday concepts to detailed factual knowledge about named entities. Among others, this makes it possible to distill high-quality word vectors from pre-trained language models. However, it is currently unclear to what extent it is possible to distill relation embeddings, i.e. vectors that characterize the relationship between two words. Such relation embeddings are appealing because they can, in principle, encode relational knowledge in a more fine-grained way than is possible with knowledge graphs. To obtain relation embeddings from a pre-trained language model, we encode word pairs using a (manually or automatically generated) prompt, and we fine-tune the language model such that relationally similar word pairs yield similar output vectors. We find that the resulting relation embeddings are highly competitive on analogy (unsupervised) and relation classification (supervised) benchmarks, even without any task-specific fine-tuning. Source code to reproduce our experimental results and the model checkpoints are available in the following repository: https://github.com/asahi417/relbert",
}
```
| /relbert-2.0.2.tar.gz/relbert-2.0.2/README.md | 0.743541 | 0.965706 | README.md | pypi |
import logging
import signal
import sys
import time
from concurrent import futures
from google.cloud.pubsub_v1.subscriber.scheduler import ThreadScheduler
from .client import Subscriber
from .middleware import run_middleware_hook
from .subscription import Callback
logger = logging.getLogger(__name__)
class Worker:
    """A Worker manages the subscriptions which consume Google PubSub messages.

    Facilitates the creation of subscriptions if not already created,
    and the starting and stopping the consumption of them.

    :param subscriptions: list :class:`~rele.subscription.Subscription`
    """

    def __init__(
        self,
        subscriptions,
        gc_project_id=None,
        credentials=None,
        default_ack_deadline=None,
        threads_per_subscription=None,
    ):
        # One Subscriber client shared by all of this worker's subscriptions.
        self._subscriber = Subscriber(gc_project_id, credentials, default_ack_deadline)
        # StreamingPullFutures returned by consume(); kept so stop() can cancel them.
        self._futures = []
        self._subscriptions = subscriptions
        self.threads_per_subscription = threads_per_subscription

    def setup(self):
        """Create the subscriptions on a Google PubSub topic.

        If the subscription already exists, the subscription will not be
        re-created. Therefore, it is idempotent.
        """
        for subscription in self._subscriptions:
            self._subscriber.create_subscription(subscription.name, subscription.topic)

    def start(self):
        """Begin consuming all subscriptions.

        When consuming a subscription, a ``StreamingPullFuture`` is returned from
        the Google PubSub client library. This future can be used to
        manage the background stream.

        The futures are stored so that they can be cancelled later on
        for a graceful shutdown of the worker.
        """
        run_middleware_hook("pre_worker_start")
        for subscription in self._subscriptions:
            # Each subscription gets its own bounded thread pool for callbacks.
            executor_kwargs = {
                "thread_name_prefix": "ThreadPoolExecutor-ThreadScheduler"
            }
            executor = futures.ThreadPoolExecutor(
                max_workers=self.threads_per_subscription, **executor_kwargs
            )
            scheduler = ThreadScheduler(executor=executor)
            self._futures.append(
                self._subscriber.consume(
                    subscription_name=subscription.name,
                    callback=Callback(subscription),
                    scheduler=scheduler,
                )
            )
        run_middleware_hook("post_worker_start")

    def run_forever(self, sleep_interval=1):
        """Shortcut for calling setup, start, and _wait_forever.

        :param sleep_interval: Number of seconds to sleep in the ``while True`` loop
        """
        self.setup()
        self.start()
        self._wait_forever(sleep_interval=sleep_interval)

    def stop(self, signal=None, frame=None):
        """Manage the shutdown process of the worker.

        Cancels all the futures created in start(), runs the worker-stop
        middleware hooks, then exits with code 0 for a clean exit.

        NOTE(review): the original docstring claimed this also closes
        Django database connections; nothing in this method does so —
        any such cleanup would have to live in a middleware hook. Verify
        against the middleware configuration.

        :param signal: Needed for `signal.signal <https://docs.python.org/3/library/signal.html#signal.signal>`_ # noqa
        :param frame: Needed for `signal.signal <https://docs.python.org/3/library/signal.html#signal.signal>`_ # noqa
        """
        run_middleware_hook("pre_worker_stop", self._subscriptions)
        for future in self._futures:
            future.cancel()
        run_middleware_hook("post_worker_stop")
        sys.exit(0)

    def _wait_forever(self, sleep_interval):
        # Block the main thread; consumption happens on the client's
        # background threads.  Interrupted only by a signal handler.
        logger.info("Consuming subscriptions...")
        while True:
            time.sleep(sleep_interval)
def create_and_run(subs, config):
    """
    Create and run a worker from a list of Subscription objects and a config
    while waiting forever, until the process is stopped.

    We stop a worker process on:

    - SIGINT
    - SIGTERM
    - SIGTSTP (or SIGBREAK on Windows, where SIGTSTP does not exist)

    :param subs: List :class:`~rele.subscription.Subscription`
    :param config: :class:`~rele.config.Config`
    """
    print(f"Configuring worker with {len(subs)} subscription(s)...")
    for sub in subs:
        print(f" {sub}")
    worker = Worker(
        subs,
        config.gc_project_id,
        config.credentials,
        config.ack_deadline,
        config.threads_per_subscription,
    )
    # to allow killing runrele worker via ctrl+c
    signal.signal(signal.SIGINT, worker.stop)
    signal.signal(signal.SIGTERM, worker.stop)
    try:
        signal.signal(signal.SIGTSTP, worker.stop)
    except AttributeError:
        # SIGTSTP doesn't exist on Windows, so we use SIGBREAK instead.
        # (Accessing signal.SIGTSTP there raises AttributeError; a bare
        # except here used to swallow unrelated errors too.)
        signal.signal(signal.SIGBREAK, worker.stop)
    worker.run_forever()
import logging
import time
from rele.middleware import BaseMiddleware
class LoggingMiddleware(BaseMiddleware):
    """Default logging middleware.

    Logging format has been configured for Prometheus.
    """

    def __init__(self):
        self._logger = None
        # Set in setup(); initialised here so the attribute always exists
        # even if a hook fires before setup() has run.
        self._app_name = None

    def setup(self, config, **kwargs):
        """Capture the module logger and the application name from ``config``."""
        self._logger = logging.getLogger(__name__)
        self._app_name = config.app_name

    def _build_data_metrics(
        self, subscription, message, status, start_processing_time=None
    ):
        """Assemble the metrics payload attached to subscription log records.

        When ``start_processing_time`` is given, a ``duration_seconds`` field
        (rounded to 3 decimals) measured against ``time.time()`` is included.
        """
        result = {
            "agent": self._app_name,
            "topic": subscription.topic,
            "status": status,
            "subscription": subscription.name,
            "attributes": dict(message.attributes),
        }
        if start_processing_time is not None:
            end_processing_time = time.time()
            result["duration_seconds"] = round(
                end_processing_time - start_processing_time, 3
            )
        return result

    def pre_publish(self, topic, data, attrs):
        """Log (debug) that a publish to ``topic`` is starting."""
        self._logger.debug(
            f"Publishing to {topic}",
            extra={
                "pubsub_publisher_attrs": attrs,
                "metrics": {
                    "name": "publications",
                    "data": {"agent": self._app_name, "topic": topic},
                },
            },
        )

    def post_publish_success(self, topic, data, attrs):
        """Log (info) a successful publish to ``topic``."""
        self._logger.info(
            f"Successfully published to {topic}",
            extra={
                "pubsub_publisher_attrs": attrs,
                "metrics": {
                    "name": "publications",
                    "data": {"agent": self._app_name, "topic": topic},
                },
            },
        )

    def post_publish_failure(self, topic, exception, message):
        """Log (exception, with traceback) a failed publish to ``topic``."""
        self._logger.exception(
            f"Exception raised while publishing message "
            f"for {topic}: {str(exception.__class__.__name__)}",
            exc_info=True,
            extra={
                "metrics": {
                    "name": "publications",
                    "data": {"agent": self._app_name, "topic": topic},
                },
                "subscription_message": message,
            },
        )

    def pre_process_message(self, subscription, message):
        """Log (debug) that processing of ``message`` is starting."""
        self._logger.debug(
            f"Start processing message for {subscription}",
            extra={
                "metrics": {
                    "name": "subscriptions",
                    "data": self._build_data_metrics(subscription, message, "received"),
                }
            },
        )

    def post_process_message_success(self, subscription, start_time, message):
        """Log (info) successful processing, including the elapsed duration."""
        self._logger.info(
            f"Successfully processed message for {subscription}",
            extra={
                "metrics": {
                    "name": "subscriptions",
                    "data": self._build_data_metrics(
                        subscription, message, "succeeded", start_time
                    ),
                }
            },
        )

    def post_process_message_failure(
        self, subscription, exception, start_time, message
    ):
        """Log (error, with traceback) failed processing, including duration."""
        self._logger.error(
            f"Exception raised while processing message "
            f"for {subscription}: {str(exception.__class__.__name__)}",
            exc_info=True,
            extra={
                "metrics": {
                    "name": "subscriptions",
                    "data": self._build_data_metrics(
                        subscription, message, "failed", start_time
                    ),
                },
                "subscription_message": message,
            },
        )

    def pre_worker_stop(self, subscriptions):
        """Log (info) how many subscriptions are being cleaned up at shutdown."""
        self._logger.info(f"Cleaning up {len(subscriptions)} subscription(s)...")
# Release bot [](https://travis-ci.org/user-cont/release-bot) [](https://badge.fury.io/py/release-bot) [](https://ci.centos.org/job/release-bot-push/)
Automate releases on Github and PyPi.
## Description
This is a bot that helps maintainers deliver their software to users. It is meant to watch github repositories for
release pull requests. The PR must be named in one of the following formats:
* `0.1.0 release` if you want to create the "0.1.0" upstream release
* `new major release`, release-bot would then initiate a release from e.g. "1.2.3" to "2.0.0"
* `new minor release` e.g. "1.2.3" to "1.3.0"
* `new patch release` e.g. "1.2.3" to "1.2.4"
Release-bot now works with [SemVer](https://semver.org/) only.
Once the PR is merged, bot will create a new Github release and a PyPi release respectively.
Changelog will be pulled from root of the
repository and must be named `CHANGELOG.md`. Changelog for the new
version must begin with version heading, i.e `# 0.1.0`.
Everything between this heading and the heading for previous version will be pulled into the changelog.
Alternatively, you can let the bot do the boring work, update `__version__`
variable and fill changelog with commit messages from git log.
You can trigger this action by creating an issue and name it the same as you would do for a release PR, e.g. `0.1.0 release`, `new major release`, `new minor release`, `new patch release`.
All you have to do after that is merge the PR that the bot will make.
The bot works with
[pypa/setuptools_scm](https://github.com/pypa/setuptools_scm/) plugin. If
you're using it, you don't need to care about `__version__` at all. You can be
also sure that the bot will make the PyPI release correctly — before it
releases the software, it checks out the tag in the git repo.
A `release-conf.yaml` file is required. See [Configuration](#configuration) section for details.
Once a Github release is complete, bot will upload this release to PyPI.
Note that you have to setup your login details (see [Requirements](#requirements)).
## Try it locally
```
$ pip install release-bot
```
Other possible installations are through
[Docker](#docker-image), [OpenShift](#openshift-template), [Arch User Repository](#arch-user-repository).
First interaction with release bot may be automated releases on Github. Let's do it.
#### 1. Create upstream repository or use existing one
This is meant to be upstream repository where new releases will be published.
Within upstream repository create `release-conf.yaml` file which contains info on how to release the specific project.
Copy and edit [release-conf.yaml](release-conf-example.yaml).
At the end of `release-conf.yaml` add this line of code:
```yaml
# whether to allow bot to make PRs based on issues
trigger_on_issue: true
```
For possible advanced setup check [the documentation for an upstream repository](#upstream-repository).
#### 2. Create `conf.yaml`
Create configuration file `conf.yaml`. You can use [one](conf.yaml) from this repository. You will need to generate a [Github personal access token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/).
Recommended permissions for access token are: `repo`, `delete_repo`, `user`.
At the end of `conf.yaml` add this line of code:
```yaml
# Name of the account that the github_token belongs to
# Only needed for triggering the bot on an issue.
github_username: <your_github_username>
```
**Note**: This file **should not** be stored in upstream repository as it contains sensitive data.
For possible advanced setup check [the documentation for a private repository](#private-repository).
Also, see [requirements](#requirements) in case you want include PyPi releases.
#### 3. Run release-bot
At this point, release-bot is installed. At least two configuration files are set `release-conf.yaml` and `conf.yaml` (optionally `.pypirc`).
Launch bot by a command:
```$ release-bot -c <path_to_conf.yaml> --debug```
You can scroll down and see debug information of running bot.
#### 4. Make a new release
- Create an issue having `0.0.1 release` as a title in your upstream repository. You can select your own version numbers.
- Wait for the bot to make a new PR based on this issue (refresh interval is set in `conf.yaml`).
- Once the PR is merged bot will make a new release.
- Check release page of your upstream repository at GitHub and you should see new release `0.0.1`.
Since now, feel free to create releases automatically just by creating issues.
# Documentation
## Configuration
There are two yaml configuration files:
1. `conf.yaml` -- a config for the bot itself with some sensitive data (recommended to store in private repo)
2. `release-conf.yaml` -- stored in upstream repository and contains info on how to release the specific project.
## Private repository
You need to setup a git repository, where you'll store the `conf.yaml` and `.pypirc` files.
If this is not a local repository, make sure it's private so you prevent any private info leaking out.
If the path to `conf.yaml` is not passed to bot with `-c/--configuration`,
bot will try to find it in current working directory.
Here are the `conf.yaml` configuration options:
| Option | Description | Required |
|------------------------------|-------------------|---------------|
| `repository_name` | Name of your Github repository | Yes |
| `repository_owner` | Owner of the repository | Yes |
| `github_token` | [Github personal access token](https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/) | Yes |
| `github_username` | Name of the account that the `github_token` belongs to. Only needed for triggering the bot on an issue. | No |
| `github_app_installation_id` | Installation ID (a number) of the Github app. | No |
| `github_app_id` | ID (a number) of the Github app. | No |
| `github_app_cert_path` | Path to a certificate which Github provides as an auth mechanism for Github apps. | No |
| `refresh_interval` | Time in seconds between checks on repository. Default is 180 | No |
| `clone_url` | URL used to clone your Github repository. By default, `https` variant is used. | No |
Sample config named [conf.yaml](conf.yaml) can be found in this repository.
Regarding `github_token`, it's usually a good idea to create a Github account for the bot
(and use its Github API token)
so you can keep track of what changes were made by bot and what are your own.
You can also create a Github app and use it as an authentication mechanism for
the bot. For that you need to specify the three config values prefixed with
`github_app`.
**Note:** If the Upstream repository is a [Private Github repository](https://help.github.com/en/articles/setting-repository-visibility#about-repository-visibility), it is required to specify the SSH URL
of the repository as the `clone_url` option in `conf.yaml`. This will allow the bot to authenticate using SSH, when fetching from the Upstream repository.
## Upstream repository
You also have to have a `release-conf.yaml` file in the root of your upstream project repository.
Here are possible options:
| Option | Meaning | Required |
|---------------|---------------|---------------|
| `changelog` | List of changelog entries. If empty, changelog defaults to `$version release` | No |
| `author_name` | Author name for changelog. If not set, author of the merge commit is used | No |
| `author_email`| Author email for changelog. If not set, author of the merge commit is used | No |
| `pypi` | Whether to release on pypi. True by default | No |
| `pypi_project`| Name of your PyPI repository | No |
| `trigger_on_issue`| Whether to allow bot to make PRs based on issues. False by default. | No |
| `labels` | List of labels that bot will put on issues and PRs | No |
Sample config named [release-conf-example.yaml](release-conf-example.yaml) can be found in this repository.
## Requirements
Are specified in `requirements.txt`.
You have to setup your PyPI login details in `$HOME/.pypirc` as described in
[PyPI documentation](https://packaging.python.org/tutorials/distributing-packages/#create-an-account).
## Docker image
To make it easier to run this, release-bot is available as an
[source-to-image](https://github.com/openshift/source-to-image) builder image.
You can then create the final image like this:
```
$ s2i build $CONFIGURATION_REPOSITORY_URL usercont/release-bot app-name
```
where $CONFIGURATION_REPOSITORY_URL is link to repository with conf.yaml and .pypirc files.
To test it locally, you can then run the final image like this:
```
$ docker run <app-name>
```
Once all the changes and configuration files exist in GitHub and the git repository contains the needed files,
you can try to create an issue in your GitHub repository with string like "X.Y.Z release"
and you can see log like this:
```
$ docker run meta-test-family-bot
---> Setting up ssh key...
Agent pid 12
Identity added: ./.ssh/id_rsa (./.ssh/id_rsa)
11:47:36.212 configuration.py DEBUG Loaded configuration for fedora-modularity/meta-test-family
11:47:36.212 releasebot.py INFO release-bot v0.4.1 reporting for duty!
11:47:36.212 github.py DEBUG Fetching release-conf.yaml
11:47:51.636 releasebot.py DEBUG No merged release PR found
11:47:52.196 releasebot.py INFO Found new release issue with version: 0.8.4
11:47:55.578 releasebot.py DEBUG No more open issues found
11:47:56.098 releasebot.py INFO Making a new PR for release of version 0.8.5 based on an issue.
11:47:57.608 utils.py DEBUG ['git', 'clone', 'https://github.com/fedora-modularity/meta-test-family.git', '.']
...
```
## OpenShift template
You can also run this bot in OpenShift using [openshift-template.yml](openshift-template.yml) in this repository.
You must set two environment variables, the `$APP_NAME` is the name of your release-bot deployment,
and `$CONFIGURATION_REPOSITORY` which contains configuration for the release-bot.
The contents of the repository are described [above](#docker-image).
Note that if you use private repository (which you **absolutely** should),
you will need to set up a new [OpenShift secret](https://docs.openshift.com/container-platform/3.7/dev_guide/secrets.html) named
`release-bot-secret` to authenticate. It can be an SSH private key that you can use to access the repository
(for GitHub see [deploy keys](https://developer.github.com/v3/guides/managing-deploy-keys/)).
Here's a [guide](https://blog.openshift.com/deploy-private-git-repositories/) on
how to do that in OpenShift GUI, or another
[guide](https://blog.openshift.com/deploying-from-private-git-repositories/)
that uses `oc` commandline tool.
By default, the release-bot builder image won't update itself when a
new version of this image is pushed to docker hub.
You can change it by uncommenting lines with `#importPolicy:`
and `#scheduled: true` in [openshift-template.yml](openshift-template.yml).
Then the image will be pulled on a new release.
## Arch User Repository
For Arch or Arch based Linux distributions, you can install the bot from the [AUR Package](https://aur.archlinux.org/packages/release-bot).
You can use your favourite AUR Helper to install the package. For instance:
```
$ aurman -S release-bot
```
You can also install it by using the [PKGBUILD](https://aur.archlinux.org/cgit/aur.git/tree/PKGBUILD?h=release-bot) from the AUR repository.
To build the package, download the PKGBUILD and execute:
```
$ makepkg -cs #c flag cleans the extra remaining source and compiled files. s flag installs the dependencies if you don't have it.
```
To install the package execute,
```
$ sudo pacman -U release-bot-...tar.xz
```
# Contributing
If you are interested in making contribution to release-bot project, please read [Contribution guide](/CONTRIBUTING.md) for more information.
| /release-bot-0.7.1.tar.gz/release-bot-0.7.1/README.md | 0.638272 | 0.928733 | README.md | pypi |
from __future__ import annotations
from pathlib import Path
from shutil import copytree
from shutil import move
import toml
from wheel.wheelfile import WheelFile
import release_gitter as rg
from release_gitter import removeprefix
# Placeholder distribution name used for the generated wheel and .dist-info
# directories built by this pseudo backend.
PACKAGE_NAME = "pseudo"
def download(config) -> list[Path]:
    """Fetch the configured release asset and return the downloaded files.

    Resolves the release via the git remote info taken from ``config``,
    matches the asset against the configured format and system/arch
    mappings, downloads it (optionally extracting files), and finally runs
    the configured post-download command, if any.
    """
    remote = rg.GitRemoteInfo(config.hostname, config.owner, config.repo)
    release = rg.fetch_release(remote, config.version)
    asset = rg.match_asset(
        release,
        config.format,
        version=config.version,
        system_mapping=config.map_system,
        arch_mapping=config.map_arch,
    )
    downloaded = rg.download_asset(asset, extract_files=config.extract_files)

    # Optionally run a post-download shell command from the config.
    if config.exec:
        rg.check_call(config.exec, shell=True)

    return downloaded
def read_metadata():
    """Build parsed CLI args from the [tool.release-gitter] pyproject section.

    Raises ValueError when the section is missing or empty.
    """
    config = toml.load("pyproject.toml").get("tool", {}).get("release-gitter")
    if not config:
        raise ValueError("Must have configuration in [tool.release-gitter]")

    args = []
    for name, value in config.items():
        flag = "--" + name
        if flag == "--format":
            # The format string is the positional argument; it is appended
            # so it ends up after all the prepended flags.
            args += [value]
        elif isinstance(value, dict):
            # Mapping options become repeated "--flag key=value" pairs.
            for sub_name, sub_value in value.items():
                args = [flag, f"{sub_name}={sub_value}"] + args
        elif isinstance(value, list):
            # List options become repeated "--flag item" pairs.
            for item in value:
                args = [flag, item] + args
        else:
            args = [flag, value] + args
    return rg._parse_args(args)
class _PseudoBuildBackend:
    """Minimal PEP 517 build backend.

    Instead of building from source, it downloads a pre-built release
    asset (see ``download``) and packs it into a wheel whose scripts
    directory contains the downloaded files.
    """

    # Should allow passing args as `--build-option`
    _gitter_args = None

    def prepare_metadata_for_build_wheel(
        self, metadata_directory, config_settings=None
    ):
        """Create the wheel metadata and return the .dist-info directory name."""
        # Create a .dist-info directory containing wheel metadata inside
        # metadata_directory, e.g. {metadata_directory}/{package}-{version}.dist-info/
        print("Prepare meta", metadata_directory, config_settings)
        metadata = read_metadata()
        # Git tags commonly carry a leading "v"; wheel versions must not.
        version = removeprefix(metadata.version, "v")
        # Returns distinfo dir?
        dist_info = Path(metadata_directory) / f"{PACKAGE_NAME}-{version}.dist-info"
        dist_info.mkdir()
        # Write the minimal core METADATA file.
        pkg_info = dist_info / "METADATA"
        pkg_info.write_text(
            "\n".join(
                [
                    "Metadata-Version: 2.1",
                    f"Name: {PACKAGE_NAME}",
                    f"Version: {version}",
                ]
            )
        )
        # Write the WHEEL file; the wheel is declared pure and version independent.
        wheel_info = dist_info / "WHEEL"
        wheel_info.write_text(
            "\n".join(
                [
                    "Wheel-Version: 1.0",
                    "Root-Is-Purelib: true",
                    "Tag: py2-none-any",
                    "Tag: py3-none-any",
                ]
            )
        )
        return str(dist_info)

    def build_sdist(self, sdist_directory, config_settings=None):
        """Pretend to build an sdist (not actually used by this backend)."""
        # Builds a .tar.gz and places it in specified sdist_directory
        # That should contain a toplevel directory of `name-version` containing source files and the pyproject.toml
        # HACK: This isn't needed or used
        p = Path(sdist_directory + ".dist-info")
        return p

    def build_wheel(
        self, wheel_directory, config_settings=None, metadata_directory=None
    ):
        """Download the release asset and pack it into a wheel.

        Returns the wheel's file name, as required by PEP 517.
        """
        metadata_directory = Path(metadata_directory)
        metadata = read_metadata()
        version = removeprefix(metadata.version, "v")
        wheel_directory = Path(wheel_directory)
        wheel_directory.mkdir(exist_ok=True)
        # Downloaded binaries are installed as scripts via the .data/scripts dir.
        wheel_scripts = wheel_directory / f"{PACKAGE_NAME}-{version}.data/scripts"
        wheel_scripts.mkdir(parents=True, exist_ok=True)
        copytree(metadata_directory, wheel_directory / metadata_directory.name)
        # NOTE(review): metadata was already read above -- this second read
        # looks redundant; confirm before removing.
        metadata = read_metadata()
        files = download(metadata)
        for file in files:
            move(file, wheel_scripts / file.name)
        print(f"ls {wheel_directory}: {list(wheel_directory.glob('*'))}")
        wheel_filename = f"{PACKAGE_NAME}-{version}-py2.py3-none-any.whl"
        with WheelFile(wheel_directory / wheel_filename, "w") as wf:
            print("Repacking wheel as {}...".format(wheel_filename), end="")
            # sys.stdout.flush()
            wf.write_files(wheel_directory)
        return wheel_filename
# Module-level singleton; the PEP 517 hooks below are its bound methods,
# re-exported at module level as the build-backend entry points.
_BACKEND = _PseudoBuildBackend()
prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
build_sdist = _BACKEND.build_sdist
build_wheel = _BACKEND.build_wheel
from __future__ import print_function, absolute_import, division, unicode_literals
# Single source of truth for package metadata; consumed by the ruamel
# release tooling and mirrored into the module-level aliases below.
_package_data = dict(
    full_package_name='release_info',
    version_info=(0, 3, 6),
    __version__='0.3.6',
    version_timestamp='2022-01-16 09:04:17',
    author='Anthon van der Neut',
    author_email='a.van.der.neut@ruamel.eu',
    description='automatically updated python release information',
    keywords='pypi statistics',
    entry_points='python_release_info=release_info.__main__:main',
    # entry_points=None,
    license='Copyright Ruamel bvba 2007-2020',
    since=2020,
    # status="α|β|stable", # the package status on PyPI
    # data_files="",
    install_requires=[],
    tox=dict(
        env='3',
    ),
    print_allowed=True,
    python_requires='>=3',
    # config_dir='python_release_info/config.ini',
)

# Conventional module-level aliases re-exported from the metadata dict.
version_info = _package_data['version_info']
__version__ = _package_data['__version__']
_cligen_data = """\
# all tags start with an uppercase char and can often be shortened to three and/or one
# characters. If a tag has multiple uppercase letter, only using the uppercase letters is a
# valid shortening
# Tags used:
# !Commandlineinterface, !Cli,
# !Option, !Opt, !O
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# !PreSubparserOption, !PSO
# !Help, !H
# !Argument, !Arg
# - !Arg [files, nargs: '*', !H files to process]
# !Module # make subparser function calls imported from module
# !Instance # module.Class: assume subparser method calls on instance of Class imported from module
# !Action # either one of the actions in subdir _action (by stem of the file) or e.g. "store_action"
# !Config YAML/INI/PON read defaults from config file
# !AddDefaults ' (default: %(default)s)'
# !Prolog (sub-)parser prolog/description text (for multiline use | )
# !Epilog (sub-)parser epilog text (for multiline use | )
# !NQS used on arguments, makes sure the scalar is non-quoted e.g for instance/method/function
# call arguments, when cligen knows about what argument a keyword takes, this is not needed
!Cli 0:
- !Instance release_info.release_info.ReleaseInfo
- !AddDefaults ' (default: %(default)s)'
- !Config [INI, python_release_info/config.ini]
- !Option [verbose, v, !Help increase verbosity level, !Action count]
- !Option [dir, !Help 'base directory for all downloads and extraction']
# - !Option [config, !Help directory for config file, default: '~/.config/python_release_info/']
- !O [force, !Action store_true, !Help 'force download (and extraction), normally skipped if already there']
- !O [type, !Help 'compiler type to work on: [cpython, tbd]', default: 'cpython']
- update:
- !Help download release_info.pon to config directory (if --dir specified also download new versions)
- !Option [extract, !Help extract newly downloaded versions, !Action store_true]
- !Option [build, b, !Help newly extracted versions]
- !Option [delay, !H delay updating for DELAY days, type: int]
- current:
- !Help list of current major.minor.micro versions
- !Option [dd, !Action date, default: today, metavar: DATE, !Help 'show versions current on %(metavar)s)']
- pre:
- !H list of not yet finalized releases
- !Option [dd, !Action date, default: today, metavar: DATE, !Help 'show versions current on %(metavar)s)']
- download:
- !H download/extract a particular version
- !Opt [extract, !Action store_true, !Help extract downloaded tar file]
- !Arg [version]
# - !Option [test, !Action store_true, !Help don't import version/packagedata from . (for testing cligen)]
# - !Option [all, !Action store_true, !Help build sdist and wheels for all platforms]
# - !Option [linux, !Action store_true, !Help build linux wheels using manylinux]
# - !Arg [args, nargs: '*', !H you have to do this]
# - !Prolog 'Prolog for the parser'
# - !Epilog 'Epilog for the parser'
""" # NOQA
def release_info():
    """Lazily import and return ``release_info`` from the sibling module."""
    from .release_info import release_info as _release_info  # NOQA
    return _release_info
import re
from release_mgr.git import get_commit_for_tag, get_tags, git
class Version:
    """A semantic version: ``[v]MAJOR.MINOR.PATCH[-SUFFIX]``.

    Instances order by the numeric components first, then by pre-release
    suffix precedence (``beta`` < ``tc`` < ``rc`` < final release).  A
    leading ``v`` is remembered only for formatting and is deliberately
    ignored by comparisons.
    """

    # NOTE: the suffix class must be [A-Za-z0-9]; the previous [A-z0-9]
    # accidentally also matched the ASCII characters between 'Z' and 'a'
    # ('[', '\\', ']', '^', '_', '`').
    release_ver = re.compile(r"^(v)?[0-9]+\.[0-9]+\.[0-9]+(-[A-Za-z0-9]+)?$")

    def __init__(self, major, minor, patch, suffix=None, use_v_prefix=False):
        self.major = major
        self.minor = minor
        self.patch = patch
        self.suffix = suffix
        self.use_v_prefix = use_v_prefix

    @staticmethod
    def is_version_string(verstr):
        """Return True when *verstr* is a parseable semver string."""
        return bool(Version.release_ver.match(verstr))

    @classmethod
    def latest_version(cls):
        """Return ``(commit, Version)`` for the newest tagged release.

        When the repository has no tags, the root commit and version
        ``0.0.0`` are returned instead.
        """
        tags = [
            tag[len("refs/tags/") :] for tag in get_tags() if tag[len("refs/tags/") :]
        ]
        if not tags:
            # No releases yet: anchor the "previous version" at the first commit.
            commits = git("log", "--reverse", "--format=%H").split("\n")
            return (
                commits[0],
                cls(0, 0, 0),
            )
        latest_version = max(map(Version.from_str, tags))
        last_version_commit = get_commit_for_tag(str(latest_version))
        return (
            last_version_commit,
            latest_version,
        )

    @classmethod
    def from_str(cls, verstr):
        """Parse *verstr* into a :class:`Version`.

        Raises:
            ValueError: if *verstr* is not a valid semver string.  (A
                subclass of the generic ``Exception`` raised previously,
                so existing ``except`` clauses keep working.)
        """
        if not Version.is_version_string(verstr):
            raise ValueError("Got unexpected input: {verstr}".format(verstr=verstr))
        major, minor, patch = verstr.split(".")
        suffix = None
        if "-" in patch:
            patch, suffix = patch.split("-")
        use_v_prefix = False
        if major.startswith("v"):
            major = major[1:]
            use_v_prefix = True
        return cls(
            int(major),
            int(minor),
            int(patch),
            suffix,
            use_v_prefix=use_v_prefix,
        )

    def increment_patch(self):
        """Bump the patch component in place."""
        self.patch += 1

    def increment_minor(self):
        """Bump the minor component in place, resetting patch."""
        self.minor += 1
        self.patch = 0

    def increment_major(self):
        """Bump the major component in place, resetting minor and patch."""
        self.major += 1
        self.minor = 0
        self.patch = 0

    def __str__(self):
        return "{prefix}{major}.{minor}.{patch}{suffix}".format(
            major=self.major,
            minor=self.minor,
            patch=self.patch,
            suffix="-" + self.suffix if self.suffix else "",
            prefix="v" if self.use_v_prefix else "",
        )

    def __eq__(self, other):
        # The "v" prefix is cosmetic and deliberately excluded from equality.
        if not isinstance(other, Version):
            return False
        return (
            self.major == other.major
            and self.minor == other.minor
            and self.patch == other.patch
            and self.suffix == other.suffix
        )

    def __lt__(self, other):
        if not isinstance(other, Version):
            # Previously this raised AttributeError; NotImplemented lets
            # Python produce the conventional TypeError instead.
            return NotImplemented
        for version_part in ["major", "minor", "patch"]:
            ours = getattr(self, version_part)
            theirs = getattr(other, version_part)
            if ours > theirs:
                return False
            if theirs > ours:
                return True
        # Numeric parts are equal: decide by pre-release suffix precedence.
        if self.suffix == other.suffix:
            return False
        suffix_precedence = {
            "beta": -1,
            "tc": 0,
            "rc": 1,
            None: 2,  # a final release outranks any pre-release
        }
        # Unknown suffixes rank below all known ones (default -2).
        our_suffix = suffix_precedence.get(self.suffix, -2)
        their_suffix = suffix_precedence.get(other.suffix, -2)
        return our_suffix < their_suffix

    def __le__(self, other):
        return self == other or self < other
# Release Tools [](https://github.com/Bitergia/release-tools/actions?query=workflow:tests+branch:master+event:push) [](https://codecov.io/gh/Bitergia/release-tools)
Set of tools to generate Python releases.
With this package, Python maintainers are able to automate
many of the boring and time consuming tasks related with
packages and releases.
These tools are based in the way GitLab project generates its
releases. You have more information about their motivation
[here](https://gitlab.com/gitlab-org/gitlab-foss/issues/17826).
This software is licensed under GPL3 or later.
## Features
This package allows us to:
* Automate the creation of release notes
* Bump up the release version based on the release notes
* Publish a release in a Git repository
## Requirements
* Python >= 3.7
* Poetry >= 1.0
## Installation
To install the release tools from the source code you'll need
to install `poetry`. We use [poetry](https://python-poetry.org/)
for dependency management and packaging. You can install it
following its [documentation](https://python-poetry.org/docs/#installation).
Once you have installed it, you can download the source code with git:
```
# Get release-tools source code
$ git clone https://github.com/Bitergia/release-tools
```
Move to the directory and install the software and the dependencies:
```
$ cd release-tools
$ poetry install
```
## Workflow
Together with these tools, this package provides an **opinionated
way** to generate the release of a Python package. We think
releases must be automated and provide useful information
to end users so they can understand better the changes between
versions. Our tools fulfill those requirements.
There are also some **assumptions** to take into account:
* We use git repositories.
* We use [semantic versioning](https://semver.org/) for numbering our packages.\
Version numbers are defined in the variable `__version__` which is
stored on a file called `_version.py`. This file must be tracked on the git
repository.
* We use `poetry` and the file `pyproject.toml`
(see [PEP518](https://www.python.org/dev/peps/pep-0518/)) to manage
build system dependencies. If you don't have this file, you can use
the command `poetry init` to create it from scratch. This file must
be tracked on the git repository.
The **workflow** is defined by the next steps:
```
changelog -> semverup -> notes -> publish
```
- Developers use `changelog` script to generate changelog **entry
notes**. They contain basic information about their changes in
the code (e.g a new feature; a fixed bug). The notes should
**explain** the change to a reader who has **zero context** about
software details.\
We **recommend** to create one of these entries for each pull
request or merge request.\
These notes are stored under the directory `releases/unreleased`.
- Once we are ready to create a new release, we call `semverup`.
It will increase the **version** according to semantic versioning
and the type of changelog entries generated between releases.
- When the version is increased, we run `notes` to generate the
**release notes** using the unreleased changelog entries.
- Finally, we **publish** the release in the Git repository creating
a **commit** that will contain the new release notes and the new
version files. A **tag** is also created with the new version number.
To do it, we call to `publish` script. This script also removes
the entries in `released/unreleased` directory.
This is an example of the basic usage:
```
# Create some changelog entries
$ changelog -t "Fix bug #666" -c fixed
Changelog entry 'fix-bug-#666.yml' created
$ changelog -t "Add support for deleting entries" -c added
Changelog entry 'add-support-for-deleting-entries.yml' created
# Increase the version number
$ semverup
0.2.0
# Generate the release notes
$ notes "WebApp" 0.2.0
Release notes file '0.2.0.md' created
# Publish the release in a public repository
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin
Cleaning directories...done
Adding files to the release commit...done
Creating release commit...done
Publishing release in origin...done
```
## Tools
### changelog
This interactive tool creates note entries about the changes in
the code. Developers can use this tool to create these notes that
will be included in the changelog or in the release notes.
You will need to run this script inside of the Git where you store
the project.
It will guide you to create a new entry. You can select the title
and the type of the change.
```
>> Please specify the title of your change: Fix bug #666
```
```
>> Please specify the category of your change
1. New feature (added)
2. Bug fix (fixed)
3. Breaking change (changed)
4. New deprecation (deprecated)
5. Feature removal (removed)
6. Security fix (security)
7. Performance improvement (performance)
8. Dependencies updated (dependency)
9. Other (other)
: 2
```
Each category updates a different version number:
- Major version: `changed` and `removed`.
- Minor version: `added`, `deprecated`, `security`, `performance` and `other`.
- Patch version: `fixed` and `dependency`.
At the end of the process, a text editor will open to let you review
the entry and make the final changes. The editor will be the default
defined in your system.
```
title: 'Fix bug #666'
category: fixed
author: John Smith <jsmith@example.com>
issue: 666
notes: >
The bug was making impossible to cast a spell on
a magician.
```
New entries will be stored in "releases/unreleased" directory.
This directory must be available under the Git root path.
```
Changelog entry 'fix-bug-#666.yml' created
```
If you don't want to create a new entry and see only the final result,
please active '--dry-run' flag.
```
$ changelog --dry-run
```
You can skip some parts of the process providing information
in advance such as the title (`-t`) or the category (`-c`)
of the entry.
```
$ changelog -t "Fix bug #666" -c fixed
```
### semverup
This script increments the version number following semver specification
and using the note entries generated with `changelog` tool.
```
$ semverup
0.2.0
```
### notes
When you run this script, it will generate the release notes of the
package tracked by the current Git repository.
You'll need to provide the `name` of the package and the `version`
of the new release. The script will generate a Markdown document
under the `releases` directory using the changelog entries stored
on `releases/unreleased`. Take into account the argument `name`
is only used as the title of the document.
```
$ notes "MyApp" 0.2.0
Release notes file '0.2.0.md' created
```
Changelog entries included in the release notes are moved to a new
directory in 'unreleased/processed'. If you are running multiple
release candidates, and you don't want to include the same notes in
successive release candidates, use the flag '--pre-release'.
If you also want to add the content of these release notes to the `NEWS`
file, use the flag `--news`.
```
$ notes "MyApp" 0.2.0 --news
Release notes file '0.2.0.md' created
News file updated to 0.2.0
```
If you just want to see the final result of the notes
but not generate a new file, please activate `--dry-run` flag.
```
$ notes "MyApp" 0.2.0 --dry-run
## MyApp 0.2.0 - (2020-03-04)
**Bug fixes:**
* Fix bug #666
The bug was making impossible to cast a spell on
a magician.
```
If you want to add the contributor names of these release notes to the
AUTHORS file, use the flag `--authors`.
```
$ notes "MyApp" 0.2.0 --authors
Release notes file '0.2.0.md' created
Authors file updated
```
### publish
This script will generate a new release in the repository.
This will consist on creating a commit and a tag with the
new release notes and the updated version files.
To run it, you'll need to provide the version number and
the author of the new release.
```
$ publish 0.2.0 "John Smith <jsmith@example.com>"
Cleaning directories...done
Adding files to the release commit...done
Creating release commit...done
```
By default the command doesn't push the commit release to a
remote repository. To force it, use the parameter `--push`
including the name of the remote where commits will be pushed.
```
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin
Cleaning directories...done
Adding files to the release commit...done
Creating release commit...done
Publishing release in origin...done
```
It is also possible to push only the commit release and its tag.
To do so, set `--only-push` together with `--push` option.
```
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin --only-push
Publishing release in origin...done
```
## Troubleshooting
### How can I change the default editor used by `changelog`?
By default, `changelog` will use the editor defined in the `EDITOR`
environment variable. You can define your own editor updating this
variable.
```
export EDITOR=/bin/nano
```
If this variable doesn't exist it will try with `vim` or `nano`
in that order.
### What's the format of the changelog entries?
Changelog entries use [YAML format](https://yaml.org/).
Remember you can write blocks of text using `>` character at the beginning
of each block. See the next example:
```
title: 'Example of notes'
category: fixed
author: John Smith <jsmith@example.com>
issue: 1
notes: >
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
tempor incididunt ut labore et dolore magna aliqua.
Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi
ut aliquip ex ea commodo consequat.
```
### Error: version file not found
The tools did not find a `_version.py` file. This file must also
be tracked on your repository. It must contain a variable named
`__version__`. The value must be a string following semantic
versioning format.
```
$ cat _version.py
__version__ = "3.6.5"
```
### Error: version number '\<version\>' in '\<filepath\>' is not a valid semver string
The format of the version number is invalid. We use semantic
versioning format to set version numbers. The value must be a `str`.
Change the version number and check the
[semantic versioning rules](https://semver.org/) in case of doubt.
### Error: pyproject file not found
The tools did not find a `pyproject.toml` file. This file must also
be tracked on your repository. It contains information needed by `poetry`
to build the software package. If you don't have this file
you can create a new one using `poetry init`.
```
$ poetry init
```
### Error: pathspec '\<filepath\>' did not match any files; code error: 128
The file `<filepath>` must be tracked by your git repository. Add it to
your repo. Usually you'll get this error if you forgot to add your
changelog entry notes to the repository.
### Error: tag '\<tag\>' already exists; code error: 128
If you have an existing tag with the same version, you can expect
this error. You can delete the tag using `git tag -d version` and
create the release commit again using publish.
```
$ git tag -d 0.2.0
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin
```
### Error: error: src refspec '\<branch\>' does not match any
You can expect this error if you are not using `master` as your default
branch. You can change this in the codebase (push method of the publish.py)
if you are using any other branch as default.
If you are using `main` as default branch, change `master` to `main`.
```
- project.repo.push(remote, 'master')
+ project.repo.push(remote, 'main')
```
You can use `publish` and set `--only-push` together with `--push` option
as the release is committed but not pushed yet.
```
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin --only-push
Publishing release in origin...done
```
### Error: Authentication failed for '\<github-url\>'; code error: 128
If the release commit is created and you failed to publish the release
because of invalid credentials for git, you can use `publish` and
set `--only-push` together with `--push` option as the release is committed
but not pushed yet.
```
$ publish 0.2.0 "John Smith <jsmith@example.com>" --push origin --only-push
Publishing release in origin...done
```
## License
Licensed under GNU General Public License (GPL), version 3 or later.
| /release_tools-0.6.0.tar.gz/release_tools-0.6.0/README.md | 0.505127 | 0.939637 | README.md | pypi |
import os
import click
import yaml
from release_tools.entry import (CategoryChange,
ChangelogEntry,
determine_filepath)
from release_tools.project import Project
from release_tools.repo import RepositoryError
def title_prompt():
    """Build the prompt message asking for the title of a change."""
    return ">> Please specify the title of your change"
def category_prompt():
    """Build the prompt listing every change category with its index."""
    category_lines = [
        "{}. {} ({})".format(cat.value, cat.title, cat.category)
        for cat in CategoryChange
    ]
    return (
        ">> Please specify the category of your change\n\n"
        + "\n".join(category_lines)
        + "\n\n"
    )
def validate_title(ctx, param, value):
    """click callback: reject empty titles, return the trimmed title."""
    cleaned = value.strip("\n\r ")
    if cleaned:
        return cleaned
    raise click.BadParameter("title cannot be empty")
def validate_category(ctx, param, value):
    """Check category option values.

    A category can be given either as an integer index or as a category
    name; names are matched case-insensitively. Index values are mapped to
    their category string, name values are returned unchanged.
    """
    try:
        index = int(value)
    except ValueError:
        # Not an index; accept the raw value only when it names a category.
        try:
            CategoryChange[value.upper()]
        except KeyError:
            msg = "valid options are {}".format(CategoryChange.values())
            raise click.BadParameter(msg)
        return value
    # The value was an index; map it to its category when it is in range.
    try:
        return CategoryChange(index).category
    except ValueError:
        msg = "please select an index between 1 and {}".format(len(CategoryChange))
        raise click.BadParameter(msg)
@click.command()
@click.option('-t', '--title', prompt=title_prompt(),
              callback=validate_title,
              help="Title for the changelog entry.")
@click.option('-c', '--category', prompt=category_prompt(),
              callback=validate_category,
              help="The category of the change.")
@click.option('--dry-run', is_flag=True,
              help="Do not generate an entry. Print to the standard output instead.")
@click.option('--overwrite', is_flag=True,
              help="Force to replace an existing entry.")
@click.option('--editor/--no-editor', default=True,
              help="Open entry in the default editor.")
def changelog(title, category, dry_run, overwrite, editor):
    """Interactive tool to create unreleased Changelog entries.

    This tool will help you to create valid Changelog entries
    for a Git repository. You will need to run this script inside
    that repository.

    It will guide you to create a new entry. At the end of the process,
    a text editor will open to let you review the entry and make the
    final changes. The editor will be the default defined in your
    system.

    You can skip some parts of the process providing information
    in advance such as the title ('-t') or the category ('-c')
    of the entry.

    New entries will be stored in "releases/unreleased" directory.
    This directory must be available under the Git root path. If you
    don't want to create a new entry and see only the final result,
    please active '--dry-run' flag.

    In the case an entry with the same title already exists, an error
    will be raised. Use '--overwrite' to force to replace the existing
    entry.

    You can also use this tool to create entries in a Git submodule.
    Just run the script under the submodule directory.
    """
    click.echo()

    # The tool must run inside a Git repository; Project resolves its root.
    try:
        project = Project(os.getcwd())
    except RepositoryError as e:
        raise click.ClickException(e)

    dirpath = check_changelog_entries_dir(project)
    content = create_changelog_entry_content(title, category,
                                             run_editor=editor)
    # Loops with the user until the YAML content parses cleanly.
    content = validate_changelog_entry(content)
    click.echo()

    if dry_run:
        click.echo(content)
    else:
        write_changelog_entry(dirpath, title, content,
                              overwrite=overwrite)
def check_changelog_entries_dir(project):
    """Return the changelog entries directory, creating it if the user agrees."""
    dirpath = project.unreleased_changes_path
    if os.path.exists(dirpath):
        return dirpath

    click.echo("Error: Changelog entries directory does not exist.")
    if not click.confirm("Do you want to create it?"):
        raise click.ClickException(
            "Changelog entries directory is needed to continue."
        )

    try:
        os.makedirs(dirpath, mode=0o755)
    except OSError as ex:
        raise click.ClickException(
            "Unable to create directory. {}".format(str(ex))
        )

    click.echo("New directory {} created".format(dirpath))
    return dirpath
def create_changelog_entry_content(title, category, author=None, issue=None,
                                   run_editor=True):
    """Render a changelog entry to YAML, optionally letting the user edit it."""
    entry = ChangelogEntry(title, category, author=author, issue=issue)
    stream = yaml.dump(entry.to_dict(), sort_keys=False,
                       explicit_start=True)
    # When requested, hand the generated YAML to the user's editor so the
    # final wording can be adjusted before it is validated and stored.
    return click.edit(stream) if run_editor else stream
def validate_changelog_content(content):
    """Validate the contents of an entry in a file.

    Returns True when `content` is parseable YAML, False otherwise (after
    echoing a diagnostic). Raises click.ClickException on empty content.
    """
    if not content:
        msg = "Aborting due to empty entry content"
        raise click.ClickException(msg)
    try:
        yaml.safe_load(content)
        return True
    except yaml.YAMLError as exc:
        # Not every YAMLError carries a problem_mark (only MarkedYAMLError
        # subclasses do); guard against an AttributeError masking the
        # original parse failure.
        pm = getattr(exc, "problem_mark", None)
        if pm is not None:
            click.echo("Error: Invalid format on line {} at position {}".format(pm.line, pm.column))
        else:
            click.echo("Error: Invalid format")
        return False
def validate_changelog_entry(content):
    """Let the user re-edit the entry until its format validates."""
    while not validate_changelog_content(content):
        # Offer an edit cycle; declining aborts the command (abort=True).
        if click.confirm("The changes will be lost if the file is invalid.\nDo you want to edit it?",
                         default=True, abort=True):
            content = click.edit(content)
    return content
def write_changelog_entry(dirpath, title, content, overwrite=False):
    """Store the contents of an entry in a file.

    The target filename is derived from `title`. Unless `overwrite` is set,
    an existing entry with the same name raises a ClickException.
    """
    filepath = determine_filepath(dirpath, title)
    # Computed outside the try block: only open() can raise FileExistsError,
    # so keep the try body minimal.
    filename = os.path.basename(filepath)
    # Mode 'x' fails when the file already exists; 'w' replaces it.
    mode = 'w' if overwrite else 'x'
    try:
        with open(filepath, mode=mode) as f:
            f.write(content)
    except FileExistsError:
        msg = "Changelog entry {} already exists. Use '--overwrite' to replace it.".format(filename)
        raise click.ClickException(msg)
    click.echo("Changelog entry '{}' created".format(filename))
# Allow running this module directly as a script.
if __name__ == '__main__':
    changelog() | /release_tools-0.6.0.tar.gz/release_tools-0.6.0/release_tools/changelog.py | 0.521959 | 0.211641 | changelog.py | pypi |
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Dict, Any, Optional, List, DefaultDict
import click
from boltons.cacheutils import cached, LRI
from pydantic import BaseModel
from releaseherald.plugins.interface import CommandOptions
@dataclass
class FromCommandline:
    """
    This class can be used to annotate a PluginConfig attribute, it connects the annotated attribute to
    the passed commandline option

    Attributes:
        command: the command the option need to attached to
        option: the commandline option
    """

    command: str
    option: click.Option
@dataclass
class UpdateMapping:
    """Links a config attribute to the commandline option that overrides it."""

    field_name: str  # name of the PluginConfig attribute
    option_name: str  # name of the click option supplying the override
@dataclass
class CommandOptionsInfo:
    """Per-command collection of click options and their config mappings."""

    options: List[click.Option] = field(default_factory=list)
    update_mappings: List[UpdateMapping] = field(default_factory=list)
class PluginConfig(BaseModel):
    """
    A helper base class for easier declarative plugin configuration.

    - can be used with [Configuration.parse_sub_config][releaseherald.configuration.Configuration.parse_sub_config]
      for easier parsing
    - Attributes can be Annotated types, which can contain
      [FromCommandline][releaseherald.plugins.plugin_config.FromCommandline] Annotation, that make the config setting
      overridable from commandline

    #Usage
    ```python

    class MyPluginConfig(PluginConfig):
        non_overridable_value: str = "some default"

        # use type Annotation to connect the attribute with the commandline Option
        overridable_value: Annotated[str, FromCommandline(
            "generate",
            click.Option(
                param_decls=["--override"],
                help="override the overrideable value",
            )
        )] = "default for overrideable value"


    class MyPlugin:
        @releaseherald.plugins.hookimpl
        def process_config(self, config: Configuration):
            # parse the config with the helper
            self.my_config = config.parse_sub_config("my_config", MyPluginConfig)

        @releaseherald.plugins.hookimpl
        def get_command_options(self, command: str) -> Optional[CommandOptions]:
            # just use the helper to return the right thing
            return self.my_config.get_command_options(command)

        @releaseherald.plugins.hookimpl
        def on_start_command(self, command: str, kwargs: Dict[str, Any]):
            # use the helper to reflect commandline overrides in the config
            self.my_config.update(command, kwargs)
    ```
    """

    @classmethod
    @cached(LRI())
    def _get_command_options_info(cls) -> Dict[str, CommandOptionsInfo]:
        # Scan the pydantic fields once (result cached per class) and collect,
        # per command, the click options declared via FromCommandline
        # annotations plus the mapping back to the config attribute each
        # option overrides.
        # NOTE(review): relies on pydantic v1 internals (`__fields__`,
        # `outer_type_`) -- revisit if migrating to pydantic v2.
        command_options: DefaultDict[str, CommandOptionsInfo] = defaultdict(
            CommandOptionsInfo
        )
        for config_field in cls.__fields__.values():
            # Annotated[...] metadata lives on the outer type; plain
            # (unannotated) fields carry none and are skipped.
            metadata = getattr(config_field.outer_type_, "__metadata__", None)
            if not metadata:
                continue
            for annotation in metadata:
                if not isinstance(annotation, FromCommandline):
                    continue
                command = command_options[annotation.command]
                command.options.append(annotation.option)
                # The option name is later used as the kwargs key, so it is
                # mandatory.
                if not annotation.option.name:
                    raise AttributeError(
                        f"{config_field.name} field config option should have a name "
                        f"in {cls.__name__}"
                    )
                command.update_mappings.append(
                    UpdateMapping(config_field.name, annotation.option.name)
                )
        return dict(command_options)

    def get_command_options(self, command: str) -> Optional[CommandOptions]:
        """
        Generate command options from Annotated fields which can be returned directly from
        [get_command_options hook][releaseherald.plugins.hookspecs.get_command_options]

        Args:
            command: the command these command options are registered with

        Returns:
            The command options that the [get_command_options hook][releaseherald.plugins.hookspecs.get_command_options] expects
        """
        # NOTE(review): .get() may return None here, so the declared type of
        # `command_options` is effectively Optional[CommandOptionsInfo].
        command_options: CommandOptionsInfo = self._get_command_options_info().get(
            command
        )
        if command_options:

            def default_opts_callback(default_options: Dict[str, Any]):
                # Promote the current config values as defaults for the
                # registered commandline options.
                for update_mapping in command_options.update_mappings:
                    default_options[update_mapping.option_name] = getattr(
                        self, update_mapping.field_name
                    )

            return CommandOptions(command_options.options, default_opts_callback)
        return None

    def update(self, command: str, kwargs: Dict[str, Any]) -> None:
        """
        Update itself from commandline options, can be used in
        [on_start_command hook][releaseherald.plugins.hookspecs.on_start_command]

        Args:
            command: the command
            kwargs: the commandline args for the command
        """
        command_options: CommandOptionsInfo = self._get_command_options_info().get(
            command
        )
        if command_options:
            # Copy each option's value back onto the matching config field.
            for update_mapping in command_options.update_mappings:
                setattr(
                    self,
                    update_mapping.field_name,
                    kwargs[update_mapping.option_name],
                ) | /plugins/plugin_config.py | 0.931525 | 0.335215 | plugin_config.py | pypi |
from pathlib import Path
from typing import Optional, Pattern, List
from boltons.iterutils import pairwise
from git import Commit, Repo # type: ignore
from pydantic import BaseModel, root_validator, parse_obj_as
import releaseherald.plugins
from releaseherald.configuration import (
Configuration,
DEFAULT_FRAGMENTS_DIR,
DEFAULT_VERSION_TAG_PATTERN,
)
from releaseherald.plugins.base import get_tags, get_news_between_commits
from releaseherald.plugins.interface import (
SubmoduleNews,
CommitInfo,
News,
MutableProxy,
VersionNews,
)
class SubmoduleConfig(BaseModel):
    """
    Attributes:
        name: The name of the submodule as referenced by git
        display_name: This name is passed to the rendering to be used. **_Default:_** same as `name`
        news_fragments_directory: the news fragment directory relative to submodule root. **_Default:_** `news_fragment`
        version_tag_pattern: The version tag pattern in this submodule. **_Default:_** `"(?P<version>(\d*)\.(\d*)\.(\d*))"`
    """

    name: str
    display_name: str = None  # type: ignore  # filled in by the validator below
    news_fragments_directory: Path = DEFAULT_FRAGMENTS_DIR
    version_tag_pattern: Pattern = DEFAULT_VERSION_TAG_PATTERN

    @root_validator
    def default_display_name(cls, values):
        # Fall back to the git submodule name when no display name is given.
        values = values.copy()
        display_name = values.get("display_name")
        values["display_name"] = display_name or values["name"]
        return values
class Submodules:
    """releaseherald plugin adding per-submodule news to each version."""

    def __init__(self) -> None:
        self.config: Optional[Configuration] = None
        self.submodule_config: List[SubmoduleConfig] = []

    @releaseherald.plugins.hookimpl
    def process_config(self, config: Configuration):
        """Parse the raw `submodules` config section into SubmoduleConfig objects."""
        self.config = config
        submodules = getattr(config, "submodules", None)
        if submodules:
            self.submodule_config = parse_obj_as(List[SubmoduleConfig], submodules)
        # Write the parsed (possibly empty) list back so later consumers see
        # typed objects instead of raw dicts.
        setattr(config, "submodules", self.submodule_config)

    @releaseherald.plugins.hookimpl
    def get_version_news(
        self,
        repo: Repo,
        commit_from: CommitInfo,
        commit_to: CommitInfo,
        news: List[News],
        version_news: MutableProxy[VersionNews],
    ):
        """Attach submodule news to the version built by earlier plugins.

        NOTE(review): assumes a previous hook already populated
        `version_news.value` -- confirm hook ordering guarantees this.
        """
        submodule_news = get_submodule_news(
            commit_from.commit, commit_to.commit, self.submodule_config
        )
        version_news.value.submodule_news = submodule_news
def get_submodule_commit(commit: Commit, name: str) -> Optional[Commit]:
    """Return the commit the submodule `name` points to at `commit`.

    Returns None when the submodule is unknown to the repository, or is
    absent from the tree of this particular commit.
    """
    parent_repo = commit.repo
    try:
        submodule = parent_repo.submodules[name]
        tree_entry = commit.tree / submodule.path
    except KeyError:
        # Raised both by the submodule lookup and by the tree traversal:
        # in either case the submodule does not exist here.
        return None
    return submodule.module().commit(tree_entry.hexsha)
def get_submodule_news(
    commit_from: Commit, commit_to: Commit, submodules: List[SubmoduleConfig]
) -> List[SubmoduleNews]:
    """Collect news for each configured submodule between two superproject commits.

    The submodule's commit range is split at intermediate version tags so
    fragments are gathered per release segment.
    """
    news = []
    for submodule in submodules:
        submodule_from = get_submodule_commit(commit_from, submodule.name)
        submodule_to = get_submodule_commit(commit_to, submodule.name)
        # Skip submodules missing at either end of the range.
        if not submodule_from or not submodule_to:
            continue
        srepo = submodule_from.repo
        # Version tags lying strictly inside the range: descendants of the
        # source commit and ancestors of the destination commit.
        tag_commits = [
            tag.commit
            for tag in get_tags(srepo, submodule.version_tag_pattern)
            if srepo.is_ancestor(submodule_from, tag.commit)
            and srepo.is_ancestor(tag.commit, submodule_to)
        ]
        # NOTE(review): assumes get_tags yields tags ordered newest-to-oldest
        # so this list is a monotone commit chain -- confirm.
        commits = [submodule_to, *tag_commits, submodule_from]
        snews = SubmoduleNews(name=submodule.name, display_name=submodule.display_name)
        # Walk consecutive commit pairs and accumulate their fragments.
        for c_to, c_from in pairwise(commits):
            snews.news.extend(
                get_news_between_commits(
                    c_from, c_to, submodule.news_fragments_directory
                )
            )
        news.append(snews)
    return news | /plugins/submodules.py | 0.819641 | 0.225481 | submodules.py | pypi |
import datetime
from dataclasses import dataclass, field
from typing import Optional, Dict, Any, List, Generic, TypeVar
import click
from git import TagReference, Commit # type: ignore
from releaseherald.configuration import DefaultOptionsCallable
# Type variable for the value type wrapped by MutableProxy.
VT = TypeVar("VT")
@dataclass
class CommandOptions:
    """
    Represent a list of commandline options need to be registered to one of the
    cli command, together with a callable that can promote config settings as
    the default to the options.

    Attributes:
        options: list of options will be attached to a cli command
        default_opts_callback:
            a callback that promote config settings to defaults to the above options
    """

    options: List[click.Option]
    default_opts_callback: DefaultOptionsCallable
class MutableProxy(Generic[VT]):
    """
    Generic proxy object to make it possible to mutate/replace params in
    consecutive hooks

    Attributes:
        value VT: the value the proxy holds
    """

    def __init__(self, value: Optional[VT] = None):
        self._value = value

    @property
    def value(self):
        # Read access to the wrapped value.
        return self._value

    @value.setter
    def value(self, value: Optional[VT]):
        # Replacing the wrapped value is how later hooks mutate the result.
        self._value = value
@dataclass
class CommitInfo:
    """
    Attributes:
        tag: The tag used to find this commit
        commit: The commit
        name: The name of the tag or "Unreleased"
        date:
            The date when the tag was attached to the commit if the tag is annotated,
            else the commit date
    """

    tag: Optional[TagReference]
    commit: Commit

    @property
    def name(self) -> str:
        # An untagged commit represents the not-yet-released head.
        return self.tag.name if self.tag else "Unreleased"

    @property
    def date(self) -> datetime.datetime:
        # Annotated tags (tag.tag is set) carry their own creation date;
        # lightweight tags do not, so fall back to the commit timestamp.
        # NOTE(review): fromtimestamp() yields a naive local-time datetime --
        # confirm callers do not expect an aware/UTC value.
        return datetime.datetime.fromtimestamp(
            self.tag.tag.tagged_date
            if self.tag and self.tag.tag
            else self.commit.committed_date
        )
@dataclass
class News:
    """
    Represent a single newsfile

    Attributes:
        file_name: file name of the news fragment
        content: the content of the news file
        metadata: a data store for plugins to attach extra data
    """

    file_name: str
    content: str
    # Free-form per-item store; plugins key their own entries into it.
    metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class SubmoduleNews:
    """
    Represent a list of news for a given submodule.

    Attributes:
        name: The submodule name
        display_name: The display_name of the submodule
        news: The news
        metadata: a data store for plugins to attach extra data
    """

    name: str
    display_name: str
    news: List[News] = field(default_factory=list)
    # Free-form per-submodule store; plugins key their own entries into it.
    metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class VersionNews:
    """
    Represent a given version with all the collected information

    Attributes:
        news: the list of news
        tag: the tag used to identify this version
        from_commit: the earlier commit
        to_commit: the later commit
        date: date of this version
        submodule_news: news for every submodule for this release
        metadata: a data store for plugins to attach extra data
    """

    news: List[News]
    tag: str
    version: str
    from_commit: CommitInfo
    to_commit: CommitInfo
    date: datetime.datetime
    submodule_news: List[SubmoduleNews] = field(default_factory=list)
    # Free-form per-version store; plugins key their own entries into it.
    metadata: Dict[str, Any] = field(default_factory=dict)
@dataclass
class Output:
    """
    Represent the output rendered from the collected news for all the collected versions

    Attributes:
        format: could be used advertise the format of the content
        content: any plugin specific format
        metadata: a data store for plugins to attach extra data
    """

    format: str
    content: Any
    metadata: Dict[str, Any] = field(default_factory=dict) | /plugins/interface.py | 0.925643 | 0.303809 | interface.py | pypi |
import re
from abc import ABC, abstractmethod
from enum import Enum
from pathlib import Path
from typing import List, Optional, Dict, Type
import parse
from pydantic import BaseModel
import releaseherald.plugins
from releaseherald.configuration import Configuration
from releaseherald.plugins.interface import VersionNews, News
class ParserType(str, Enum):
    """Supported filename parsing engines."""
    RE = "re"        # Python `re` regular expressions with named groups
    PARSE = "parse"  # the `parse` library's format-string patterns
class FilenameMetadataExtractorConfig(BaseModel):
    """
    Attributes:
        type: [re](https://docs.python.org/3/library/re.html) or [parse](https://github.com/r1chardj0n3s/parse#readme)
        pattern: the pattern for the corresponding type. Should contain groups/fields
        target_attribute: if provided the resulting dictionary will be merged into that field of the metadata
    """

    type: ParserType
    pattern: str
    target_attribute: Optional[str]  # pydantic defaults Optional fields to None
# Key under which this plugin's settings appear in the releaseherald config.
CONFIG_ATTRIBUTE = "filename_metadata_extractor"
class Extractor(ABC):
    """Strategy interface for pulling metadata fields out of a filename."""

    def __init__(self, pattern: str):
        pass

    @abstractmethod
    def match(self, text: str) -> Optional[Dict[str, str]]:
        """Return the extracted fields for `text`, or None when it does not match."""
        ...
class RegexExtractor(Extractor):
    """Extractor backed by a compiled `re` pattern with named groups."""

    def __init__(self, pattern: str):
        super().__init__(pattern)
        self.pattern = re.compile(pattern)

    def match(self, text: str) -> Optional[Dict[str, str]]:
        found = self.pattern.match(text)
        return found.groupdict() if found else None
class ParseExtractor(Extractor):
    """Extractor backed by a `parse` library format-string pattern."""

    def __init__(self, pattern: str):
        super().__init__(pattern)
        self.pattern = parse.compile(pattern)

    def match(self, text: str) -> Optional[Dict[str, str]]:
        parsed = self.pattern.parse(text)
        return parsed.named if parsed else None
# Maps each ParserType to the Extractor implementation that handles it.
MATCHER_FACTORY_MAP: Dict[ParserType, Type[Extractor]] = {
    ParserType.RE: RegexExtractor,
    ParserType.PARSE: ParseExtractor,
}
class FilenameMetadataExtractor:
    """releaseherald plugin deriving news metadata from fragment filenames."""

    def __init__(self) -> None:
        # Both stay None (plugin disabled) until process_config sees config.
        self.target_attribute: Optional[str] = None
        self.extractor: Optional[Extractor] = None

    @releaseherald.plugins.hookimpl
    def process_config(self, config: Configuration):
        """Build the configured extractor; without config the plugin is a no-op."""
        extractor_config = config.parse_sub_config(
            CONFIG_ATTRIBUTE, FilenameMetadataExtractorConfig
        )
        if extractor_config:
            self.target_attribute = extractor_config.target_attribute
            self.extractor = MATCHER_FACTORY_MAP[extractor_config.type](
                extractor_config.pattern
            )

    @releaseherald.plugins.hookimpl
    def process_version_news(self, version_news: List[VersionNews]):
        """Annotate every news item, including submodule news, in every version."""
        if self.extractor is None:
            return
        for version in version_news:
            self.process_news_list(version.news)
            for submodule in version.submodule_news:
                self.process_news_list(submodule.news)

    def process_news_list(self, news_list: List[News]):
        for news in news_list:
            self.extend_news(news)

    def extend_news(self, news: News):
        """Merge fields extracted from the filename into the news metadata."""
        if self.extractor is None:
            return
        # Match against the basename only, not the whole fragment path.
        metadata = self.extractor.match(Path(news.file_name).name)
        if metadata:
            target = news.metadata
            if self.target_attribute:
                # Nest the results under the configured key instead of
                # merging them at the top level of the metadata dict.
                target = news.metadata.setdefault(self.target_attribute, {})
            target.update(metadata) | /plugins/metadata_extractor.py | 0.80329 | 0.266327 | metadata_extractor.py | pypi |
from typing import List, Dict, Any, Optional
import pluggy
from git import Repo, Tag
from releaseherald.configuration import Configuration
from releaseherald.plugins import CommitInfo
from releaseherald.plugins.interface import (
MutableProxy,
VersionNews,
News,
Output,
CommandOptions,
)
# Marker declaring the hook specifications below; plugin implementations
# pair with it via the matching pluggy HookimplMarker.
hookspec = pluggy.HookspecMarker("releaseherald")
@hookspec
def process_config(config: Configuration):
    """
    Called as the first callback to the plugin. The plugin can use this to
    initiate itself based on the configuration read from the config file.

    It also has a chance to change the configuration. It can for example
    parse and validate its own sub configuration and replace the dict in `config`
    with a more manageable object.

    Args:
        config: The configuration
    """
    pass
@hookspec
def get_command_options(command: str) -> Optional[CommandOptions]:
    """
    This callback gives a chance to a plugin to add commandline options to
    various commands. It is called with the name of the command.

    Args:
        command: the name of the cli command the options would be attached to

    Returns:
        collection of `click.Options` that are added to the cli command
        and a callable that can set the configured value as the default for the
        cli option"""
    pass
@hookspec
def on_start_command(command: str, kwargs: Dict[str, Any]):
    """
    Called before a cli command start to execute. Plugins can inspect
    `kwargs` here to pick up commandline overrides.

    Args:
        command: the name of the command
        kwargs: the parameters the command called with
    """
    pass
@hookspec
def process_tags(repo: Repo, tags: List[Tag]):
    """
    Gives the plugin a chance to filter or reorder the version tags in place.

    Args:
        repo: the git repository
        tags:
            List of tags releaseherald consider as the versions
            it needs to collect newsfragments. The plugin is free
            to manipulate the list of tags, complex filtering can be
            implemented here.
    """
    pass
@hookspec
def process_commits(repo: Repo, tags: List[Tag], commits: List[CommitInfo]):
    """
    The aim of this hook is to collect the list of commits based on the tags.
    The plugin is supposed to modify the `commits` list in place. The default
    plugin just turns the tags into
    [CommitInfo][releaseherald.plugins.interface.CommitInfo].

    Args:
        repo: the git repository
        tags: the tags collected by [process_tags][releaseherald.plugins.hookspecs.process_tags]
        commits: Info about each commits
    """
    pass
@hookspec
def get_news_between_commits(
    repo: Repo,
    commit_from: CommitInfo,
    commit_to: CommitInfo,
    news: List[News],
):
    """
    In this hook the plugin can alter the collected `news` between the two commits. It is
    called for every consecutive commit pairs processed by
    [process_commits][releaseherald.plugins.hookspecs.process_commits].

    Args:
        repo: the git repository
        commit_from: The earlier commit
        commit_to: The later commit
        news: The list of news that previous plugins collected, can be altered by the plugin
    """
    pass
@hookspec
def get_version_news(
    repo: Repo,
    commit_from: CommitInfo,
    commit_to: CommitInfo,
    news: List[News],
    version_news: MutableProxy[VersionNews],
):
    """
    In this hook plugins can produce a wrapper around the list of news that represent a
    version in releaseherald datastructure. Called for every consecutive commit pairs processed by
    [process_commits][releaseherald.plugins.hookspecs.process_commits] with the news processed by
    [get_news_between_commits][releaseherald.plugins.hookspecs.get_news_between_commits] for the
    same two commits.

    Args:
        repo: the git repository
        commit_from: The earlier commit
        commit_to: The later commit
        news: The list of news collected by
            [get_news_between_commits][releaseherald.plugins.hookspecs.get_news_between_commits]
        version_news: The version news representing a version with the changes between two commits
    """
    pass
@hookspec
def process_version_news(version_news: List[VersionNews]):
    """
    This hook gives a chance for the plugin to alter the list of versions,
    mutating it in place.

    Args:
        version_news: All the version/news collected so far
    """
    pass
@hookspec
def generate_output(version_news: List[VersionNews], output: MutableProxy[Output]):
    """
    The plugin can generate an output in memory in any kind of format it wants. It also has a
    chance to alter or replace an output generated by any previous plugins

    Args:
        version_news:
            All the version/news collected and processed by
            [process_version_news][releaseherald.plugins.hookspecs.process_version_news]
        output: Output in plugin specific format
    """
    pass
@hookspec
def write_output(output: Output):
    """
    The plugin should do its final output step here. Write to file, to stdout or send a mail,
    upload to some service, whatever desired.

    Args:
        output: the output from [generate_output][releaseherald.plugins.hookspecs.generate_output]
    """
    pass | /plugins/hookspecs.py | 0.904223 | 0.410874 | hookspecs.py | pypi |
Release Log Parser
==================
Software packages usually include textual files describing noteworthy
changes in each subsequent release. There exist several variants (or
formats) of such files.
This package provides Python framework for parsing the most often used
formats of such release log files. Support for any new format can be
easily added.
Release Logs
============
``Release Log`` is a textual file included in a software package, which
contains descriptions of existing releases of the package. Such a
file is normally included in each distributed archive of the package
and is present in its VCS repository.
Little or no effort has been invested into standardization of release
log formats. There exists a plethora of variations which differ more
or less considerably. The choice of a particular variation for a given
package depends mostly on the language this package is written in and
the distribution system adopted for this package. Authors' preferences
play certain role as well.
Despite the diversity of release log formats, similarities between
them outnumber their differences. The following observations hold true:
1. Release logs are plaintext files.
2. Within a file, each release is described by a separate entry.
3. Each such entry consists of a heading, containing at least the
version number and date of the release, and a textual block discussing
the changes introduced with this release.
4. Entries are arranged in reverse chronological order, the most
recent release being described first.
5. Format of the headings is consistent throughout the given release
log.
6. Entry description is usually a list of changes. However, more
verbose and general descriptions may also appear within it. In
general, it is safest to assume the description to be an opaque block
of arbitrary text.
7. Release logs can contain additional textual information before the
first release entry (a "prologue") and after the last release entry
(an "epilogue").
Supported Formats
=================
Most frequently used release log formats can be grouped into three
main families:
``GNU-style`` release logs
These are normally used by GNU software. Such log files are usually named
"NEWS". Example heading lines are::
version 1.30 - Sergey Poznyakoff, 2017-12-17
Version 1.18 - 2018-08-21
* Version 4.2, 2014-05-23
``Perl-style`` release logs
These are the "Changes" files included in each Perl package
distributed via CPAN. Example heading lines::
2.00 2018-03-08
1.01 Sat Jul 7 19:11:35 2018
``Python package`` release logs
The "CHANGES.txt" files found in many Python packages. Example heading
lines::
v2.0.1, 2014/12/14 -- Update token generator
2.7 (23 June 2018)
The special feature of the first heading variant is that the first
line of the changeset description follows the heading on the same
physical line. Quite often this is the only line in the description.
Usage
=====
The ``ReleaseLog`` class is a fabric returning actual release history
implementation, depending on the first argument to its constructor.
Typical usage::
rl = ReleaseLog('GNU', content, count=1)
The two mandatory arguments are the format name and the list of lines
obtained from the release log file.
Valid format names for this version of ``releaselogparser`` are:
``GNU``, ``NEWS``
GNU-style news file.
``CPAN``, ``Changes``
Perl-style release log.
``Python``, ``python``
Python-style release log.
Supported keyword arguments are:
start = *N*
Start parsing from the entry *N*. Entries are numbered from 0.
stop = *N*
Stop parsing on the entry *N*.
count = *N*
Collect at most *N* entries
If all three keywords are given, the actual range of history entries
is computed as
[start, min(start+count, stop)]
Two derived classes are provided that read input data from various
sources:
class ``ReleaseLogFile``
------------------------
The ``ReleaseLogFile`` class reads release log from the file::
rl = ReleaseLogFile(fmt, file [, kwargs...])
Here, ``fmt`` is the name of the format, ``file`` is the name of the
input file, and ``kwargs`` are keyword arguments described above.
class ``ReleaseLogURL``
-----------------------
The ``ReleaseLogURL`` class reads log entries from a URL::
rl = ReleaseLogURL(fmt, url [, kwargs...])
Accessing release information
----------------------------
The returned object can be indexed to obtain particular log
entries. Indices start with 0, which corresponds to the most recent
entry, e.g.:
entry = cl[0]
The ``entry`` is an object of class ``Release``, which has three
attributes:
``version``
Release version number.
``date``
Date and time of the release (a datetime object)
``descr``
Textual description of the release - a list of lines.
The obtained entry can be printed as string, e.g.:
print(entry)
The output format is as shown in the example below:
Version 1.0, released at 2018-08-19 15:30:00
Example
=======
The following simple program reads release log entries from the file
``NEWS`` and prints them on the standard output::
from releaselogparser.input import ReleaseLogFile
for log in ReleaseLogFile('GNU', 'NEWS'):
print(log)
print('\n'.join(log.descr))
Extending Release Log
=====================
Implementing support for new release log format is fairly easy. To do
so, provide a class inherited from ``ReleaseHistory``. This base class has
the following attributes:
``format``
List of names for this format. Names from this list can be used
interchangeably to identify this log format, e.g. as a first
argument to the ``ReleaseLog`` or derived constructor.
``filename``
Name of the file used normally for release logs in this format.
``header``
Compiled regular expression that returns a match for
history entry heading lines. The expression must contain two named
groups: ``version``, which returns part of the string corresponding
to the release version number, and ``date``, returning its
timestamp.
If it contains a named group ``rest``, part of the header string
corresponding to this group will be added to the ``descr`` list of
the created history entry.
``end_of_entry_rx``
Compiled regular expression that matches end of entry. Can be
``None``, if not needed.
The file with the definition of the inherited class must be placed in
the directory ``releaselogparser/format`` reachable from the Python search path
for module files.
The following example implements a simplified version of CHANGES.txt log
format::
import re
from releaselogparser import ReleaseHistory
class ChangesLogFormat(ReleaseHistory):
format = ['changes']
filename = 'CHANGES.txt'
header = re.compile("""^[vV](?P<version>\d[\d.]*)\s*
,\s*
(?P<date>.*?)
\s+-+\s*
(?P<rest>.*)$
""", re.X)
More sophisticated implementations can overload the ``parse_header``
method of the parent class. This method is defined as follows::
def parse_header(self, line):
If the input ``line`` is an entry header, the method should return
a triplet::
(date, version, first_line)
where ``date`` is textual representation of the date of the release,
``version`` is the release version string, and ``first_line`` is the
first line of the description (can be None).
If the line is not a valid entry header, the method returns
``(None, None, None)``.
The ``releaselog`` utility
==========================
The ``releaselog`` tool reads release logs in various formats from a
given file or URL. Its usage is::
releaselog [OPTIONS] FILE-or-URL
The argument is treated as file name by default. To read from a URL,
use the ``--url`` option.
Options:
``-H FORMAT``, ``--format=FORMAT``
Read logs in the given format.
``-f N``, ``--from=N``, ``--start=N``
Start from *N* th entry.
``-t N``, ``--to=N``, ``--stop=N``
End on *N* th entry.
``-n COUNT``, ``--count=COUNT``
Read at most that many entries.
``-u``, ``--url``
Treat argument as URL
``-l``, ``--list``
List supported formats
``--version``
Show program version number and exit.
``-h``, ``--help``
Show a short help message and exit.
| /releaselogparser-1.0.2.tar.gz/releaselogparser-1.0.2/README.rst | 0.927511 | 0.680958 | README.rst | pypi |
=====
Usage
=====
To use Releases, mimic the format seen in `its own changelog
<https://raw.github.com/bitprophet/releases/master/docs/changelog.rst>`_ or in
`Fabric's changelog
<https://raw.github.com/fabric/fabric/master/sites/www/changelog.rst>`_.
Specifically:
* Install ``releases`` and update your Sphinx ``conf.py`` to include it in the
``extensions`` list setting: ``extensions = ['releases']``.
* Also set the ``releases_release_uri`` and ``releases_issue_uri`` top
level options - they determine the targets of the issue & release links
in the HTML output. Both must include a ``{number}`` slug (for use
with `str.format`) where the release/issue number should go; the older
``%s`` style is also acceptable.
* Alternately, if your project is hosted on Github, set the
``releases_github_path`` setting instead, to e.g.
``account/project``. Releases will then use an appropriate Github
URL for both releases and issues.
* If ``releases_release_uri`` or ``releases_issue_uri`` are *also*
configured, they will be preferred over ``releases_github_path``.
(If only one is configured, the other link type will continue using
``releases_github_path``.)
* See `Fabric's docs/conf.py
<https://github.com/fabric/fabric/blob/4afd33e971f1c6831cc33fd3228013f7484fbe35/docs/conf.py#L31>`_
for an example.
* You may optionally set ``releases_debug = True`` to see debug output
while building your docs.
* If your changelog includes "simple" pre-1.0 releases derived from a
single branch (i.e. without stable release lines & semantic versioning)
you may want to set ``releases_unstable_prehistory = True``.
* This is also useful if you've just imported a non-Releases changelog,
where your issues are all basic list-items and you don't want to go
through and add bug/feature/support/etc roles.
* See :ref:`the appropriate conceptual docs <unstable-prehistory>` for
details on this behavior.
* Create a Sphinx document named ``changelog.rst`` containing a bulleted list
somewhere at its topmost level.
* If you wish to use a different document name, use another config option
(as per previous bullet point), ``releases_document_name``. E.g.
``releases_document_name = "CHANGES"`` would cause Releases to mutate a
file called ``CHANGES.rst`` instead of ``changelog.rst``.
* It is possible to target multiple changelog files for mutation by setting
``releases_document_name`` to a list of strings instead of a single
string, e.g. ``releases_document_name = ['project_1/changelog',
'project_2/changes', 'changelog']``.
* Releases only modifies the bulleted list in these files and does not
touch other elements; this allows you to place paragraphs, comments etc
at the top (or bottom) of the document.
* List items are to be ordered chronologically with the newest ones on top.
* As you fix issues, put them on the top of the list.
* As you cut releases, put those on the top of the list and they will
include the issues below them.
* Issues with no releases above them will end up in a specially marked
"Unreleased" section of the rendered changelog.
* Bullet list items should use the ``support``, ``feature`` or ``bug``
roles to mark issues, or ``release`` to mark a release. These special roles
must be the first element in each list item.
* Line-items that do not start with any issue role will be considered bugs
(both in terms of inclusion in releases, and formatting) and, naturally,
will not be given a hyperlink.
* Issue roles are of the form ``:type:`number[ keyword]```. Specifically:
* ``number`` is used to generate the link to the actual issue in your issue
tracker (going by the ``releases_issue_uri`` option). It's used for both
the link target & (part of) the link text.
* If ``number`` is given as ``-`` or ``0`` (as opposed to a "real" issue
number), no issue link will be generated. You can use this for items
without a related issue.
* Keywords are optional and may be one of:
* ``backported``: Given on *support* or *feature* issues to denote
backporting to bugfix releases; such issues will show up in both
release types. E.g. placing ``:support:`123 backported``` in your
changelog below releases '1.1.1' and '1.2.0' will cause it to appear
in both of those releases' lists.
* ``major``: Given on *bug* issues to denote inclusion in feature,
instead of bugfix, releases. E.g. placing ``:bug:`22 major``` below
releases '1.1.1' and '1.2.0' will cause it to appear in '1.2.0'
**only**.
* ``(N.N+)`` where ``N.N`` is a valid release line, e.g. ``1.1`` or
``2.10``: Given on issues (usually *bugs*) to denote minimum release
line. E.g. when actively backporting most bugs to release lines 1.2,
1.3 and 1.4, you might specify ``:bug:`55 (1.3+)``` to note that bug
55 only applies to releases in 1.3 and above - not 1.2.
* A `semantic version range spec covering minor+major version numbers
<https://python-semanticversion.readthedocs.io/en/latest/reference.html#version-specifications-the-spec-class>`_
such as ``(<2.0)`` or ``(>=1.0,<3.1)``. A more powerful version of
``(N.N+)`` allowing annotation of issues belonging to specific major
versions.
.. note::
It is possible to give *both* a regular keyword
(``backported``/``major``) *and* a spec (``(N.N+)``/``(>=1.0)``) in
the same issue. However, giving two keywords or two specs at the same
time makes no sense & is not allowed.
* Regular Sphinx content may be given after issue roles and will be preserved
as-is when rendering. For example, in ``:bug:`123` Fixed a bug, thanks
`@somebody`!``, the rendered changelog will preserve/render "Fixed a bug,
thanks ``@somebody``!" after the issue link.
* Release roles are of the form ``:release:`number <date>```.
* You may place a comma-separated (whitespace optional) list of issue
numbers after the release role, and this will limit the issues included
in that release to that explicit list.
* Otherwise, releases include all relevant issues as outlined above and
in :doc:`/concepts`.
Then build your docs; in the rendered output, ``changelog.html`` should show
issues grouped by release, as per the above rules. Examples: `Releases' own
rendered changelog
<http://releases.readthedocs.io/en/latest/changelog.html>`_, `Fabric's
rendered changelog <http://www.fabfile.org/changelog.html>`_.
Optional styling additions
==========================
If you have any nontrivial changelog entries (e.g. whose description spans
multiple paragraphs or includes their own bulleted lists, etc) you may run into
`docutils' rather enthusiastic bulleted list massaging
<http://docutils.sourceforge.net/sandbox/html4strict/data/simple-lists.html>`_
which can then make your releases look different from one another.
To help combat this, it may be useful to add the following rule to the Sphinx
theme you're using::
div#changelog > div.section > ul > li > p:only-child {
margin-bottom: 0;
}
.. note::
Some themes, like `Alabaster <http://github.com/bitprophet/alabaster>`_,
may already include this style rule.
| /releases-2.0.0.tar.gz/releases-2.0.0/docs/usage.rst | 0.851583 | 0.784938 | usage.rst | pypi |
# Contributing guide
If you are planning to develop `releaseup`, or want to use the latest commit of
`releaseup` on your local machine, you might want to install it from the source.
This installation is not recommended for users who want to use the stable
version of `releaseup`. The steps below describe the installation process of
`releaseup`'s latest commit. It also describes how to test `releaseup`'s
codebase and build `releaseup`'s documentation.
**Note**: `releaseup` uses
[Scikit-HEP's developer information](https://scikit-hep.org/developer) as a
reference for all the development work. The guide is a general and much more
explained collection of documentation available for developing `Scikit-HEP`
packages. `releaseup` is not a `Scikit-HEP` package, but it still loosely
follows this developer guide as it is absolutely amazing!
## Installing releaseup
We recommend using a virtual environment to install `releaseup`. This would
isolate the library from your global `Python` environment, which would be
beneficial for reproducing bugs, and the overall development of `releaseup`. The
first step would be to clone `releaseup` -
```
git clone https://github.com/Saransh-cpp/releaseup.git
```
and then we can change the current working directory and enter `releaseup` -
```
cd releaseup
```
### Creating a virtual environment
A virtual environment can be set up and activated using `venv` in both `UNIX`
and `Windows` systems.
**UNIX**:
```
python3 -m venv .env
. .env/bin/activate
```
**Windows**:
```
python -m venv .env
.env\Scripts\activate
```
### Installation
The developer installation of `releaseup` comes with a lot of options -
- `test`: the test dependencies
- `docs`: extra dependencies to build and develop `releaseup`'s documentation
- `dev`: installs the `test` and `docs` dependencies
These options can be used with `pip` with the editable (`-e`) mode of
installation in the following ways -
```
pip install -e .[dev,test]
```
For example, if you want to install the `docs` dependencies along with the
dependencies included above, use -
```
pip install -e .[dev,test,docs]
```
### Adding releaseup for notebooks
`releaseup` can be added to the notebooks using the following commands -
```
python -m ipykernel install --user --name releaseup
```
## Activating pre-commit
`releaseup` uses a set of `pre-commit` hooks and the `pre-commit` bot to format,
type-check, and prettify the codebase. The hooks can be installed locally
using -
```
pre-commit install
```
This would run the checks every time a commit is created locally. The checks
will only run on the files modified by that commit, but the checks can be
triggered for all the files using -
```
pre-commit run --all-files
```
If you would like to skip the failing checks and push the code for further
discussion, use the `--no-verify` option with `git commit`.
## Testing
**TODO: ADD TESTS**
### Running tests with coverage locally
The coverage value can be obtained while running the tests using `pytest-cov` in
the following way -
```
python -m pytest -ra --cov=releaseup tests/
```
## Documenting releaseup
`releaseup`'s documentation is mainly written in the form of
[docstrings](https://peps.python.org/pep-0257/) and
[Markdown](https://en.wikipedia.org/wiki/Markdown). The docstrings include the
description, arguments, examples, return values, and attributes of a class or a
function, and the `.md` files enable us to render this documentation on
`releaseup`'s documentation website.
`releaseup` primarily uses [MkDocs](https://www.mkdocs.org/) and
[mkdocstrings](https://mkdocstrings.github.io/) for rendering documentation on
its website. The configuration file (`mkdocs.yml`) for `MkDocs` can be found
[here](https://github.com/Saransh-cpp/releaseup/blob/main/mkdocs.yml). The
documentation is deployed on <https://readthedocs.io>
[here](https://releaseup.readthedocs.io/en/latest/).
Ideally, with the addition of every new feature to `releaseup`, documentation
should be added using comments, docstrings, and `.md` files.
### Building documentation locally
The documentation is located in the `docs` folder of the main repository. This
documentation can be generated using the `docs` dependencies of `releaseup` in
the following way -
```
mkdocs serve
```
The commands executed above will clean any existing documentation build, create
a new build (in `./site/`), and serve it on your `localhost`. To just build the
documentation, use -
```
mkdocs build
```
## Nox
The fastest way to start with development is to use nox. If you don't have nox,
you can use `pipx run nox` to run it without installing, or `pipx install nox`.
If you don't have pipx (pip for applications), then you can install it with
`pip install pipx` (the only case where installing an application with regular
pip is reasonable). If you use macOS, then pipx and nox are both in brew, use
`brew install pipx nox`.
To use, run `nox`. This will lint and test using every installed version of
Python on your system, skipping ones that are not installed. You can also run
specific jobs:
```console
$ nox -s lint # Lint only
$ nox -s tests-3.9 # Python 3.9 tests only
$ nox -s docs -- serve # Build and serve the docs
$ nox -s build # Make an SDist and wheel
```
The default sessions (`lint` and `tests`) can be executed using -
```
nox
```
### Running pre-commit with nox
The `pre-commit` hooks can be run with `nox` in the following way -
```
nox -s lint
nox -s pylint
```
### Running tests with nox
Tests can be run with `nox` in the following way -
```
nox -s tests
```
### Building documentation with nox
Docs can be built with `nox` in the following way -
```
nox -s docs
```
Use the following command if you want to deploy the docs on `localhost` -
```
nox -s docs -- serve
```
| /releaseup-0.1.0.tar.gz/releaseup-0.1.0/CONTRIBUTING.md | 0.510496 | 0.947962 | CONTRIBUTING.md | pypi |
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity and
orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
- Demonstrating empathy and kindness toward other people
- Being respectful of differing opinions, viewpoints, and experiences
- Giving and gracefully accepting constructive feedback
- Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
- Focusing on what is best not just for us as individuals, but for the overall
community
Examples of unacceptable behavior include:
- The use of sexualized language or imagery, and sexual attention or advances of
any kind
- Trolling, insulting or derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or email address,
without their explicit permission
- Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
saransh0701@gmail.com. All complaints will be reviewed and investigated promptly
and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series of
actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or permanent
ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within the
community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by
[Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /releaseup-0.1.0.tar.gz/releaseup-0.1.0/CODE_OF_CONDUCT.md | 0.57523 | 0.678155 | CODE_OF_CONDUCT.md | pypi |
from typing import List, Set, Tuple
from releasy.repository import CommitSet
class ContributorSet():
    """The set of people that contributed to a set of commits.

    Exposes the distinct committers and authors of the commits, plus the
    "newcomers": authors that do not appear in ``previous_authors``.
    """

    def __init__(self,
                 commits: "CommitSet" = None,
                 previous_authors: Set[str] = None) -> None:
        """
        :param commits: commits whose contributors are analyzed
        :param previous_authors: authors already known from earlier
            releases; used to compute ``newcomers``
        """
        self._commits = CommitSet()
        self.committers = set[str]()
        self.authors = set[str]()
        self.newcomers = set[str]()
        if commits:
            self._commits = commits
            self.committers = set[str](commit.committer for commit in commits)
            self.authors = set[str](commit.author for commit in commits)
            if previous_authors:
                self.newcomers = self.authors - previous_authors

    def __len__(self):
        """Number of distinct authors."""
        return len(self.authors)

    def frequency(self):
        """Return the share of commits authored by each author.

        :return: dict mapping author to the percentage of commits they
            authored, rounded to two decimal places
        """
        contributions = dict[str, int]()
        total_contributions = 0
        for author in (commit.author for commit in self._commits):
            contributions[author] = contributions.get(author, 0) + 1
            total_contributions += 1
        return {
            author: round(count * 100 / total_contributions, 2)
            for author, count in contributions.items()
        }

    def top(self, top: int = None, percent: int = None) -> List[Tuple[str, float]]:
        """Return the most active authors as (author, percentage) pairs.

        :param top: return at most *top* authors
        :param percent: return the smallest prefix of authors whose
            cumulative percentage reaches *percent*
        :raises ValueError: if both *top* and *percent* are given
        """
        # BUG FIX: the original tested ``if top and percent`` and
        # ``if top:``, so ``top=0`` slipped past both checks and crashed
        # in the percent loop (``percent`` was None). Compare with None.
        if top is not None and percent is not None:
            raise ValueError("must use top or percent argument")
        # Sort by share (descending), breaking ties by author name.
        contributors = sorted(
            self.frequency().items(),
            key=lambda item: (-item[1], item[0]))
        if top is None and percent is None:
            return contributors
        if top is not None:
            return contributors[:top]
        accumulated = 0
        for pos, (_, freq) in enumerate(contributors):
            accumulated += freq
            if accumulated >= percent:
                return contributors[:pos + 1]
        return []

    def commits(self, contributors: List[str]) -> "CommitSet":
        """Return the commits authored or committed by *contributors*.

        Accepts plain names or (name, frequency) tuples as produced by
        :meth:`top`.
        """
        def get_contributor(contributor):
            # Unwrap (name, frequency) pairs coming from top().
            if isinstance(contributor, tuple):
                return contributor[0]
            return contributor

        names = set(
            get_contributor(contributor) for contributor in contributors)
        selected = [commit for commit in self._commits
                    if commit.author in names
                    or commit.committer in names]
        return CommitSet(selected)
from datetime import datetime, timedelta, timezone
from typing import Dict, List, Set, Tuple
import pygit2
from releasy.repository import Commit, CommitSet, DiffDelta, Repository, RepositoryProxy, Tag
class GitRepository(RepositoryProxy):
    """
    A Repository proxy to Git, backed by pygit2.
    """

    def __init__(self, path) -> None:
        super().__init__()
        self.path = path
        self.name = path
        self.git: pygit2.Repository = pygit2.Repository(path)
        self.commit_cache = CommitCache(self.git)
        # Set by the owning Repository after construction.
        self.repository: Repository = None

    def fetch_tags(self) -> Set[Tag]:
        """Return all tags of the repository as releasy Tag objects."""
        rtags = [ref for ref in self.git.references.objects
                 if ref.name.startswith('refs/tags/')]
        tags: Set[Tag] = set()
        for rtag in rtags:
            tag = self._get_tag(rtag)
            if tag:
                tags.add(tag)
        return tags

    def _get_tag(self, rtag: pygit2.Reference) -> Tag:
        """Convert a git tag reference into a releasy Tag.

        Handles lightweight tags (reference points straight to a commit)
        and annotated tags (reference points to a tag object). Returns
        None for references that do not resolve to a commit.
        """
        ref = self.git.get(rtag.target)
        if ref.type == pygit2.GIT_OBJ_COMMIT:  # lightweight tag
            commit = self.repository.get_commit(ref.hex)
            # TODO time
            tag = Tag(self.repository, rtag.shorthand, commit)
            return tag
        elif ref.type == pygit2.GIT_OBJ_TAG:  # annotated tag
            peel = rtag.peel()
            if peel.type == pygit2.GIT_OBJ_COMMIT:
                commit = self.repository.get_commit(rtag.peel().hex)
                rtag_ref: pygit2.Tag = ref
                try:
                    message = rtag_ref.message
                except Exception:  # undecodable tag message
                    message = ''
                if rtag_ref.tagger:
                    tagger = f"{rtag_ref.tagger.name} <{rtag_ref.tagger.email}>"
                    time_tzinfo = timezone(timedelta(minutes=rtag_ref.tagger.offset))
                    time = datetime.fromtimestamp(float(rtag_ref.tagger.time), time_tzinfo)
                    tag = Tag(self.repository, rtag.shorthand, commit, message, tagger,
                              time)
                else:
                    tag = Tag(self.repository, rtag.shorthand, commit, message)
                return tag
        return None

    def fetch_commit(self, commit_id: str) -> Commit:
        """Fetch a single commit and wrap it as a releasy Commit."""
        rcommit = self.commit_cache.fetch_commit(commit_id)
        committer_tzinfo = timezone(timedelta(minutes=rcommit.committer.offset))
        committer_time = datetime.fromtimestamp(
            float(rcommit.committer.time), committer_tzinfo)
        author_tzinfo = timezone(timedelta(minutes=rcommit.author.offset))
        author_time = datetime.fromtimestamp(
            float(rcommit.author.time), author_tzinfo)
        try:
            # BUG FIX: the original read ``rcommit.name,`` — a
            # nonexistent pygit2 attribute AND a trailing comma that
            # built a 1-tuple — so the message was always ''. pygit2
            # exposes the commit message as ``message``.
            message = rcommit.message
        except Exception:  # undecodable commit message
            message = ''
        commit = Commit(
            self.repository,
            rcommit.hex,
            message,
            f"{rcommit.committer.name} <{rcommit.committer.email}>",
            committer_time,
            f"{rcommit.author.name} <{rcommit.author.email}>",
            author_time)
        return commit

    def fetch_commit_parents(self, commit: Commit) -> CommitSet:
        """Return the parents of *commit* as a CommitSet."""
        commit_ref: pygit2.Commit = self.commit_cache.fetch_commit(commit.id)
        parents = CommitSet()
        for parent_ref in commit_ref.parents:
            parent = self.repository.get_commit(parent_ref.hex)
            parents.add(parent)
        return parents

    def diff(self, commit_a: Commit, commit_b: Commit, parse_delta: bool = False) -> DiffDelta:
        """Diff two commits.

        :param parse_delta: when True, also collect the set of file
            paths touched by the diff (both old and new names)
        """
        diff_result = self.git.diff(commit_a.id, commit_b.id)
        files = set()
        if parse_delta:
            for delta in diff_result.deltas:
                if delta.new_file.path:
                    files.add(delta.new_file.path)
                if delta.old_file.path:
                    files.add(delta.old_file.path)
        delta = DiffDelta(
            diff_result.stats.insertions,
            diff_result.stats.deletions,
            diff_result.stats.files_changed,
            files)
        return delta
class CommitCache:
    """
    Memoize pygit2 commit lookups to speed up repeated fetches.
    """

    def __init__(self, git: pygit2.Repository) -> None:
        self.git = git
        self.cache: Dict[str, pygit2.Commit] = {}

    def fetch_commit(self, commit_id: str) -> pygit2.Commit:
        """Return the pygit2 commit for *commit_id*, caching the result."""
        try:
            return self.cache[commit_id]
        except KeyError:
            resolved: pygit2.Commit = self.git.get(commit_id)
            self.cache[commit_id] = resolved
            return resolved
from typing import Any, List, Tuple
from releasy.release import Commit2ReleaseMapper, Release
from releasy.repository import Commit, CommitSet
from .miner_base import AbstractMiner
from .project import Project
class MixedHistoryCommitMiner(AbstractMiner):
    """Mine the commits that belong to each release of a project.

    Walks the commit graph backwards from each release head and stops at
    commits that are themselves pointed to by a release tag. A commit
    may therefore be counted in more than one release (hence "mixed").
    """

    def __init__(self) -> None:
        super().__init__()
        # Maps each release head commit to the releases that share it.
        # NOTE(review): despite the annotation, the stored values are
        # *sets* of releases, not single Release objects — confirm the
        # intended type before relying on the annotation.
        self.c2r = dict[Commit, Release]()

    def mine(self, project: Project, *args) -> Tuple[Project, Any]:
        """Assign ``commits`` and ``tails`` to every release of *project*.

        Returns the project together with the head-to-releases mapping.
        """
        self.project = project
        self._mine_commits()
        return (self.project, [self.c2r])

    def _mine_commits(self) -> None:
        # Commits directly tagged by some release; the walk stops when
        # it reaches one of them, so each release only collects the
        # history exclusive to it (plus shared untagged ancestors).
        release_commits = set(map(lambda release: release.tag.commit,
                                  self.project.releases))
        for release in self.project.releases:
            commits = CommitSet()
            tails = CommitSet()
            # Commits already visited in this release's walk; guards
            # against cycles and repeated expansion of merge ancestors.
            loop_detector = set()
            commits_to_track: List[Commit] = [release.head]
            if release.head not in self.c2r:
                self.c2r[release.head] = set()
            self.c2r[release.head].add(release)
            # Depth-first walk from the release head toward the roots.
            while commits_to_track:
                commit = commits_to_track.pop()
                commits.add(commit)
                loop_detector.add(commit)
                if commit.parents:
                    for parent in commit.parents:
                        if parent not in release_commits:
                            if parent not in loop_detector:
                                commits_to_track.append(parent)
                        else:
                            # Parent is another release's tagged commit:
                            # this commit is a boundary (tail).
                            tails.add(commit)
                else:
                    # Root commit (no parents) is also a tail.
                    tails.add(commit)
            release.tails = tails
            release.commits = commits
class HistoryCommitMiner(AbstractMiner):
    """Mine release commits, assigning each commit to a single release.

    Releases are processed in (time, version) order; once a commit is
    claimed by a release it is never assigned again, so — unlike
    MixedHistoryCommitMiner — each commit belongs to exactly one release.
    """

    def __init__(self) -> None:
        super().__init__()
        # Records which release owns each commit.
        self.c2r = Commit2ReleaseMapper()

    def mine(self, project: Project, *args) -> Tuple[Project, Any]:
        """Assign ``commits`` and ``tails`` to every release of *project*.

        Head reservation must run before the history walk so that
        releases sharing a head do not steal each other's commits.
        """
        self.project = project
        self._assign_heads()
        self._mine_releases()
        return (self.project, [self.c2r])

    def _assign_heads(self):
        # Reserve each head commit for the earliest release that points
        # to it (ordering by time, then version, breaks ties).
        for release in sorted(
                self.project.releases,
                key=lambda r: (r.time, r.version)):
            if not self.c2r.get_release(release.head):
                self.c2r.assign_commit(release.head, release)

    def _mine_releases(self) -> None:
        # Walk releases oldest-first so earlier releases claim the
        # shared history and later ones only get their new commits.
        for release in sorted(
                self.project.releases,
                key=lambda r: (r.time, r.version)):
            commits, tails = self._mine_commits(release)
            release.commits = commits
            release.tails = tails

    def _mine_commits(self, release):
        """Walk history from *release*'s head, claiming unowned commits.

        Returns a (commits, tails) pair; returns two empty sets when the
        head was already reserved for a different release.
        """
        commit = release.head
        commit_release = self.c2r.get_release(commit)
        if commit_release and release not in commit_release:
            return CommitSet(), CommitSet()
        commits = CommitSet()
        tails = CommitSet()
        commits_to_track: List[Commit] = [commit]
        while commits_to_track:
            commit = commits_to_track.pop()
            self.c2r.assign_commit(commit, release)
            commits.add(commit)
            if not commit.parents:
                # Root commit: natural boundary of the history.
                tails.add(commit)
            for parent in commit.parents:
                if not self.c2r.get_release(parent):
                    commits_to_track.append(parent)
                else:
                    # Parent already owned by some release: this commit
                    # is a boundary (tail) of the mined range.
                    tails.add(commit)
        return commits, tails
from releng_tool.util.enum import Enum
class ConfKey(Enum):
    """
    configuration file keys

    Defines a series of attributes covering every supported project
    configuration key accepted by this tool. Project configuration keys
    are in a lowercase format; each attribute's value is the literal key
    name expected in a project's configuration file.

    Attributes:
        CACHE_EXT_TRANSFORM: cache extension transform
        DEFINTERN: packages are internal (implicitly)
        EXTENSIONS: project releng-extension list
        EXTEN_PKGS: project external packages list
        EXTRA_LEXCEPTS: project-permitted spdx exceptions
        EXTRA_LICENSES: project-permitted spdx licenses
        LICENSE_HEADER: license header information
        OVERRIDE_REV: revision overriding dictionary
        OVERRIDE_SITES: site overriding dictionary
        OVERRIDE_TOOLS: extract-tool overriding
        PKGS: project's package (name) list
        PREREQUISITES: project's host-tool prerequisites
        QUIRKS: configuration quirks to apply
        SBOM_FORMAT: project's default sbom format to generate
        SYSROOT_PREFIX: project's default sys-root prefix
        URL_MIRROR: mirror base site for url fetches
        URLOPEN_CONTEXT: context to use for urlopen
    """
    CACHE_EXT_TRANSFORM = 'cache_ext'
    DEFINTERN = 'default_internal'
    EXTENSIONS = 'extensions'
    EXTEN_PKGS = 'external_packages'
    EXTRA_LEXCEPTS = 'extra_license_exceptions'
    EXTRA_LICENSES = 'extra_licenses'
    LICENSE_HEADER = 'license_header'
    OVERRIDE_REV = 'override_revisions'
    OVERRIDE_SITES = 'override_sites'
    OVERRIDE_TOOLS = 'override_extract_tools'
    PKGS = 'packages'
    PREREQUISITES = 'prerequisites'
    QUIRKS = 'quirks'
    SBOM_FORMAT = 'sbom_format'
    SYSROOT_PREFIX = 'sysroot_prefix'
    URL_MIRROR = 'url_mirror'
    URLOPEN_CONTEXT = 'urlopen_context'
class ListenerEvent(Enum):
    """
    releng listener event types

    Defines the event types that an extension can register a listener
    for. Each attribute's value is the event's identifier string.

    Attributes:
        CONFIG_LOADED: event after a configuration is processed
        POST_BUILD_STARTED: event before a post-build event starts
        POST_BUILD_FINISHED: event after a post-build event ends
    """
    CONFIG_LOADED = 'config-loaded'
    POST_BUILD_STARTED = 'post-build-started'
    POST_BUILD_FINISHED = 'post-build-finished'
class Rpk(Enum):
    """
    releng package keys (postfixes)

    Defines a series of attributes for every package configuration key
    supported by this tool. Package configuration keys are in an uppercase
    format.

    Attributes:
        BUILD_SUBDIR: sub-directory in fetched to find root src
        DEPS: list of package dependencies
        DEVMODE_IGNORE_CACHE: whether or not ignore cache
        DEVMODE_REVISION: devmode-rev to acquire from srcs
        EXTENSION: filename extension for package (if needed)
        EXTERNAL: whether or not package is considered "external"
        EXTOPT: extension-defined package modifiers (if any)
        EXTRACT_TYPE: extraction type for sources
        FETCH_OPTS: fetch options (if any)
        FIXED_JOBS: fixed job count for the project
        GIT_CONFIG: git configurations to set (if any)
        GIT_DEPTH: git fetch depth (if any)
        GIT_REFSPECS: additional git refspecs to fetch (if any)
        GIT_SUBMODULES: fetch any submodules (if any)
        GIT_VERIFY_REVISION: verify signed revisions
        HOST_PROVIDES: host tools the package will provide
        INSTALL_TYPE: install container target for the package
        INTERNAL: whether or not package is considered "internal"
        LICENSE: license information for the package
        LICENSE_FILES: source file(s) with license information
        NO_EXTRACTION: whether or not package extraction is done
        PATCH_SUBDIR: sub-directory in fetched to apply patches
        PREFIX: system root prefix override (if needed)
        REVISION: revision to acquire from sources (if any)
        SITE: site where to fetch package sources
        SKIP_REMOTE_CONFIG: skip any remote configuration
        SKIP_REMOTE_SCRIPTS: skip any remote scripts
        STRIP_COUNT: strip count for archive extract
        TYPE: type of project the package is
        VCS_TYPE: type of project the package's fetch source is
        VERSION: the version of the package
        # (package type - common)
        CONF_DEFS: package-type configuration definitions
        CONF_ENV: package-type configuration environment values
        CONF_OPTS: package-type configuration options
        BUILD_DEFS: package-type build definitions
        BUILD_ENV: package-type build environment values
        BUILD_OPTS: package-type build options
        ENV: package-type environment values (all stages)
        INSTALL_DEFS: package-type install definitions
        INSTALL_ENV: package-type install environment values
        INSTALL_OPTS: package-type install options
        # (package type - autotools)
        AUTOTOOLS_AUTORECONF: autotools /w autoreconf
        # (package type - cmake)
        CMAKE_BUILD_TYPE: the cmake build type to use
        CMAKE_NOINSTALL: skip cmake install stage
        # (package type - make)
        MAKE_NOINSTALL: skip make install stage
        # (package type - meson)
        MESON_NOINSTALL: skip meson install stage
        # (package type - python)
        PYTHON_INTERPRETER: python interpreter
        PYTHON_SETUP_TYPE: python setup type to build/install with
        # (package type - scons)
        SCONS_NOINSTALL: skip scons install stage
    """
    BUILD_SUBDIR = 'BUILD_SUBDIR'
    DEPS = 'DEPENDENCIES'
    DEVMODE_IGNORE_CACHE = 'DEVMODE_IGNORE_CACHE'
    DEVMODE_REVISION = 'DEVMODE_REVISION'
    EXTENSION = 'EXTENSION'
    EXTERNAL = 'EXTERNAL'
    EXTOPT = 'EXTOPT'
    EXTRACT_TYPE = 'EXTRACT_TYPE'
    FETCH_OPTS = 'FETCH_OPTS'
    FIXED_JOBS = 'FIXED_JOBS'
    GIT_CONFIG = 'GIT_CONFIG'
    GIT_DEPTH = 'GIT_DEPTH'
    GIT_REFSPECS = 'GIT_REFSPECS'
    GIT_SUBMODULES = 'GIT_SUBMODULES'
    GIT_VERIFY_REVISION = 'GIT_VERIFY_REVISION'
    HOST_PROVIDES = 'HOST_PROVIDES'
    INSTALL_TYPE = 'INSTALL_TYPE'
    INTERNAL = 'INTERNAL'
    LICENSE = 'LICENSE'
    LICENSE_FILES = 'LICENSE_FILES'
    NO_EXTRACTION = 'NO_EXTRACTION'
    PATCH_SUBDIR = 'PATCH_SUBDIR'
    PREFIX = 'PREFIX'
    REVISION = 'REVISION'
    SITE = 'SITE'
    SKIP_REMOTE_CONFIG = 'SKIP_REMOTE_CONFIG'
    SKIP_REMOTE_SCRIPTS = 'SKIP_REMOTE_SCRIPTS'
    STRIP_COUNT = 'STRIP_COUNT'
    TYPE = 'TYPE'
    VCS_TYPE = 'VCS_TYPE'
    VERSION = 'VERSION'
    # (package type - common)
    CONF_DEFS = 'CONF_DEFS'
    CONF_ENV = 'CONF_ENV'
    CONF_OPTS = 'CONF_OPTS'
    BUILD_DEFS = 'BUILD_DEFS'
    BUILD_ENV = 'BUILD_ENV'
    BUILD_OPTS = 'BUILD_OPTS'
    ENV = 'ENV'
    INSTALL_DEFS = 'INSTALL_DEFS'
    INSTALL_ENV = 'INSTALL_ENV'
    INSTALL_OPTS = 'INSTALL_OPTS'
    # (package type - autotools)
    AUTOTOOLS_AUTORECONF = 'AUTOTOOLS_AUTORECONF'
    # (package type - cmake)
    CMAKE_BUILD_TYPE = 'CMAKE_BUILD_TYPE'
    CMAKE_NOINSTALL = 'CMAKE_NOINSTALL'
    # (package type - make)
    MAKE_NOINSTALL = 'MAKE_NOINSTALL'
    # (package type - meson)
    MESON_NOINSTALL = 'MESON_NOINSTALL'
    # (package type - python)
    PYTHON_INTERPRETER = 'PYTHON_INTERPRETER'
    PYTHON_SETUP_TYPE = 'PYTHON_SETUP_TYPE'
    # (package type - scons)
    SCONS_NOINSTALL = 'SCONS_NOINSTALL'
class GlobalAction(Enum):
    """
    specific stage action to perform

    A user can request a (global) action to perform over the default process.
    For example, a user can request to "fetch" and only the fetching stage will
    occur for registered packages.

    Attributes:
        CLEAN: clean the working state
        DISTCLEAN: pristine state clean with cache/dl clear
        EXTRACT: process all packages through extraction stage
        FETCH: process all packages through fetch stage
        INIT: initialize example structure
        LICENSES: generate license information for a project
        MRPROPER: pristine state clean (e.g. configurations)
        PATCH: process all packages through patch stage
        SBOM: generate sbom files for the project
        STATE: dump configuration state information
    """
    CLEAN = 'clean'
    DISTCLEAN = 'distclean'
    EXTRACT = 'extract'
    FETCH = 'fetch'
    INIT = 'init'
    LICENSES = 'licenses'
    MRPROPER = 'mrproper'
    PATCH = 'patch'
    SBOM = 'sbom'
    STATE = 'state'
class PkgAction(Enum):
    """
    package-specific stage action to perform

    A user can request a package action to perform over the default process.
    When a package-specific action is requested, the process will perform all
    dependencies for the target action's stage before completing the target
    stage. For example, a user can request to perform a package's "extract"
    stage, which will result in ensuring the package's fetch stage is complete
    (and possibly other package dependencies') before performing (and stopping
    after) the extraction stage.

    Attributes:
        BUILD: process a package till end of the build stage
        CLEAN: process a package till end of the clean stage
        CONFIGURE: process a package till end of the configure stage
        DISTCLEAN: pristine state clean state of the package with cache/dl clear
        EXEC: perform an action in the package's directory
        EXTRACT: process a package till end of the extraction stage
        FETCH: process a package till end of the fetch stage
        INSTALL: process a package till end of the install stage
        LICENSE: generate license information for a package
        PATCH: process a package till end of the patch stage
        REBUILD: perform a re-build of a package
        REBUILD_ONLY: perform a re-build of a package and stop
        RECONFIGURE: perform a re-configuration of a package
        RECONFIGURE_ONLY: perform a re-configuration of a package and stop
        REINSTALL: perform a re-install of a package
    """
    BUILD = 'build'
    CLEAN = 'clean'
    CONFIGURE = 'configure'
    DISTCLEAN = 'distclean'
    EXEC = 'exec'
    EXTRACT = 'extract'
    FETCH = 'fetch'
    INSTALL = 'install'
    LICENSE = 'license'
    PATCH = 'patch'
    REBUILD = 'rebuild'
    REBUILD_ONLY = 'rebuild_only'
    RECONFIGURE = 'reconfigure'
    RECONFIGURE_ONLY = 'reconfigure_only'
    REINSTALL = 'reinstall'
class PackageType(Enum):
    """
    package types

    Defines supported package types for deciding which method of configuring,
    building and installing is performed.

    Attributes:
        AUTOTOOLS: autotools-based package
        CMAKE: cmake-based package
        MAKE: make-based package
        MESON: meson-based package
        PYTHON: python-based package
        SCONS: scons-based package
        SCRIPT: releng script-based package
    """
    AUTOTOOLS = 'autotools'
    CMAKE = 'cmake'
    MAKE = 'make'
    MESON = 'meson'
    PYTHON = 'python'
    SCONS = 'scons'
    SCRIPT = 'script'
class PackageInstallType(Enum):
    """
    package install types

    Defines supported package installation types for deciding the location to
    push resources to during the installation phase.

    Attributes:
        HOST: install to the host container
        IMAGES: install to the images container
        STAGING: install to the staging container
        STAGING_AND_TARGET: install to the staging and target containers
        TARGET: install to the target container
    """
    HOST = 'host'
    IMAGES = 'images'
    STAGING = 'staging'
    STAGING_AND_TARGET = 'staging_and_target'
    TARGET = 'target'
class PythonSetupType(Enum):
    """
    python setup types

    Defines supported Python setup types for deciding which method build and
    install commands/arguments are utilized.

    Attributes:
        DISTUTILS: distutils build packager
        FLIT: Flit build packager
        HATCH: Hatch build packager
        PDM: PDM build packager
        PEP517: pep517 build packager
        POETRY: Poetry build packager
        SETUPTOOLS: setuptools build packager
    """
    DISTUTILS = 'distutils'
    FLIT = 'flit'
    HATCH = 'hatch'
    PDM = 'pdm'
    PEP517 = 'pep517'
    POETRY = 'poetry'
    SETUPTOOLS = 'setuptools'
class SbomFormatType(Enum):
    """
    sbom format types

    Defines supported output formats for a generated SBOM file.

    Attributes:
        ALL: all supported format types
        CSV: a CSV file
        HTML: an HTML file
        JSON: a JSON file
        JSON_SPDX: a SPDX-compliant JSON file
        RDP_SPDX: a SPDX-compliant RDP (XML) file
        TEXT: a plain text file
        XML: an XML file
    """
    ALL = 'all'
    CSV = 'csv'
    HTML = 'html'
    JSON = 'json'
    JSON_SPDX = 'json-spdx'
    RDP_SPDX = 'rdp-spdx'
    TEXT = 'text'
    XML = 'xml'
class VcsType(Enum):
    """
    version control system types

    Defines supported version control system types for deciding which fetch
    processing is used when acquiring resources.

    Attributes:
        BZR: bazaar
        CVS: concurrent versions system
        GIT: git
        HG: mercurial
        LOCAL: no version control (local interim-development package)
        NONE: no version control (placeholder package)
        PERFORCE: perforce
        RSYNC: rsync
        SCP: secure copy
        SVN: subversion
        URL: url (http, https, ftp, file, etc.)
    """
    BZR = 'bzr'
    CVS = 'cvs'
    GIT = 'git'
    HG = 'hg'
    LOCAL = 'local'
    NONE = 'none'
    PERFORCE = 'perforce'
    RSYNC = 'rsync'
    SCP = 'scp'
    SVN = 'svn'
    URL = 'url'
# key used to track "global" local sources configuration
GBL_LSRCS = '*'

# default CMake build type to use
DEFAULT_CMAKE_BUILD_TYPE = 'RelWithDebInfo'

# list of values to consider as "unset" when parsing configuration entries
UNSET_VALUES = [
    '-',
    'unset',
]
from collections import defaultdict
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.log import debug
from releng_tool.util.log import verbose
import math
import os
import pickle

# prefer `time.clock` when available; fall back to `time.monotonic` on
# interpreters where `clock` does not exist (removed in Python 3.8)
try:
    from time import clock as capture_clock
except ImportError:
    from time import monotonic as capture_clock

# optional imports
try:
    # disable xwindows backend (as it is not required and may cause issue with
    # systems without a display configured)
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    import numpy as np
    has_matplotlib = True
except ImportError:
    has_matplotlib = False

# filename for the statistics database
STATISTICS_NAME = 'statistics.dat'
class RelengStats:
    """
    statistics tracking

    Tracks statistics (currently, stage durations) for a releng-tool
    invocation. Tracked data is persisted into the output directory so that
    re-runs with already-completed packages can still render a "complete"
    report. Final reports (CSV; PDF plots when matplotlib is available) are
    generated at the end of a run via ``generate``.

    Args:
        opts: options used to configure the engine

    Attributes:
        cache: cache of statistics for this runtime
        dat_file: file to store persisted statistics
        data: dictionary of data that can be persisted
        opts: options used to configure the engine
        out_dir: directory to generate final statistics to
    """

    def __init__(self, opts):
        self.cache = defaultdict(lambda: defaultdict(dict))
        self.opts = opts
        self.out_dir = os.path.join(self.opts.out_dir, 'misc')
        self.dat_file = os.path.join(self.out_dir, STATISTICS_NAME)
        self.data = {}

    def load(self):
        """
        load any persisted statistics

        Will load any statistics which may have been persisted from a previous
        run. This is to help render a "complete" report of statistics when
        re-running releng-tool with packages which may have already been
        completed.
        """
        if not os.path.exists(self.dat_file):
            return

        try:
            with open(self.dat_file, 'rb') as f:
                self.data = pickle.load(f)
                debug('loaded statistics')
        except IOError:
            verbose('failed to load original statistics (io error)')
        except (ValueError, pickle.UnpicklingError):
            # a corrupt or incompatible statistics database is not fatal;
            # start over with a fresh data set (pickle failures raise
            # `UnpicklingError`, which is not a `ValueError`)
            verbose('failed to load original statistics (pickle error)')

    def save(self, desc=None):
        """
        save statistics for future reference

        Will save any statistics which should be persisted for future
        considerations. This is to help render a "complete" report of
        statistics when re-running releng-tool with packages which may have
        already been completed.

        Args:
            desc (optional): description of this save event (for logging)
        """
        if not ensure_dir_exists(self.out_dir):
            verbose('unable to generate output directory for statistics')
            return

        desc = ' ({})'.format(desc) if desc else ''

        try:
            with open(self.dat_file, 'wb') as f:
                pickle.dump(self.data, f, protocol=2)  # 2 for py2/py3 support
                debug('saved statistics' + desc)
        except IOError:
            verbose('failed to save statistics' + desc)

    def track_duration_start(self, pkg, stage):
        """
        track a duration start

        To be invoked when tracking the start of a package event for a given
        stage. This call is to be used with ``track_duration_end``, to help
        track the duration of a package's stage.

        Args:
            pkg: the package
            stage: the stage which has started
        """
        self.cache[pkg][stage]['start'] = capture_clock()

    def track_duration_end(self, pkg, stage, save=True):
        """
        track a duration end

        To be invoked when tracking the end of a package event for a given
        stage. This call is to be used with ``track_duration_start``, to help
        track the duration of a package's stage.

        Args:
            pkg: the package
            stage: the stage which has ended
            save (optional): automatically save the duration (default: True)
        """
        end_time = capture_clock()
        # note: raises KeyError if `track_duration_start` was never invoked
        # for this package/stage pair
        start_time = self.cache[pkg][stage]['start']

        durations = self.data.setdefault('duration', {})
        durations.setdefault(pkg, {})[stage] = end_time - start_time

        if save:
            self.save(desc='{}-{}'.format(pkg, stage))

    def generate(self):
        """
        generate a final report of statistics

        To be invoked at the end of a releng-tool process, this call will
        generate reports/etc. for any tracked statistics information based on
        the current and previous invoked executions (if any).
        """
        if not ensure_dir_exists(self.out_dir):
            verbose('unable to generate output directory for statistics')
            return

        self._generate_duration()

    def _generate_duration(self):
        """
        generate duration-related statistics

        When generating a statistics report, this call creates/adds
        information about durations which may have been captured.
        """
        if 'duration' not in self.data:
            return

        durations = self.data['duration']
        pkgs = sorted(durations.keys())

        # accumulate every stage category observed across all packages
        categories = set()
        for pkg_data in durations.values():
            categories.update(pkg_data.keys())

        # order the known stage categories in their conventional processing
        # order; any remaining (e.g. extension-provided) categories are
        # appended alphabetically afterwards
        #
        # (build a filtered list instead of removing entries while iterating,
        # which would skip elements)
        known_order = [
            'boot',
            'fetch',
            'extract',
            'patch',
            'configure',
            'build',
            'install',
            'post',
        ]
        ordered_categories = [c for c in known_order if c in categories]
        for category in sorted(categories):
            if category not in ordered_categories:
                ordered_categories.append(category)
        categories = ordered_categories

        # duration statistics to csv
        verbose('generating duration statistics (csv)...')
        dur_csv = os.path.join(self.out_dir, 'durations.csv')
        try:
            with open(dur_csv, 'w') as f:
                # header
                f.write('# pkg')
                for category in categories:
                    f.write(',' + category)
                f.write('\n')

                # data
                for pkg in pkgs:
                    f.write(pkg)
                    for category in categories:
                        value = durations[pkg].get(category, 0)
                        f.write(',' + str(int(value)))
                    f.write('\n')
        except IOError as e:
            verbose('failed to write duration statistics: {}', e)

        # duration statistics to plot (if available)
        generate_pdf = True
        if not has_matplotlib:
            generate_pdf = False
            debug('duration statistics plot not supported (no matplotlib)')
        elif isinstance(mpl.__version__, tuple) and mpl.__version__ < (2, 1):
            # NOTE(review): modern matplotlib reports `__version__` as a
            # string, so this tuple check may never trigger; verify intent
            generate_pdf = False
            debug('duration statistics plot not supported (old matplotlib)')
        elif 'releng.stats.no_pdf' in self.opts.quirks:
            generate_pdf = False
            debug('duration statistics plot disabled by quirk')

        if generate_pdf:
            verbose('generating duration statistics (pdf)...')

            BAR_HEIGHT = 0.4
            EXTRA_HEIGHT = 1
            FIG_WIDTH = 10

            # the "total" figure holds one additional (summary) bar
            fig_height_pkgs = (BAR_HEIGHT + EXTRA_HEIGHT) * len(pkgs)
            fig_height_total = (BAR_HEIGHT + EXTRA_HEIGHT) * (len(pkgs) + 1)
            figsize_pkgs = (FIG_WIDTH, fig_height_pkgs)
            figsize_total = (FIG_WIDTH, fig_height_total)

            fig_pkgs, ax_pkgs = plt.subplots(figsize=figsize_pkgs)
            fig_total, ax_total = plt.subplots(figsize=figsize_total)
            axs = [ax_pkgs, ax_total]
            figs = [fig_pkgs, fig_total]

            # bars render bottom-up; reverse so packages appear in order
            pkgs.reverse()
            pkgs_total = list(pkgs)
            pkgs_total.insert(0, 'total')

            # stacked-bar offsets, advanced after each category pass
            offset = [0] * len(pkgs)
            offset_total = [0] * len(pkgs_total)

            for category in categories:
                width = []
                width_total = []
                total = 0
                for pkg in pkgs:
                    if category in durations[pkg]:
                        duration = durations[pkg][category]
                        width.append(duration)
                        width_total.append(duration)
                        total += duration
                    else:
                        width.append(0)
                        width_total.append(0)
                width_total.insert(0, total)

                ax_pkgs.barh(pkgs, width, height=BAR_HEIGHT,
                    left=offset, label=category)
                ax_total.barh(pkgs_total, width_total, height=BAR_HEIGHT,
                    left=offset_total, label=category)

                offset = np.add(offset, width)
                offset_total = np.add(offset_total, width_total)

            # provide some spacing near the right
            MIN_OFFSET = 10
            xlim = int(math.ceil(max(offset) / 10.)) * 10
            if xlim - max(offset) < MIN_OFFSET:
                xlim += MIN_OFFSET
            ax_pkgs.set_xlim([0, xlim])

            xlim_total = int(math.ceil(max(offset_total) / 10.)) * 10
            if xlim_total - max(offset_total) < MIN_OFFSET:
                xlim_total += MIN_OFFSET
            ax_total.set_xlim([0, xlim_total])

            # labels
            for ax in axs:
                ax.set_title('Package Stage Durations')
                ax.set_xlabel('Duration (seconds)')
                ax.legend()
                ax.grid(axis='x', linestyle=':', linewidth=0.4)

            # ensure rotated labels state in render area
            for fig in figs:
                fig.tight_layout()

            # generate figures
            dur_pdf = os.path.join(self.out_dir, 'durations.pdf')
            fig_pkgs.savefig(dur_pdf)
            dur_pdf_total = os.path.join(self.out_dir, 'durations-total.pdf')
            fig_total.savefig(dur_pdf_total)

            # close/cleanup figures
            plt.close()
from releng_tool import __version__ as releng_version
from releng_tool.util.log import err
from runpy import run_path
import inspect
import os
import sys
def releng_include(file_path):
    """
    include/execute a script

    The provided call will execute code at the provided file path. The path
    will be relative to the caller's script, unless an absolute path is
    provided. The executed script will be initialized with globals matching
    the caller's script.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # load "my-other-script" found alongside the current script
        releng_include('my-other-script')

    Args:
        file_path: the script to invoke
    """
    # inspect the caller's frame for both its globals and its script path
    caller = inspect.stack()[1]
    caller_frame, caller_script = caller[0], caller[1]

    # resolve a relative path against the directory of the caller's script
    if not os.path.isabs(file_path):
        file_path = os.path.join(os.path.dirname(caller_script), file_path)

    run_path(file_path, init_globals=caller_frame.f_globals)
def require_version(version, quiet=False, critical=True):
    """
    perform a required-version check

    Enables a caller to explicitly check for a required releng-tool version.
    Invoking this function with a dotted-separated ``version`` string, the
    string will be parsed and compared with the running releng-tool version.
    If the required version is met, this method will have no effect. In the
    event that the required version is not met, the exception ``SystemExit``
    will be raised if the critical flag is set; otherwise this call will
    return ``False``.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # ensure we are using releng-tool v1
        releng_require_version('1.0.0')

    Args:
        version: dotted-separated version string
        quiet (optional): whether or not to suppress output
        critical (optional): whether or not to stop execution on failure

    Returns:
        ``True`` if the version check is met; ``False`` if the version check
        has failed

    Raises:
        SystemExit: if the version check fails with ``critical=True``
    """
    def parse_version(ver):
        # compare numeric version components as integers; a plain string
        # comparison would incorrectly order '0.9' after '0.10'
        parts = []
        for part in ver.split('.'):
            if not part.isdigit():
                # stop at the first non-numeric component (e.g. 'dev0')
                break
            parts.append(int(part))
        return parts

    rv = True
    if version:
        requested = parse_version(version)
        current = parse_version(releng_version)
        rv = requested <= current

    if not rv:
        if not quiet:
            args = {
                'detected': releng_version,
                'required': version,
            }

            err('''
required releng-tool version check has failed
This project has indicated a required minimum version of releng-tool to
be installed on this system; however, an older version has been
detected:
(required) {required}
(detected) {detected}
Please update to a more recent version:
https://docs.releng.io/install/
'''.strip().format(**args))

        if critical:
            sys.exit(-1)

    return rv
class RelengToolException(Exception):
    """
    base exception for all custom releng-tool exceptions

    Callers may catch this type to handle any releng-tool-raised error.
    """
class RelengToolSilentException(RelengToolException):
    """
    exception to trigger a stop with an error message already printed

    Handlers should not emit an additional error message for this type.
    """
class RelengToolInvalidConfigurationScript(RelengToolSilentException):
    """
    exception thrown when a project's configuration file could not be loaded
    """
class RelengToolInvalidConfigurationSettings(RelengToolSilentException):
    """
    exception thrown when a project's configuration file has invalid settings
    """
class RelengToolInvalidOverrideConfigurationScript(RelengToolSilentException):
    """
    exception thrown when a project's override configuration file could
    not be loaded
    """
class RelengToolMissingConfigurationError(RelengToolException):
    """
    exception thrown when missing a project's configuration file

    Args:
        path: the location where a configuration file was expected
    """

    def __init__(self, path):
        super(RelengToolMissingConfigurationError, self).__init__('''\
missing configuration file
The configuration file cannot be found. Ensure the configuration file exists
in the working directory or the provided root directory:
{}
'''.strip().format(path))
class RelengToolMissingExecCommand(RelengToolException):
    """
    exception thrown when a missing a command for a package's exec call

    Args:
        pkg: the package an exec call was requested for
    """

    def __init__(self, pkg):
        super(RelengToolMissingExecCommand, self).__init__('''\
missing package command
A request has been made to execute a command for a package; however, no command
has been provided. Ensure after specifying an exec call that the following
argument defines the command to be executed.
releng-tool {}-exec "mycmd arg1 arg2"
'''.strip().format(pkg))
class RelengToolMissingPackagesError(RelengToolException):
    """
    exception thrown when a project's configuration does not provide any pkgs

    Args:
        path: the configuration file missing a package list
        key: the configuration key expected to hold the package list
    """

    def __init__(self, path, key):
        super(RelengToolMissingPackagesError, self).__init__('''\
no defined packages
The configuration file does not have any defined packages. Ensure a package
list exists with the name of packages to be part of the releng process:
{}
{} = ['liba', 'libb', 'libc']
'''.strip().format(path, key))
class RelengToolOutsidePathError(RelengToolException):
    """
    exception thrown when unexpectedly interacting outside of a path
    """
class RelengToolWarningAsError(RelengToolException):
    """
    exception thrown for a warning being triggered as an error
    """
class RelengPackage:
    """
    a releng package

    Holds the name, version, options and dependencies describing a single
    package tracked by a releng-tool project. All option attributes start in
    an "unset" state until package configuration is processed.

    Args:
        name: the name of the package
        version: the version of the package

    Attributes:
        asc_file: file containing ascii-armored data to validate this package
        build_dir: directory for a package's buildable content
        build_output_dir: build output directory for the package process
        build_subdir: override for a package's buildable content (if applicable)
        build_tree: the build tree directory for a package
        cache_dir: cache directory for the package (if applicable)
        cache_file: cache file for the package (if applicable)
        def_dir: directory for the package definition
        deps: list of dependencies for this package
        devmode: whether the package has a devmode revision
        devmode_ignore_cache: whether or not cache files should be ignored
        ext_modifiers: extension-defined modifiers (dict)
        extract_type: extraction type override (for extensions, if applicable)
        fetch_opts: fetch options (if applicable)
        fixed_jobs: fixed job count for this specific package
        git_config: git config options to apply (if applicable)
        git_depth: git fetch depth (if applicable)
        git_refspecs: additional git refspecs to fetch (if applicable)
        git_submodules: fetch any git submodules (if applicable)
        git_verify_revision: verify signed git revisions
        hash_file: file containing hashes to validate this package
        hash_relaxed: whether hash checks can be relaxed
        host_provides: host tools the package will provide
        install_type: install container for the package (target, staged, etc.)
        is_internal: whether or not this package is an project internal package
        license: license(s) of the package
        license_files: list of files in sources holding license information
        local_srcs: whether this package is acquired locally
        name: name of the package
        no_extraction: whether or not this package will extract
        nv: name-version value of the package
        patch_subdir: override for a package's patch base (if applicable)
        prefix: system root prefix override (if applicable)
        revision: revision to use to fetch from vcs (if applicable)
        site: site to acquire package assets
        skip_remote_config: whether or not to skip any remote configuration
        skip_remote_scripts: whether or not to skip any remote scripts
        strip_count: archive extraction strip count (if applicable)
        type: package type (script-based, cmake, etc.)
        vcs_type: vcs type of the package (git, file, etc.)
        version: package version
        (package type - common)
        build_defs: package-type build definitions
        build_env: package-type build environment overrides
        build_opts: package-type build option overrides
        conf_defs: package-type configuration definitions
        conf_env: package-type configuration environment overrides
        conf_opts: package-type configuration option overrides
        install_defs: package-type installation definitions
        install_env: package-type installation environment overrides
        install_opts: package-type installation option overrides
        (package type - autotools)
        autotools_autoreconf: flag to invoke autoreconf
        (package type - cmake)
        cmake_build_type: cmake build type to use
        cmake_noinstall: flag to disable the install stage for a cmake project
        (package type - make)
        make_noinstall: flag to disable the install stage for a make project
        (package type - meson)
        meson_noinstall: flag to disable the install stage for a meson project
        (other - python)
        python_interpreter: python interpreter to invoke stages with
        python_setup_type: setup type to build/install with
        (package type - scons)
        scons_noinstall: flag to disable the install stage for a scons project
    """

    def __init__(self, name, version):
        self.name = name
        self.version = version
        # name-version identifier; versionless packages use just the name
        self.nv = '{}-{}'.format(name, version) if version else name

        # option attributes which default to an unset (None) state
        unset_attrs = (
            # (commons)
            'asc_file',
            'build_dir',
            'build_subdir',
            'build_output_dir',
            'build_tree',
            'cache_dir',
            'cache_file',
            'def_dir',
            'devmode',
            'devmode_ignore_cache',
            'fetch_opts',
            'fixed_jobs',
            'hash_file',
            'hash_relaxed',
            'host_provides',
            'ext_modifiers',
            'extract_type',
            'install_type',
            'is_internal',
            'license',
            'license_files',
            'patch_subdir',
            'prefix',
            'revision',
            'site',
            'skip_remote_config',
            'skip_remote_scripts',
            'strip_count',
            'type',
            'vcs_type',
            # (package type - common)
            'build_defs',
            'build_env',
            'build_opts',
            'conf_defs',
            'conf_env',
            'conf_opts',
            'install_defs',
            'install_env',
            'install_opts',
            # (package type - autotools)
            'autotools_autoreconf',
            # (package type - cmake)
            'cmake_build_type',
            'cmake_noinstall',
            # (package type - make)
            'make_noinstall',
            # (package type - meson)
            'meson_noinstall',
            # (other - git)
            'git_config',
            'git_depth',
            'git_refspecs',
            'git_submodules',
            'git_verify_revision',
            # (other - python)
            'python_interpreter',
            'python_setup_type',
            # (package type - scons)
            'scons_noinstall',
        )
        for attr in unset_attrs:
            setattr(self, attr, None)

        # attributes with non-None defaults
        self.deps = []
        self.local_srcs = False
        self.no_extraction = False
from releng_tool.util.enum import Enum
import hashlib
class PkgKeyType(Enum):
    """
    package key type

    Enumeration of types supported when fetching configuration values defined
    by a package definition.

    Attributes:
        BOOL: boolean value
        DICT: dictionary value
        DICT_STR_STR: dictionary of string pairs value
        DICT_STR_STR_OR_STR: dictionary of string pairs or a string value
        DICT_STR_STR_OR_STRS: dictionary of string pairs or strings value
        STR: single string value
        STRS: one or more strings value
        INT_NONNEGATIVE: non-negative integer value
        INT_POSITIVE: positive integer value
    """
    BOOL = 'bool'
    DICT = 'dict'
    DICT_STR_STR = 'dict_str_str'
    DICT_STR_STR_OR_STR = 'dict_str_str_or_str'
    DICT_STR_STR_OR_STRS = 'dict_str_str_or_strs'
    STR = 'str'
    STRS = 'strs'
    INT_NONNEGATIVE = 'int_nonnegative'
    INT_POSITIVE = 'int_positive'
def pkg_cache_key(site):
    """
    generate a cache key for a provided package's site

    Packages may share caching data if their sites match. This call returns a
    calculated "cache key" (the SHA-1 digest of the site string) for a
    provided cache site.

    Args:
        site: the site to derive a cache key from

    Returns:
        the cache key
    """
    # sha1 is used here only as a stable identifier, not for security
    encoded_site = site.encode('utf_8')
    return hashlib.sha1(encoded_site).hexdigest()  # noqa: S324
def pkg_key(pkg, type_):
    """
    generate a package key for a given type string

    Generates a compatible "package key" for an unsanitized package name
    ``pkg`` of a specific key ``type_``. Characters which cannot appear in a
    key (spaces and select punctuation) are replaced with underscores and the
    result is uppercased before the key type is appended.

    For example, consider the package name "my-awesome-module". For a package
    key "VERSION", the complete key for this package is
    "MY_AWESOME_MODULE_VERSION".

    Args:
        pkg: the package name
        type_: the package key type

    Returns:
        the completed package key
    """
    sanitized = ''.join('_' if c in ' *-.:?|' else c for c in pkg)
    return '{}_{}'.format(sanitized.upper(), type_)
from releng_tool.exceptions import RelengToolException
class RelengToolInvalidPackageConfiguration(RelengToolException):
    """
    exception thrown when a package configuration has an issue

    Base type for all package-configuration-related errors below.
    """
class RelengToolConflictingConfiguration(RelengToolInvalidPackageConfiguration):
    """
    raised when two package configuration values conflict with each other
    """

    def __init__(self, args):
        # `args` is a format map providing: pkg_name, desc, pkg_key1, pkg_key2
        super(RelengToolConflictingConfiguration, self).__init__('''\
package has conflicting configuration values: {pkg_name}
({desc})
(keys: {pkg_key1}, {pkg_key2})
'''.strip().format(**args))
class RelengToolConflictingLocalSrcsPath(RelengToolInvalidPackageConfiguration):
    """
    raised when a detected local sourced package path matches the root directory
    """

    def __init__(self, args):
        # `args` is a format map providing: pkg_name, root, path
        super(RelengToolConflictingLocalSrcsPath, self).__init__('''\
conflicting local-sources package path and root directory: {pkg_name}
(root: {root})
(path: {path})
'''.strip().format(**args))
class RelengToolCyclicPackageDependency(RelengToolInvalidPackageConfiguration):
    """
    raised when a cyclic package dependency is detected
    """

    def __init__(self, args):
        # `args` is a format map providing: pkg_name
        super(RelengToolCyclicPackageDependency, self).__init__('''\
cyclic package dependency detected: {pkg_name}
'''.strip().format(**args))
class RelengToolInvalidPackageKeyValue(RelengToolInvalidPackageConfiguration):
    """
    raised when a package key is using an unsupported value
    """

    def __init__(self, args):
        # `args` is a format map providing: pkg_name, pkg_key, expected_type
        super(RelengToolInvalidPackageKeyValue, self).__init__('''\
package configuration has an invalid value: {pkg_name}
(key: {pkg_key}, expects: {expected_type})
'''.strip().format(**args))
class RelengToolMissingPackageRevision(RelengToolInvalidPackageConfiguration):
"""
raised when a required package revision has not been defined
"""
def __init__(self, args):
super(RelengToolMissingPackageRevision, self).__init__('''\
package defines vcs-type ({vcs_type}) but no version/revision: {pkg_name}
(missing either key: {pkg_key1}, {pkg_key2})
'''.strip().format(**args))
class RelengToolInvalidPackageScript(RelengToolInvalidPackageConfiguration):
    """
    raised when a package script has an issue loading (e.g. syntax error)

    Expected ``args`` format keys: ``traceback``, ``script``,
    ``description``.
    """
    def __init__(self, args):
        super(RelengToolInvalidPackageScript, self).__init__('''\
{traceback}
unable to load package script: {script}
{description}
'''.strip().format(**args))


class RelengToolMissingPackageScript(RelengToolInvalidPackageConfiguration):
    """
    raised when a package script cannot be found

    Expected ``args`` format keys: ``pkg_name``, ``script``.
    """
    def __init__(self, args):
        super(RelengToolMissingPackageScript, self).__init__('''\
unknown package provided: {pkg_name}
 (script) {script}
'''.strip().format(**args))


class RelengToolMissingPackageSite(RelengToolInvalidPackageConfiguration):
    """
    raised when a package site has not been defined with a vcs-type set

    Expected ``args`` format keys: ``vcs_type``, ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolMissingPackageSite, self).__init__('''\
package defines vcs-type ({vcs_type}) but no site: {pkg_name}
 (key: {pkg_key})
'''.strip().format(**args))


class RelengToolUnknownExtractType(RelengToolInvalidPackageConfiguration):
    """
    raised when a package defined an unknown extract type

    Expected ``args`` format keys: ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolUnknownExtractType, self).__init__('''\
unknown extract type value provided
 (package: {pkg_name}, key: {pkg_key})
'''.strip().format(**args))


class RelengToolUnknownInstallType(RelengToolInvalidPackageConfiguration):
    """
    raised when a package defined an unknown install type

    Expected ``args`` format keys: ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolUnknownInstallType, self).__init__('''\
unknown install type value provided
 (package: {pkg_name}, key: {pkg_key})
'''.strip().format(**args))


class RelengToolUnknownPackageType(RelengToolInvalidPackageConfiguration):
    """
    raised when a package defined an unknown package type

    Expected ``args`` format keys: ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolUnknownPackageType, self).__init__('''\
unknown package type value provided
 (package: {pkg_name}, key: {pkg_key})
'''.strip().format(**args))


class RelengToolUnknownPythonSetupType(RelengToolInvalidPackageConfiguration):
    """
    raised when a package defined an unknown python setup type

    Expected ``args`` format keys: ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolUnknownPythonSetupType, self).__init__('''\
unknown python setup type value provided
 (package: {pkg_name}, key: {pkg_key})
'''.strip().format(**args))


class RelengToolUnknownVcsType(RelengToolInvalidPackageConfiguration):
    """
    raised when a package defined an unknown vcs type

    Expected ``args`` format keys: ``pkg_name``, ``pkg_key``.
    """
    def __init__(self, args):
        super(RelengToolUnknownVcsType, self).__init__('''\
unknown vcs type value provided
 (package: {pkg_name}, key: {pkg_key})
'''.strip().format(**args))
class RelengToolStageFailure(RelengToolException):
    """
    exception thrown when a stage event has an issue

    Base type for all per-stage failures raised during package processing.
    """


class RelengToolBootstrapStageFailure(RelengToolStageFailure):
    """
    exception thrown when a bootstrap stage event has an issue
    """


class RelengToolBuildStageFailure(RelengToolStageFailure):
    """
    exception thrown when a build stage event has an issue
    """


class RelengToolConfigurationStageFailure(RelengToolStageFailure):
    """
    exception thrown when a configuration stage event has an issue
    """


class RelengToolExecStageFailure(RelengToolStageFailure):
    """
    exception thrown when an execute-request event has an issue
    """


class RelengToolExtractionStageFailure(RelengToolStageFailure):
    """
    exception thrown when an extraction stage event has an issue
    """


class RelengToolInstallStageFailure(RelengToolStageFailure):
    """
    exception thrown when an install stage event has an issue
    """


class RelengToolLicenseStageFailure(RelengToolStageFailure):
    """
    exception thrown when a license stage event has an issue
    """


class RelengToolPatchStageFailure(RelengToolStageFailure):
    """
    exception thrown when a patch stage event has an issue
    """


class RelengToolPostStageFailure(RelengToolStageFailure):
    """
    exception thrown when a post stage event has an issue
    """
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import verbose
from releng_tool.util.log import warn
import os
def initialize_sample(opts):
    """
    initialize a sample project

    Generates a sample project in the root directory to help new users or
    new projects get started. The root directory must be empty; a sample
    package definition, a ``.gitignore`` and a ``releng`` project script
    are written into it.

    Args:
        opts: options for this run (only ``root_dir`` is used)

    Returns:
        ``True`` if the sample project could be initialized; ``False`` if an
        issue has occurred generating the sample project
    """
    root_dir = opts.root_dir

    if not ensure_dir_exists(root_dir):
        return False

    if os.listdir(root_dir):
        err('unable to initialize sample project in a non-empty directory')
        return False

    sample_dir = os.path.join(root_dir, 'package', 'sample')

    success = True

    # sample package definition
    if ensure_dir_exists(sample_dir):
        sample_defs = os.path.join(sample_dir, 'sample')
        try:
            with open(sample_defs, 'w') as f:
                f.write('''\
#!/usr/bin/env python
# -*- coding: utf-8 -*-

SAMPLE_DEPENDENCIES = []
SAMPLE_LICENSE = ['<license name>']
SAMPLE_LICENSE_FILES = ['<license file>']
SAMPLE_SITE = '<location for sources>'
SAMPLE_TYPE = '<package-type>'
SAMPLE_VERSION = '<package-version>'
''')

            verbose('written sample file')
        except IOError as e:
            err('unable to generate a sample file')
            verbose(str(e))
            success = False
    else:
        success = False

    # .gitignore
    try:
        project_gitignore = os.path.join(root_dir, '.gitignore')  # (assumption)
        with open(project_gitignore, 'w') as f:
            f.write('''\
# releng-tool
/cache/
/dl/
/output/
.releng-flag-*
''')

        verbose('written .gitignore file')
    except IOError as e:
        err('unable to generate a .gitignore file')
        verbose(str(e))
        success = False

    # releng project
    try:
        project_defs = os.path.join(root_dir, 'releng')
        with open(project_defs, 'w') as f:
            f.write('''\
#!/usr/bin/env python
# -*- coding: utf-8 -*-

packages = [
    'sample',
]
''')

        verbose('written releng file')
    except IOError as e:
        err('unable to generate a releng file')
        verbose(str(e))
        success = False

    if success:
        log('initialized empty releng-tool project')
    else:
        warn('partially initialized a releng-tool project')
    return success
from io import open
from releng_tool.tool import RelengTool
from releng_tool.util.log import err
import sys
try:
import configparser
except ImportError:
import ConfigParser as configparser
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
#: executable used to run git commands
GIT_COMMAND = 'git'

#: list of environment keys to filter from an environment dictionary
GIT_SANITIZE_ENV_KEYS = [
    # disable repository location overrides
    'GIT_ALTERNATE_OBJECT_DIRECTORIES',
    'GIT_DIR',
    'GIT_INDEX_FILE',
    'GIT_OBJECT_DIRECTORY',
    'GIT_WORK_TREE',
    # remove the possibility for authenticated prompts
    'GIT_ASKPASS',
    'SSH_ASKPASS',
    # misc
    'GIT_FLUSH',
    # perforce-related options
    'P4AUDIT',
    'P4CLIENT',
    'P4CLIENTPATH',
    'P4CONFIG',
    'P4PORT',
    'P4PASSWD',
]

#: dictionary of environment entries appended to the environment dictionary
GIT_EXTEND_ENV = {
    # prevent the terminal prompt from being shown
    'GIT_TERMINAL_PROMPT': '0',
}
class GitTool(RelengTool):
    """
    git host tool

    Provides additional helper methods for git-based tool interaction.
    """

    def extract_submodule_revision(self, git_dir):
        """
        extract a submodule revision

        Attempts to extract the HEAD reference of a submodule based off a
        provided Git repository. This is to help support processing Git
        submodules which do not have a branch/version explicitly set for a
        module, which is required for (at least) recursive submodule
        processing.

        Args:
            git_dir: the git repository

        Returns:
            the revision; ``None`` when a revision cannot be extracted
        """
        rv, ref = self.execute_rv('--git-dir=' + git_dir, 'show-ref', '--head')
        if rv != 0:
            err('failed to extract a submodule revision')
            return None

        # a `--head` fetch may fetch more than one reference; extract the first
        # entry and remove any known ref prefix from it
        revision = ref.split(None, 2)[1]
        if revision.startswith('refs/heads/'):
            revision = revision[len('refs/heads/'):]
        elif revision.startswith('refs/remotes/origin/'):
            revision = revision[len('refs/remotes/origin/'):]

        return revision

    def parse_cfg_file(self, target):
        """
        return a configuration parser for a provided git configuration file

        Returns a prepared configuration parser based of a Git configuration
        file provided. In the event that the file cannot be parsed, this call
        will return a ``None`` value.

        Args:
            target: the file to parse

        Returns:
            the parser; ``None`` when a parsing error is detected
        """
        with open(target, mode='r', encoding='utf_8') as f:
            # read the file contents as-is; previously this joined
            # `readlines()` output with newlines, which doubled every line
            # ending and injected blank lines into the parsed data
            data = f.read()

        return self.parse_cfg_str(data)

    def parse_cfg_str(self, value):
        """
        return a configuration parser for a provided git configuration string

        Returns a prepared configuration parser based of a Git configuration
        value provided. In the event that the value cannot be parsed, this call
        will return a ``None`` value.

        Args:
            value: the value to parse

        Returns:
            the parser; ``None`` when a parsing error is detected
        """
        cfg = configparser.ConfigParser(allow_no_value=True)
        try:
            if sys.version_info >= (3, 0):
                cfg.read_string(value)
            else:
                # strip whitespaces from lines for python 2.7
                value = '\n'.join([line.strip() for line in value.splitlines()])
                fp = StringIO(value)
                cfg.readfp(fp)  # pylint: disable=W1505
        except configparser.Error:
            return None

        return cfg


#: git host tool helper
GIT = GitTool(GIT_COMMAND,
    env_sanitize=GIT_SANITIZE_ENV_KEYS, env_include=GIT_EXTEND_ENV)
from releng_tool.util.io import _execute
from releng_tool.util.io import execute
from releng_tool.util.log import debug
from releng_tool.util.log import err
from releng_tool.util.string import is_sequence_not_string
import os
import re
class RelengTool(object):
    """
    a host tool

    Provides a series of host tools methods to assist in validating the
    existence of a host tool as well as the execution of a host tool.

    Attributes:
        detected: tracking whether or not a tool is available on the host system

    Args:
        tool: the file name of the tool
        exists_args (optional): argument value to check for existence (no-op)
        env_sanitize (optional): environment variables to sanitize
        env_include (optional): environment variables to always include
    """
    # class-level cache shared by all instances; maps a tool's (possibly
    # overridden) name to the result of its host detection check
    detected = {}

    def __init__(self, tool, exists_args=None, env_sanitize=None,
            env_include=None):
        self.include = env_include
        self.sanitize = env_sanitize

        # allow a system to override a host tool path
        # (e.g. `RELENG_GIT` can point to a specific `git` executable)
        override_tool_key = 'RELENG_' + re.sub(r'[^A-Z0-9]', '', tool.upper())
        self.tool = os.environ.get(override_tool_key, tool)

        if exists_args is not None:
            self.exists_args = exists_args
        else:
            # default to a version request, which most tools support as a
            # cheap no-op invocation
            self.exists_args = ['--version']

    def execute(self, args=None, cwd=None, quiet=False, env=None, poll=False,
            capture=None):
        """
        execute the host tool with the provided arguments (if any)

        Runs the host tool described by ``args`` until completion.

        Args:
            args (optional): the list of arguments for the tool
            cwd (optional): working directory to use
            quiet (optional): whether or not to suppress output
            env (optional): environment variables to include
            poll (optional): force polling stdin/stdout for output data
            capture (optional): list to capture output into

        Returns:
            ``True`` if the execution has completed with no error; ``False`` if
            the execution has failed
        """
        rv = self._execute(args=args, cwd=cwd, quiet=quiet, env=env, poll=poll,
            capture=capture)
        return (rv == 0)

    def execute_rv(self, *args, **kwargs):
        """
        execute the host tool with the provided arguments (if any)

        Runs the host tool described by ``args`` until completion.

        Args:
            *args (optional): arguments to add to the command
            **cwd: working directory to use
            **env: environment variables to include

        Returns:
            the return code of the execution request
        """
        out = []
        # always run quietly while capturing output; callers receive the
        # captured text joined into a single string
        rv = self._execute(list(args),
            cwd=kwargs.get('cwd'),
            env=kwargs.get('env'),
            capture=out, quiet=True)
        return rv, '\n'.join(out)

    def _execute(self, args=None, cwd=None, quiet=False, env=None, poll=False,
            capture=None):
        """
        execute the host tool with the provided arguments (if any)

        Runs the host tool described by ``args`` until completion.

        Args:
            args (optional): the list of arguments for the tool
            cwd (optional): working directory to use
            quiet (optional): whether or not to suppress output
            env (optional): environment variables to include
            poll (optional): force polling stdin/stdout for output data
            capture (optional): list to capture output into

        Returns:
            the return code of the execution request
        """
        if not self.exists():
            return 1

        if args and not is_sequence_not_string(args):
            err('invalid argument type provided into execute (should be list): '
                + str(args))
            return 1

        # build the final environment only when needed; merge order matters --
        # sanitized keys are removed first, instance-included entries are
        # applied next, and caller-provided entries win over everything
        final_env = None
        if self.include or self.sanitize or env:
            final_env = os.environ.copy()
            if self.sanitize:
                for key in self.sanitize:
                    final_env.pop(key, None)
            if self.include:
                final_env.update(self.include)
            if env:
                final_env.update(env)

        final_args = self._invoked_tool()
        if args:
            final_args.extend(args)

        return _execute(final_args, cwd=cwd, env=final_env, quiet=quiet,
            critical=False, poll=poll, capture=capture)

    def _invoked_tool(self):
        """
        returns the tool arguments to be invoked

        Provides the arguments used to invoke the tool for an execution
        request. This is typically the executable's name/path; however,
        in some scenarios, a tool may override how a tool is invoked.

        Returns:
            tool arguments to invoke
        """
        return [self.tool]

    def exists(self):
        """
        return whether or not the host tool exists

        Returns whether or not the tool is available on the host for use.

        Returns:
            ``True``, if the tool exists; ``False`` otherwise
        """
        # detection results are cached on the class, so each tool is only
        # probed once per run
        if self.tool in RelengTool.detected:
            return RelengTool.detected[self.tool]

        if execute([self.tool] + self.exists_args, quiet=True, critical=False):
            debug('{} tool is detected on this system', self.tool)
            RelengTool.detected[self.tool] = True
        else:
            debug('{} tool is not detected on this system', self.tool)
            RelengTool.detected[self.tool] = False

        return RelengTool.detected[self.tool]
from __future__ import unicode_literals
from functools import partial
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.io import path_remove
from releng_tool.util.log import err
from shutil import Error as ShutilError
from shutil import copyfile as shutil_copyfile
from shutil import copystat as shutil_copystat
import os
import sys
if sys.version_info[0] >= 3: # noqa: PLR2004
    # python 3.x: never follow symbolic links when copying file data/stats;
    # symlink entries are handled explicitly by the copy helpers below
    _copyfile = partial(shutil_copyfile, follow_symlinks=False)
    _copystat = partial(shutil_copystat, follow_symlinks=False)
else:
    # python 2.x: the `follow_symlinks` option is not available
    _copyfile = shutil_copyfile
    _copystat = shutil_copystat
def path_copy(src, dst, quiet=False, critical=True, dst_dir=None):
    """
    copy a file or directory into a target file or directory

    This call will attempt to copy a provided file or directory, defined by
    ``src`` into a destination file or directory defined by ``dst``. If ``src``
    is a file, then ``dst`` is considered to be a file or directory; if ``src``
    is a directory, ``dst`` is considered a target directory. If a target
    directory or target file's directory does not exist, it will be
    automatically created. In the event that a file or directory could not be
    copied, an error message will be output to standard error (unless ``quiet``
    is set to ``True``). If ``critical`` is set to ``True`` and the specified
    file/directory could not be copied for any reason, this call will issue a
    system exit (``SystemExit``).

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # (stage)
        # my-file
        releng_copy('my-file', 'my-file2')
        # (stage)
        # my-file
        # my-file2
        releng_copy('my-file', 'my-directory/')
        # (stage)
        # my-directory/my-file
        # my-file
        # my-file2
        releng_copy('my-directory/', 'my-directory2/')
        # (stage)
        # my-directory/my-file
        # my-directory2/my-file
        # my-file
        # my-file2

    Args:
        src: the source directory or file
        dst: the destination directory or file\\* (\\*if ``src`` is a file)
        quiet (optional): whether or not to suppress output
        critical (optional): whether or not to stop execution on failure
        dst_dir (optional): force hint that the destination is a directory

    Returns:
        ``True`` if the copy has completed with no error; ``False`` if the copy
        has failed

    Raises:
        SystemExit: if the copy operation fails with ``critical=True``
    """
    success = False
    errmsg = None

    try:
        if os.path.isfile(src):
            attempt_copy = True

            # determine which directory must exist before copying; a
            # `dst_dir` hint treats `dst` itself as the target folder
            if dst_dir:
                base_dir = dst
            else:
                base_dir = os.path.dirname(dst)

            if base_dir and not os.path.isdir(base_dir):
                attempt_copy = ensure_dir_exists(base_dir, quiet=quiet)
            else:
                attempt_copy = True

            if attempt_copy:
                # copying a file into an existing directory places the file
                # inside of it, keeping the source's base name
                if os.path.isdir(dst):
                    dst = os.path.join(dst, os.path.basename(src))

                if os.path.islink(src):
                    # re-create the symlink instead of copying its target;
                    # remove any existing file/link occupying the destination
                    target = os.readlink(src)
                    if os.path.islink(dst) or os.path.isfile(dst):
                        path_remove(dst)
                    os.symlink(target, dst)
                else:
                    _copyfile(src, dst)
                    _copystat(src, dst)
                success = True
        elif os.path.exists(src):
            if src == dst:
                errmsg = "'{!s}' and '{!s}' " \
                    "are the same folder".format(src, dst)
            elif _copy_tree(src, dst, quiet=quiet, critical=critical):
                success = True
        else:
            errmsg = 'source does not exist: {}'.format(src)
    except (IOError, ShutilError) as e:
        errmsg = str(e)

    if not quiet and errmsg:
        err('unable to copy source contents to target location\n'
            '    {}', errmsg)

    if not success and critical:
        sys.exit(-1)
    return success
def path_copy_into(src, dst, quiet=False, critical=True):
    """
    copy a file or directory into a target directory

    Copies the file or directory ``src`` into the directory ``dst``,
    automatically creating the destination directory when it does not yet
    exist. On failure, an error message is reported to standard error
    (unless ``quiet`` is set), and when ``critical`` is set the call exits
    the application (``SystemExit``).

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # (stage)
        # my-file
        releng_copy_into('my-file', 'my-directory')
        # (stage)
        # my-directory/my-file
        # my-file
        releng_copy_into('my-directory', 'my-directory2')
        # (stage)
        # my-directory/my-file
        # my-directory2/my-file
        # my-file

    Args:
        src: the source directory or file
        dst: the destination directory
        quiet (optional): whether or not to suppress output
        critical (optional): whether or not to stop execution on failure

    Returns:
        ``True`` if the copy has completed with no error; ``False`` if the
        copy has failed

    Raises:
        SystemExit: if the copy operation fails with ``critical=True``
    """
    # a copy-into request is a plain copy with a forced directory hint
    return path_copy(src, dst, quiet=quiet, critical=critical, dst_dir=True)
def _copy_tree(src_folder, dst_folder, quiet=False, critical=True):
if not ensure_dir_exists(dst_folder, quiet=quiet, critical=critical):
return False
for entry in os.listdir(src_folder):
src = os.path.join(src_folder, entry)
dst = os.path.join(dst_folder, entry)
if os.path.islink(src):
target = os.readlink(src)
if os.path.islink(dst) or os.path.isfile(dst):
path_remove(dst)
os.symlink(target, dst)
_copystat(src, dst)
elif os.path.isdir(src):
_copy_tree(src, dst, quiet=quiet, critical=critical)
else:
_copyfile(src, dst)
_copystat(src, dst)
_copystat(src_folder, dst_folder)
return True | /releng-tool-0.17.0.tar.gz/releng-tool-0.17.0/releng_tool/util/io_copy.py | 0.606848 | 0.151906 | io_copy.py | pypi |
import os
try:
    # python 2.x: `basestring` covers both `str` and `unicode` values
    basestring # noqa: B018 pylint: disable=E0601
except NameError:
    # python 3.x: all strings are `str`
    basestring = str

try:
    from collections.abc import Sequence
except ImportError:
    # python 2.x fallback (`collections.abc` is python 3.3+)
    from collections import Sequence # pylint: disable=W1512
def expand(obj, kv=None):
    """
    perform variable expansion on strings

    This expand utility method will attempt to expand variables in detected
    string types. For a detected string which contains substrings in the form
    of ``$value`` or ``${value}``, these substrings will be replaced with
    their respective key-value (if provided) or environment variable value.
    For substrings which do not have a matching variable value, the substrings
    will be replaced with an empty value. If a dictionary is provided, keys
    and values will be checked if they can be expanded on. If a list/set is
    provided, each value which be checked if it can be expanded on. If a
    dictionary key is expanded to match another key, a key-value pair can be
    dropped. If a set may result in a smaller set if expanded values result in
    duplicate entries.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        import os
        ...

        os.environ['MY_ENV'] = 'my-environment-variable'
        value = releng_expand('$MY_ENV')
        print(value)
        # will output: my-environment-variable

    Args:
        obj: the object
        kv (optional): key-values pairs to use

    Returns:
        the expanded object
    """
    if isinstance(obj, basestring):
        try:
            idx = obj.index('$')
        except ValueError:
            # no variable marker at all; nothing to expand
            return obj

        # build the effective key-value map; provided pairs take precedence
        # over the process environment
        if kv:
            final_kv = dict(os.environ)
            final_kv.update(kv)
        else:
            final_kv = os.environ

        rv = obj[:idx]
        objlen = len(obj)
        while idx < objlen:
            c = obj[idx:idx + 1]
            if c == '$':
                nc = obj[idx + 1:idx + 2]
                if not nc or nc == '$' or nc.isspace():
                    # a trailing `$`, an escaped `$$` or a `$ ` sequence
                    # emits a single literal `$`
                    rv += c
                    idx += 1
                elif nc == '{':
                    try:
                        eidx = obj.index('}', idx + 1)
                        var = obj[idx + 2:eidx]
                        if var in final_kv:
                            rv += final_kv[var]
                        # always advance past the closing brace -- an unknown
                        # variable expands to an empty value; previously the
                        # index only advanced for known variables, leaving
                        # the `{var}` text behind for unknown ones
                        idx = eidx
                    except ValueError:
                        # unterminated `${`; keep the remainder verbatim
                        rv += obj[idx:]
                        break
                else:
                    # `$var` form: consume alphanumeric/underscore characters
                    var = nc
                    idx += 1
                    nc = obj[idx + 1:idx + 2]
                    while nc and nc != '$' and (nc.isalnum() or nc == '_'):
                        var += nc
                        idx += 1
                        nc = obj[idx + 1:idx + 2]
                    if var in final_kv:
                        rv += final_kv[var]
            else:
                rv += c
            idx += 1
    elif isinstance(obj, dict):
        rv = {}
        for key, value in obj.items():
            rv[expand(key, kv=kv)] = expand(value, kv=kv)
    elif isinstance(obj, list):
        rv = [expand(value, kv=kv) for value in obj]
    elif isinstance(obj, set):
        rv = {expand(value, kv=kv) for value in obj}
    else:
        # non-string/container types are returned untouched
        rv = obj

    return rv
def interpret_dictionary_strings(obj):
    """
    interpret a dictionary of key-value strings from the provided object

    Validates that the provided object is a dictionary whose keys are all
    strings and whose values are all strings or ``None``. The dictionary is
    returned unchanged when valid; any other shape yields ``None``.

    Args:
        obj: the object to interpret

    Returns:
        the dictionary; otherwise ``None``
    """
    if not isinstance(obj, dict):
        return None

    for key, value in obj.items():
        if not isinstance(key, basestring):
            return None
        if value is not None and not isinstance(value, basestring):
            return None

    return obj
def interpret_string(obj):
    """
    interpret a string, if any, from the provided object

    Returns the provided object when it is a string value; any other type
    yields ``None``.

    Args:
        obj: the object to interpret

    Returns:
        the string; otherwise ``None``
    """
    return obj if isinstance(obj, basestring) else None
def interpret_strings(obj):
    """
    interpret strings, if any, from the provided object

    Normalizes the provided object into an iterable of strings: a single
    string becomes a one-element list; a sequence is returned as-is when
    every entry is a string. Any other shape yields ``None``.

    Args:
        obj: the object to interpret

    Returns:
        sequence of zero or more strings; otherwise ``None``
    """
    if not isinstance(obj, Sequence):
        return None

    if isinstance(obj, basestring):
        return [obj]

    for entry in obj:
        if not isinstance(entry, basestring):
            return None

    return obj
def interpret_zero_to_one_strings(obj):
    """
    interpret a dictionary of zero-to-one strings from the provided object

    A zero-to-one string is a string-based key which may or may not have an
    associated value assigned to it. A valid dictionary (string keys; string
    or ``None`` values) is returned as-is. A single string, or a sequence of
    strings, is converted into a dictionary mapping each name to an empty
    string. Any other shape yields ``None``.

    Args:
        obj: the object to interpret

    Returns:
        the dictionary; otherwise ``None``
    """
    if isinstance(obj, dict):
        for key, value in obj.items():
            if not isinstance(key, basestring):
                return None
            if value is not None and not isinstance(value, basestring):
                return None
        return obj

    if isinstance(obj, Sequence):
        if isinstance(obj, basestring):
            return {obj: ''}

        entries = {}
        for entry in obj:
            if not isinstance(entry, basestring):
                return None
            entries[entry] = ''
        return entries

    return None
def is_sequence_not_string(obj):
    """
    return whether or not the provided object is a non-string sequence

    Returns ``True`` for sequence types (lists, tuples, ...) which are not
    string values; ``False`` otherwise.

    Args:
        obj: the object to interpret

    Returns:
        whether or not a non-string sequence
    """
    if isinstance(obj, basestring):
        return False
    return isinstance(obj, Sequence)
# A flag (or option) can be driven by the existence of a file (i.e. a file
# flag). When an instance operates with file flags, two modes are approached in
# this implementation. Either the state of file flags are unknown and are
# attempted to be read. Once read, the instance can handle their process
# accordingly based off these flag states. The other path is if there is a
# request to configure file flags. If a file flag is being configured, the
# intent would be to configure the one (or multiple) file flag state and have
# the running instance shutdown.
from releng_tool.util.enum import Enum
from releng_tool.util.io import touch
from releng_tool.util.log import err
import os
# (uses the project's py2/3-compatible enum helper imported above)
class FileFlag(Enum):
    """
    file flag result states

    Attributes:
        CONFIGURED: file flag was configured
        EXISTS: file flag exists
        NOT_CONFIGURED: unable to configure the file flag
        NO_EXIST: file flag does not exist
    """
    CONFIGURED = 'configured'
    EXISTS = 'exists'
    NOT_CONFIGURED = 'not_configured'
    NO_EXIST = 'no_exist'
def check_file_flag(file):
    """
    check a file flag

    Determine whether a file flag is currently set by testing if its
    backing file exists on the system.

    Args:
        file: the filename

    Returns:
        ``FileFlag.EXISTS`` if the flag is enabled; ``FileFlag.NO_EXIST``
        otherwise
    """
    # a state query is simply a process request without a configure option
    return process_file_flag(file, None)
def process_file_flag(file, flag, quiet=False):
    """
    process a file flag event

    Either configures a file flag or reads its current state. When ``flag``
    is truthy, the target file flag is created (or refreshed) on the system
    to mark the flag as configured. When ``flag`` is ``None``, the file's
    existence is checked to report whether the flag is enabled. Any other
    falsy ``flag`` value reports the flag as not enabled.

    Args:
        file: the filename
        flag: the flag option to used; ``None`` to check flag state
        quiet: suppression of any error messages to standard out

    Returns:
        ``FileFlag.EXISTS`` if the flag is enabled; ``FileFlag.NO_EXIST`` if
        the flag is not enabled; ``FileFlag.CONFIGURED`` if the flag was
        configured as requested; ``FileFlag.NOT_CONFIGURED`` if the flag
        could not be configured as requested
    """
    if flag:
        # attempt to update the access/modified times of the flag file; if
        # the touch fails (e.g. permission issues), fall back on the file's
        # existence to still report the flag as configured
        if touch(file) or os.path.isfile(file):
            return FileFlag.CONFIGURED

        if not quiet:
            err('unable to configure file flag: {}', file)
        return FileFlag.NOT_CONFIGURED

    if flag is None and os.path.isfile(file):
        return FileFlag.EXISTS

    return FileFlag.NO_EXIST
class TopologicalSorter:
    """
    utility used to generate a topological sorted list

    Produces a depth-first-search topological ordering for vertices of a
    prepared graph. Leaf nodes of a visited vertex are supplied by the
    configured ``sort_func`` callback, and the resulting order lists leaf
    nodes before the vertices which depend on them. Multiple vertices may
    be sorted through the same instance, provided the graph's structure
    (i.e. its edges) does not change between calls.

    Args:
        sort_func: a function returning the leaf nodes of a vertex being
            visited during the sorting process

    Attributes:
        sorted: list of currently sorted objects
    """
    # internal visit markers held in `_state`: 'T' (temporary) flags a
    # vertex on the active visit path -- encountering it again indicates a
    # cycle; 'P' (permanent) flags a fully processed vertex

    def __init__(self, sort_func):
        assert sort_func, 'no sort function provided'
        self.sorted = []
        self._sort_func = sort_func
        self._state = {}

    def sort(self, obj):
        """
        generate/update a topological sorted list from an object in a graph

        Visits the graph vertex ``obj`` depth-first, folding it and any of
        its not-yet-processed leaf nodes into the running sorted list.

        Args:
            obj: vertex in a graph to sort from

        Returns:
            the sorted list of objects; ``None`` when a cyclic graph has
            been detected

            The returned list should not be directly modified until all
            sorting calls (if multiple sort operations are desired) are
            completed.
        """
        return self.sorted if self._visit(obj) else None

    def reset(self):
        """
        reset the state of the sorter

        Clears all tracked visit markers and empties the known sorted list
        of vertices.
        """
        self.sorted = []
        self._state = {}

    def _visit(self, obj):
        """
        visit a vertex for the depth-first topological sort

        Args:
            obj: vertex in a graph to sort from

        Returns:
            ``True`` when the visit (and all nested visits) succeeded;
            ``False`` when a cyclic graph has been detected
        """
        mark = self._state.get(obj, '')
        if mark == 'P':
            # already fully processed by an earlier visit
            return True
        if mark == 'T':
            # revisiting a vertex on the active path -- cyclic graph
            return False

        self._state[obj] = 'T'
        if not all(self._visit(leaf) for leaf in self._sort_func(obj)):
            return False
        self._state[obj] = 'P'

        self.sorted.append(obj)
        return True
from __future__ import print_function
from releng_tool.exceptions import RelengToolWarningAsError
import sys
#: flag to track the enablement of debug messages
RELENG_LOG_DEBUG_FLAG = False
#: flag to track the disablement of colorized messages
RELENG_LOG_NOCOLOR_FLAG = False
#: flag to track the enablement of verbose messages
RELENG_LOG_VERBOSE_FLAG = False
#: flag to track if warnings should be treated as errors
RELENG_LOG_WERROR_FLAG = False
def log(msg, *args):
"""
log a message
Logs a (normal) message to standard out with a trailing new line.
.. code-block:: python
log('this is a message')
Args:
msg: the message
*args: an arbitrary set of positional and keyword arguments used when
generating a formatted message
"""
__log('', '', msg, sys.stdout, *args)
def debug(msg, *args):
    """
    log a debug message

    Writes a debug message to standard out, terminated with a newline.
    Output is suppressed unless debug logging has been enabled (see
    ``releng_log_configuration``).

    .. code-block:: python

        debug('this is a debug message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    if not RELENG_LOG_DEBUG_FLAG:
        return
    __log('(debug) ', '\033[2m', msg, sys.stdout, *args)
def err(msg, *args):
    """
    log an error message

    Writes an error message to standard error, terminated with a newline
    and colorized red when colors are enabled. Standard out is flushed
    first so interleaved stream output stays ordered.

    .. code-block:: python

        err('this is an error message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    sys.stdout.flush()
    __log('(error) ', '\033[1;31m', msg, sys.stderr, *args)
    sys.stderr.flush()
def hint(msg, *args):
    """
    log a hint message

    Writes a hint message to standard out, terminated with a newline and
    colorized cyan when colors are enabled.

    .. code-block:: python

        hint('this is a hint message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    __log('', '\033[1;36m', msg, sys.stdout, *args)
def is_verbose():
    """
    report if the instance is configured with verbose messaging

    Lets callers skip preparing a potentially expensive message when
    verbose output is disabled:

    .. code-block:: python

        if is_verbose():
            verbose(generate_info())

    Returns:
        whether or not verbose messaging is enabled
    """
    return RELENG_LOG_VERBOSE_FLAG
def note(msg, *args):
    """
    log a notification message

    Writes a notification message to standard out, terminated with a
    newline and rendered with inverted colors when colors are enabled.

    .. code-block:: python

        note('this is a note message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    __log('', '\033[7m', msg, sys.stdout, *args)
def success(msg, *args):
    """
    log a success message

    Writes a success message to standard out, terminated with a newline
    and colorized green when colors are enabled.

    .. code-block:: python

        success('this is a success message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    __log('(success) ', '\033[1;32m', msg, sys.stdout, *args)
def verbose(msg, *args):
    """
    log a verbose message

    Writes a verbose message to standard out, terminated with a newline
    and dimmed when colors are enabled. Output is suppressed unless
    verbose logging has been enabled (see ``releng_log_configuration``).

    .. code-block:: python

        verbose('this is a verbose message')

    Args:
        msg: the message
        *args: format arguments applied to the message (if any)
    """
    if not RELENG_LOG_VERBOSE_FLAG:
        return
    __log('(verbose) ', '\033[2m', msg, sys.stdout, *args)
def warn(msg, *args):
    """
    log a warning message

    Logs a warning message to standard error with a trailing new line and (if
    enabled) a purple colorization. When warnings-are-errors is configured,
    the (formatted) message is raised as an error instead of being printed.

    .. code-block:: python

        warn('this is a warning message')

    Args:
        msg: the message
        *args: an arbitrary set of positional and keyword arguments used when
            generating a formatted message

    Raises:
        RelengToolWarningAsError: when warnings-are-errors is configured
    """
    sys.stdout.flush()
    if RELENG_LOG_WERROR_FLAG:
        # match __log's behavior by formatting only when arguments are
        # provided; otherwise a message containing literal braces would
        # raise from str.format instead of the intended warning error
        raise RelengToolWarningAsError(msg.format(*args) if args else msg)
    __log('(warn) ', '\033[1;35m', msg, sys.stderr, *args)
    sys.stderr.flush()
def __log(prefix, color, msg, file, *args):
    """
    utility logging method

    Applies the requested color (unless colors are globally disabled),
    the prefix and any formatting arguments to the message before
    printing it to ``file``.

    Args:
        prefix: prefix to add to the message
        color: the color to apply to the message
        msg: the message
        file: the file to write to
        *args: format arguments applied to the message (if any)
    """
    if RELENG_LOG_NOCOLOR_FLAG:
        color = ''
        post = ''
    else:
        post = '\033[0m'

    msg = str(msg)
    if args:
        msg = msg.format(*args)

    print(color + prefix + msg + post, file=file)
def releng_log_configuration(debug_, nocolor, verbose_, werror):
    """
    configure the global logging state of the running instance

    Adjusts the module-level logging flags. This is best invoked near the
    start of the process's life cycle to provide consistent output; the
    individual logging helpers still work without it (defaults apply).

    Args:
        debug_: toggle the enablement of debug messages
        nocolor: toggle the disablement of colorized messages
        verbose_: toggle the enablement of verbose messages
        werror: toggle the enablement of warnings-are-errors
    """
    global RELENG_LOG_DEBUG_FLAG
    global RELENG_LOG_NOCOLOR_FLAG
    global RELENG_LOG_VERBOSE_FLAG
    global RELENG_LOG_WERROR_FLAG
    RELENG_LOG_DEBUG_FLAG = debug_
    RELENG_LOG_NOCOLOR_FLAG = nocolor
    RELENG_LOG_VERBOSE_FLAG = verbose_
    RELENG_LOG_WERROR_FLAG = werror
import os
import types
def extend_script_env(env, extra):
    """
    extend a partially filtered environment (globals) for a run_path event

    When invoking ``run_path`` [1], a dictionary of globals is provided to
    pre-populate a script's globals before execution. Inside the releng
    process, ``run_path`` is invoked several times to help load settings and
    package-specific scripts, and the globals produced by one script can be
    carried into the next. Not every global should be carried over, though:
    Python magic attributes, functions and imported modules are filtered
    out. This method extends ``env`` with the filtered contents of
    ``extra``.

    [1]: https://docs.python.org/3/library/runpy.html

    Args:
        env: the environment to update
        extra: the globals to add to the environment

    Returns:
        the same environment passed in
    """
    unwanted_types = (
        types.BuiltinFunctionType,  # imported built-in functions
        types.FunctionType,         # imported functions
        types.ModuleType,           # imported modules
    )

    filtered = {
        key: value
        for key, value in extra.items()
        # drop python magic objects (if any) and unwanted value types
        if not (key.startswith('__') and key.endswith('__'))
        and not isinstance(value, unwanted_types)
    }

    env.update(filtered)
    return env
# sentinel used to detect when no value argument was supplied to env_value
__ENV_VALUE_DEFAULT = object()


def env_value(key, value=__ENV_VALUE_DEFAULT):
    """
    helper to easily fetch or configure an environment variable

    Provides a caller a simple method to fetch or configure an environment
    variable for the current context -- equivalent to reading from or
    managing a key-value with ``os.environ`` directly. When ``value`` is
    omitted, the environment variable's current value (or ``None``) is
    returned. When ``value`` is explicitly ``None``, any set environment
    variable is removed.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # get an environment variable
        value = releng_env('KEY')

        # set an environment variable
        releng_env('KEY', 'VALUE')

    Args:
        key: the environment key
        value (optional): the environment value to set

    Returns:
        the value of the environment variable
    """
    if value is __ENV_VALUE_DEFAULT:
        return os.environ.get(key)

    if value is None:
        os.environ.pop(key, None)
    else:
        os.environ[key] = value

    return value
from __future__ import unicode_literals
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.io import path_remove
from releng_tool.util.log import err
from shutil import move
import os
import stat
import sys
def path_move(src, dst, quiet=False, critical=True, dst_dir=None):
    """
    move a file or directory into a target file or directory

    This call will attempt to move a provided file or directory's contents,
    defined by ``src`` into a destination file or directory defined by ``dst``.
    If ``src`` is a file, then ``dst`` is considered to be a file or directory;
    if ``src`` is a directory, ``dst`` is considered a target directory. If a
    target directory or target file's directory does not exist, it will be
    automatically created.

    In the event that a file or directory could not be moved, an error message
    will be output to standard error (unless ``quiet`` is set to ``True``). If
    ``critical`` is set to ``True`` and the specified file/directory could not
    be moved for any reason, this call will issue a system exit
    (``SystemExit``).

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # (input)
        # my-directory/another-file
        # my-file
        # my-file2
        releng_move('my-file', 'my-file3')
        releng_move('my-directory/', 'my-directory2/')
        releng_move('my-file2', 'my-directory2/')
        # (output)
        # my-directory2/another-file
        # my-directory2/my-file2
        # my-file3

    Args:
        src: the source directory or file
        dst: the destination directory or file\\* (\\*if ``src`` is a file)
        quiet (optional): whether or not to suppress output
        critical (optional): whether or not to stop execution on failure
        dst_dir (optional): force hint that the destination is a directory

    Returns:
        ``True`` if the move has completed with no error; ``False`` if the move
        has failed

    Raises:
        SystemExit: if the move operation fails with ``critical=True``
    """
    success = True

    # nothing to do when the source and destination are the same path
    if src == dst:
        return True

    if os.path.isfile(src) and not dst_dir:
        # moving a file onto a file target; only its parent must exist
        parent_dir = os.path.dirname(dst)
        if parent_dir and not os.path.isdir(parent_dir):
            success = ensure_dir_exists(parent_dir, quiet=quiet)
    elif not os.path.isdir(dst):
        # target directory does not exist yet; replace any non-directory
        # entry occupying that path and create the directory
        if os.path.exists(dst):
            path_remove(dst)
        success = ensure_dir_exists(dst, quiet=quiet)
    else:
        # guard against moving a directory into its own subdirectory
        # NOTE(review): this is a plain string-prefix check on real paths;
        # a sibling such as `/a/bc` would also match `/a/b` -- confirm this
        # is acceptable for the expected inputs
        src_dir = os.path.realpath(src)
        dst_dir = os.path.realpath(dst)
        if dst_dir.startswith(src_dir):
            if not quiet:
                err('unable to move source contents to target location\n'
                    '    attempt to move directory into a child subdirectory')
            if critical:
                sys.exit(-1)
            return False

    if success:
        try:
            if os.path.isfile(src):
                # replace any existing destination file before moving
                if os.path.isfile(dst):
                    path_remove(dst)
                move(src, dst)
            else:
                # recursively merge the directory contents into the target
                _path_move(src, dst)
        except Exception as e:
            success = False
            if not quiet:
                err('unable to move source contents to target location\n'
                    '    {}', e)

    if not success and critical:
        sys.exit(-1)
    return success
def path_move_into(src, dst, quiet=False, critical=True):
    """
    move a file or directory into a target directory

    Moves the file or directory ``src`` into the directory ``dst``,
    automatically creating the target directory when it does not yet
    exist. This is a convenience wrapper around ``path_move`` with the
    destination always treated as a directory.

    On failure, an error message is reported to standard error (unless
    ``quiet`` is set) and, when ``critical`` is set, the process exits.

    An example when using in the context of script helpers is as follows:

    .. code-block:: python

        # (input)
        # my-directory/another-file
        # my-file
        # my-file2
        releng_move('my-file', 'my-file3')
        releng_move('my-directory', 'my-directory2')
        releng_move('my-file2', 'my-directory2')
        # (output)
        # my-directory2/another-file
        # my-directory2/my-file2
        # my-file3

    Args:
        src: the source directory or file
        dst: the destination directory
        quiet (optional): whether or not to suppress output
        critical (optional): whether or not to stop execution on failure

    Returns:
        ``True`` if the move has completed with no error; ``False`` if the
        move has failed

    Raises:
        SystemExit: if the move operation fails with ``critical=True``
    """
    return path_move(src, dst, quiet=quiet, critical=critical, dst_dir=True)
def _path_move(src, dst):
    """
    move the provided directory into the target directory (recursive)

    Attempts to move the provided directory into the target directory. In the
    event that a file or directory could not be moved due to an error, this
    function will typically raise an OSError exception for ``path_move`` to
    handle. In the chance that a file cannot be moved due to permission
    issues, this function can attempt to adjust permissions to specific paths
    to help in the moving processes (e.g. dealing with read-only files or
    other strict permissions setup during a build process).

    Args:
        src: the source directory
        dst: the destination directory

    Raises:
        OSError: if a path could not be moved
    """
    # ensure a caller has read/write access before hand to prepare for moving
    # (e.g. if marked as read-only) and ensure contents can be fetched as well
    try:
        st = os.stat(src)
        if not (st.st_mode & stat.S_IRUSR) or not (st.st_mode & stat.S_IWUSR):
            os.chmod(src, st.st_mode | stat.S_IRUSR | stat.S_IWUSR)
    except OSError:
        pass

    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)

        if os.path.isdir(src_path) and not os.path.islink(src_path) and \
                os.path.isdir(dst_path):
            # both sides are real directories; merge contents recursively
            _path_move(src_path, dst_path)
        else:
            # replace any existing destination entry with the source entry
            # (deduplicated from the original's two identical branches)
            if os.path.exists(dst_path):
                path_remove(dst_path)
            move(src_path, dst_path)

    # all contents moved; remove the now-empty source directory
    os.rmdir(src)
from releng_tool.tool.git import GIT
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import note
import os
import sys
import tarfile
def fetch(opts):
    """
    support fetching from perforce sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        the cache file if the fetch stage is completed; ``None`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    revision = opts.revision
    site = opts.site
    work_dir = opts.work_dir

    # perforce sources are fetched through `git p4`, which requires git
    if not GIT.exists():
        err('unable to fetch package; git (for perforce) is not installed')
        return None

    # use lazy log formatting, consistent with the other fetch backends
    note('fetching {}...', name)
    sys.stdout.flush()

    p4env = {}

    # the site value carries both the perforce service and the depot view,
    # split on the last space
    try:
        p4root, view_dir = site.rsplit(' ', 1)
    except ValueError:
        err('''\
improper perforce site defined

The provided Perforce site does not define both the Perforce service as well
as the depot path to synchronize. For example:

    perforce+guest@tcp4:p4.example.com:1666 //my-srcs

 Site: {}''', site)
        return None

    # check if there is a user defined in the root; if so, extract
    if '@' in p4root:
        p4user, p4root = p4root.rsplit('@', 1)
        if p4user:
            p4env['P4USER'] = p4user

    # configure the service to use
    p4env['P4PORT'] = p4root

    log('checking out sources')
    if revision:
        target_path = '{}@{}'.format(view_dir, revision)
    else:
        target_path = view_dir

    if not GIT.execute(['p4', 'clone', target_path],
            cwd=work_dir, env=p4env):
        err('unable to clone sources')
        return None

    log('caching sources')

    # ensure cache file's directory exists
    cache_dir = os.path.abspath(os.path.join(cache_file, os.pardir))
    if not ensure_dir_exists(cache_dir):
        return None

    # strip the git metadata produced by `git p4` from the archive
    def perforce_filter(info):
        if info.name.endswith('.git'):
            return None
        return info

    with tarfile.open(cache_file, 'w:gz') as tar:
        tar.add(work_dir, arcname=name, filter=perforce_filter)

    return cache_file
from __future__ import print_function
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import note
from releng_tool.util.log import warn
import contextlib
import os
import sys
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
#: size of blocks (in bytes) read per iteration when downloading a resource
REQUEST_READ_BLOCKSIZE = 8192
def fetch(opts):
    """
    support fetching from url sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        the cache file if the fetch stage is completed; ``None`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    site = opts.site
    is_mirror_attempt = opts._mirror
    urlopen_context = opts._urlopen_context

    filename = os.path.basename(cache_file)

    note('fetching {}...', name)
    sys.stdout.flush()

    log('requesting: ' + site)
    try:
        with contextlib.closing(urlopen(site, context=urlopen_context)) as rsp:
            total = 0
            # attempt to determine the total download size for progress
            # reporting; a missing/malformed header leaves `total` at zero
            if 'content-length' in rsp.headers:
                try:
                    total = int(rsp.headers['content-length'])
                    total_str = display_size(total)
                except ValueError:
                    pass

            read = 0
            with open(cache_file, 'wb') as f:
                while True:
                    buf = rsp.read(REQUEST_READ_BLOCKSIZE)
                    if not buf:
                        break
                    read += len(buf)
                    read_str = display_size(read)

                    # emit in-place progress: a percentage when the total
                    # size is known; a raw byte count otherwise
                    if total != read:
                        if total > 0:
                            pct = 100 * float(read) / float(total)
                            print('[{:02.0f}%] {}: {} of {} '.format(
                                pct, filename, read_str, total_str), end='\r')
                        else:
                            print(' {}: {} '.format(
                                filename, read_str), end='\r')

                    f.write(buf)
    except Exception as e:
        # failures on a mirror attempt are only warnings, since another
        # site may still be attempted by the caller
        log_func = warn if is_mirror_attempt else err
        log_func('failed to download resource\n'
                 '    {}', e)
        return None

    # cleanup any download progress prints
    # (`read` is always bound here: any failure before its assignment
    # returns early from the exception handler above)
    if read > 0:
        log('')

    log('completed download ({})', display_size(read))
    return cache_file
def display_size(val):
    """
    return a human-readable count value for the provided byte count

    Accepts a byte count value and returns a string with a count value and
    binary prefix which describes the respective size.

    Args:
        val: the value (in bytes) to interpret

    Returns:
        the human-readable size
    """
    step = 1024.
    for unit in ('B', 'KiB', 'MiB', 'GiB'):
        if abs(val) < step:
            return '{:3.1f} {}'.format(val, unit)
        val /= step
    return '{:.1f} TiB'.format(val)
from releng_tool.tool.cvs import CVS
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.io import interpret_stem_extension
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import note
import os
import sys
import tarfile
def fetch(opts):
    """
    support fetching from cvs sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        the cache file if the fetch stage is completed; ``None`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    revision = opts.revision
    site = opts.site
    work_dir = opts.work_dir

    cache_basename = os.path.basename(cache_file)
    cache_stem, __ = interpret_stem_extension(cache_basename)

    if not CVS.exists():
        err('unable to fetch package; cvs is not installed')
        return None

    note('fetching {}...', name)
    sys.stdout.flush()

    # the site value carries both the CVSROOT and the module to checkout,
    # split on the last space
    try:
        cvsroot, module = site.rsplit(' ', 1)
    except ValueError:
        err('''\
improper cvs site defined

The provided CVS site does not define both the CVSROOT as well as the target
module to checkout. For example:

    :pserver:anonymous@cvs.example.com:/var/lib/cvsroot mymodule

 Site: {}''', site)
        return None

    log('checking out sources')
    if not CVS.execute(['-d', cvsroot, 'checkout', '-d', cache_stem,
            '-r', revision, module], cwd=work_dir):
        err('unable to checkout module')
        return None

    cvs_module_dir = os.path.join(work_dir, cache_stem)
    if not os.path.exists(cvs_module_dir):
        err('no sources available for the provided revision')
        return None

    log('caching sources')

    # ensure cache file's directory exists
    cache_dir = os.path.abspath(os.path.join(cache_file, os.pardir))
    if not ensure_dir_exists(cache_dir):
        return None

    # strip CVS metadata directories when archiving the checkout
    def cvs_filter(info):
        if info.name.endswith('CVS'):
            return None
        return info

    with tarfile.open(cache_file, 'w:gz') as tar:
        tar.add(cvs_module_dir, arcname=cache_stem, filter=cvs_filter)

    return cache_file
from releng_tool.tool.bzr import BZR
from releng_tool.util.io import ensure_dir_exists
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import note
from releng_tool.util.log import verbose
from releng_tool.util.log import warn
import os
import sys
#: flag to track if a missing-certifi warning has already been emitted
CERTIFI_MISSING_WARNED = False

# certifi is an optional dependency; when unavailable, the certifi quirk
# (releng.bzr.certifi) will only warn instead of injecting certificates
# (the flag initialization above has been moved out of the try block, as
# it is unrelated to the optional import)
try:
    import certifi
except ImportError:
    certifi = None
def fetch(opts):
    """
    support fetching from bzr sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        the cache file if the fetch stage is completed; ``None`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    revision = opts.revision
    site = opts.site

    if not BZR.exists():
        err('unable to fetch package; bzr is not installed')
        return None

    note('fetching {}...', name)
    sys.stdout.flush()

    # ensure cache file's directory exists before exporting into it
    cache_dir = os.path.abspath(os.path.join(cache_file, os.pardir))
    if not ensure_dir_exists(cache_dir):
        return None

    export_opts = [
        'export',
        cache_file,
        site,
        '--format=tgz',
        '--root=' + name,
        '--revision=' + revision,
    ]

    # some environments may have issues exporting bzr sources due to
    # certificate issues; this quirk allows injecting certifi-provided
    # certificates for all bzr exports
    if 'releng.bzr.certifi' in opts._quirks:
        global CERTIFI_MISSING_WARNED

        if certifi:
            verbose('performing bzr fetch with certifi certificates')
            pkg_site = certifi.where()
            export_opts.append('-Ossl.ca_certs=' + pkg_site)
        elif not CERTIFI_MISSING_WARNED:
            # only warn once per run about the unusable quirk
            CERTIFI_MISSING_WARNED = True
            warn('''\
unable to perform bzr fetch with certifi certificates

A quirk has been enabled to export bzr images using certifi
certificates; however, certifi is not installed on this system.
''')

    log('exporting sources')
    if not BZR.execute(export_opts, poll=True):
        err('unable to export module')
        return None

    return cache_file
from releng_tool.tool.rsync import RSYNC
from releng_tool.util.io import interpret_stem_extension
from releng_tool.util.io import prepare_arguments
from releng_tool.util.log import err
from releng_tool.util.log import log
from releng_tool.util.log import note
from releng_tool.util.string import expand
import os
import sys
import tarfile
def fetch(opts):
    """
    support fetching from rsync sources

    With provided fetch options (``RelengFetchOptions``), the fetch stage will
    be processed.

    Args:
        opts: fetch options

    Returns:
        the cache file if the fetch stage is completed; ``None`` otherwise
    """
    assert opts
    cache_file = opts.cache_file
    name = opts.name
    site = opts.site
    work_dir = opts.work_dir

    cache_basename = os.path.basename(cache_file)
    cache_stem, __ = interpret_stem_extension(cache_basename)

    if not RSYNC.exists():
        err('unable to fetch package; rsync is not installed')
        return None

    note('fetching {}...', name)
    sys.stdout.flush()

    # options
    fetch_opts = {
        '--recursive': '',  # default recursive call
    }
    if opts.extra_opts:
        fetch_opts.update(expand(opts.extra_opts))

    # argument building
    fetch_args = []
    fetch_args.extend(prepare_arguments(fetch_opts))

    # sanity check provided arguments
    for fetch_arg in fetch_args:
        if '--remove-source-files' in fetch_arg:
            err('option `--remove-source-files` not permitted')
            return None
        elif not fetch_arg.startswith('-'):
            # include the offending argument via a `{}` placeholder; the
            # log helpers only interpolate arguments into placeholders, so
            # a trailing positional argument alone would be dropped
            err('invalid fetch option provided: {}', fetch_arg)
            return None

    fetch_args.append(site)  # source directory
    fetch_args.append(work_dir)  # destination directory

    if not RSYNC.execute(fetch_args, cwd=work_dir):
        err('unable to rsync from source')
        return None
    log('successfully invoked rsync for source')

    with tarfile.open(cache_file, 'w:gz') as tar:
        tar.add(work_dir, arcname=cache_stem)

    return cache_file
import datetime
import wsme.types
class File(wsme.types.Base):
    """A representation of a single file, identified by its contents rather
    than its filename. Depending on context, this may contain URLs to download
    or upload the file."""

    #: The size of the file, in bytes
    size = int

    #: The sha512 digest of the file contents
    digest = unicode

    #: The digest algorithm (reserved for future expansion;
    #: must always be 'sha512')
    algorithm = unicode

    #: The visibility level of this file. When making an upload, the uploader
    #: is (legally!) responsible for selecting the correct visibility level.
    visibility = wsme.types.wsattr(
        wsme.types.Enum(unicode, 'public', 'internal'),
        mandatory=True)

    #: The regions containing an instance of this file. This field is generally
    #: omitted except where specified
    instances = [unicode]

    #: The URL from which this file can be downloaded via HTTP GET
    get_url = wsme.types.wsattr(unicode, mandatory=False)

    #: The URL to which this file can be uploaded via HTTP PUT. The URL
    #: requires the request content-type to be ``application/octet-stream``.
    put_url = wsme.types.wsattr(unicode, mandatory=False)
class UploadBatch(wsme.types.Base):
    """An upload batch describes a collection of related files that
    are uploaded together -- similar to a version-control commit. The
    message and files list must be non-empty."""

    #: Identifier for this batch
    id = wsme.types.wsattr(int, mandatory=False)

    #: The date and time when this upload occurred. This will be added by the
    #: server and need not be specified when making a new upload.
    uploaded = wsme.types.wsattr(datetime.datetime, mandatory=False)

    #: The author (uploader) of the batch. Do not include this when submitting
    #: a batch for upload; it will be filled in based on the request
    #: authentication.
    author = wsme.types.wsattr(unicode, mandatory=False)

    #: The message for the batch. Format this like a version-control message.
    message = wsme.types.wsattr(unicode, mandatory=True)

    #: The collection of files in this batch, keyed by filename. Note that
    #: filenames containing path separators (``\`` and ``/``) will be rejected
    #: by the tooltool client.
    files = wsme.types.wsattr({unicode: File}, mandatory=True)
import sqlalchemy as sa
from relengapi.blueprints.tooltool import types
from relengapi.lib import db
allowed_regions = ('us-east-1', 'us-west-1', 'us-west-2')
class File(db.declarative_base('tooltool')):
    """A file, identified by size and digest. The server may have zero
    or many copies of a file."""
    __tablename__ = 'tooltool_files'

    #: surrogate primary key
    id = sa.Column(sa.Integer, primary_key=True)
    #: file size, in bytes
    size = sa.Column(sa.Integer, nullable=False)
    #: hex sha512 digest of the file contents (the file's unique identity)
    sha512 = sa.Column(sa.String(128), unique=True, nullable=False)
    #: visibility level selected by the uploader for this file
    visibility = sa.Column(sa.Enum('public', 'internal'), nullable=False)
    #: verified per-region copies of this file
    instances = sa.orm.relationship('FileInstance', backref='file')

    # note that changes to this dictionary will not be reflected to the DB;
    # add or delete BatchFile instances directly instead.
    @property
    def batches(self):
        """Mapping of filename to the upload Batch that supplied it."""
        return {bf.filename: bf.batch for bf in self._batches}

    def to_json(self, include_instances=False):
        """Return the WSME (``types.File``) representation of this file.

        Args:
            include_instances: when True, also list the regions holding an
                instance of this file
        """
        rv = types.File(
            size=self.size,
            digest=self.sha512,
            algorithm='sha512',
            visibility=self.visibility)
        if include_instances:
            rv.instances = [i.region for i in self.instances]
        return rv
class FileInstance(db.declarative_base('tooltool')):
    """A verified instance of a file in a single region."""
    __tablename__ = 'tooltool_file_instances'

    #: the file this instance is a copy of
    file_id = sa.Column(
        sa.Integer, sa.ForeignKey('tooltool_files.id'), primary_key=True)
    #: the region in which this copy of the file resides
    region = sa.Column(
        sa.Enum(*allowed_regions), primary_key=True)
class BatchFile(db.declarative_base('tooltool')):
    """An association of upload batches to files, with filenames"""
    __tablename__ = 'tooltool_batch_files'

    #: the file included in the batch
    file_id = sa.Column(sa.Integer, sa.ForeignKey('tooltool_files.id'), primary_key=True)
    file = sa.orm.relationship("File", backref="_batches")

    #: the batch in which the file was uploaded
    batch_id = sa.Column(sa.Integer, sa.ForeignKey('tooltool_batches.id'), primary_key=True)
    batch = sa.orm.relationship("Batch", backref="_files")

    #: the filename under which the file appeared in the batch
    filename = sa.Column(sa.Text, nullable=False)
class Batch(db.declarative_base('tooltool')):
    """Upload batches, with batch metadata, linked to the uploaded files"""
    __tablename__ = 'tooltool_batches'

    id = sa.Column(sa.Integer, primary_key=True)
    #: time of the upload (stored via db.UTCDateTime)
    uploaded = sa.Column(db.UTCDateTime, index=True, nullable=False)
    #: identity of the uploader (filled in from request authentication)
    author = sa.Column(sa.Text, nullable=False)
    #: commit-style message describing the batch
    message = sa.Column(sa.Text, nullable=False)

    # note that changes to this dictionary will not be reflected to the DB;
    # add or delete BatchFile instances directly instead.
    @property
    def files(self):
        """Mapping of filename to the File uploaded under that name."""
        return {bf.filename: bf.file for bf in self._files}

    def to_json(self):
        """Return the WSME (``types.UploadBatch``) representation.

        Note: uses dict.iteritems -- this module targets Python 2.
        """
        return types.UploadBatch(
            id=self.id,
            uploaded=self.uploaded,
            author=self.author,
            message=self.message,
            files={n: f.to_json() for n, f in self.files.iteritems()})
class PendingUpload(db.declarative_base('tooltool')):
    """Files for which upload URLs have been generated, but which haven't yet
    been uploaded. This table is used to poll for completed uploads, and to
    prevent trusting files for which there is an outstanding signed upload URL."""
    __tablename__ = 'tooltool_pending_upload'

    #: the file awaiting upload
    file_id = sa.Column(
        sa.Integer, sa.ForeignKey('tooltool_files.id'),
        nullable=False, primary_key=True)
    #: when the signed upload URL expires
    expires = sa.Column(db.UTCDateTime, index=True, nullable=False)
    #: the region the upload was signed for
    region = sa.Column(
        sa.Enum(*allowed_regions), nullable=False)
    file = sa.orm.relationship('File', backref='pending_uploads')
from typing import Any
class ParamBase:
    """Base class for chain parameters, keyed by the parameter name."""

    def __init__(self, name):
        self.name = name
        # minimal JSON-schema fragment; subclasses enrich this mapping
        self.json = {name: {}}

    def to_json(self):
        """Return the JSON-schema representation of this parameter."""
        return self.json
class Parameters:
    """Container merging one or more parameter definitions into a single
    JSON-schema ``properties`` mapping."""

    def __init__(self, parameters):
        # a list of parameter entries, a single parameter object, or a raw
        # schema dictionary
        self.parameters = parameters

    def _check_param(self, param):
        """Return the schema dictionary for a single parameter entry.

        Accepts raw dictionaries as-is and any parameter object exposing a
        ``json`` attribute. The duck-typed check is deliberate: helpers
        such as ``OptionsParam`` and ``JsonParam`` do not subclass
        ``ParamBase`` but still provide ``json``, and would otherwise be
        rejected.

        Raises:
            ValueError: when the entry is neither a dictionary nor a
                parameter-like object
        """
        if isinstance(param, dict):
            return param
        schema = getattr(param, 'json', None)
        if schema is not None:
            return schema
        raise ValueError(
            "Parameters must be a ParamBase instances or a dictionary"
        )

    def to_json(self):
        """Merge all parameter definitions into one schema dictionary."""
        if isinstance(self.parameters, list):
            dict_params = {}
            for p in self.parameters:
                dict_params.update(self._check_param(p))
            return dict_params
        return self._check_param(self.parameters)

    def _format_name(self, name: str):
        # wrap a parameter name in template braces, e.g. "{{name}}"
        return "{{" + name + "}}"
class StringParam(ParamBase):
    """A free-form text parameter; ``long`` requests a multi-line input."""

    def __init__(self, name, long: bool = False, title="Text Input", description=""):
        super().__init__(name)
        self.title = title
        self.description = description
        self.long = long
        schema = {
            "title": title,
            "description": description,
            "type": "string",
        }
        if long:
            # flag the input as long-form text for the UI
            schema["metadata"] = {"content_type": "long_text"}
        self.json = {name: schema}
class NumberParam(ParamBase):
    """A numeric parameter with optional ``min``/``max`` bounds."""

    def __init__(
        self,
        name,
        max: int = None,
        min: int = None,
        title="Number Input",
        description="",
    ):
        super().__init__(name)
        self.title = title
        self.description = description
        self.max = max
        self.min = min
        self.json = {
            name: {
                "title": title,
                "description": description,
                "type": "number",
            }
        }
        # explicit None checks: a legitimate bound of 0 is falsy and would
        # otherwise be silently dropped from the schema
        if self.max is not None:
            self.json[name]["max"] = max
        if self.min is not None:
            self.json[name]["min"] = min
class OptionsParam:
    """A string parameter restricted to a fixed set of choices.

    NOTE(review): unlike most parameter helpers this class does not
    subclass ``ParamBase`` -- confirm whether that is intentional.
    """

    def __init__(
        self,
        name,
        options,
        title="Options",
        description="",
    ):
        self.name = name
        self.title = title
        self.description = description
        self.options = options
        schema = {
            "title": title,
            "description": description,
            "type": "string",
            "enum": options,
        }
        self.json = {name: schema}
class StringListParam(ParamBase):
    """A parameter accepting a list of strings."""

    def __init__(
        self,
        name,
        title="Text List Input",
        description="",
    ):
        self.name = name
        self.title = title
        self.description = description
        schema = {
            "title": title,
            "description": description,
            "type": "array",
            "items": {"type": "string"},
        }
        self.json = {name: schema}
class JsonParam:
    """A parameter accepting a single JSON object.

    NOTE(review): unlike most parameter helpers this class does not
    subclass ``ParamBase`` -- confirm whether that is intentional.
    """

    def __init__(
        self,
        name,
        title="JSON Input",
        description="",
    ):
        self.name = name
        self.title = title
        self.description = description
        schema = {
            "title": title,
            "description": description,
            "type": "object",
        }
        self.json = {name: schema}
class JsonListParam(ParamBase):
    """A parameter accepting a list of JSON objects."""

    def __init__(
        self,
        name,
        title="JSON List Input",
        description="",
    ):
        self.name = name
        self.title = title
        self.description = description
        schema = {
            "title": title,
            "description": description,
            "type": "array",
            "items": {"type": "object"},
        }
        self.json = {name: schema}
class FileParam(ParamBase):
    """Schema builder for a file-upload parameter.

    Args:
        name: Parameter name used as the schema key.
        title: Human-readable title shown in UIs.
        description: Help text for the parameter.
    """

    def __init__(
        self,
        name,
        title="File Input",
        description="",
    ):
        # Delegate name registration to ParamBase, mirroring StringParam
        # (previously bypassed the base initialiser).
        super().__init__(name)
        self.title = title
        self.description = description
        self.json = {
            name: {
                "title": title,
                "description": description,
                "type": "string",
                # Empty accepted_file_types means any file type is allowed.
                "metadata": {"content_type": "file", "accepted_file_types": []},
            }
        }
import json
import requests
from relevanceai._request import handle_response
from relevanceai import config
from relevanceai.auth import Auth
from relevanceai.params import Parameters
def create(name, description="", parameters=None, id=None, auth=None):
    """Create a new Chain.

    ``parameters`` defaults to None instead of a shared mutable ``{}`` so
    separate calls can never alias (and mutate) the same default dict.

    Args:
        name: Chain title.
        description: Optional chain description.
        parameters: Optional parameter schema (dict or Parameters).
        id: Optional studio id; a random one is generated when omitted.
        auth: Optional Auth override; falls back to the global config.

    Returns:
        The newly constructed Chain.
    """
    return Chain(
        name=name,
        description=description,
        parameters={} if parameters is None else parameters,
        id=id,
        auth=auth,
    )
def load(id, auth=None):
    """Load an existing chain by its studio id.

    NOTE(review): the HTTP response is fetched and validated but its body
    is never used -- the returned Chain is built with an empty name,
    description and parameters. Presumably fields from ``res`` should
    populate the Chain; confirm against the API response schema.
    """
    if auth is None:
        auth = config.auth
    # NOTE(review): a GET request carrying a JSON body is unusual; verify
    # the endpoint actually reads these filters.
    response = requests.get(
        f"https://api-{auth.region}.stack.tryrelevance.com/latest/studios/{auth.project}/{id}",
        json={
            "filters": [
                {
                    "field": "studio_id",
                    "condition": "==",
                    "condition_value": id,
                    "filter_type": "exact_match",
                },
                {
                    "field": "project",
                    "condition": "==",
                    "condition_value": auth.project,
                    "filter_type": "exact_match",
                },
            ]
        },
    )
    # Still surfaces API errors even though the payload itself is unused.
    res = handle_response(response)
    chain = Chain(name="", description="", parameters={}, id=id, auth=auth)
    return chain
def load_from_json(filepath_or_json):
    """Build a Chain from an exported studio JSON document.

    Accepts either a path to a JSON file or an already-parsed dict.
    """
    if isinstance(filepath_or_json, str):
        with open(filepath_or_json, "r") as f:
            studio_json = json.load(f)
    else:
        studio_json = filepath_or_json
    chain = Chain(
        name=studio_json["title"],
        description=studio_json["description"],
        parameters=studio_json["params_schema"]["properties"],
        id=studio_json["studio_id"],
    )
    chain.add(studio_json["transformations"]["steps"])
    return chain
class Chain:
    """A deployable sequence of transformation steps (a Relevance "studio").

    Fixes over the original:
    - ``to_json`` had its branches inverted (it wrote the file when
      ``return_json=True`` and returned the dict otherwise);
    - mutable default arguments (``parameters={}``, ``values={}``) replaced
      with None sentinels;
    - the printed API example was missing the opening quote of the URL.
    """

    def __init__(
        self,
        name: str,
        description: str = "",
        parameters=None,
        id: str = None,
        auth: Auth = None,
    ):
        self.name = name
        self.description = description
        # None sentinel avoids a shared mutable {} default argument.
        self._parameters = {} if parameters is None else parameters
        self.steps = []
        # Track whether the id was auto-generated so deploy() can warn.
        self.random_id = False
        if id is None:
            import uuid

            id = str(uuid.uuid4())
            self.random_id = True
        self.id = id
        self.auth: Auth = config.auth if auth is None else auth

    @property
    def parameters(self):
        """Parameters wrapper around the raw parameter schema."""
        return Parameters(self._parameters)

    # Backwards-compatible alias.
    params = parameters

    def add(self, steps):
        """Append a step (or a list of steps) to the chain."""
        if isinstance(steps, list):
            self.steps.extend(steps)
        else:
            self.steps.append(steps)

    def _transform_steps(self, steps):
        """Flatten step objects and ensure every step name is unique."""
        chain_steps = [step.steps[0] for step in steps]
        unique_ids = []
        for step in chain_steps:
            if step["name"] in unique_ids:
                raise ValueError(
                    f"Duplicate step name {step['name']}, please rename the step name with Step(step_name=step_name)."
                )
            unique_ids.append(step["name"])
        return chain_steps

    def _trigger_json(
        self, values: dict = None, return_state: bool = True, public: bool = False
    ):
        """Build the payload for the /trigger endpoint."""
        data = {
            "return_state": return_state,
            "studio_override": {
                "public": public,
                "transformations": {"steps": self._transform_steps(self.steps)},
                "params_schema": {"properties": self.parameters.to_json()},
            },
            "params": {} if values is None else values,
        }
        data["studio_id"] = self.id
        data["studio_override"]["studio_id"] = self.id
        return data

    def run(self, parameters=None, full_response: bool = False):
        """Execute the chain remotely.

        Returns the unwrapped "output" on success, or the full response
        payload when errors occurred or ``full_response`` is requested.
        """
        url = f"https://api-{self.auth.region}.stack.tryrelevance.com/latest/studios/{self.auth.project}"
        response = requests.post(
            f"{url}/trigger",
            json=self._trigger_json({} if parameters is None else parameters),
            headers=self.auth.headers,
        )
        res = handle_response(response)
        if isinstance(res, dict):
            if ("errors" in res and res["errors"]) or full_response:
                return res
            elif "output" in res:
                return res["output"]
        return res

    def _json(self):
        """Serialise the full chain definition for the bulk_update endpoint."""
        data = {
            "title": self.name,
            "description": self.description,
            "version": "latest",
            "project": self.auth.project,
            "public": False,
            "params_schema": {"properties": self.parameters.to_json()},
            "transformations": {"steps": self._transform_steps(self.steps)},
        }
        data["studio_id"] = self.id
        return data

    def deploy(self):
        """Persist the chain remotely and print usage snippets.

        Returns:
            The studio id the chain was deployed under.
        """
        url = f"https://api-{self.auth.region}.stack.tryrelevance.com/latest/studios"
        response = requests.post(
            f"{url}/bulk_update",
            json={"updates": [self._json()]},
            headers=self.auth.headers,
        )
        res = handle_response(response)
        print("Studio deployed successfully to id ", self.id)
        if self.random_id:
            print(
                "Your studio id is randomly generated, to ensure you are updating the same chain you should specify the id on rai.create(id=id) ",
            )
        print("\n=============Low Code Notebook================")
        print(
            f"You can share/visualize your chain as an app in our low code notebook here: https://chain.relevanceai.com/notebook/{self.auth.region}/{self.auth.project}/{self.id}/app"
        )
        print("\n=============with Requests================")
        print("Here is an example of how to run the chain with API: ")
        # The printed example previously lacked the opening quote of the URL.
        print(
            f"""
import requests
requests.post("https://api-{self.auth.region}.stack.tryrelevance.com/latest/studios/{self.id}/trigger_limited", json={{
    "project": "{self.auth.project}",
    "params": {{
        YOUR PARAMS HERE
    }}
}})
"""
        )
        print("\n=============with Python SDK================")
        print("Here is an example of how to run the chain with Python: ")
        print(
            f"""
import relevanceai as rai
chain = rai.load("{self.id}")
chain.run({{YOUR PARAMS HERE}})
"""
        )
        return self.id

    def to_json(self, filepath, return_json=False):
        """Save the chain config to *filepath*, or return it as a dict.

        The original implementation had the branches inverted: it wrote the
        file when ``return_json`` was True and returned the dict otherwise.
        """
        if return_json:
            return self._json()
        with open(filepath, "w") as f:
            json.dump(self._json(), f)
        print("Chain saved to ", filepath)

    def reset(self):
        """Remove all steps from the chain."""
        self.steps = []
import requests
from relevanceai import config
from relevanceai.auth import Auth
from relevanceai._request import handle_response
from relevanceai.steps.vector_search import VectorSimilaritySearch
from typing import Any, Dict, List, Optional, Union
class Dataset:
    """Thin wrapper around a vecdb dataset for inserts and vector search.

    Requires the optional ``vecdb`` package; raises ImportError with an
    install hint when it is missing.
    """

    def __init__(
        self,
        id: str,
        auth: Auth = None,
    ):
        self.id = id
        self.auth: Auth = config.auth if auth is None else auth
        try:
            # Local import (shadows this class name inside the method only)
            # so vecdb stays an optional dependency.
            from vecdb.collections.dataset import Dataset
            from vecdb.api.local import Client

            self.vecdb_client = Client(
                f"{self.auth.project}:{self.auth.api_key}:{self.auth.region}",
                authenticate=False,
            )
        except ImportError:
            raise ImportError(
                "vecdb is not installed. Please install vecdb with `pip install vecdb`"
            )
        self.db = Dataset(api=self.vecdb_client.api, dataset_id=self.id)

    def insert(
        self,
        documents: List = None,
        ids: List[str] = None,
        data: List[str] = None,
        metadata: List[Dict[str, Any]] = None,
        vector: List[List[float]] = None,
        encoders: List = None,
        *args,
        **kwargs,
    ):
        """Insert documents into the underlying vecdb dataset (pass-through)."""
        return self.db.insert(
            documents=documents,
            ids=ids,
            data=data,
            metadata=metadata,
            vector=vector,
            encoders=encoders,
            *args,
            **kwargs,
        )

    def search(
        self,
        text: str,
        field: str = "text_vector_",
        page_size: int = 5,
        model: str = "all-mpnet-base-v2",
        return_as_step: bool = False,
    ):
        """Vector-similarity search over this dataset.

        Args:
            text: Query text.
            field: Vector field name; "_vector_" suffix is appended if absent.
            page_size: Number of results to return.
            model: Embedding model name.
            return_as_step: When True, return the unexecuted step object
                (for composing into a chain) instead of running it.
        """
        if "_vector_" not in field:
            field = f"{field}_vector_"
        # Build the step once; the two modes differ only in the final .run()
        # (the original duplicated the whole constructor call).
        step = VectorSimilaritySearch(
            dataset_id=self.id,
            query=text,
            vector_field=field,
            model=model,
            page_size=page_size,
        )
        return step if return_as_step else step.run()
def list_datasets(auth: Auth = None):
    """List the datasets visible to the given (or globally configured) Auth."""
    creds = config.auth if auth is None else auth
    try:
        from vecdb.collections.dataset import Dataset
        from vecdb.api.local import Client

        client = Client(
            f"{creds.project}:{creds.api_key}:{creds.region}",
            authenticate=False,
        )
    except ImportError:
        raise ImportError(
            "vecdb is not installed. Please install vecdb with `pip install vecdb`"
        )
    return client.list_datasets()
import requests
from relevanceai.steps._base import StepBase
class PromptCompletion(StepBase):
    """Chain step that generates text with a large language model.

    Args:
        prompt: The prompt fed to the model.
        model: Optional model identifier (e.g. a GPT variant).
        history: Optional prior conversation, e.g.
            [{"role": "user", "message": "Hi"}, {"role": "ai", "message": "Hello"}].
        system_prompt: Optional system prompt for chat models.
        strip_linebreaks: Optionally strip linebreaks from the output.
        temperature: Optional sampling temperature (higher = more random).
        validators: Optional output validators with automatic re-prompting.
        step_name: Name of this step inside the chain.

    Outputs exposed to later steps: answer, prompt, user_key_used,
    validation_history.
    """

    # Optional constructor settings, in the order they are serialised.
    _OPTIONAL_PARAMS = (
        "model",
        "history",
        "system_prompt",
        "strip_linebreaks",
        "temperature",
        "validators",
    )

    def __init__(
        self,
        prompt: str,
        model: str = None,
        history: list = None,
        system_prompt: str = None,
        strip_linebreaks: bool = None,
        temperature: int = None,
        validators: list = None,
        step_name: str = "prompt_completion",
        *args,
        **kwargs,
    ) -> None:
        self.prompt = prompt
        self.model = model
        self.history = history
        self.system_prompt = system_prompt
        self.strip_linebreaks = strip_linebreaks
        self.temperature = temperature
        self.validators = validators
        self.step_name = step_name
        self._outputs = ["answer", "prompt", "user_key_used", "validation_history"]
        self.outputs = [f"steps.{self.step_name}.output.{key}" for key in self._outputs]
        super().__init__(*args, **kwargs)

    @property
    def steps(self):
        """Serialise this step into the chain transformation format."""
        params = {"prompt": self.prompt}
        # Only explicitly-provided optional settings are serialised.
        for attr in self._OPTIONAL_PARAMS:
            value = getattr(self, attr)
            if value is not None:
                params[attr] = value
        return [
            {
                "transformation": "prompt_completion",
                "name": self.step_name,
                "foreach": "",
                "output": {out: f"{{{{ {out} }}}}" for out in self._outputs},
                "params": params,
            }
        ]
from relevanceai.steps._base import StepBase
class VectorSimilaritySearch(StepBase):
    """Chain step that searches a Relevance dataset by semantic similarity.

    Args:
        dataset_id: The dataset to search.
        query: The query text.
        vector_field: Name of the field holding the vectors.
        model: Embedding model name.
        content_field: Optional field to return as content.
        page_size: Number of results to return.
        step_name: Name of this step inside the chain.

    Outputs exposed to later steps: results.
    """

    def __init__(
        self,
        dataset_id: str,
        query: str,
        vector_field: str,
        model: str,
        content_field: str = None,
        page_size: int = 5,
        step_name: str = "vector_similarity_search",
        *args,
        **kwargs,
    ) -> None:
        self.dataset_id = dataset_id
        self.query = query
        self.vector_field = vector_field
        self.model = model
        self.content_field = content_field
        self.page_size = page_size
        self.step_name = step_name
        self._outputs = ["results"]
        self.outputs = [f"steps.{self.step_name}.output.{key}" for key in self._outputs]
        super().__init__(*args, **kwargs)

    @property
    def steps(self):
        """Serialise this step into the chain transformation format."""
        params = {
            "dataset_id": self.dataset_id,
            "query": self.query,
            "vector_field": self.vector_field,
            "model": self.model,
        }
        # Optional settings are only serialised when explicitly provided.
        for attr in ("content_field", "page_size"):
            value = getattr(self, attr)
            if value is not None:
                params[attr] = value
        return [
            {
                "transformation": "search",
                "name": self.step_name,
                "foreach": "",
                "output": {out: f"{{{{ {out} }}}}" for out in self._outputs},
                "params": params,
            }
        ]
import requests
from relevanceai import config
from relevanceai.auth import Auth
from relevanceai._request import handle_response
from relevanceai.params import Parameters, ParamBase
class StepBase:
    """Base class for chain steps: serialisation, remote runs and deploys.

    Subclasses must expose a ``steps`` attribute/property describing their
    transformations. Mutable default arguments (``parameters={}``,
    ``values={}``) were replaced with None sentinels.
    """

    def __init__(
        self, name="step", description="a step", parameters=None, id="new", auth=None
    ):
        self.name = name
        self.description = description
        # Normalise the parameter schema; Parameters/ParamBase objects both
        # serialise via to_json().
        if parameters is None:
            parameters = {}
        if isinstance(parameters, (Parameters, ParamBase)):
            self.parameters = parameters.to_json()
        else:
            self.parameters = parameters
        self.id = id
        self.auth: Auth = config.auth if auth is None else auth

    def _trigger_json(
        self, values: dict = None, return_state: bool = True, public: bool = False
    ):
        """Build the /trigger payload, overriding the studio definition inline."""
        values = {} if values is None else values
        return {
            "studio_id": self.id,
            "return_state": return_state,
            "params": values,
            "state_override": {
                "steps": {},
                "params": values,
            },
            "studio_override": {
                "studio_id": self.id,
                "public": public,
                "transformations": {"steps": self.steps},
                "params_schema": {"properties": self.parameters},
            },
        }

    def run(self, parameters=None, full_response: bool = False):
        """Execute this single step remotely.

        Returns the unwrapped "output" on success, or the full response
        payload when errors occurred or ``full_response`` is requested.
        """
        url = f"https://api-{self.auth.region}.stack.tryrelevance.com/latest/studios/{self.auth.project}"
        response = requests.post(
            f"{url}/trigger",
            json=self._trigger_json({} if parameters is None else parameters),
            headers=self.auth.headers,
        )
        res = handle_response(response)
        if isinstance(res, dict):
            if ("errors" in res and res["errors"]) or full_response:
                return res
            elif "output" in res:
                return res["output"]
        return res

    def _json(self):
        """Serialise the step as a standalone studio definition."""
        return {
            "title": self.name,
            "description": self.description,
            "version": "latest",
            "project": self.auth.project,
            "studio_id": self.id,
            "public": False,
            "params_schema": {"properties": self.parameters},
            "transformations": {"steps": self.steps},
        }

    def deploy(self):
        """Persist this step remotely as its own studio."""
        url = f"https://api-{self.auth.region}.stack.tryrelevance.com/latest/studios"
        response = requests.post(
            f"{url}/bulk_update",
            json={"updates": [self._json()]},
            headers=self.auth.headers,
        )
        return handle_response(response)
import numpy as np
import pandas as pd
from abc import abstractmethod
from typing import List, Tuple, Dict, Any, Optional
from pathlib import Path
from dataclasses import dataclass
from typing import List
from collections import Counter
from doc_utils import DocUtils
from vectorops.datasets import data_dict
BASE_DIR = Path.cwd().resolve().parent
DATA_PATH = BASE_DIR / 'data'
## TODO: Refactor dataset class
@dataclass
class DatasetBase:
    """Common fields for vector-dataset wrappers.

    NOTE(review): @abstractmethod has no effect without an ABCMeta
    metaclass, so instantiating this base class is not actually blocked.
    """

    vector_label: str  # metadata column holding the display label
    vector_name: str  # column holding the embedding vectors
    dataset_id: str  # identifier of the source dataset

    @abstractmethod
    def _load_data(self, docs: Dict[str, Any]):
        """Load data from docs"""
        raise NotImplementedError
@dataclass
class LocalDataset(DatasetBase, DocUtils):
    """Dataset wrapper that reads vectors/metadata from the local data_dict.

    Fixes over the original:
    - defines _get_vector_unique / get_search_word_vector, which _load_data
      calls but which were only implemented on DatasetManager (a sibling
      class, not a base) -- loading raised AttributeError;
    - only truncates the dataset when k is positive (``[:self.k]`` with the
      default k=-1 silently dropped the final row).
    """

    def __init__(self, dataset_info: dict, k: int = -1, norm: bool = True):
        """
        Initialise dataset manager from dataset info and data dict
        """
        self.dataset_info = dataset_info
        self.vector_label = dataset_info['vector_label']
        self.vector_name = dataset_info['vector_name']
        self.dataset_id = dataset_info['dataset_id']
        self.dataset_filename = dataset_info['dataset_filename']
        self.metadata_filename = dataset_info.get('metadata_filename')
        self.k = k
        self.norm = norm
        self._load_data()
        super().__init__(
            vector_label=self.vector_label,
            vector_name=self.vector_name,
            dataset_id=self.dataset_id
        )

    def _load_data(self):
        """Load vectors and metadata, then precompute unique-label lookups."""
        dataset_full = data_dict[self.dataset_filename]
        # k <= 0 means "load everything"; the old [:self.k] with k=-1
        # unintentionally dropped the last record.
        if self.k > 0:
            dataset_full = dataset_full[:self.k]
        if Path(DATA_PATH.joinpath(f'{self.metadata_filename}.csv')).exists():
            metadata = data_dict[self.metadata_filename]
        else:
            # No cached metadata: derive it from the non-vector columns and
            # persist it for next time.
            metadata_cols = [col for col in dataset_full.columns
                            if not any(s in col for s in ['_vector_', '_id', 'insert_date_'])]
            metadata = dataset_full[metadata_cols]
            self.metadata_filename = self.dataset_filename.replace('max', 'metadata')
            metadata.to_csv(
                DATA_PATH.joinpath(f"{self.dataset_id}", f"{self.metadata_filename}.csv"),
                encoding='utf-8', index=None
            )
        vectors = dataset_full[self.vector_name]
        vectors = np.array([x for x in vectors])
        self.vectors = vectors
        self.labels = metadata[[self.vector_label]]
        self.metadata = metadata
        if self.norm:
            # L2-normalise so cosine similarity reduces to a dot product.
            self.vectors = self.vectors / np.linalg.norm(self.vectors, axis=1).reshape(-1, 1)
        labels_unique = pd.Series(metadata[self.vector_label].unique()).sort_values()
        vectors_unique = self._get_vector_unique(labels_unique)
        metadata_unique = metadata.loc[labels_unique.index]
        vectors_lut = {v: vectors_unique[i] for i, v in enumerate(labels_unique)}
        self.labels_unique = labels_unique
        self.vectors_unique = vectors_unique
        self.metadata_unique = metadata_unique
        self.vectors_lut = vectors_lut

    # NOTE: the two helpers below are duplicated from DatasetManager; the
    # two classes should eventually share a single implementation.
    def _get_vector_unique(self,
        labels_unique: pd.Series
    ) -> np.ndarray:
        '''
        Averaging unique vectors
        '''
        vectors_unique = self.vectors[labels_unique.index]
        labels = [l for llist in self.labels.values.tolist() for l in llist]
        c = Counter(labels)
        non_unique_labels = [k for k, v in c.items() if v > 1]
        for i, v in enumerate(labels_unique):
            if v in non_unique_labels:
                # Labels occurring more than once get the mean of all their
                # item vectors instead of the first occurrence's vector.
                vectors_unique[i] = self.get_search_word_vector(v)
        return vectors_unique

    def get_search_word_vector(self,
        search_word: str,
        k: int = -1
    ) -> np.ndarray:
        '''
        Return average of search word vectors of a given search word
        '''
        vector_labels = self.labels[self.vector_label].apply(lambda x: x.lower())
        search_word_mask = vector_labels.str.contains(search_word.lower())
        search_word_vectors = self.vectors[search_word_mask][:k]
        return np.mean(search_word_vectors, axis=0)
@dataclass
class DatasetManager(DatasetBase, DocUtils):
    """Loads a vector dataset plus metadata and precomputes unique-label
    lookups (averaged vectors, metadata rows and a label->vector LUT).

    NOTE(review): nearly all of this duplicates LocalDataset above; the two
    should share one implementation.
    """

    def __init__(self,
        k: int = -1, norm: bool = True, dataset_info: Optional[dict] = None
    ):
        """
        Initialise dataset manager from dataset info and data dict
        """
        if dataset_info:
            self.dataset_info = dataset_info
            self.vector_label = dataset_info['vector_label']
            self.vector_name = dataset_info['vector_name']
            self.dataset_id = dataset_info['dataset_id']
            self.dataset_filename = dataset_info['dataset_filename']
            self.metadata_filename = dataset_info.get('metadata_filename')
            # self.docs = docs
            # NOTE(review): self.k is stored but never used by _load_data
            # here (LocalDataset slices with [:self.k]); confirm intent.
            self.k = k
            self.norm = norm
            self._load_data()
            super().__init__(
                vector_label=self.vector_label,
                vector_name=self.vector_name,
                dataset_id=self.dataset_id
            )

    def _load_data(self):
        """
        Load vector dataset
        """
        dataset_full = data_dict[self.dataset_filename]
        # NOTE(review): the existence check looks in DATA_PATH directly but
        # the write below targets DATA_PATH/<dataset_id>/ -- the cache is
        # therefore never found on subsequent runs; confirm intended paths.
        if Path(DATA_PATH.joinpath(f'{self.metadata_filename}.csv')).exists():
            metadata = data_dict[self.metadata_filename]
        else:
            # No cached metadata: derive it from the non-vector columns.
            metadata_cols = [col for col in dataset_full.columns
                            if not any(s in col for s in ['_vector_', '_id', 'insert_date_'])]
            metadata = dataset_full[metadata_cols]
            self.metadata_filename = self.dataset_filename.replace('max', 'metadata')
            metadata.to_csv(
                DATA_PATH.joinpath(f"{self.dataset_id}", f"{self.metadata_filename}.csv"),
                encoding='utf-8', index=None
            )
        vectors = dataset_full[self.vector_name]
        vectors = np.array([x for x in vectors])
        self.vectors = vectors
        self.labels = metadata[[self.vector_label]]
        self.metadata = metadata
        if self.norm:
            # L2-normalise so cosine similarity reduces to a dot product.
            self.vectors = self.vectors / np.linalg.norm(self.vectors, axis=1).reshape(-1, 1)
        labels_unique = pd.Series(metadata[self.vector_label].unique()).sort_values()
        vectors_unique = self._get_vector_unique(labels_unique)
        metadata_unique = metadata.loc[labels_unique.index]
        vectors_lut = {v: vectors_unique[i] for i, v in enumerate(labels_unique)}
        self.labels_unique = labels_unique
        self.vectors_unique = vectors_unique
        self.metadata_unique = metadata_unique
        self.vectors_lut = vectors_lut

    def __len__(self):
        # Number of rows (items) in the full dataset.
        return len(self.vectors)

    @property
    def len_unique(self):
        # Number of distinct labels.
        return len(self.vectors_unique)

    @property
    def shape(self):
        # (n_items, vector_dim)
        return self.vectors.shape

    @property
    def shape_unique(self):
        # (n_unique_labels, vector_dim)
        return self.vectors_unique.shape

    def _get_vector_unique(self,
        labels_unique: pd.Series
    ) -> np.ndarray:
        '''
        Averaging unique vectors
        '''
        vectors_unique = self.vectors[labels_unique.index]
        labels = [l for llist in self.labels.values.tolist() for l in llist]
        c = Counter(labels)
        non_unique_labels = [k for k, v in c.items() if v>1]
        for i, v in enumerate(labels_unique):
            if v in non_unique_labels:
                # Labels occurring more than once get the mean of all their
                # item vectors instead of the first occurrence's vector.
                vectors_unique[i] = self.get_search_word_vector(v)
        return vectors_unique

    def get_matching_items(self,
        search_word: str,
        metadata_cols: List = None,
        unique: bool = False
    ) -> pd.DataFrame:
        '''
        Get matching items of a given search word
        '''
        # Case-insensitive substring match on the label column, against
        # either the unique-label table or the full metadata table.
        if unique:
            vector_label_lut = self.metadata_unique[self.vector_label].apply(lambda x : x.lower())
            search_word_mask = vector_label_lut.str.contains(search_word.lower())
            metadata_cols = [self.vector_label] + metadata_cols if metadata_cols else self.metadata_unique.columns
            matching_items = self.metadata_unique[metadata_cols].loc[search_word_mask]
        else:
            vector_label_lut = self.metadata[self.vector_label].apply(lambda x : x.lower())
            search_word_mask = vector_label_lut.str.contains(search_word.lower())
            metadata_cols = [self.vector_label] + metadata_cols if metadata_cols else self.metadata.columns
            matching_items = self.metadata[metadata_cols].loc[search_word_mask]
        return matching_items

    def get_search_word_vector(self,
        search_word: str,
        k: int = -1
    ) -> np.ndarray:
        '''
        Return average of search word vectors of a given search word
        '''
        vector_labels = self.labels[self.vector_label].apply(lambda x : x.lower())
        search_word_mask = vector_labels.str.contains(search_word.lower())
        # NOTE(review): [:k] with the default k=-1 drops the last matching
        # vector rather than keeping all of them; confirm intended.
        search_word_vectors = self.vectors[search_word_mask][:k]
        return np.mean(search_word_vectors, axis=0)
if __name__ == '__main__':
    # Smoke test: load a sample dataset and print its vectors.
    dataset_info = {
        'vector_label': 'product_name',
        'vector_name': 'product_name_imagetext_vector_',
        'dataset_filename': 'ecommerce-6.uniq_id.7000.product_name_imagetext.7000.max',
        'metadata_filename': 'ecommerce-6.uniq_id.7000.product_name_imagetext.7000.metadata',
        'dataset_id': 'ecommerce-6'
    }
    # The old call DatasetManager(dataset_info, data_dict) bound the info
    # dict to k and data_dict to norm positionally; pass by keyword instead
    # (data_dict is read from the module global, not a constructor arg).
    dm = DatasetManager(dataset_info=dataset_info)
    print(dm.vectors)
    print(len(dm))
import numpy as np
import pandas as pd
import plotly.figure_factory as ff
import plotly.graph_objects as go
from dataclasses import dataclass
from typing import List, Optional
from vectorops.constants import *
from vectorops.utils.distance_utils import *
from vectorops.dataset_manager import DatasetManager
from vectorops.projection_manager import ProjectionManager
@dataclass
class DistPlot(ProjectionManager):
    """Distribution plot of encoded item vectors for a dataset."""

    def __init__(self,
        dm: DatasetManager,
        queries: List[str],
        items: List[str],
        encoder: Optional[Union[Base2Vec, SentenceTransformer2Vec]] = None,
    ):
        """Store the queries/items to visualise plus an optional encoder."""
        super().__init__(dm)
        self.queries = queries
        self.items = items
        self.encoder = encoder
        self.fig = go.Figure()

    def plot(
        self,
        encoder: Optional[Union[Base2Vec, SentenceTransformer2Vec]] = None,
        trim: bool = False
    ):
        """Render a distplot of the item encodings.

        When ``trim`` is True, values outside mean +/- 3 standard
        deviations are dropped per series before plotting.
        """
        if encoder:
            self.encoder = encoder
        hist_data = self._encode_items(items=self.items, encoder=self.encoder)
        if trim:
            trimmed = []
            for series in hist_data:
                centre = np.mean(series)
                cut_off = np.std(series) * 3
                lower, upper = centre - cut_off, centre + cut_off
                trimmed.append([v for v in series if lower < v < upper])
            hist_data = trimmed
        self.fig = ff.create_distplot(hist_data, self.items, bin_size=.2)
        self.fig.update_layout(
            title_text=f"{self.dataset_id} <br>{self.vector_label}: {self.vector_name} <br>Encoder: {self.encoder}",
            xaxis_title='Distribution',
            yaxis_title='Count',
        )
        self.fig.update_layout(legend_title_text=self.vector_label)
        return self.fig
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from dataclasses import dataclass
from typing import List, Optional
from vectorops.constants import *
from vectorops.utils.distance_utils import *
from vectorops.dataset_manager import DatasetManager
from vectorops.projection_manager import ProjectionManager
@dataclass
class RadarProjection(ProjectionManager):
    """Radar (polar) plot comparing query similarity across item vectors."""

    def __init__(self,
        dm: DatasetManager,
        queries: List[str],
        items: List[str],
        encoder: Optional[Union[Base2Vec, SentenceTransformer2Vec]] = None,
    ):
        """Store the queries/items to visualise plus an optional encoder."""
        super().__init__(dm)
        self.queries = queries
        self.items = items
        self.encoder = encoder
        self.fig = go.Figure()

    def plot(self,
        encoder: Optional[Union[Base2Vec, SentenceTransformer2Vec]] = None,
        trim: bool = True
    ):
        """Draw one polar trace per query.

        When ``trim`` is True, the radial axis is tightened to the observed
        similarity range.
        """
        if encoder:
            self.encoder = encoder
        theta_vectors = self._encode_items(items=self.items, encoder=self.encoder)
        plot_title = f"{self.dataset_id} <br>{self.vector_label}: {self.vector_name} <br>Encoder: {self.encoder}"
        self.fig.update_layout(title=plot_title)
        # Track the observed similarity range for optional axis trimming.
        r_min, r_max = 1, 0
        for query in self.queries:
            query_vector = self._clip_encode_text(query, norm=self.norm)
            radii = [cosine_sim(tv, query_vector) for tv in theta_vectors]
            r_min = min(r_min, min(radii))
            r_max = max(r_max, max(radii))
            self.fig.add_trace(go.Scatterpolar(
                r=radii,
                theta=self.items,
                fill='toself',
                name=query,
                connectgaps=True,
                )
            )
        if trim:
            self.fig.update_layout(
                polar=dict(
                    radialaxis=dict(
                        visible=True,
                        range=[r_min, r_max]
                    )),
            )
        return self.fig
import numpy as np
from typing import List, Dict, Tuple, Union
from .encode_utils import *
from constants import *
def vector_op(
    operation: "VectorOperation",
    vectors: List[np.ndarray],
    axis: int = 0
) -> np.ndarray:
    """Reduce *vectors* along *axis* with the named operation.

    Args:
        operation: One of 'avg', 'sum', 'min', 'max', 'add', 'subtract'.
            (String annotation so the module imports even while constants
            is unavailable; resolves to VectorOperation at runtime.)
        vectors: The vectors to reduce.
        axis: Axis passed through to the numpy reduction.

    Returns:
        The reduced vector.

    Raises:
        ValueError: If the operation name is not recognised. The original
        elif-chain only raised for non-VectorOperation values and silently
        returned None for a VectorOperation instance matching no branch.
    """
    reducers = {
        'avg': np.mean,
        'sum': np.sum,
        'min': np.min,
        'max': np.max,
        'add': np.add.reduce,
        'subtract': np.subtract.reduce,
    }
    reducer = reducers.get(operation)
    if reducer is None:
        raise ValueError(f'Not a valid vector operation {operation} {VectorOperation}')
    return reducer(vectors, axis=axis)
def axes_to_vector(
    axes_labels: List[str],
    vectors_unique_lut: dict,
) -> List[np.ndarray]:
    """Look up the vector for each user-defined axis label.

    Missing labels yield None (dict.get semantics preserved).
    """
    return [vectors_unique_lut.get(label) for label in axes_labels]
def formulae_to_vector(
    formulae: List[str],
    dataset_info: dict,
    encoder: Union[None, str] = None
) -> List[np.ndarray]:
    """Resolve each (op, term1, term2) formula into a single vector.

    Args:
        formulae: Sequence of (operator, term1, term2) triples.
        dataset_info: Dataset lookup info; must contain 'vectors_lut' and
            'dataset_id' for the default (lookup-table) encoder path.
        encoder: 'clip' encodes terms with CLIP, 'context_avg' averages the
            dataset vectors matching each term, None looks terms up in the
            precomputed vectors_lut.

    Returns:
        One combined vector per formula.

    Raises:
        ValueError: On an invalid operator, or (lookup path) when either
        term is missing from the dataset.
    """
    formulae_vectors = []
    for formula in formulae:
        op = formula[0]
        v1 = formula[1]
        v2 = formula[2]
        if not op or op not in VectorOperation:
            raise ValueError(f'Invalid operator {VectorOperation}')
        if encoder == 'clip':
            v1_encoded = clip_encode_text(v1)
            v2_encoded = clip_encode_text(v2)
            f_vector = vector_op(operation=op, vectors=[v1_encoded, v2_encoded])
        elif encoder == 'context_avg':
            v1_encoded = get_search_word_vector(v1, dataset_info=dataset_info)
            v2_encoded = get_search_word_vector(v2, dataset_info=dataset_info)
            f_vector = vector_op(operation=op, vectors=[v1_encoded, v2_encoded])
        else:
            vectors_lut = dataset_info['vectors_lut']
            if vectors_lut.get(v1) is None:
                raise ValueError(f"{v1} not in dataset {dataset_info['dataset_id']}")
            # BUG FIX: the original re-checked v1 here, so a missing v2
            # slipped through with a misleading error message.
            if vectors_lut.get(v2) is None:
                raise ValueError(f"{v2} not in dataset {dataset_info['dataset_id']}")
            f_vector = vector_op(operation=op,
                                vectors=[vectors_lut.get(v1),
                                        vectors_lut.get(v2)])
        formulae_vectors.append(f_vector)
    return formulae_vectors
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from typing import List, Dict, Tuple, Union
from nn_utils import get_nn_idx
from constants import *
## TODO: Refactor into class
def plot_projection_df(
    projection_matrix: np.ndarray,
    axes_matrix: np.ndarray,
    x_axis_value: VectorOperation,
    y_axis_value: VectorOperation,
    dataset_info: dict,
    plot_nn: Union[None, int] = None,
    plot_labels: bool = False
):
    """Build a 2-D scatter figure of the projection matrix.

    Args:
        projection_matrix: Points projected onto the two user-defined axes.
        axes_matrix: The two axis vectors (used in nearest-neighbour mode).
        x_axis_value / y_axis_value: Axis definitions; ``str()`` of each is
            used as the backing dataframe's column name.
        dataset_info: Dict with vectors, metadata, labels and ids.
        plot_nn: When set, plot only this many nearest neighbours per axis.
        plot_labels: When True, draw each point's label as text.

    Returns:
        (plotly Figure, dataframe backing the plot)
    """
    def build_projection_df():
        """
        Build project df from projection matrix
        """
        if not plot_nn:
            labels_df = pd.DataFrame(dataset_info['vectors_lut'].keys(),
                                    columns=[dataset_info['vector_label']])
            data_df = pd.DataFrame(projection_matrix,
                                columns=[str(x_axis_value), str(y_axis_value)])
            legend = None
        else:
            search_vector_nn_idx_1 = get_nn_idx(vectors=dataset_info['vectors'],
                                                selected_vec=axes_matrix[0])
            nn_1_labels = dataset_info['metadata'][[dataset_info['vector_label']]].loc[search_vector_nn_idx_1][:plot_nn]
            # BUG FIX: the x-axis neighbours were written to an ''-named
            # column while the legend reads 'neighbours', leaving those
            # rows uncoloured; use the same column for both axes.
            nn_1_labels['neighbours'] = 'x'
            search_vector_nn_idx_2 = get_nn_idx(vectors=dataset_info['vectors'],
                                                selected_vec=axes_matrix[1])
            nn_2_labels = dataset_info['metadata'][[dataset_info['vector_label']]].loc[search_vector_nn_idx_2][:plot_nn]
            nn_2_labels['neighbours'] = 'y'
            labels_df = pd.concat([nn_1_labels, nn_2_labels], axis=0).reset_index()
            data_df = pd.DataFrame(projection_matrix[labels_df.index],
                                columns=[str(x_axis_value), str(y_axis_value)])
            legend = 'neighbours'
        return pd.concat([labels_df, data_df], axis=1), legend

    def clean_vector_op_label(axis_value: VectorOperation) -> str:
        """
        Converting operation tuple to axis label for px scatter
        """
        # NOTE(review): currently unused; possibly meant for the labels dict.
        return ''.join(c for c in str(axis_value).replace(',', ',<br>') if c not in set('",()\]\''))

    df, legend = build_projection_df()
    text = df[dataset_info['vector_label']] if plot_labels else None
    labels = {}
    # BUG FIX: iterate the axis objects themselves; the original iterated
    # their str() forms and then accessed .op/.query on plain strings,
    # which raises AttributeError.
    for axis in (x_axis_value, y_axis_value):
        if axis.op is None:
            labels[str(axis)] = axis.query
        else:
            labels[str(axis)] = '<br>'.join(axis.to_list())
    fig = px.scatter(df, x=str(x_axis_value), y=str(y_axis_value), color=legend, text=text,
                    title=f"{dataset_info['dataset_id']}: {len(df)} points<br>{dataset_info['vector_label']}: {dataset_info['vector_name']}",
                    labels=labels
                    )
    # Attach each point's label to the hover tooltip. The original applied
    # an intermediate template plus the identical joined template twice;
    # one call suffices since each overwrites the last.
    fig.update_traces(customdata=df[dataset_info['vector_label']])
    fig.update_traces(
        hovertemplate="<br>".join([
            "X: %{x}",
            "Y: %{y}",
            "Label: %{customdata}",
        ])
    )
    fig.update_layout(
        title_font_size=10,
    )
    # Diagonal reference line spanning the plotted data range.
    projection_matrix = np.array(df[[str(x_axis_value), str(y_axis_value)]])
    fig.add_trace(
        go.Scatter(
            x=[projection_matrix.min(), projection_matrix.max()],
            y=[projection_matrix.min(), projection_matrix.max()],
            mode="lines",
            line=go.scatter.Line(color="gray"),
            showlegend=False)
    )
    return fig, df
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Language:
    """Thrift-generated enum of supported languages.

    NOTE(review): generated code -- regenerate from the .thrift IDL rather
    than editing by hand. OTHER (254) is the catch-all value.
    """

    DE = 0
    EN = 1
    ES = 2
    FR = 3
    IT = 4
    RU = 5
    OTHER = 254

    # Forward and reverse lookup tables between wire values and names.
    _VALUES_TO_NAMES = {
        0: "DE",
        1: "EN",
        2: "ES",
        3: "FR",
        4: "IT",
        5: "RU",
        254: "OTHER",
    }

    _NAMES_TO_VALUES = {
        "DE": 0,
        "EN": 1,
        "ES": 2,
        "FR": 3,
        "IT": 4,
        "RU": 5,
        "OTHER": 254,
    }
class WordVectorDTO:
  """
  Generated thrift struct: a sparse word vector (per-word scores) with its
  precomputed magnitude and source-document weight.

  Attributes:
   - magnitude
   - documentWeight
   - scores
  """
  # (field id, wire type, field name, nested type spec, default) per field id;
  # slot 0 is unused because thrift field ids start at 1.
  thrift_spec = (
    None, # 0
    (1, TType.DOUBLE, 'magnitude', None, None, ), # 1
    (2, TType.DOUBLE, 'documentWeight', None, None, ), # 2
    (3, TType.MAP, 'scores', (TType.STRING,None,TType.DOUBLE,None), None, ), # 3
  )
  def __init__(self, magnitude=None, documentWeight=None, scores=None,):
    self.magnitude = magnitude
    self.documentWeight = documentWeight
    self.scores = scores
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Pure-Python field-by-field decode; fields with an unexpected type or
    # unknown id are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.DOUBLE:
          self.magnitude = iprot.readDouble();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.DOUBLE:
          self.documentWeight = iprot.readDouble();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.MAP:
          self.scores = {}
          (_ktype1, _vtype2, _size0 ) = iprot.readMapBegin()
          for _i4 in xrange(_size0):
            _key5 = iprot.readString();
            _val6 = iprot.readDouble();
            self.scores[_key5] = _val6
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('WordVectorDTO')
    if self.magnitude is not None:
      oprot.writeFieldBegin('magnitude', TType.DOUBLE, 1)
      oprot.writeDouble(self.magnitude)
      oprot.writeFieldEnd()
    if self.documentWeight is not None:
      oprot.writeFieldBegin('documentWeight', TType.DOUBLE, 2)
      oprot.writeDouble(self.documentWeight)
      oprot.writeFieldEnd()
    if self.scores is not None:
      oprot.writeFieldBegin('scores', TType.MAP, 3)
      oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.scores))
      for kiter7,viter8 in self.scores.items():
        oprot.writeString(kiter7)
        oprot.writeDouble(viter8)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.magnitude is None:
      raise TProtocol.TProtocolException(message='Required field magnitude is unset!')
    if self.documentWeight is None:
      raise TProtocol.TProtocolException(message='Required field documentWeight is unset!')
    if self.scores is None:
      raise TProtocol.TProtocolException(message='Required field scores is unset!')
    return
  def __repr__(self):
    # NOTE: iteritems() is Python 2 only, consistent with xrange() above.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    # Structural equality: same class and identical attribute dict.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ScoredWordDTO:
  """
  Generated thrift struct: a single word buffer paired with its score.

  Attributes:
   - wordBuff
   - score
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'wordBuff', None, None, ), # 1
    (2, TType.DOUBLE, 'score', None, None, ), # 2
  )
  def __init__(self, wordBuff=None, score=None,):
    self.wordBuff = wordBuff
    self.score = score
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.wordBuff = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.DOUBLE:
          self.score = iprot.readDouble();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ScoredWordDTO')
    if self.wordBuff is not None:
      oprot.writeFieldBegin('wordBuff', TType.STRING, 1)
      oprot.writeString(self.wordBuff)
      oprot.writeFieldEnd()
    if self.score is not None:
      oprot.writeFieldBegin('score', TType.DOUBLE, 2)
      oprot.writeDouble(self.score)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.wordBuff is None:
      raise TProtocol.TProtocolException(message='Required field wordBuff is unset!')
    if self.score is None:
      raise TProtocol.TProtocolException(message='Required field score is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CentroidMetadataDTO:
  """
  Generated thrift struct: bookkeeping for a centroid — its id plus i64
  timestamp fields (units/epoch not specified here — confirm with the service).

  Attributes:
   - id
   - created
   - lastDocumentChange
   - lastCalculated
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.I64, 'created', None, None, ), # 2
    (3, TType.I64, 'lastDocumentChange', None, None, ), # 3
    (4, TType.I64, 'lastCalculated', None, None, ), # 4
  )
  def __init__(self, id=None, created=None, lastDocumentChange=None, lastCalculated=None,):
    self.id = id
    self.created = created
    self.lastDocumentChange = lastDocumentChange
    self.lastCalculated = lastCalculated
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I64:
          self.created = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.lastDocumentChange = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.lastCalculated = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CentroidMetadataDTO')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.created is not None:
      oprot.writeFieldBegin('created', TType.I64, 2)
      oprot.writeI64(self.created)
      oprot.writeFieldEnd()
    if self.lastDocumentChange is not None:
      oprot.writeFieldBegin('lastDocumentChange', TType.I64, 3)
      oprot.writeI64(self.lastDocumentChange)
      oprot.writeFieldEnd()
    if self.lastCalculated is not None:
      oprot.writeFieldBegin('lastCalculated', TType.I64, 4)
      oprot.writeI64(self.lastCalculated)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    if self.created is None:
      raise TProtocol.TProtocolException(message='Required field created is unset!')
    if self.lastDocumentChange is None:
      raise TProtocol.TProtocolException(message='Required field lastDocumentChange is unset!')
    if self.lastCalculated is None:
      raise TProtocol.TProtocolException(message='Required field lastCalculated is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CentroidDTO:
  """
  Generated thrift struct: a centroid — its id plus its word vector.

  Attributes:
   - id
   - wordVector
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRUCT, 'wordVector', (WordVectorDTO, WordVectorDTO.thrift_spec), None, ), # 2
  )
  def __init__(self, id=None, wordVector=None,):
    self.id = id
    self.wordVector = wordVector
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          # Nested struct: delegate decoding to the field's own read().
          self.wordVector = WordVectorDTO()
          self.wordVector.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CentroidDTO')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.wordVector is not None:
      oprot.writeFieldBegin('wordVector', TType.STRUCT, 2)
      self.wordVector.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    if self.wordVector is None:
      raise TProtocol.TProtocolException(message='Required field wordVector is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ProcessedDocumentMetadataDTO:
  """
  Generated thrift struct: bookkeeping for a processed document — id, a
  sha1 hash (presumably of the content — confirm with the producer), and i64
  created/updated timestamps.

  Attributes:
   - id
   - sha1Hash
   - created
   - updated
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'sha1Hash', None, None, ), # 2
    (3, TType.I64, 'created', None, None, ), # 3
    (4, TType.I64, 'updated', None, None, ), # 4
  )
  def __init__(self, id=None, sha1Hash=None, created=None, updated=None,):
    self.id = id
    self.sha1Hash = sha1Hash
    self.created = created
    self.updated = updated
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.sha1Hash = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I64:
          self.created = iprot.readI64();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I64:
          self.updated = iprot.readI64();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ProcessedDocumentMetadataDTO')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.sha1Hash is not None:
      oprot.writeFieldBegin('sha1Hash', TType.STRING, 2)
      oprot.writeString(self.sha1Hash)
      oprot.writeFieldEnd()
    if self.created is not None:
      oprot.writeFieldBegin('created', TType.I64, 3)
      oprot.writeI64(self.created)
      oprot.writeFieldEnd()
    if self.updated is not None:
      oprot.writeFieldBegin('updated', TType.I64, 4)
      oprot.writeI64(self.updated)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    if self.sha1Hash is None:
      raise TProtocol.TProtocolException(message='Required field sha1Hash is unset!')
    if self.created is None:
      raise TProtocol.TProtocolException(message='Required field created is unset!')
    if self.updated is None:
      raise TProtocol.TProtocolException(message='Required field updated is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ProcessedDocumentPersistenceDTO:
  """
  Generated thrift struct: persistence form of a processed document —
  metadata, its scored words as a list, and the vector magnitude.

  Attributes:
   - metadata
   - scoredWords
   - magnitude
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'metadata', (ProcessedDocumentMetadataDTO, ProcessedDocumentMetadataDTO.thrift_spec), None, ), # 1
    (2, TType.LIST, 'scoredWords', (TType.STRUCT,(ScoredWordDTO, ScoredWordDTO.thrift_spec)), None, ), # 2
    (3, TType.DOUBLE, 'magnitude', None, None, ), # 3
  )
  def __init__(self, metadata=None, scoredWords=None, magnitude=None,):
    self.metadata = metadata
    self.scoredWords = scoredWords
    self.magnitude = magnitude
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.metadata = ProcessedDocumentMetadataDTO()
          self.metadata.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Element-by-element decode of the nested struct list.
          self.scoredWords = []
          (_etype12, _size9) = iprot.readListBegin()
          for _i13 in xrange(_size9):
            _elem14 = ScoredWordDTO()
            _elem14.read(iprot)
            self.scoredWords.append(_elem14)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.DOUBLE:
          self.magnitude = iprot.readDouble();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ProcessedDocumentPersistenceDTO')
    if self.metadata is not None:
      oprot.writeFieldBegin('metadata', TType.STRUCT, 1)
      self.metadata.write(oprot)
      oprot.writeFieldEnd()
    if self.scoredWords is not None:
      oprot.writeFieldBegin('scoredWords', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.scoredWords))
      for iter15 in self.scoredWords:
        iter15.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.magnitude is not None:
      oprot.writeFieldBegin('magnitude', TType.DOUBLE, 3)
      oprot.writeDouble(self.magnitude)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.metadata is None:
      raise TProtocol.TProtocolException(message='Required field metadata is unset!')
    if self.scoredWords is None:
      raise TProtocol.TProtocolException(message='Required field scoredWords is unset!')
    if self.magnitude is None:
      raise TProtocol.TProtocolException(message='Required field magnitude is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ProcessedDocumentDTO:
  """
  Generated thrift struct: a processed document — its metadata plus its
  word vector.

  Attributes:
   - metadata
   - wordVector
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'metadata', (ProcessedDocumentMetadataDTO, ProcessedDocumentMetadataDTO.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'wordVector', (WordVectorDTO, WordVectorDTO.thrift_spec), None, ), # 2
  )
  def __init__(self, metadata=None, wordVector=None,):
    self.metadata = metadata
    self.wordVector = wordVector
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.metadata = ProcessedDocumentMetadataDTO()
          self.metadata.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.wordVector = WordVectorDTO()
          self.wordVector.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ProcessedDocumentDTO')
    if self.metadata is not None:
      oprot.writeFieldBegin('metadata', TType.STRUCT, 1)
      self.metadata.write(oprot)
      oprot.writeFieldEnd()
    if self.wordVector is not None:
      oprot.writeFieldBegin('wordVector', TType.STRUCT, 2)
      self.wordVector.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.metadata is None:
      raise TProtocol.TProtocolException(message='Required field metadata is unset!')
    if self.wordVector is None:
      raise TProtocol.TProtocolException(message='Required field wordVector is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class GetDocumentMetadataResponse:
  """
  Generated thrift struct: response wrapper carrying one
  ProcessedDocumentMetadataDTO.

  Attributes:
   - metadata
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'metadata', (ProcessedDocumentMetadataDTO, ProcessedDocumentMetadataDTO.thrift_spec), None, ), # 1
  )
  def __init__(self, metadata=None,):
    self.metadata = metadata
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.metadata = ProcessedDocumentMetadataDTO()
          self.metadata.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetDocumentMetadataResponse')
    if self.metadata is not None:
      oprot.writeFieldBegin('metadata', TType.STRUCT, 1)
      self.metadata.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.metadata is None:
      raise TProtocol.TProtocolException(message='Required field metadata is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class GetFullDocumentResponse:
  """
  Generated thrift struct: response wrapper carrying one ProcessedDocumentDTO.

  Attributes:
   - document
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'document', (ProcessedDocumentDTO, ProcessedDocumentDTO.thrift_spec), None, ), # 1
  )
  def __init__(self, document=None,):
    self.document = document
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.document = ProcessedDocumentDTO()
          self.document.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetFullDocumentResponse')
    if self.document is not None:
      oprot.writeFieldBegin('document', TType.STRUCT, 1)
      self.document.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.document is None:
      raise TProtocol.TProtocolException(message='Required field document is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class GetCentroidMetadataResponse:
  """
  Generated thrift struct: response wrapper carrying one CentroidMetadataDTO.

  Attributes:
   - metadata
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'metadata', (CentroidMetadataDTO, CentroidMetadataDTO.thrift_spec), None, ), # 1
  )
  def __init__(self, metadata=None,):
    self.metadata = metadata
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.metadata = CentroidMetadataDTO()
          self.metadata.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('GetCentroidMetadataResponse')
    if self.metadata is not None:
      oprot.writeFieldBegin('metadata', TType.STRUCT, 1)
      self.metadata.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.metadata is None:
      raise TProtocol.TProtocolException(message='Required field metadata is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class MultiSimilarityResponse:
  """
  Generated thrift struct: a string -> double map of similarity scores.

  Attributes:
   - scores
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'scores', (TType.STRING,None,TType.DOUBLE,None), None, ), # 1
  )
  def __init__(self, scores=None,):
    self.scores = scores
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.scores = {}
          (_ktype17, _vtype18, _size16 ) = iprot.readMapBegin()
          for _i20 in xrange(_size16):
            _key21 = iprot.readString();
            _val22 = iprot.readDouble();
            self.scores[_key21] = _val22
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('MultiSimilarityResponse')
    if self.scores is not None:
      oprot.writeFieldBegin('scores', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.DOUBLE, len(self.scores))
      for kiter23,viter24 in self.scores.items():
        oprot.writeString(kiter23)
        oprot.writeDouble(viter24)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.scores is None:
      raise TProtocol.TProtocolException(message='Required field scores is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ListCentroidDocumentsResponse:
  """
  Generated thrift struct: a list of document identifier strings for a
  centroid (exact semantics defined by the service).

  Attributes:
   - documents
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'documents', (TType.STRING,None), None, ), # 1
  )
  def __init__(self, documents=None,):
    self.documents = documents
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.documents = []
          (_etype28, _size25) = iprot.readListBegin()
          for _i29 in xrange(_size25):
            _elem30 = iprot.readString();
            self.documents.append(_elem30)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ListCentroidDocumentsResponse')
    if self.documents is not None:
      oprot.writeFieldBegin('documents', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.documents))
      for iter31 in self.documents:
        oprot.writeString(iter31)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.documents is None:
      raise TProtocol.TProtocolException(message='Required field documents is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ListDocumentsResponse:
  """
  Generated thrift struct: a list of document identifier strings.

  Attributes:
   - documents
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'documents', (TType.STRING,None), None, ), # 1
  )
  def __init__(self, documents=None,):
    self.documents = documents
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.documents = []
          (_etype35, _size32) = iprot.readListBegin()
          for _i36 in xrange(_size32):
            _elem37 = iprot.readString();
            self.documents.append(_elem37)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ListDocumentsResponse')
    if self.documents is not None:
      oprot.writeFieldBegin('documents', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.documents))
      for iter38 in self.documents:
        oprot.writeString(iter38)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.documents is None:
      raise TProtocol.TProtocolException(message='Required field documents is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class ListCentroidsResponse:
  """
  Generated thrift struct: a list of centroid identifier strings.

  Attributes:
   - centroids
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'centroids', (TType.STRING,None), None, ), # 1
  )
  def __init__(self, centroids=None,):
    self.centroids = centroids
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.centroids = []
          (_etype42, _size39) = iprot.readListBegin()
          for _i43 in xrange(_size39):
            _elem44 = iprot.readString();
            self.centroids.append(_elem44)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ListCentroidsResponse')
    if self.centroids is not None:
      oprot.writeFieldBegin('centroids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.centroids))
      for iter45 in self.centroids:
        oprot.writeString(iter45)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.centroids is None:
      raise TProtocol.TProtocolException(message='Required field centroids is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class CreateDocumentResponse:
  """
  Generated thrift struct: the id assigned to a newly created document.

  Attributes:
   - id
  """
  # (field id, wire type, field name, nested type spec, default) per field id.
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
  )
  def __init__(self, id=None,):
    self.id = id
  def read(self, iprot):
    """Deserialize this struct from *iprot* (accelerated C path when available)."""
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()
  def write(self, oprot):
    """Serialize to *oprot*, omitting fields that are None (C fast path when available)."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('CreateDocumentResponse')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
  def validate(self):
    """Raise TProtocolException if any required field is None."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return
  def __repr__(self):
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
  def __ne__(self, other):
    return not (self == other)
class DeleteDocumentRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - id
   - ignoreMissing
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, id=None, ignoreMissing=None,):
    self.id = id
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('DeleteDocumentRequest')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'id' is required; 'ignoreMissing' is optional.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class DeleteDocumentResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - id
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
  )

  def __init__(self, id=None,):
    self.id = id

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('DeleteDocumentResponse')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'id' is declared required in the IDL.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiDeleteDocumentsRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - ids
   - ignoreMissing
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, ids=None, ignoreMissing=None,):
    self.ids = ids
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.ids = []
          (_etype49, _size46) = iprot.readListBegin()
          for _i50 in xrange(_size46):
            _elem51 = iprot.readString();
            self.ids.append(_elem51)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('MultiDeleteDocumentsRequest')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter52 in self.ids:
        oprot.writeString(iter52)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'ids' is required; 'ignoreMissing' is optional.
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiDeleteDocumentsResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - ids
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
  )

  def __init__(self, ids=None,):
    self.ids = ids

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.ids = []
          (_etype56, _size53) = iprot.readListBegin()
          for _i57 in xrange(_size53):
            _elem58 = iprot.readString();
            self.ids.append(_elem58)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('MultiDeleteDocumentsResponse')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter59 in self.ids:
        oprot.writeString(iter59)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'ids' is declared required in the IDL.
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiDeleteCentroidsRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - ids
   - ignoreMissing
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, ids=None, ignoreMissing=None,):
    self.ids = ids
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.ids = []
          (_etype63, _size60) = iprot.readListBegin()
          for _i64 in xrange(_size60):
            _elem65 = iprot.readString();
            self.ids.append(_elem65)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('MultiDeleteCentroidsRequest')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter66 in self.ids:
        oprot.writeString(iter66)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'ids' is required; 'ignoreMissing' is optional.
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiDeleteCentroidsResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - ids
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
  )

  def __init__(self, ids=None,):
    self.ids = ids

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.ids = []
          (_etype70, _size67) = iprot.readListBegin()
          for _i71 in xrange(_size67):
            _elem72 = iprot.readString();
            self.ids.append(_elem72)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('MultiDeleteCentroidsResponse')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter73 in self.ids:
        oprot.writeString(iter73)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'ids' is declared required in the IDL.
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class CreateCentroidResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - created
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'created', None, None, ), # 1
  )

  def __init__(self, created=None,):
    self.created = created

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.created = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('CreateCentroidResponse')
    if self.created is not None:
      oprot.writeFieldBegin('created', TType.STRING, 1)
      oprot.writeString(self.created)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'created' is declared required in the IDL.
    if self.created is None:
      raise TProtocol.TProtocolException(message='Required field created is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class CreateCentroidRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - id
   - ignoreExisting
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.BOOL, 'ignoreExisting', None, None, ), # 2
  )

  def __init__(self, id=None, ignoreExisting=None,):
    self.id = id
    self.ignoreExisting = ignoreExisting

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreExisting = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('CreateCentroidRequest')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.ignoreExisting is not None:
      oprot.writeFieldBegin('ignoreExisting', TType.BOOL, 2)
      oprot.writeBool(self.ignoreExisting)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'id' is required; 'ignoreExisting' is optional.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiCreateCentroidsRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - ids
   - ignoreExisting
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
    (2, TType.BOOL, 'ignoreExisting', None, None, ), # 2
  )

  def __init__(self, ids=None, ignoreExisting=None,):
    self.ids = ids
    self.ignoreExisting = ignoreExisting

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.ids = []
          (_etype77, _size74) = iprot.readListBegin()
          for _i78 in xrange(_size74):
            _elem79 = iprot.readString();
            self.ids.append(_elem79)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreExisting = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('MultiCreateCentroidsRequest')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter80 in self.ids:
        oprot.writeString(iter80)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ignoreExisting is not None:
      oprot.writeFieldBegin('ignoreExisting', TType.BOOL, 2)
      oprot.writeBool(self.ignoreExisting)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'ids' is required; 'ignoreExisting' is optional.
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiCreateCentroidsResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - created
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'created', (TType.STRING,None), None, ), # 1
  )

  def __init__(self, created=None,):
    self.created = created

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.created = []
          (_etype84, _size81) = iprot.readListBegin()
          for _i85 in xrange(_size81):
            _elem86 = iprot.readString();
            self.created.append(_elem86)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('MultiCreateCentroidsResponse')
    if self.created is not None:
      oprot.writeFieldBegin('created', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.created))
      for iter87 in self.created:
        oprot.writeString(iter87)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'created' is declared required in the IDL.
    if self.created is None:
      raise TProtocol.TProtocolException(message='Required field created is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class DeleteCentroidRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - id
   - ignoreMissing
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, id=None, ignoreMissing=None,):
    self.id = id
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('DeleteCentroidRequest')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # Only 'id' is required; 'ignoreMissing' is optional.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class DeleteCentroidResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - id
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
  )

  def __init__(self, id=None,):
    self.id = id

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None).
    oprot.writeStructBegin('DeleteCentroidResponse')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'id' is declared required in the IDL.
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class AddDocumentsToCentroidRequest:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - centroidId
   - documentIds
   - ignoreMissingDocument
   - ignoreMissingCentroid
   - ignoreAlreadyInCentroid
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'centroidId', None, None, ), # 1
    (2, TType.LIST, 'documentIds', (TType.STRING,None), None, ), # 2
    (3, TType.BOOL, 'ignoreMissingDocument', None, None, ), # 3
    (4, TType.BOOL, 'ignoreMissingCentroid', None, None, ), # 4
    (5, TType.BOOL, 'ignoreAlreadyInCentroid', None, None, ), # 5
  )

  def __init__(self, centroidId=None, documentIds=None, ignoreMissingDocument=None, ignoreMissingCentroid=None, ignoreAlreadyInCentroid=None,):
    self.centroidId = centroidId
    self.documentIds = documentIds
    self.ignoreMissingDocument = ignoreMissingDocument
    self.ignoreMissingCentroid = ignoreMissingCentroid
    self.ignoreAlreadyInCentroid = ignoreAlreadyInCentroid

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.centroidId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.documentIds = []
          (_etype91, _size88) = iprot.readListBegin()
          for _i92 in xrange(_size88):
            _elem93 = iprot.readString();
            self.documentIds.append(_elem93)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.BOOL:
          self.ignoreMissingDocument = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.BOOL:
          self.ignoreMissingCentroid = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.BOOL:
          self.ignoreAlreadyInCentroid = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('AddDocumentsToCentroidRequest')
    if self.centroidId is not None:
      oprot.writeFieldBegin('centroidId', TType.STRING, 1)
      oprot.writeString(self.centroidId)
      oprot.writeFieldEnd()
    if self.documentIds is not None:
      oprot.writeFieldBegin('documentIds', TType.LIST, 2)
      oprot.writeListBegin(TType.STRING, len(self.documentIds))
      for iter94 in self.documentIds:
        oprot.writeString(iter94)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ignoreMissingDocument is not None:
      oprot.writeFieldBegin('ignoreMissingDocument', TType.BOOL, 3)
      oprot.writeBool(self.ignoreMissingDocument)
      oprot.writeFieldEnd()
    if self.ignoreMissingCentroid is not None:
      oprot.writeFieldBegin('ignoreMissingCentroid', TType.BOOL, 4)
      oprot.writeBool(self.ignoreMissingCentroid)
      oprot.writeFieldEnd()
    if self.ignoreAlreadyInCentroid is not None:
      oprot.writeFieldBegin('ignoreAlreadyInCentroid', TType.BOOL, 5)
      oprot.writeBool(self.ignoreAlreadyInCentroid)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # 'centroidId' and 'documentIds' are required; the flags are optional.
    if self.centroidId is None:
      raise TProtocol.TProtocolException(message='Required field centroidId is unset!')
    if self.documentIds is None:
      raise TProtocol.TProtocolException(message='Required field documentIds is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class AddDocumentsToCentroidResponse:
  """
  Generated Thrift struct (wire-serialization code; do not edit by hand).

  Attributes:
   - centroidId
   - documentIds
   - added
  """

  # Per-field wire metadata: (field-id, type, name, nested spec, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'centroidId', None, None, ), # 1
    (2, TType.LIST, 'documentIds', (TType.STRING,None), None, ), # 2
    (3, TType.LIST, 'added', (TType.BOOL,None), None, ), # 3
  )

  def __init__(self, centroidId=None, documentIds=None, added=None,):
    self.centroidId = centroidId
    self.documentIds = documentIds
    self.added = added

  def read(self, iprot):
    """Deserialize this struct from *iprot*."""
    # Fast path: accelerated C decoder; layout driven by thrift_spec.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: field-by-field decode; unknown/mistyped fields are skipped.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.centroidId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          # Rebuild the string list element by element.
          self.documentIds = []
          (_etype98, _size95) = iprot.readListBegin()
          for _i99 in xrange(_size95):
            _elem100 = iprot.readString();
            self.documentIds.append(_elem100)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          # Rebuild the bool list element by element.
          self.added = []
          (_etype104, _size101) = iprot.readListBegin()
          for _i105 in xrange(_size101):
            _elem106 = iprot.readBool();
            self.added.append(_elem106)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to *oprot*."""
    # Fast path: accelerated C encoder when available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    # Slow path: emit each field that is set (not None), in field-id order.
    oprot.writeStructBegin('AddDocumentsToCentroidResponse')
    if self.centroidId is not None:
      oprot.writeFieldBegin('centroidId', TType.STRING, 1)
      oprot.writeString(self.centroidId)
      oprot.writeFieldEnd()
    if self.documentIds is not None:
      oprot.writeFieldBegin('documentIds', TType.LIST, 2)
      oprot.writeListBegin(TType.STRING, len(self.documentIds))
      for iter107 in self.documentIds:
        oprot.writeString(iter107)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.added is not None:
      oprot.writeFieldBegin('added', TType.LIST, 3)
      oprot.writeListBegin(TType.BOOL, len(self.added))
      for iter108 in self.added:
        oprot.writeBool(iter108)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    # All three fields are declared required in the IDL.
    if self.centroidId is None:
      raise TProtocol.TProtocolException(message='Required field centroidId is unset!')
    if self.documentIds is None:
      raise TProtocol.TProtocolException(message='Required field documentIds is unset!')
    if self.added is None:
      raise TProtocol.TProtocolException(message='Required field added is unset!')
    return

  def __repr__(self):
    # Debug repr listing every attribute (Python 2: iteritems).
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class RemoveDocumentsFromCentroidRequest:
"""
Attributes:
- centroidId
- documentIds
- ignoreMissingDocument
- ignoreMissingCentroid
- ignoreNotInCentroid
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'centroidId', None, None, ), # 1
(2, TType.LIST, 'documentIds', (TType.STRING,None), None, ), # 2
(3, TType.BOOL, 'ignoreMissingDocument', None, None, ), # 3
(4, TType.BOOL, 'ignoreMissingCentroid', None, None, ), # 4
(5, TType.BOOL, 'ignoreNotInCentroid', None, None, ), # 5
)
  def __init__(self, centroidId=None, documentIds=None, ignoreMissingDocument=None, ignoreMissingCentroid=None, ignoreNotInCentroid=None,):
    # All fields default to None; validate() enforces which are required.
    self.centroidId = centroidId
    self.documentIds = documentIds
    self.ignoreMissingDocument = ignoreMissingDocument
    self.ignoreMissingCentroid = ignoreMissingCentroid
    self.ignoreNotInCentroid = ignoreNotInCentroid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.centroidId = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.LIST:
self.documentIds = []
(_etype112, _size109) = iprot.readListBegin()
for _i113 in xrange(_size109):
_elem114 = iprot.readString();
self.documentIds.append(_elem114)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.ignoreMissingDocument = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.ignoreMissingCentroid = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.BOOL:
self.ignoreNotInCentroid = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('RemoveDocumentsFromCentroidRequest')
if self.centroidId is not None:
oprot.writeFieldBegin('centroidId', TType.STRING, 1)
oprot.writeString(self.centroidId)
oprot.writeFieldEnd()
if self.documentIds is not None:
oprot.writeFieldBegin('documentIds', TType.LIST, 2)
oprot.writeListBegin(TType.STRING, len(self.documentIds))
for iter115 in self.documentIds:
oprot.writeString(iter115)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.ignoreMissingDocument is not None:
oprot.writeFieldBegin('ignoreMissingDocument', TType.BOOL, 3)
oprot.writeBool(self.ignoreMissingDocument)
oprot.writeFieldEnd()
if self.ignoreMissingCentroid is not None:
oprot.writeFieldBegin('ignoreMissingCentroid', TType.BOOL, 4)
oprot.writeBool(self.ignoreMissingCentroid)
oprot.writeFieldEnd()
if self.ignoreNotInCentroid is not None:
oprot.writeFieldBegin('ignoreNotInCentroid', TType.BOOL, 5)
oprot.writeBool(self.ignoreNotInCentroid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.centroidId is None:
raise TProtocol.TProtocolException(message='Required field centroidId is unset!')
if self.documentIds is None:
raise TProtocol.TProtocolException(message='Required field documentIds is unset!')
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class RemoveDocumentsFromCentroidResponse:
  """
  Generated Thrift struct (Python 2 target); do not edit by hand.

  Attributes:
   - centroidId
   - documentIds
   - removed
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'centroidId', None, None, ), # 1
    (2, TType.LIST, 'documentIds', (TType.STRING,None), None, ), # 2
    (3, TType.LIST, 'removed', (TType.BOOL,None), None, ), # 3
  )

  def __init__(self, centroidId=None, documentIds=None, removed=None,):
    self.centroidId = centroidId
    self.documentIds = documentIds
    self.removed = removed

  def read(self, iprot):
    """Deserialize this struct from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.centroidId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.documentIds = []
          (_etype119, _size116) = iprot.readListBegin()
          for _i120 in xrange(_size116):
            _elem121 = iprot.readString();
            self.documentIds.append(_elem121)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          self.removed = []
          (_etype125, _size122) = iprot.readListBegin()
          for _i126 in xrange(_size122):
            _elem127 = iprot.readBool();
            self.removed.append(_elem127)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('RemoveDocumentsFromCentroidResponse')
    if self.centroidId is not None:
      oprot.writeFieldBegin('centroidId', TType.STRING, 1)
      oprot.writeString(self.centroidId)
      oprot.writeFieldEnd()
    if self.documentIds is not None:
      oprot.writeFieldBegin('documentIds', TType.LIST, 2)
      oprot.writeListBegin(TType.STRING, len(self.documentIds))
      for iter128 in self.documentIds:
        oprot.writeString(iter128)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.removed is not None:
      oprot.writeFieldBegin('removed', TType.LIST, 3)
      oprot.writeListBegin(TType.BOOL, len(self.removed))
      for iter129 in self.removed:
        oprot.writeBool(iter129)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    if self.centroidId is None:
      raise TProtocol.TProtocolException(message='Required field centroidId is unset!')
    if self.documentIds is None:
      raise TProtocol.TProtocolException(message='Required field documentIds is unset!')
    if self.removed is None:
      raise TProtocol.TProtocolException(message='Required field removed is unset!')
    return

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiJoinCentroidsRequest:
  """
  Generated Thrift struct (Python 2 target); do not edit by hand.

  Attributes:
   - ids
   - ignoreMissing
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, ids=None, ignoreMissing=None,):
    self.ids = ids
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.ids = []
          (_etype133, _size130) = iprot.readListBegin()
          for _i134 in xrange(_size130):
            _elem135 = iprot.readString();
            self.ids.append(_elem135)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('MultiJoinCentroidsRequest')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter136 in self.ids:
        oprot.writeString(iter136)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    return

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class MultiJoinCentroidsResponse:
  """
  Generated Thrift struct (Python 2 target); do not edit by hand.

  Attributes:
   - ids
   - recalculated
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.LIST, 'ids', (TType.STRING,None), None, ), # 1
    (2, TType.LIST, 'recalculated', (TType.BOOL,None), None, ), # 2
  )

  def __init__(self, ids=None, recalculated=None,):
    self.ids = ids
    self.recalculated = recalculated

  def read(self, iprot):
    """Deserialize this struct from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.LIST:
          self.ids = []
          (_etype140, _size137) = iprot.readListBegin()
          for _i141 in xrange(_size137):
            _elem142 = iprot.readString();
            self.ids.append(_elem142)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.recalculated = []
          (_etype146, _size143) = iprot.readListBegin()
          for _i147 in xrange(_size143):
            _elem148 = iprot.readBool();
            self.recalculated.append(_elem148)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('MultiJoinCentroidsResponse')
    if self.ids is not None:
      oprot.writeFieldBegin('ids', TType.LIST, 1)
      oprot.writeListBegin(TType.STRING, len(self.ids))
      for iter149 in self.ids:
        oprot.writeString(iter149)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.recalculated is not None:
      oprot.writeFieldBegin('recalculated', TType.LIST, 2)
      oprot.writeListBegin(TType.BOOL, len(self.recalculated))
      for iter150 in self.recalculated:
        oprot.writeBool(iter150)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    if self.ids is None:
      raise TProtocol.TProtocolException(message='Required field ids is unset!')
    if self.recalculated is None:
      raise TProtocol.TProtocolException(message='Required field recalculated is unset!')
    return

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JoinCentroidRequest:
  """
  Generated Thrift struct (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - ignoreMissing
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.BOOL, 'ignoreMissing', None, None, ), # 2
  )

  def __init__(self, id=None, ignoreMissing=None,):
    self.id = id
    self.ignoreMissing = ignoreMissing

  def read(self, iprot):
    """Deserialize this struct from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.ignoreMissing = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JoinCentroidRequest')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.ignoreMissing is not None:
      oprot.writeFieldBegin('ignoreMissing', TType.BOOL, 2)
      oprot.writeBool(self.ignoreMissing)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    return

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class JoinCentroidResponse:
  """
  Generated Thrift struct (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - recalculated
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.BOOL, 'recalculated', None, None, ), # 2
  )

  def __init__(self, id=None, recalculated=None,):
    self.id = id
    self.recalculated = recalculated

  def read(self, iprot):
    """Deserialize this struct from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.recalculated = iprot.readBool();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this struct to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('JoinCentroidResponse')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.recalculated is not None:
      oprot.writeFieldBegin('recalculated', TType.BOOL, 2)
      oprot.writeBool(self.recalculated)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """Raise TProtocolException if a required field is unset."""
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    if self.recalculated is None:
      raise TProtocol.TProtocolException(message='Required field recalculated is unset!')
    return

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ECentroidDoesNotExist(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'message', None, None, ), # 2
  )

  def __init__(self, id=None, message=None,):
    self.id = id
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ECentroidDoesNotExist')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 2)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ECentroidAlreadyExists(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'message', None, None, ), # 2
  )

  def __init__(self, id=None, message=None,):
    self.id = id
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ECentroidAlreadyExists')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 2)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class EDocumentDoesNotExist(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'message', None, None, ), # 2
  )

  def __init__(self, id=None, message=None,):
    self.id = id
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('EDocumentDoesNotExist')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 2)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class EDocumentAlreadyExists(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - id
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'message', None, None, ), # 2
  )

  def __init__(self, id=None, message=None,):
    self.id = id
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('EDocumentAlreadyExists')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 2)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class EDocumentNotInCentroid(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - documentId
   - centroidId
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'documentId', None, None, ), # 1
    (2, TType.STRING, 'centroidId', None, None, ), # 2
    (3, TType.STRING, 'message', None, None, ), # 3
  )

  def __init__(self, documentId=None, centroidId=None, message=None,):
    self.documentId = documentId
    self.centroidId = centroidId
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.documentId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.centroidId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('EDocumentNotInCentroid')
    if self.documentId is not None:
      oprot.writeFieldBegin('documentId', TType.STRING, 1)
      oprot.writeString(self.documentId)
      oprot.writeFieldEnd()
    if self.centroidId is not None:
      oprot.writeFieldBegin('centroidId', TType.STRING, 2)
      oprot.writeString(self.centroidId)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 3)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class EDocumentAlreadyInCentroid(TException):
  """
  Generated Thrift exception (Python 2 target); do not edit by hand.

  Attributes:
   - documentId
   - centroidId
   - message
  """

  # Per-slot field metadata: (field id, wire type, name, type args, default).
  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'documentId', None, None, ), # 1
    (2, TType.STRING, 'centroidId', None, None, ), # 2
    (3, TType.STRING, 'message', None, None, ), # 3
  )

  def __init__(self, documentId=None, centroidId=None, message=None,):
    self.documentId = documentId
    self.centroidId = centroidId
    self.message = message

  def read(self, iprot):
    """Deserialize this exception from ``iprot``."""
    # Fast path: let the C fastbinary extension decode the whole struct.
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    # Slow path: walk the fields until STOP, skipping unknown/mistyped ones.
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.documentId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.centroidId = iprot.readString();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.message = iprot.readString();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    """Serialize this exception to ``oprot``; fields left as None are omitted."""
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('EDocumentAlreadyInCentroid')
    if self.documentId is not None:
      oprot.writeFieldBegin('documentId', TType.STRING, 1)
      oprot.writeString(self.documentId)
      oprot.writeFieldEnd()
    if self.centroidId is not None:
      oprot.writeFieldBegin('centroidId', TType.STRING, 2)
      oprot.writeString(self.centroidId)
      oprot.writeFieldEnd()
    if self.message is not None:
      oprot.writeFieldBegin('message', TType.STRING, 3)
      oprot.writeString(self.message)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    """No required fields: always succeeds."""
    return

  def __str__(self):
    return repr(self)

  def __repr__(self):
    # NOTE: __dict__.iteritems() is Python-2-only, like the rest of this module.
    L = ['%s=%r' % (key, value)
      for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
from typing import NamedTuple, List, Tuple
from Redy.Magic.Classic import record
from numpy import number
from rbnf.easy import Tokenizer
# Rebind the imported NamedTuple name to plain `object` at runtime: the AST
# classes below inherit from it only for annotation style, and a real
# typing.NamedTuple base would clash with multiple inheritance from TAST.
# NOTE(review): assumes `@record` supplies the field/constructor machinery
# these classes rely on — confirm against Redy.Magic.Classic.record.
globals()['NamedTuple'] = object
class Loc:
    """A source location (line number, column number, file name)."""

    __slots__ = ['lineno', 'colno', 'filename']
    lineno: int
    colno: int
    filename: str

    def __init__(self, lineno, colno, filename):
        self.lineno = lineno
        self.colno = colno
        self.filename = filename

    def __matmul__(self, other):
        """Build a new Loc from a Tokenizer or anything carrying a ``loc`` triple."""
        if not isinstance(other, Tokenizer):
            return Loc(*other.loc)
        source_file = getattr(other, 'filename') or '<unknown>'
        return Loc(other.lineno, other.colno, source_file)

    def __iter__(self):
        # Unpacks as (lineno, colno, filename).
        for part in (self.lineno, self.colno, self.filename):
            yield part

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'Loc(lineno={!r}, colno={!r}, filename={!r})'.format(
            self.lineno, self.colno, self.filename)

    def update(self, lineno=None, colno=None, filename=None):
        """Overwrite any of the three components; falsy arguments are ignored."""
        for slot, value in (('lineno', lineno), ('colno', colno),
                            ('filename', filename)):
            if value:
                setattr(self, slot, value)
class TAST:
    """Base class for typed AST nodes; every node carries a source location."""

    loc: Loc

    @property
    def iter_fields(self):
        """Yield (name, value) for each public annotated field of this node."""
        hidden = ('iter_fields', 'lineno')
        for field in self.__annotations__:
            if field.startswith('_') or field in hidden:
                continue
            yield field, getattr(self, field)

    @property
    def lineno(self):
        """Shortcut for ``self.loc.lineno``."""
        return self.loc.lineno

    # Class-level fallback location for nodes never assigned one.
    loc = Loc(1, 1, "")
@record
class DefTy(TAST, NamedTuple):
    """AST node: a type definition binding ``name`` to ``structure``."""
    loc: Loc
    name: str
    structure: TAST
@record
class DefFun(TAST, NamedTuple):
    """AST node: a named function definition with arguments, body and docstring."""
    loc: Loc
    name: str
    args: 'List[Arg]'
    body: TAST
    doc: 'Doc'
@record
class Lam(TAST, NamedTuple):
    """AST node: a lambda / anonymous function."""
    loc: Loc
    name: str
    args: 'List[Arg]'
    body: TAST
@record
class Arg(TAST, NamedTuple):
    """AST node: a single function parameter with its type annotation."""
    loc: Loc
    name: str
    ty: TAST
@record
class Suite(TAST, NamedTuple):
    """AST node: an ordered sequence of statements."""
    loc: Loc
    statements: List[TAST]
@record
class Definition(TAST, NamedTuple):
    """AST node: a block of definition statements."""
    loc: Loc
    statements: List[TAST]
@record
class Where(TAST, NamedTuple):
    """AST node: an expression suite with trailing ``where`` definitions."""
    loc: Loc
    out: Suite
    pre_def: Definition
@record
class DefVar(TAST, NamedTuple):
    """AST node: a variable binding ``name = value``."""
    loc: Loc
    name: str
    value: TAST
@record
class If(TAST, NamedTuple):
    """AST node: a conditional expression with both branches."""
    loc: Loc
    cond: TAST
    iftrue: TAST
    iffalse: TAST
@record
class Call(TAST, NamedTuple):
    """AST node: single-argument application ``callee arg`` (curried calls nest)."""
    loc: Loc
    callee: TAST
    arg: TAST
@record
class Symbol(TAST, NamedTuple):
    """AST node: a name reference."""
    loc: Loc
    name: str
@record
class Number(TAST, NamedTuple):
    """AST node: a numeric literal (numpy ``number`` value)."""
    loc: Loc
    value: number
@record
class Str(TAST, NamedTuple):
    """AST node: a string literal."""
    loc: Loc
    value: str
@record
class HList(TAST, NamedTuple):
    """AST node: a list literal."""
    loc: Loc
    seq: List[TAST]
@record
class HDict(TAST, NamedTuple):
    """AST node: a dict literal, stored as (key, value) pairs."""
    loc: Loc
    seq: List[Tuple[TAST, TAST]]
def make_set(seq: List[TAST]):
    """Pair every element with a Void node — presumably the dict-shaped
    representation of a set literal (HDict.seq); confirm at the call site."""
    return tuple((each, Void(each.loc)) for each in seq)
@record
class Tuple(TAST, NamedTuple):
    """AST node: a tuple literal (shadows ``typing.Tuple`` in this module)."""
    loc: Loc
    seq: Tuple[TAST, ...]
@record
class Return(TAST, NamedTuple):
    """AST node: a ``return`` statement."""
    loc: Loc
    expr: TAST
@record
class Yield(TAST, NamedTuple):
    """AST node: a ``yield`` expression."""
    loc: Loc
    expr: TAST
@record
class BinSeq(TAST, NamedTuple):
    """AST node: a flat operand/operator sequence awaiting precedence folding."""
    loc: Loc
    seq: List[TAST]
@record
class Infix(TAST, NamedTuple):
    """AST node: an infix operator declaration with its precedence."""
    loc: Loc
    precedence: int
    op: str
@record
class Operator(TAST, NamedTuple):
    """AST node: an operator occurrence inside a BinSeq."""
    loc: Loc
    name: str
@record
class Void(TAST, NamedTuple):
    """AST node: the unit/absent value."""
    loc: Loc
@record
class Alias(TAST, NamedTuple):
    """AST node: an import alias (``imp_name`` imported as ``name``)."""
    loc: Loc
    imp_name: str
    name: str
@record
class Doc(TAST, NamedTuple):
    """AST node: documentation text attached to a definition."""
    loc: Loc
    text: str
@record
class Import(TAST, NamedTuple):
    """AST node: an import statement with its aliased members."""
    loc: Loc
    imp_name: str
    name: str
    stuffs: List[Alias]
@record
class Module(NamedTuple):
    """Root node: a whole module — statements, docstring, exported operators."""
    stmts: Definition
    doc: Doc
    exports: List[Operator]
def transform(f):
    """Lift a single-node rewrite ``f`` into a whole-tree transformer.

    The returned function applies ``f`` to a node first, then recurses into
    the result's fields and rebuilds the node from its transformed children.
    """
    def ff(it):
        return generic_visit(f(it))
    def generic_visit(ast: TAST):
        def stream():
            for key, value in ast.iter_fields:
                if type(value) is tuple or isinstance(value, list):
                    # NOTE(review): tuple-valued fields are rebuilt as lists
                    # here — confirm consumers don't depend on tuple-ness
                    # (e.g. Tuple.seq, HDict.seq).
                    yield key, list(ff(e) for e in value)
                else:
                    yield key, ff(value)
        if hasattr(ast, 'iter_fields'):
            # Rebuild the node with the same field names, transformed values.
            return type(ast)(**dict(stream()))
        # Non-AST leaves (str, numbers, ...) pass through unchanged.
        return ast
    return ff
from typing import Generic, Iterable, TypeVar, Optional, Iterator
from functools import reduce
from .expr_based_ast import Operator, Call, Symbol
# Element type carried by a TwoSideLink node.
T = TypeVar('T')
class TwoSideLink(Iterable, Generic[T]):
    """A doubly linked list node; iterating a node walks rightwards from it."""

    def __init__(self,
                 content: T,
                 prev: 'Optional[TwoSideLink[T]]' = None,
                 next: 'Optional[TwoSideLink]' = None):
        self.content: T = content
        self.next = next
        self.prev = prev

    def __iter__(self) -> 'Iterator[TwoSideLink[T]]':
        node = self
        while node is not None:
            yield node
            node = node.next

    def __str__(self):
        return 'L<{}>'.format(self.content)

    __repr__ = __str__

    @classmethod
    def from_iter(cls, iterable: 'Iterable') -> 'Optional[TwoSideLink]':
        """Chain the items of ``iterable`` into linked nodes; None when empty."""
        if not iterable:
            return None
        source = iter(iterable)
        try:
            head = cls(next(source))
        except StopIteration:
            return None
        tail = head
        for item in source:
            node = cls(item)
            tail.next = node
            node.prev = tail
            tail = node
        return head
def bin_reduce(op_priorities):
    """Build a reducer that folds a flat [operand, Operator, operand, ...]
    sequence into nested curried ``Call`` expressions.

    ``op_priorities`` maps operator name -> precedence; higher values bind
    first.  Returns the final (outermost) expression, or None when the
    sequence contains no operators.
    """
    def bin_reduce(seq: Iterable):
        # Work on a doubly linked chain so operand triples can be spliced out.
        seq = TwoSideLink.from_iter(seq)
        def sort_by_func(e: 'TwoSideLink'):
            return op_priorities[e.content.name]
        # Visit operator nodes tightest-binding first.
        op_nodes = (each for each in seq if isinstance(each.content, Operator))
        op_nodes = sorted(op_nodes, key=sort_by_func, reverse=True)
        bin_expr = None
        for each in op_nodes:
            # Curried application: ((op prev) next).
            sym = Symbol(loc=each.content.loc, name=each.content.name)
            bin_expr = Call(sym.loc, Call(sym.loc, sym, each.prev.content),
                            each.next.content)
            # Replace the operand-op-operand triple by this single node:
            # overwrite the operator node and splice out both neighbours.
            each.content = bin_expr
            try:
                each.prev.prev.next = each
                each.prev = each.prev.prev
            except AttributeError:
                # Left neighbour was the head of the chain.
                pass
            try:
                each.next.next.prev = each
                each.next = each.next.next
            except AttributeError:
                # Right neighbour was the tail of the chain.
                pass
        return bin_expr
    return bin_reduce
[](https://zenodo.org/badge/latestdoi/445846537)
# reliabiliPy
## Summary
* Simple implementation in Python of the [reliability](https://en.wikipedia.org/wiki/Reliability_(statistics)) measures for surveys: Omega Total,
Omega Hierarchical and Omega Hierarchical Asymptotic, using the Schmid-Leiman solution.
* Also Cronbach's Alpha and Guttman’s lower bounds of reliability $\lambda_1$ and $\lambda_2$.
* Explanations and documentation available
See [Documentation](https://rafaelvalero.github.io/reliabiliPy/)
## Quick Start
If you have the correlations matrix of your dataset.
To install:
```bash
pip install reliabiliPy
```
To start using it:
```python
import pandas as pd
import numpy as np
from reliabilipy import reliability_analysis
correlations_matrix = pd.DataFrame(np.matrix([[1., 0.483, 0.34, 0.18, 0.277, 0.257, -0.074, 0.212, 0.226],
[0.483, 1., 0.624, 0.26, 0.433, 0.301, -0.028, 0.362, 0.236],
[0.34, 0.624, 1., 0.24, 0.376, 0.244, 0.233, 0.577, 0.352],
[0.18, 0.26, 0.24, 1., 0.534, 0.654, 0.165, 0.411, 0.306],
[0.277, 0.433, 0.376, 0.534, 1., 0.609, 0.041, 0.3, 0.239],
[0.257, 0.301, 0.244, 0.654, 0.609, 1., 0.133, 0.399, 0.32],
[-0.074, -0.028, 0.233, 0.165, 0.041, 0.133, 1., 0.346, 0.206],
[0.212, 0.362, 0.577, 0.411, 0.3, 0.399, 0.346, 1., 0.457],
[0.226, 0.236, 0.352, 0.306, 0.239, 0.32, 0.206, 0.457, 1.]]))
reliability_report = reliability_analysis(correlations_matrix=correlations_matrix)
reliability_report.fit()
print('here omega Hierarchical: ', reliability_report.omega_hierarchical)
print('here Omega Hierarchical infinite or asymptotic: ', reliability_report.omega_hierarchical_asymptotic)
print('here Omega Total', reliability_report.omega_total)
print('here Alpha Cronbach total', reliability_report.alpha_cronbach)
print(reliability_report.lambda1)
print(reliability_report.lambda2)
print(reliability_report.report_eigenvalues)
print(reliability_report.report_loadings)
```
If you want to use the whole dataset you could do it too, adding the imputation method
you prefer:
```python
import pandas as pd
import numpy as np
from reliabilipy import reliability_analysis
raw_dataset = pd.DataFrame([{'C1': 2.0, 'C2': 3.0, 'C3': 3.0, 'C4': 4.0, 'C5': 4.0},\
{'C1': 5.0, 'C2': 4.0, 'C3': 4.0, 'C4': 3.0, 'C5': 4.0},\
{'C1': 4.0, 'C2': 5.0, 'C3': 4.0, 'C4': 2.0, 'C5': 5.0},\
{'C1': 4.0, 'C2': 4.0, 'C3': 3.0, 'C4': 5.0, 'C5': 5.0},\
{'C1': 4.0, 'C2': 4.0, 'C3': 5.0, 'C4': 3.0, 'C5': 2.0},\
{'C1': 4.0, 'C2': np.nan, 'C3': 3.0, 'C4': 5.0, 'C5': 5.0},\
{'C1': np.nan, 'C2': 4.0, 'C3': 5.0, 'C4': 3.0, 'C5': 2.0}])
ra = reliability_analysis(raw_dataset=raw_dataset,
is_corr_matrix=False,
impute='median')
ra.fit()
print('here omega Hierarchical: ', ra.omega_hierarchical)
print('here Omega Hierarchical infinite or asymptotic: ', ra.omega_hierarchical_asymptotic)
print('here Omega Total', ra.omega_total)
print('here Alpha Cronbach total', ra.alpha_cronbach)
```
# Context
It is common to try to check the [reliability](https://en.wikipedia.org/wiki/Reliability_(statistics)), i.e.: the consistency of
a measure, particularly in psychometrics and survey analysis.
`R` has packages for this kind of analysis available, such as `psych` by Revelle (2017). `python` lags behind on this.
The closest are [factor-analyser](https://github.com/EducationalTestingService/factor_analyzer) and [Pingouin](https://pingouin-stats.org/index.html).
As I write this there is a gap in the market since none of the above libraries currently implement any
omega related reliability measure. Although Pingouin implements [Cronbach's alpha](https://en.wikipedia.org/wiki/Cronbach%27s_alpha)
## Aim
1. To bring functions to ```python``` for psychometrics and survey analysis, as there is a gap. Mostly from the package in `R` `psych`.
2. To make the ideas and math behind those clear and transparent with examples, and documentation.
3. To allow people to collaborate and ask questions about.
# References
* Flora, David B. "Your coefficient alpha is probably wrong, but which coefficient omega is right? A tutorial on using R to obtain better reliability estimates." Advances in Methods and Practices in Psychological Science 3.4 (2020): 484-501. https://journals.sagepub.com/doi/pdf/10.1177/2515245920951747
* Revelle, William. Manuscript. 2021. An introduction to psychometric theory with applications in R.
https://personality-project.org/r/book/Chapter7.pdf
* Revelle, William R. "psych: Procedures for personality and psychological research." (2017).
* Omega Implementation in R. https://github.com/cran/psych/blob/master/R/omega.R
* Schmid-Leiman in R. https://github.com/cran/psych/blob/master/R/schmid.R
* Starkweather, Jon (2013). Hierarchical Factor Analysis. https://it.unt.edu/sites/default/files/hierfa_l_jds_apr2013.pdf
* Vallat, R. (2018). Pingouin: statistics in Python. Journal of Open Source Software, 3(31), 1026, https://doi.org/10.21105/joss.01026
* Wolff, Hans-Georg, and Katja Preising. "Exploring item and higher order factor structure with the Schmid-Leiman solution: Syntax codes for SPSS and SAS." Behavior Research Methods 37.1 (2005): 48-58.
## Acknowledgement
* Factor Analyzer. Python library. This library is based heavily on this one. https://github.com/EducationalTestingService/factor_analyzer
# Cite this package as
* Rafael Valero Fernández. (2022). reliabiliPy: measures of survey domain
reliability in Python with explanations and examples.
Cronbach´s Alpha and Omegas. (v0.0.0).
Zenodo. https://doi.org/10.5281/zenodo.5830894
or
```bibtex
@software{rafael_valero_fernandez_2022_5830894,
author = {Rafael Valero Fernández},
title = {{reliabiliPy: measures of survey domain reliability
in Python with explanations and examples.
Cronbach´s Alpha and Omegas.}},
month = jan,
year = 2022,
publisher = {Zenodo},
version = {v0.0.0},
doi = {10.5281/zenodo.5830894},
url = {https://doi.org/10.5281/zenodo.5830894}
}
```
Happy to modify the above as petition and contributions. | /reliabiliPy-0.0.35.tar.gz/reliabiliPy-0.0.35/README.md | 0.601008 | 0.919607 | README.md | pypi |
import pandas as pd
import numpy as np
from factor_analyzer import FactorAnalyzer
from factor_analyzer.utils import impute_values, corr
from typing import List, Tuple, Union, Mapping, Any
# Imputation strategies accepted for missing values in the raw data.
POSSIBLE_IMPUTATIONS = ['mean', 'median', 'drop']
# These options are kept in line with the `factor_analyzer` package.
POSSIBLE_METHODS = ['ml', 'mle', 'uls', 'minres', 'principal']
ORTHOGONAL_ROTATIONS = ['varimax', 'oblimax', 'quartimax', 'equamax', 'geomin_ort']
OBLIQUE_ROTATIONS = ['promax', 'oblimin', 'quartimin', 'geomin_obl']
POSSIBLE_ROTATIONS = ORTHOGONAL_ROTATIONS + OBLIQUE_ROTATIONS
class reliability_analysis:
"""
Initialization of the class.
Set up all key variables and options for the analysis
:param raw_dataset: None. pd.DataFrame or array-like. The raw data. However you could pass
the correlation matrix.
:param correlations_matrix: None. pd.DataFrame or array-like. The correlation matrix of the dataset.
:param rotation_fa_f: 'oblimin'. str. The rotation for factor analysis for the group factors. Other options are:
'promax', 'oblimin', 'quartimin', 'geomin_obl'. Please avoid orthogonal ones.
:param method_fa_g: 'minres' str. method for factor analysis for the common factor.
Options are 'ml', 'mle', 'uls', 'minres', 'principal'. Refer to `factor_analyzer` package.
:param method_fa_f: 'minres' str. method for factor analysis for the common factor.
Options are 'ml', 'mle', 'uls', 'minres', 'principal'. Refer to `factor_analyzer` package.
:param is_corr_matrix: boolean. True. True if you have introduced the correlation matrix in variable
`correlations_matrix`.
False if you have introduced the raw dataset in `raw_dataset`.
:param n_factors_f: 3. int. The number of groups factor to consider.
:return:
Examples
-------
With correlations matrix:
>>> import pandas as pd
>>> import numpy as np
>>> from reliabilipy import reliability_analysis
>>> correlations_matrix = pd.DataFrame(np.matrix([[1., 0.483, 0.34, 0.18, 0.277, 0.257, -0.074, 0.212, 0.226],\
[0.483, 1., 0.624, 0.26, 0.433, 0.301, -0.028, 0.362, 0.236],\
[0.34, 0.624, 1., 0.24, 0.376, 0.244, 0.233, 0.577, 0.352],\
[0.18, 0.26, 0.24, 1., 0.534, 0.654, 0.165, 0.411, 0.306],\
[0.277, 0.433, 0.376, 0.534, 1., 0.609, 0.041, 0.3, 0.239],\
[0.257, 0.301, 0.244, 0.654, 0.609, 1., 0.133, 0.399, 0.32],\
[-0.074, -0.028, 0.233, 0.165, 0.041, 0.133, 1., 0.346, 0.206],\
[0.212, 0.362, 0.577, 0.411, 0.3, 0.399, 0.346, 1., 0.457],\
[0.226, 0.236, 0.352, 0.306, 0.239, 0.32, 0.206, 0.457, 1.]]))
>>> reliability_report = reliability_analysis(correlations_matrix=correlations_matrix)
>>> reliability_report.fit()
>>> reliability_report.omega_hierarchical
0.5451484335574861
>>> reliability_report.omega_total
0.8579745972600469
>>> reliability_report.omega_hierarchical_asymptotic
0.6353899466236236
>>> reliability_report.alpha_cronbach
0.803183205136355
>>> np.testing.assert_almost_equal(reliability_report.lambda1, 0.7139, decimal=3)
>>> np.testing.assert_almost_equal(reliability_report.lambda2, 0.8149701194973398, decimal=3)
>>> np.testing.assert_almost_equal(reliability_report.report_eigenvalues['g'][0], 2.0281, decimal=3)
>>> np.testing.assert_almost_equal(reliability_report.report_eigenvalues['F1'][0], 1.1845, decimal=3)
>>> np.testing.assert_almost_equal(reliability_report.report_loadings['g'][0], 0.34, decimal=3)
With dataset and imputations:
>>> import pandas as pd
>>> import numpy as np
>>> from reliabilipy import reliability_analysis
>>> raw_dataset = pd.DataFrame([{'C1': 2.0, 'C2': 3.0, 'C3': 3.0, 'C4': 4.0, 'C5': 4.0},\
{'C1': 5.0, 'C2': 4.0, 'C3': 4.0, 'C4': 3.0, 'C5': 4.0},\
{'C1': 4.0, 'C2': 5.0, 'C3': 4.0, 'C4': 2.0, 'C5': 5.0},\
{'C1': 4.0, 'C2': 4.0, 'C3': 3.0, 'C4': 5.0, 'C5': 5.0},\
{'C1': 4.0, 'C2': 4.0, 'C3': 5.0, 'C4': 3.0, 'C5': 2.0},\
{'C1': 4.0, 'C2': np.nan, 'C3': 3.0, 'C4': 5.0, 'C5': 5.0},\
{'C1': np.nan, 'C2': 4.0, 'C3': 5.0, 'C4': 3.0, 'C5': 2.0}])
>>> ra = reliability_analysis(raw_dataset=raw_dataset,\
is_corr_matrix=False,\
impute='median')
>>> ra.fit()
>>> np.testing.assert_almost_equal(ra.alpha_cronbach, 0.78917, decimal=3)
>>> np.testing.assert_almost_equal(ra.omega_total, 0.9378722, decimal=3)
"""
    def __init__(self,
                 correlations_matrix=None,
                 raw_dataset=None,
                 method_fa_f: str = 'minres',
                 rotation_fa_f: str = 'oblimin',
                 method_fa_g: str = 'minres',
                 is_corr_matrix: bool = True,
                 impute: str = 'drop',
                 n_factors_f: int = 3,
                 round_decimals: int = 2):
        # See the class docstring for the meaning of each argument.
        self.raw_dataset = raw_dataset
        self.correlations_matrix = correlations_matrix
        # Method/rotation names are lower-cased up front so that the
        # membership checks in _argument_checker are case-insensitive.
        self.method_fa_f = method_fa_f.lower()
        self.rotation_fa_f = rotation_fa_f.lower()
        self.method_fa_g = method_fa_g.lower()
        self.is_corr_matrix = is_corr_matrix
        self.n_factors_f = n_factors_f
        self.impute = impute
        self.round_decimals = round_decimals
        # Results below default to None and are populated by fit().
        self.fa_f = None
        self.fa_g = None
        self.general_component = None
        self.omega_hierarchical = None
        self.omega_total = None
        self.omega_hierarchical_asymptotic = None
        self.alpha_cronbach = None
        self.general_component_loading = None
        self.lambda2 = None
        self.lambda1 = None
        self.general_component_eigenvalue = None
        self.f_eigenvalues_final = None
        self.f_loadings_final = None
        # NOTE(review): duplicate assignment (already set above); harmless.
        self.omega_hierarchical = None
        self.raw_dataset_imputated = None
    def _argument_checker(self):
        """Validate the constructor arguments before fitting.

        Raises ValueError when the data-input flags are inconsistent or when
        the imputation/rotation/method names are not supported.
        """
        if not isinstance(self.raw_dataset, type(None)) and self.is_corr_matrix == True:
            raise ValueError(f"You have introduced variable 'raw_dataset' and "
                             f"'is_corr_matrix' as True. If 'is_corr_matrix' then"
                             f"you should use 'correlations_matrix' instead of "
                             f"'raw_dataset'.")
        if isinstance(self.correlations_matrix, type(None)) and self.is_corr_matrix == True:
            raise ValueError(f"If 'is_corr_matrix' is True, please introduce it in "
                             f"'correlations_matrix' = YOUR DATA")
        self.impute = self.impute.lower() if isinstance(self.impute, str) else self.impute
        if self.impute not in POSSIBLE_IMPUTATIONS:
            raise ValueError(f"The imputation must be one of the following: {POSSIBLE_IMPUTATIONS}")
        self.rotation_fa_f = self.rotation_fa_f.lower() if isinstance(self.rotation_fa_f, str) else self.rotation_fa_f
        if self.rotation_fa_f not in POSSIBLE_ROTATIONS + [None]:
            raise ValueError(f"The rotation must be one of the following: {POSSIBLE_ROTATIONS + [None]}")
        for method_ in [self.method_fa_f, self.method_fa_g]:
            # NOTE(review): this rebinds only the loop variable, not the
            # attributes — it relies on __init__ having lower-cased them.
            method_ = method_.lower() if isinstance(method_, str) else method_
            if method_ not in POSSIBLE_METHODS:
                raise ValueError(f"The method must be one of the following: {POSSIBLE_METHODS}")
    def fit(self):
        """Run the full reliability analysis.

        Validates the arguments, derives the correlation matrix from the raw
        data if needed (imputing missing values), then:
          1. fits the group-factor model (``fa_f``) on the correlations,
          2. fits a single general factor (``fa_g``) on the factor
             correlations (``phi_``),
          3. computes omega total/hierarchical/asymptotic, Cronbach's alpha
             and Guttman's lambda1/lambda2,
          4. builds the loading and eigenvalue reports.
        """
        # check the input arguments. To make sure everything is according to
        # FactorAnalyzer
        self._argument_checker()
        # check to see if there are any null values, and if
        # so impute using the desired imputation approach
        if not isinstance(self.raw_dataset, type(None)):
            # convert to numpy
            if isinstance(self.raw_dataset, pd.DataFrame):
                self.raw_dataset = self.raw_dataset.to_numpy()
            if np.isnan(self.raw_dataset).any() and not self.is_corr_matrix:
                self.raw_dataset_imputated = impute_values(self.raw_dataset, how=self.impute)
        # get the correlation matrix
        # NOTE(review): absolute values are taken so all correlations point
        # the same way — confirm this matches the intended handling of
        # reverse-keyed items.
        if not self.is_corr_matrix:
            if not isinstance(self.raw_dataset_imputated, type(None)):
                self.correlations_matrix = np.abs(corr(self.raw_dataset_imputated))
            else:
                self.correlations_matrix = np.abs(corr(self.raw_dataset))
        # Start Calculations
        # Group-factor analysis on the item correlation matrix.
        self.fa_f = FactorAnalyzer(rotation=self.rotation_fa_f,
                                   method=self.method_fa_f,
                                   is_corr_matrix=True)
        self.fa_f.fit(self.correlations_matrix)
        # General-factor analysis on the factor correlation matrix.
        self.fa_g = FactorAnalyzer(rotation=None,
                                   is_corr_matrix=True,
                                   method=self.method_fa_g,
                                   n_factors=1)
        self.fa_g.fit(self.fa_f.phi_)
        # Omega Report
        # Per-item general-factor loadings (group loadings projected onto g).
        self.general_component = np.dot(self.fa_f.loadings_, self.fa_g.loadings_)
        Vt = self.correlations_matrix.sum().sum()  # total (co)variance
        V = self.correlations_matrix
        Vitem = sum(np.diag(self.correlations_matrix))  # sum of item variances
        gsq = self.general_component.sum() ** 2  # squared sum of g loadings
        uniq = self.fa_f.get_uniquenesses().sum()
        # From now we assume that data is in wide format
        n, k = self.correlations_matrix.shape
        nvar = k
        self.omega_hierarchical = gsq / Vt
        self.omega_total = (Vt - uniq) / Vt
        self.omega_hierarchical_asymptotic = gsq / (Vt - uniq)
        # Alpha calculations
        self.alpha_cronbach = ((Vt - Vitem) / Vt) * (nvar / (nvar - 1))
        # Guttman's lower bounds lambda1 and lambda2.
        self.lambda1 = 1 - np.diag(V).sum() / Vt
        C2 = ((V - np.eye(n) * np.diag(V)) ** 2).sum().sum()  # squared off-diagonals
        self.lambda2 = self.lambda1 + (n / (n - 1) * C2) ** 0.5 / Vt
        """Calculate general component. The part corresponding to the common factor """
        general_component = np.dot(self.fa_f.loadings_,
                                   self.fa_g.loadings_)
        self.general_component_loading = np.abs(general_component)
        self.general_component_eigenvalue = np.dot(general_component.T, general_component)
        # Update Group Factors
        # Residualise the group loadings by the general factor's uniquenesses
        # (Schmid-Leiman style rescaling).
        f_loadings_final = np.zeros(self.fa_f.loadings_.shape)
        for i in range(0, self.fa_g.get_uniquenesses().__len__()):
            f_loadings_final[:, i] = self.fa_f.loadings_[:, i] * \
                                     self.fa_g.get_uniquenesses()[i] ** 0.5
        self.f_loadings_final = np.abs(f_loadings_final)
        self.f_eigenvalues_final = np.dot(f_loadings_final.T, f_loadings_final).sum(axis=1)
        self._create_report_loadings()
        self._create_report_eigenvalues()
    def _create_report_loadings(self):
        """Build a DataFrame of loadings per item — general factor ``g``,
        group factors ``F0..Fn``, uniqueness ``u2`` and communality ``h2`` —
        in a similar layout to the `psych` `omega` function.
        """
        self.f_loadings_final = pd.DataFrame(self.f_loadings_final)
        self._f_columns_list = [f"F{i}" for i in self.f_loadings_final.columns]
        self.f_loadings_final.columns = self._f_columns_list
        self.report_loadings = pd.DataFrame(self.general_component_loading, columns=["g"])
        self.report_loadings = pd.merge(self.report_loadings, self.f_loadings_final,
                                        left_index=True, right_index=True).round(decimals=self.round_decimals)
        self.report_loadings['u2'] = self.fa_f.get_uniquenesses()
        self.report_loadings['h2'] = self.fa_f.get_communalities()
        self.report_loadings = self.report_loadings.round(decimals=self.round_decimals)
    def _create_report_eigenvalues(self):
        """Build a one-row DataFrame with the eigenvalue of the general
        factor ``g`` and of each group factor, in a similar layout to the
        `psych` `omega` function.
        """
        dict_to_create_pd = {'g': self.general_component_eigenvalue[0][0]}
        for f_columns_id in range(0, self._f_columns_list.__len__()):
            dict_to_create_pd[self._f_columns_list[f_columns_id]] = self.f_eigenvalues_final[f_columns_id]
        aux = pd.DataFrame(dict_to_create_pd.values()).T
        aux.columns = dict_to_create_pd.keys()
        aux.index = ['eigenvalues']
        self.report_eigenvalues = aux
if __name__ == '__main__':
    # Execute the doctest examples embedded in the class docstring above.
    import doctest
    doctest.testmod(verbose=True)
from dataclasses import dataclass
from enum import Enum
from typing import Union, Optional, Type, List
from archive_tools.structx import Struct
class ListableEnum(Enum):
    """Enum mixin that exposes the member values as a plain list."""

    @classmethod
    def list(cls):
        # Values of every member, in definition order.
        return [member.value for member in cls]

    @staticmethod
    def get_list(cls: Type[Enum]):
        # Same as `list`, but usable against an arbitrary Enum class.
        return [member.value for member in cls]
class VersionEnum(ListableEnum):
    """Enum whose members wrap `Version` values and compare equal to both
    other `VersionEnum` members and plain `Version` instances."""

    def __eq__(self, other):
        if isinstance(other, VersionEnum):
            return self.value == other.value
        elif isinstance(other, Version):
            return self.value == other
        else:
            # BUGFIX: the default comparison's result was previously dropped
            # (no `return`), so `==` yielded None for unrelated types and
            # `__ne__` below was always True for them.
            return super().__eq__(other)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Defining __eq__ suppresses inherited hashing; delegate to the
        # wrapped value so enum members hash like their Version.
        return self.value.__hash__()
@dataclass
class Version:
    """A (major, minor) version pair."""
    major: int
    minor: Optional[int] = 0

    # Packed binary layouts used by version readers/writers elsewhere.
    _32 = Struct("< H H")
    _64 = Struct("< L L")

    def __str__(self) -> str:
        return f"Version {self.major}.{self.minor}"

    def __eq__(self, other):
        if other is None:
            return False
        elif isinstance(other, VersionEnum):
            # Compare against the Version wrapped by the enum member.
            return self.major == other.value.major and self.minor == other.value.minor
        elif isinstance(other, Version):
            return self.major == other.major and self.minor == other.minor
        else:
            return super().__eq__(other)

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):
        # Realistically; Version will always be <256
        # But we could manually set it to something much bigger by accident; and that may cause collisions
        # BUGFIX: `+` binds tighter than `<<`, so the previous expression
        # computed `major << (32 + minor)` instead of the intended packing
        # of major into the high bits and minor into the low bits.
        return (self.major << 32) + self.minor
VersionLike = Union[Version, VersionEnum]
class VersionError(Exception):
    """Raised when a file/archive version is not one of the supported ones.

    *supported* may be a single Version, a VersionEnum member, a VersionEnum
    subclass (expanded to all its values) or a list; it is normalised to a
    list (or None) for the message in __str__.
    """

    def __init__(self, version: 'VersionLike' = None,
                 supported: 'Union[List[Version], Version, Type[VersionEnum], VersionEnum]' = None, *args):
        super().__init__(*args)
        self.version = version
        if supported:
            # BUGFIX: `issubclass` raises TypeError when given an instance
            # (e.g. a VersionEnum member or a Version), so only call it when
            # `supported` is actually a class.
            if isinstance(supported, type) and issubclass(supported, VersionEnum):
                supported = ListableEnum.get_list(supported)
            elif not isinstance(supported, list):
                supported = [supported]
        # BUGFIX: always set the attribute; previously it was only assigned
        # inside the truthiness check, so __str__ raised AttributeError when
        # `supported` was omitted.
        self.supported = supported

    def __str__(self):
        msg = "Unexpected version"
        if self.version or self.supported:
            msg += ";"
        if self.version:
            msg += f" got {repr(self.version)}"
        if self.version and self.supported:
            msg += ","
        if self.supported:
            msg += f" expected {repr(self.supported)}"
        return msg + "!"
from enum import Enum
from os import PathLike
from os.path import join, exists, abspath
from pathlib import Path, PurePath
from typing import Optional, Iterable, Tuple, Set
import archive_tools.common_directories
# Folder of bundled helper executables, resolved relative to this file.
# NOTE(review): the escaped-backslash separators make this Windows-specific.
dll_folder = abspath(join(__file__, "..\\..\\..\\Required EXEs"))
aifc_decoder_path = join(dll_folder, "dec.exe")  # AIFC decoder executable
aifc_encoder_path = join(dll_folder, "enc.exe")  # AIFC encoder executable
texconv_path = join(dll_folder, "texconv.exe")  # texture conversion executable
def get_path_to_steam_library(steam_directory: PathLike = None) -> Path:
    """Return the path of the 'steamapps/common' library folder.

    When *steam_directory* is not given (or falsy), the Steam install
    location is discovered via `archive_tools.common_directories`.
    """
    if steam_directory:
        base = PurePath(steam_directory)
    else:
        base = archive_tools.common_directories.get_steam_install_dir()
    return base / "steamapps" / "common"
class DowIIIGame(Enum):
    # Dawn of War III: base game only (no expansions tracked).
    BaseGame = 0


class DowIIGame(Enum):
    # Dawn of War II titles; a higher value means a more recent release.
    Retribution = 2
    ChaosRising = 1
    BaseGame = 0


class DowGame(Enum):
    # Dawn of War (1) titles; a higher value means a more recent release.
    SoulStorm = 4
    DarkCrusade = 3
    WinterAssault = 2
    Gold = 1
    BaseGame = 0
# Steam library sub-directory for each supported title.
dow_game_paths = {
    DowIIIGame.BaseGame: "Dawn of War III",
    DowIIGame.Retribution: "Dawn of War II - Retribution",
    DowGame.SoulStorm: "Dawn of War Soulstorm",
    DowGame.DarkCrusade: "Dawn of War Dark Crusade",
    DowGame.WinterAssault: "Dawn of War Winter Assault",
    DowGame.Gold: "Dawn of War Gold",
    # DowGame.BaseGame:"Dawn of War", # The original dawn of war probably doesn't include 'Gold', IDK what it is specifically but this would be my first guess
}
def get_dow_root_directories() -> Iterable[Tuple[DowGame, Path]]:
    """Yield (game, install-path) pairs for every DoW title found on disk."""
    library = get_path_to_steam_library()
    for title, sub_dir in dow_game_paths.items():
        candidate = library / sub_dir
        if exists(candidate):
            yield title, candidate
def filter_unique_dow_game(dow_root_directories: Iterable[Tuple[DowGame, Path]]) -> Iterable[Tuple[DowGame, Path]]:
    """Yield only the first (game, path) pair seen for each distinct game."""
    seen: Set[DowGame] = set()
    for game, path in dow_root_directories:
        if game not in seen:
            yield game, path
            seen.add(game)
# Allows us to get the most
# up-to-date dump of all assets:
# Gold (I believe) only contains Space Marines, Orks, Chaos, & Eldar
# Winter Assault Adds Imperial Guard
# Dark Crusade Adds Tau & Necrons
# SoulStorm Adds Dark Eldar & Sisters Of Battle
# If we only want to dump ONE game; we'd want to dump the latest to get all the assets from the previous one
# Except for campaign assets; which are unique to each install
# For Campaign assets, use get_unique and dump each to a separate directory (or order the dumps such that later games come after earlier games)
def filter_latest_dow_game(dow_root_directories: Iterable[Tuple[DowGame, Path]], series: Enum = DowGame) -> Optional[Tuple[DowGame, Path]]:
    """Return the newest (highest enum value) entry belonging to *series*.

    Returns None when no entry of that series is present.
    """
    best = None
    best_path = None
    for game, path in dow_root_directories:
        if not isinstance(game, series):
            continue
        # On a value tie the later entry wins, matching the original
        # update condition (`continue` only when strictly older).
        if best is None or game.value >= best.value:
            best = game
            best_path = path
    if best:
        return best, best_path
    return None
def get_latest_dow_game() -> Optional[Tuple[DowGame, Path]]:
    # Newest installed Dawn of War (1) series title, if any.
    return filter_latest_dow_game(get_dow_root_directories(), series=DowGame)


def get_latest_dow2_game() -> Optional[Tuple[DowGame, Path]]:
    # Newest installed Dawn of War II title, if any.
    return filter_latest_dow_game(get_dow_root_directories(), series=DowIIGame)


def get_latest_dow3_game() -> Optional[Tuple[DowGame, Path]]:
    # Newest installed Dawn of War III title, if any.
    return filter_latest_dow_game(get_dow_root_directories(), series=DowIIIGame)


def get_unique_dow_game() -> Iterable[Tuple[DowGame, Path]]:
    # One entry per installed game; the first occurrence wins.
    return filter_unique_dow_game(get_dow_root_directories())
if __name__ == "__main__":
print("\nAll Dirs")
for game, path in get_dow_root_directories():
print(game.name, ":\t", path)
print("\nLatest")
dirs = get_dow_root_directories()
latest = filter_latest_dow_game(dirs)
print(latest) | /relic_game_tool-2022.0a7-py3-none-any.whl/relic/config.py | 0.741955 | 0.2372 | config.py | pypi |
from __future__ import annotations
from dataclasses import dataclass
from pathlib import PurePosixPath
from typing import Dict, List, Optional, TYPE_CHECKING
from ..hierarchy import DriveChild, FolderCollection, FileCollection, FolderChild, walk
if TYPE_CHECKING:
from ..file.file import File
from ..toc.toc import ArchiveTableOfContents
from ..vdrive.virtual_drive import VirtualDrive
from .header import FolderHeader
from ..hierarchy import ArchiveWalk
@dataclass
class Folder(FolderCollection, FileCollection, FolderChild, DriveChild):
    """A folder inside an SGA archive: holds sub-folders and files, and is
    parented by either another folder or a virtual drive."""
    header: FolderHeader
    name: str

    def __init__(self, header: FolderHeader, name: str, sub_folders: List[Folder], files: List[File], parent_folder: Optional[Folder] = None, drive: Optional[VirtualDrive] = None):
        self.header = header
        self.name = name
        self.sub_folders = sub_folders
        self.files = files
        self._drive = drive
        self._parent = parent_folder

    @property
    def full_path(self) -> PurePosixPath:
        # Root the path at the owning drive when one is attached.
        if self._drive:
            return self._drive.full_path / self.name
        else:
            return PurePosixPath(self.name)

    def walk(self) -> ArchiveWalk:
        return walk(self)

    @classmethod
    def create(cls, header: FolderHeader) -> Folder:
        """Build a sparse Folder: name/children are placeholders sized from
        the header's index ranges, filled in later by load_toc."""
        name = None
        folders = [None] * header.sub_folder_range.size
        files = [None] * header.file_range.size
        # noinspection PyTypeChecker
        return Folder(header, name, folders, files)

    def load_toc(self, toc: ArchiveTableOfContents):
        # Resolve this folder's children and name from the table of contents.
        self.load_folders(toc.folders)
        self.load_files(toc.files)
        self.load_name_from_lookup(toc.names)

    def load_name_from_lookup(self, name_lookup: Dict[int, str]):
        # The header stores an offset into the archive's name table.
        self.name = name_lookup[self.header.name_offset]

    def load_folders(self, folders: List[Folder]):
        # NOTE(review): only the range start is validated; presumably a start
        # past the end of `folders` marks an empty range — confirm.
        if self.header.sub_folder_range.start < len(folders):
            for folder_index in self.header.sub_folder_range:
                sub_folder_index = folder_index - self.header.sub_folder_range.start
                f = self.sub_folders[sub_folder_index] = folders[folder_index]
                f._parent = self

    def load_files(self, files: List[File]):
        if self.header.file_range.start < len(files):
            for file_index in self.header.file_range:
                sub_file_index = file_index - self.header.file_range.start
                f = self.files[sub_file_index] = files[file_index]
                f._parent = self
from __future__ import annotations
from dataclasses import dataclass
from pathlib import PurePosixPath
from typing import List, TYPE_CHECKING
from ..hierarchy import FileCollection, FolderCollection, ArchiveWalk, walk
if TYPE_CHECKING:
from ..file.file import File
from ..folder.folder import Folder
from ..vdrive.header import VirtualDriveHeader
from ..toc.toc import ArchiveTableOfContents
@dataclass
class VirtualDrive(FolderCollection, FileCollection):
    """The root container of an SGA archive's file tree; owns the top-level
    folders and files of one drive."""
    header: VirtualDriveHeader

    def __init__(self, header: VirtualDriveHeader, sub_folders: List[Folder], files: List[File]):
        self.header = header
        self.sub_folders = sub_folders
        self.files = files

    @property
    def path(self) -> str:
        return self.header.path

    @property
    def name(self) -> str:
        return self.header.name

    def walk(self) -> ArchiveWalk:
        return walk(self)

    @property
    def full_path(self) -> PurePosixPath:
        # Drives are addressed drive-style, e.g. "data:".
        return PurePosixPath(self.path + ":")

    @classmethod
    def create(cls, header: VirtualDriveHeader) -> VirtualDrive:
        """Build a sparse drive: children are placeholders sized from the
        header's index ranges, filled in later by load_toc."""
        folders = [None] * header.sub_folder_range.size
        files = [None] * header.file_range.size
        # noinspection PyTypeChecker
        return VirtualDrive(header, folders, files)

    def load_toc(self, toc: ArchiveTableOfContents):
        self.load_folders(toc.folders)
        self.load_files(toc.files)

    def load_folders(self, folders: List[Folder]):
        # NOTE(review): only the range start is validated; presumably a start
        # past the end of `folders` marks an empty range — confirm.
        if self.header.sub_folder_range.start < len(folders):
            for folder_index in self.header.sub_folder_range:
                sub_folder_index = folder_index - self.header.sub_folder_range.start
                f = self.sub_folders[sub_folder_index] = folders[folder_index]
                f._drive = self

    def load_files(self, files: List[File]):
        if self.header.file_range.start < len(files):
            for file_index in self.header.file_range:
                sub_file_index = file_index - self.header.file_range.start
                f = self.files[sub_file_index] = files[file_index]
                f._drive = self

    def build_tree(self):
        # Keep only top-level entries; anything with a parent folder is
        # reachable through that folder instead.
        self.sub_folders = [f for f in self.sub_folders if not f._parent]
        self.files = [f for f in self.files if not f._parent]
from __future__ import annotations
from dataclasses import dataclass
from typing import BinaryIO, List, Type, Dict, TYPE_CHECKING
from .header import ArchiveHeader
from ..common import ArchiveVersion
from ..hierarchy import DriveCollection, ArchiveWalk, walk
from ...common import VersionLike
if TYPE_CHECKING:
from ..toc.toc import ArchiveTableOfContents
from ..toc.toc_headers import ArchiveTableOfContentsHeaders
from ..toc.toc_ptr import ArchiveTableOfContentsPtr
from ..vdrive.virtual_drive import VirtualDrive
@dataclass
class Archive(DriveCollection):
    """An SGA archive: its header plus the virtual drives parsed from it.

    Version-specific behaviour (packing) lives in the subclasses selected
    via _VERSION_MAP.
    """
    header: ArchiveHeader
    """Sparse represents whether data was loaded on creation."""
    _sparse: bool

    def __init__(self, header: ArchiveHeader, drives: List[VirtualDrive], _sparse: bool):
        self.header = header
        self._sparse = _sparse
        self.drives = drives

    def walk(self) -> ArchiveWalk:
        return walk(self)

    @classmethod
    def _unpack(cls, stream: BinaryIO, header: ArchiveHeader, sparse: bool = True):
        # Parse the table of contents (and optionally file data) for an
        # archive whose header has already been read.
        from ..toc import ArchiveTableOfContents, ArchiveTableOfContentsPtr, ArchiveTableOfContentsHeaders
        version = header.version
        with header.toc_ptr.stream_jump_to(stream) as handle:
            toc_ptr = ArchiveTableOfContentsPtr.unpack_version(handle, version)
            toc_headers = ArchiveTableOfContentsHeaders.unpack(handle, toc_ptr, version)
            toc = ArchiveTableOfContents.create(toc_headers)
            toc.load_toc()
            toc.build_tree()  # ensures walk is unique; avoiding dupes and speeding things up
        if not sparse:
            # Eagerly load file contents when a non-sparse archive was asked for.
            with header.data_ptr.stream_jump_to(stream) as handle:
                toc.load_data(handle)
        return cls(header, toc.drives, sparse)

    @classmethod
    def unpack(cls, stream: BinaryIO, read_magic: bool = True, sparse: bool = True, *, validate: bool = True) -> Archive:
        """Read an archive from *stream*, dispatching on the header version
        to the matching subclass; optionally validates the checksums."""
        header = ArchiveHeader.unpack(stream, read_magic)
        if validate:
            header.validate_checksums(stream)
        class_type = _VERSION_MAP[header.version]
        return class_type._unpack(stream, header, sparse)  # Defer to subclass (ensures packing works as expected)

    def pack(self, stream: BinaryIO, write_magic: bool = True) -> int:
        # Subclasses are responsible for version-specific packing.
        raise NotImplementedError
@dataclass(init=False)
class DowIArchive(Archive):
    def pack(self, stream: BinaryIO, write_magic: bool = True) -> int:
        # TODO: packing not implemented yet; currently a stub returning None.
        pass


@dataclass(init=False)
class DowIIArchive(Archive):
    def pack(self, stream: BinaryIO, write_magic: bool = True) -> int:
        # TODO: packing not implemented yet; currently a stub returning None.
        pass


@dataclass(init=False)
class DowIIIArchive(Archive):
    def pack(self, stream: BinaryIO, write_magic: bool = True) -> int:
        # TODO: packing not implemented yet; currently a stub returning None.
        pass


# Maps an archive header version to the subclass that handles it.
_VERSION_MAP: Dict[VersionLike, Type[Archive]] = {
    ArchiveVersion.Dow: DowIArchive,
    ArchiveVersion.Dow2: DowIIArchive,
    ArchiveVersion.Dow3: DowIIIArchive
}
from __future__ import annotations
from collections import UserDict
from dataclasses import dataclass
from typing import Dict, Type, List, Optional, Iterable, Union, Tuple
from relic.chunky.chunk.chunk import AbstractChunk, GenericDataChunk, FolderChunk
from relic.chunky.chunk.header import ChunkType, ChunkHeader
from relic.chunky.chunky.chunky import GenericRelicChunky
from relic.chunky_formats.protocols import ChunkDefinition, ChunkyDefinition, ConvertableFolderChunk, ConvertableDataChunk, ConvertableChunky
class SupportsFolderChunkAutoConvert(ChunkDefinition, ConvertableFolderChunk):
    # Marker type: a chunk definition that can also convert folder chunks.
    ...


class SupportsDataChunkAutoConvert(ChunkDefinition, ConvertableDataChunk):
    # Marker type: a chunk definition that can also convert data chunks.
    ...


class SupportsChunkyAutoConvert(ConvertableChunky, ChunkyDefinition):
    # Marker type: a chunky definition that can convert whole chunkies.
    ...
class ChunkyConverterFactory(UserDict[str, Type[ConvertableChunky]]):
    """Maps file extensions to chunky converters.

    Extensions are normalised (lower-cased, leading dots stripped) on every
    read and write, so 'WHM', '.whm' and 'whm' address the same converter.
    """

    def __init__(self, not_implemented: List[str] = None, __dict: Dict[str, Type[ConvertableChunky]] = None, **kwargs):
        """
        :param not_implemented: A list of keys that will raise a NotImplementedError instead of a KeyError when using this class's convert method.
        :param __dict: An existing dict mapping, see UserDict for details.
        :param kwargs: See UserDict for details.
        """
        # BUGFIX: UserDict's initial-mapping parameter is positional-only;
        # passing it by keyword made it fall into **kwargs and get inserted
        # as a regular dictionary entry. Pass it positionally, as the sibling
        # ChunkConverterFactory already does.
        super().__init__(__dict, **kwargs)
        self.not_implemented = not_implemented or []

    @property
    def supported(self) -> List[str]:
        """All registered (normalised) extensions."""
        return list(self.keys())

    def __is_not_implemented(self, key: str) -> bool:
        # NOTE(review): entries in `not_implemented` are compared against the
        # normalised key but are not normalised themselves — confirm callers
        # store them already lower-cased and dot-free.
        return self.__simplify_ext(key) in self.not_implemented

    @classmethod
    def __simplify_ext(cls, extension: str) -> str:
        # Normalise an extension: case-insensitive, leading dots ignored.
        return extension.lower().lstrip(".")

    def __setitem__(self, key: str, value):
        super().__setitem__(self.__simplify_ext(key), value)

    def __getitem__(self, item: str):
        return super().__getitem__(self.__simplify_ext(item))

    def register(self, convertable: Type[SupportsChunkyAutoConvert]):
        """Register a converter under its own declared EXT."""
        self[convertable.EXT] = convertable

    def add_converter(self, extension: str, convertable: Type[ConvertableChunky]):
        self[extension] = convertable

    def get_converter(self, extension: str, _default: Type[ConvertableChunky] = None) -> Optional[Type[ConvertableChunky]]:
        return self.get(extension, _default)

    def convert(self, extension: str, chunky: GenericRelicChunky):
        """Convert *chunky* with the converter registered for *extension*.

        Raises NotImplementedError for known-but-unsupported extensions and
        re-raises KeyError for unknown ones.
        """
        try:
            converter = self[extension]
        except KeyError:
            if self.__is_not_implemented(extension):
                raise NotImplementedError(self.__simplify_ext(extension))
            else:
                raise
        return converter.convert(chunky)
class ChunkConverterFactory(UserDict[Tuple[ChunkType, str], Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]):
    """Maps (ChunkType, 4-char chunk id) pairs to chunk converters or nested factories."""

    @dataclass
    class GenericFolderChunk(AbstractChunk):
        # Fallback container used when `default_generic_folder` is set and no
        # converter is registered for a folder chunk.
        chunks: List[AbstractChunk]

    def __init__(self, default_generic_folder: bool = False, allow_overwrite: bool = False, __dict: Optional[Dict[Tuple[ChunkType, str], Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]] = None, **kwargs):
        """
        :param default_generic_folder: Convert unregistered folder chunks to GenericFolderChunk instead of raising KeyError.
        :param allow_overwrite: Allow re-registering an existing (type, id) key.
        :param __dict: An existing dict mapping, see UserDict for details.
        :param kwargs: See UserDict for details.
        """
        super().__init__(__dict, **kwargs)
        self.default_generic_folder = default_generic_folder
        self.allow_overwrite = allow_overwrite

    def __setitem__(self, key: Tuple[ChunkType, str], value):
        assert len(key[1]) <= 4, f"ID '{key[1]}' is too large! IDs can be at most 4 characters long. This tool will strip '\0' but leave ' '."
        if not self.allow_overwrite and key in self.keys():
            raise KeyError(f"Key '{key}' already exists and overwrites are not allowed!")
        super().__setitem__(key, value)

    def __getitem__(self, item: Tuple[ChunkType, str]):
        return super().__getitem__(item)

    def add_converter(self, chunk_type: ChunkType, chunk_id: str, convertable: Union[ChunkConverterFactory, Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]):
        self[(chunk_type, chunk_id)] = convertable

    def register(self, convertable: Union[Type[SupportsDataChunkAutoConvert], Type[SupportsFolderChunkAutoConvert]]):
        """Register a converter under its own CHUNK_TYPE / CHUNK_ID attributes."""
        self.add_converter(convertable.CHUNK_TYPE, convertable.CHUNK_ID, convertable)

    def register_sub_factory(self, chunk: ChunkDefinition, converter: ChunkConverterFactory):
        """Register a nested factory to handle a folder chunk's children."""
        self.add_converter(chunk.CHUNK_TYPE, chunk.CHUNK_ID, converter)

    def add_data_converter(self, chunk_id: str, convertable: Type[ConvertableDataChunk]):
        self.add_converter(ChunkType.Data, chunk_id, convertable)

    def add_folder_converter(self, chunk_id: str, convertable: Union[ChunkConverterFactory, Type[SupportsFolderChunkAutoConvert]]):
        # BUGFIX: folder converters were registered under ChunkType.Data (copy-paste
        # from add_data_converter above), so they could never be found when looking
        # up a folder chunk's (ChunkType.Folder, id) key.
        self.add_converter(ChunkType.Folder, chunk_id, convertable)

    def get_converter(self, chunk_type: ChunkType, chunk_id: str, _default: Optional[Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]] = None) -> Optional[Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]:
        return self.get((chunk_type, chunk_id), _default)

    def get_converter_from_header(self, header: ChunkHeader) -> Optional[Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]:
        return self.get_converter(header.type, header.id)

    def get_converter_from_chunk(self, chunk: AbstractChunk) -> Optional[Type[Union[ConvertableDataChunk, ConvertableFolderChunk]]]:
        return self.get_converter_from_header(chunk.header)

    def __convert_folder_generic(self, chunk: FolderChunk) -> GenericFolderChunk:
        # Recursively convert children, wrapping them in the generic container.
        header = chunk.header
        sub_chunks = self.convert_many(chunk.chunks)
        return self.GenericFolderChunk(header, sub_chunks)

    def convert(self, chunk: Union[GenericDataChunk, FolderChunk]) -> AbstractChunk:
        """Convert one chunk; falls back to GenericFolderChunk for unregistered folders
        when enabled, otherwise raises KeyError((type, id))."""
        converter = self.get_converter_from_chunk(chunk)
        if not converter:
            if self.default_generic_folder and chunk.header.type == ChunkType.Folder:
                return self.__convert_folder_generic(chunk)
            raise KeyError(chunk.header.type, chunk.header.id)
        # A nested ChunkConverterFactory and a converter class expose the same
        # `convert(chunk)` call signature, so both dispatch identically.
        return converter.convert(chunk)

    def convert_many(self, chunks: Iterable[Union[GenericDataChunk, FolderChunk]]) -> List[AbstractChunk]:
        return [self.convert(c) for c in chunks]
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import List, Iterable, Optional, Protocol, Sized, Type, Union, ClassVar
from relic.chunky import AbstractChunk, ChunkType, RelicChunky, GenericRelicChunky, FolderChunk, GenericDataChunk
from relic.chunky_formats.convertable import SupportsDataChunkAutoConvert
from relic.chunky_formats.protocols import ChunkDefinition
def DEBUG_WRITE_TO_BIN(data: bytes, name: Optional[str] = None):
    """Dump raw bytes to '<name>.bin' (default 'debug_dump.bin') in the CWD and print the path.

    :param data: Raw bytes to write verbatim.
    :param name: Base file name without extension; '.bin' is appended.
    """
    name = (name or r"debug_dump") + ".bin"
    # Print the resolved path so the dump is easy to locate.
    # (FIX: the old f".\\{name}" form produced a broken path on non-Windows systems;
    # abspath of the bare name resolves identically on Windows.)
    print("\n", os.path.abspath(name))
    with open(name, "wb") as handle:
        handle.write(data)
def find_chunks(chunks: List[AbstractChunk], id: str, type: ChunkType) -> Iterable[AbstractChunk]:
    """Yield every chunk whose header matches both the given id and chunk type."""
    for candidate in chunks:
        header = candidate.header
        if header.id == id and header.type == type:
            yield candidate
def find_chunk(chunks: List[AbstractChunk], id: str, type: ChunkType) -> Optional[AbstractChunk]:
    """Return the first chunk matching (id, type), or None when absent."""
    # The sibling find_chunks filter is inlined here to return on the first hit.
    for candidate in chunks:
        if candidate.header.id == id and candidate.header.type == type:
            return candidate
    return None
@dataclass
class UnimplementedChunky(RelicChunky):
    # Placeholder for chunky formats that have not been reverse engineered yet.
    @classmethod
    def convert(cls, chunky: GenericRelicChunky) -> None:
        """Always raises NotImplementedError, listing the child (type, id) pairs to aid reverse engineering."""
        raise NotImplementedError(cls.__name__, [(_.header.type.value, _.header.id) for _ in chunky.chunks])
@dataclass
class UnimplementedFolderChunk(AbstractChunk):
    # Placeholder for folder chunks that have not been reverse engineered yet.
    @classmethod
    def convert(cls, chunk: FolderChunk) -> None:
        """Always raises NotImplementedError, listing the child (type, id) pairs to aid reverse engineering."""
        raise NotImplementedError(cls.__name__, [(_.header.type.value, _.header.id) for _ in chunk.chunks])
@dataclass
class UnimplementedDataChunk(AbstractChunk):
    # Unparsed data chunk; the raw payload is kept verbatim for later inspection.
    raw: bytes
    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> UnimplementedDataChunk:
        """Capture the chunk's raw bytes without interpreting them."""
        return cls(chunk.header, chunk.raw_bytes)
class ChunkCollection(Protocol):
    # Structural protocol: anything exposing an iterable `chunks` attribute
    # (chunkies, folder chunks, or the Wrapper used by ChunkCollectionX.list2col).
    chunks: Iterable[AbstractChunk]
class ChunkCollectionX:
    """Convenience wrapper adding (id, type) queries over any ChunkCollection."""

    @classmethod
    def list2col(cls, col: List[AbstractChunk]) -> ChunkCollectionX:
        """Wrap a plain chunk list in a minimal ChunkCollection."""
        @dataclass
        class Wrapper:
            chunks: List[AbstractChunk]
        return ChunkCollectionX(Wrapper(col))

    def __init__(self, inner: ChunkCollection):
        self.inner = inner

    def __len__(self) -> int:
        # `chunks` is only required to be iterable; count manually when it is not Sized.
        if isinstance(self.inner.chunks, Sized):
            return len(self.inner.chunks)
        else:
            return sum(1 for _ in self.inner.chunks)

    def get_chunks_by_type(self, chunk_type: ChunkType) -> Iterable[AbstractChunk]:
        """Yield every chunk whose header has the given chunk type."""
        for c in self.inner.chunks:
            if c.header.type == chunk_type:
                yield c

    @property
    def data_chunks(self) -> Iterable[AbstractChunk]:
        return self.get_chunks_by_type(ChunkType.Data)

    @property
    def folder_chunks(self) -> Iterable[AbstractChunk]:
        return self.get_chunks_by_type(ChunkType.Folder)

    def find(self, chunk: Type[ChunkDefinition], many: bool = False) -> Union[List[AbstractChunk], Optional[AbstractChunk]]:
        """Look up chunks by a definition class's CHUNK_ID / CHUNK_TYPE."""
        return self.get(chunk.CHUNK_ID, chunk.CHUNK_TYPE, many=many)

    # FIX: the annotation was Union[ClassVar[X], ClassVar[X]] — a duplicated member,
    # and ClassVar is not valid in a parameter annotation; the argument is the
    # converter *class* itself, i.e. Type[SupportsDataChunkAutoConvert].
    def find_and_convert(self, id_converter: Type[SupportsDataChunkAutoConvert], many: bool = False) -> Union[Optional[AbstractChunk], List[AbstractChunk]]:
        """Find the converter's chunk(s) and convert them; None / [] when absent."""
        if many:
            chunks = self.find_chunks(id_converter)
            return [id_converter.convert(_) for _ in chunks]
        else:
            chunk = self.find_chunk(id_converter)
            if chunk:
                return id_converter.convert(chunk)
            else:
                return None

    def find_chunk(self, chunk: Type[ChunkDefinition]) -> Optional[AbstractChunk]:
        return self.get_chunk(chunk.CHUNK_ID, chunk.CHUNK_TYPE)

    def find_chunks(self, chunk: Type[ChunkDefinition]) -> List[AbstractChunk]:
        return self.get_chunks(chunk.CHUNK_ID, chunk.CHUNK_TYPE)

    def get(self, chunk_id: str, chunk_type: ChunkType, many: bool = False) -> Union[List[AbstractChunk], Optional[AbstractChunk]]:
        """Return all matches (many=True) or the first match / None (many=False)."""
        if many:
            return self.get_chunks(chunk_id, chunk_type)
        else:
            return self.get_chunk(chunk_id, chunk_type)

    def get_chunks(self, chunk_id: str, chunk_type: ChunkType) -> List[AbstractChunk]:
        return [c for c in self.get_chunks_by_type(chunk_type) if c.header.id == chunk_id]

    def get_chunk(self, chunk_id: str, chunk_type: ChunkType) -> Optional[AbstractChunk]:
        for c in self.get_chunks(chunk_id, chunk_type):
            return c
        return None
from __future__ import annotations
from dataclasses import dataclass
from typing import List, Optional
from relic.chunky import AbstractChunk, FolderChunk
from relic.chunky.chunk import ChunkType
from relic.chunky.chunky import RelicChunky, GenericRelicChunky
from relic.chunky_formats.dow.common_chunks.fbif import FbifChunk
from relic.chunky_formats.util import find_chunks, find_chunk, UnimplementedDataChunk
from relic.chunky_formats.dow.events import EvctChunk
from relic.chunky_formats.dow.rml import MtreChunk, ModlChunk
from relic.chunky_formats.dow.whm.animation import AnbvChunk, AnimChunk
@dataclass
class SeuiChunk(UnimplementedDataChunk):
    # SEUI data chunk: payload format unknown; raw bytes kept by UnimplementedDataChunk.
    pass
@dataclass
class ClasChunk(UnimplementedDataChunk):
    # CLAS data chunk: payload format unknown; raw bytes kept by UnimplementedDataChunk.
    pass
@dataclass
class ClstChunk(AbstractChunk):
    """CLST folder chunk: a collection of CLAS data chunks."""
    clas: List[ClasChunk]

    @classmethod
    def convert(cls, chunk: FolderChunk) -> ClstChunk:
        """Convert every CLAS child; asserts the folder holds nothing else."""
        converted = [ClasChunk.convert(sub) for sub in find_chunks(chunk.chunks, "CLAS", ChunkType.Data)]
        assert len(chunk.chunks) == len(converted)
        return cls(chunk.header, converted)
@dataclass
class ActsChunk(UnimplementedDataChunk):
    # ACTS data chunk: payload format unknown; raw bytes kept by UnimplementedDataChunk.
    pass
@dataclass
class CondChunk(UnimplementedDataChunk):
    # COND data chunk: payload format unknown; raw bytes kept by UnimplementedDataChunk.
    pass
@dataclass
class ConlChunk(AbstractChunk):
    """CONL folder chunk: a collection of COND data chunks."""
    cond: List[CondChunk]

    @classmethod
    def convert(cls, chunk: FolderChunk) -> ConlChunk:
        """Convert every COND child; asserts the folder holds nothing else."""
        converted = [CondChunk.convert(sub) for sub in find_chunks(chunk.chunks, "COND", ChunkType.Data)]
        assert len(chunk.chunks) == len(converted)
        return cls(chunk.header, converted)
@dataclass
class XrefChunk(UnimplementedDataChunk):
    # XREF data chunk: payload format unknown; raw bytes kept by UnimplementedDataChunk.
    pass
@dataclass
class AnimChunk(AbstractChunk):
    # NOTE(review): this local class shadows the AnimChunk imported from
    # relic.chunky_formats.dow.whm.animation at the top of this module; every later
    # reference in this file resolves to *this* class — confirm the import is still needed.
    xref: XrefChunk
    anbv: AnbvChunk
    @classmethod
    def convert(cls, chunk: FolderChunk) -> AnimChunk:
        """Build an AnimChunk from a folder expected to hold exactly one XREF and one ANBV data chunk."""
        xref = find_chunk(chunk.chunks, "XREF", ChunkType.Data)
        xref = XrefChunk.convert(xref)
        anbv = find_chunk(chunk.chunks, "ANBV", ChunkType.Data)
        anbv = AnbvChunk.convert(anbv)
        # Exactly the two expected children, nothing more.
        assert len(chunk.chunks) == 2
        return AnimChunk(chunk.header, xref, anbv)
@dataclass
class RebpChunk(AbstractChunk):
    """REBP folder chunk: root payload of a WHE file (classes, conditions, motion tree,
    actions, UI data, events, model reference and animations)."""
    clst: Optional[ClstChunk]
    conl: Optional[ConlChunk]
    mtre: MtreChunk
    acts: Optional[ActsChunk]
    seui: SeuiChunk
    evct: Optional[EvctChunk]
    modl: Optional[ModlChunk]
    anim: List[AnimChunk]
    @classmethod
    def convert(cls, chunk: FolderChunk) -> RebpChunk:
        """Convert all known children; MTRE and SEUI are mandatory, the rest are optional or repeated."""
        # Seems to vary; but version is always 4?
        assert chunk.header.version == 4, chunk.header.version
        clst = find_chunk(chunk.chunks, "CLST", ChunkType.Folder)
        clst = ClstChunk.convert(clst) if clst else None
        conl = find_chunk(chunk.chunks, "CONL", ChunkType.Folder)
        conl = ConlChunk.convert(conl) if conl else None
        mtre = find_chunk(chunk.chunks, "MTRE", ChunkType.Folder)
        mtre = MtreChunk.convert(mtre)
        acts = find_chunk(chunk.chunks, "ACTS", ChunkType.Data)
        acts = ActsChunk.convert(acts) if acts else None
        seui = find_chunk(chunk.chunks, "SEUI", ChunkType.Data)
        seui = SeuiChunk.convert(seui)
        evct = find_chunk(chunk.chunks, "EVCT", ChunkType.Folder)
        evct = EvctChunk.convert(evct) if evct else None
        modl = find_chunk(chunk.chunks, "MODL", ChunkType.Folder)
        modl = ModlChunk.convert(modl) if modl else None
        anim = find_chunks(chunk.chunks, "ANIM", ChunkType.Folder)
        anim = [AnimChunk.convert(_) for _ in anim]
        # Sanity check: 2 mandatory chunks (MTRE, SEUI) + each optional chunk that was found + every ANIM.
        _loaded = 2 + (1 if clst else 0) + (1 if conl else 0) + (1 if evct else 0) + (1 if acts else 0) + (1 if modl else 0) + len(anim)
        assert len(chunk.chunks) == _loaded, (len(chunk.chunks), _loaded, [(_.header.type.value, _.header.id) for _ in chunk.chunks], [clst, conl, mtre, acts, seui, evct, modl, anim])
        return RebpChunk(chunk.header, clst, conl, mtre, acts, seui, evct, modl, anim)
@dataclass
class WheChunky(RelicChunky):
    """Root of a WHE chunky: file burn info (FBIF) plus the REBP payload."""
    fbif: FbifChunk
    rebp: RebpChunk

    @classmethod
    def convert(cls, chunky: GenericRelicChunky) -> WheChunky:
        """Expects exactly one FBIF data chunk and one REBP folder chunk."""
        fbif_generic = find_chunk(chunky.chunks, "FBIF", ChunkType.Data)
        fbif = FbifChunk.convert(fbif_generic)
        rebp_generic = find_chunk(chunky.chunks, "REBP", ChunkType.Folder)
        rebp = RebpChunk.convert(rebp_generic)
        assert len(chunky.chunks) == 2
        return cls(chunky.header, fbif, rebp)
from __future__ import annotations
from dataclasses import dataclass
from typing import Iterable
from archive_tools.structx import Struct
from archive_tools.vstruct import VStruct
from ..common_chunks.fbif import FbifChunk
from ...convertable import ChunkConverterFactory
from ...util import ChunkCollectionX
from ....chunky import DataChunk, GenericDataChunk, ChunkType, RelicChunky, GenericRelicChunky, ChunkyVersion, AbstractChunk, FolderChunk
@dataclass
class FdaInfoChunk(DataChunk):
    """FDA INFO chunk: audio stream parameters (seven little-endian int32s)."""
    LAYOUT = Struct("< 7l")
    CHUNK_ID = "INFO"
    CHUNK_TYPE = ChunkType.Data
    channels: int
    sample_size: int
    block_bitrate: int
    sample_rate: int
    begin_loop: int
    end_loop: int
    start_offset: int

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> FdaInfoChunk:
        """Unpack the seven int32 fields; only version 1 is known."""
        # VERSIONED
        assert chunk.header.version in [1], chunk.header.version
        fields = cls.LAYOUT.unpack(chunk.raw_bytes)
        return cls(chunk.header, *fields)
@dataclass
class FdaDataChunk(DataChunk):
    """FDA DATA chunk: the length-prefixed encoded audio payload."""
    CHUNK_ID = "DATA"
    CHUNK_TYPE = ChunkType.Data
    LAYOUT = VStruct("< v")
    data: bytes

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> FdaDataChunk:
        """Unpack the variable-length payload; only version 1 is known."""
        # VERSIONED
        assert chunk.header.version in [1], chunk.header.version
        (payload,) = cls.LAYOUT.unpack(chunk.raw_bytes)
        # The payload plus the length prefix must account for the whole chunk.
        assert len(payload) == len(chunk.raw_bytes) - cls.LAYOUT.min_size
        return cls(chunk.header, payload)
@dataclass
class FdaChunk(AbstractChunk):
    """'FDA ' folder chunk: exactly one INFO and one DATA child."""
    CHUNK_TYPE = ChunkType.Folder
    CHUNK_ID = "FDA "
    info: FdaInfoChunk
    data: FdaDataChunk

    @property
    def chunks(self) -> Iterable[AbstractChunk]:
        """Children in on-disk order, for ChunkCollection compatibility."""
        yield from (self.info, self.data)

    @classmethod
    def convert(cls, chunk: FolderChunk) -> FdaChunk:
        assert chunk.header.version in [1], chunk.header.version
        children = FdaChunkConverter.convert_many(chunk.chunks)
        collection = ChunkCollectionX.list2col(children)
        info_chunk = collection.find(FdaInfoChunk)
        data_chunk = collection.find(FdaDataChunk)
        # All children converted, and there are exactly the two expected ones.
        assert len(children) == len(chunk.chunks) and len(chunk.chunks) == 2
        return cls(chunk.header, info_chunk, data_chunk)
@dataclass
class FdaChunky(RelicChunky):
    """Root of an FDA chunky: file burn info (FBIF) plus the 'FDA ' folder."""
    SUPPORTED_VERSIONS = [ChunkyVersion.v0101]
    fbif: FbifChunk
    fda: FdaChunk

    @property
    def chunks(self) -> Iterable[AbstractChunk]:
        """Children in on-disk order, for ChunkCollection compatibility."""
        yield from (self.fbif, self.fda)

    @classmethod
    def convert(cls, chunky: GenericRelicChunky) -> FdaChunky:
        # VERSIONED
        assert chunky.header.version in cls.SUPPORTED_VERSIONS, chunky.header.version
        children = FdaChunkConverter.convert_many(chunky.chunks)
        collection = ChunkCollectionX.list2col(children)
        fbif_chunk = collection.find(FbifChunk)
        fda_chunk = collection.find(FdaChunk)
        assert len(children) == len(chunky.chunks) and len(chunky.chunks) == 2
        return cls(chunky.header, fbif_chunk, fda_chunk)
def add_fda_chunk_converter(conv: ChunkConverterFactory):
    """Register every chunk converter used by FDA chunkies on `conv` (in registration order)."""
    for chunk_class in (FbifChunk, FdaInfoChunk, FdaDataChunk, FdaChunk):
        conv.register(chunk_class)
def generate_fda_chunk_converter():
    """Build a fresh ChunkConverterFactory pre-loaded with the FDA converters."""
    factory = ChunkConverterFactory()
    add_fda_chunk_converter(factory)
    return factory
# Individual converters are used to allow differing Chunkies to substitute their own Chunks
FdaChunkConverter = generate_fda_chunk_converter()
from __future__ import annotations
# Painted Team BD?
from dataclasses import dataclass
from enum import Enum
from typing import Optional, List
from archive_tools.structx import Struct
from archive_tools.vstruct import VStruct
from relic.chunky.chunk.chunk import GenericDataChunk, AbstractChunk, FolderChunk
from relic.chunky.chunk.header import ChunkType
from relic.chunky.chunky.chunky import GenericRelicChunky, RelicChunky
from relic.chunky_formats.dow.common_chunks.imag import ImagChunk
from relic.chunky_formats.util import find_chunks, find_chunk
# Painted Team Layer Data?
# Painted Team BN?
# Looks identical to PTBD
@dataclass
class PtbdChunk(AbstractChunk):
    """PTBD data chunk: four unidentified floats.

    Floats of this kind are typically positions or UV coordinates — possibly an
    atlas size, but the meaning is unconfirmed.
    """
    LAYOUT = Struct("< 4f")
    unk_a: float
    unk_b: float
    unk_c: float
    unk_d: float

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> 'PtbdChunk':
        values = cls.LAYOUT.unpack(chunk.raw_bytes)
        return cls(chunk.header, *values)
@dataclass
class WtpInfoChunk(AbstractChunk):
    """WTP INFO chunk: the texture's pixel dimensions."""
    LAYOUT = Struct("< 2l")
    width: int
    height: int
    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> WtpInfoChunk:
        # On disk the order is height-first; the keyword args below keep the swap explicit.
        height, width = cls.LAYOUT.unpack(chunk.raw_bytes)
        return WtpInfoChunk(chunk.header, width=width, height=height)  # SWAPPED! Using Kwargs to make sure order doesn't matter
@dataclass
class PtbnChunk:
    """PTBN data chunk: four unidentified floats (looks identical to PTBD).

    NOTE(review): unlike PtbdChunk this class neither subclasses AbstractChunk nor
    retains the chunk header — confirm this is intentional.
    """
    LAYOUT = Struct("< 4f")
    unk_a: float
    unk_b: float
    unk_c: float
    unk_d: float

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> PtbnChunk:
        values = cls.LAYOUT.unpack(chunk.raw_bytes)
        # The size check intentionally runs after unpacking, mirroring the original
        # order (unpack itself fails first on undersized payloads).
        assert len(chunk.raw_bytes) == cls.LAYOUT.size
        return cls(*values)
class PtldLayer(Enum):
    # Paint-layer slots referenced by PTLD chunks; the value is the on-disk layer code.
    Primary = 0
    Secondary = 1
    Trim = 2
    Weapon = 3
    Eyes = 4
    Dirt = 5
@dataclass
class PtldChunk(AbstractChunk):
    """PTLD data chunk: one team-paint layer image (layer code + raw image bytes)."""
    LAYOUT = VStruct("< l v")
    layer: PtldLayer
    image: bytes

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> PtldChunk:
        assert chunk.header.version == 1
        layer_value, image_data = cls.LAYOUT.unpack(chunk.raw_bytes)
        return cls(chunk.header, PtldLayer(layer_value), image_data)
@dataclass
class TpatChunk:
    """TPAT folder chunk: texture info, base image and optional team-paint layers.

    NOTE(review): unlike most chunk classes this one does not retain the folder
    header — confirm that is intentional.
    """
    info: WtpInfoChunk
    imag: ImagChunk
    ptld: List[PtldChunk]
    ptbd: Optional[PtbdChunk]
    ptbn: Optional[PtbnChunk]

    @classmethod
    def convert(cls, chunk: FolderChunk) -> 'TpatChunk':
        """Convert the mandatory INFO/IMAG children, all PTLD layers, and the optional
        PTBD/PTBN chunks; asserts no other children exist.

        (Dead commented-out alternatives for PTLD/PTBD/PTBN handling were removed.)
        """
        info = find_chunk(chunk.chunks, "INFO", ChunkType.Data)
        info = WtpInfoChunk.convert(info)
        imag = find_chunk(chunk.chunks, "IMAG", ChunkType.Folder)
        imag = ImagChunk.convert(imag)
        ptld = find_chunks(chunk.chunks, "PTLD", ChunkType.Data)
        ptld = [PtldChunk.convert(_) for _ in ptld]
        ptbd = find_chunk(chunk.chunks, "PTBD", ChunkType.Data)
        ptbd = PtbdChunk.convert(ptbd) if ptbd else None
        ptbn = find_chunk(chunk.chunks, "PTBN", ChunkType.Data)
        ptbn = PtbnChunk.convert(ptbn) if ptbn else None
        # Every child must be accounted for: INFO + IMAG (2), each PTLD, plus whichever of PTBD/PTBN were present.
        assert len(chunk.chunks) == sum(1 if _ else 0 for _ in [ptbd, ptbn]) + 2 + len(ptld), [(_.header.type.value, _.header.id) for _ in chunk.chunks]
        return TpatChunk(info, imag, ptld, ptbd, ptbn)
@dataclass
class WtpChunky(RelicChunky):
    """Root of a WTP chunky: a single TPAT folder."""
    tpat: TpatChunk

    @classmethod
    def convert(cls, chunky: GenericRelicChunky) -> WtpChunky:
        tpat_folder = find_chunk(chunky.chunks, "TPAT", ChunkType.Folder)
        return cls(chunky.header, TpatChunk.convert(tpat_folder))
import os
import shutil
import subprocess
from io import BytesIO
from os.path import dirname, splitext, exists
from tempfile import NamedTemporaryFile
from typing import BinaryIO, Optional
from .imag import ImagChunk
from ....file_formats.dxt import get_full_dxt_header, build_dow_tga_color_header, DDS_MAGIC, build_dow_tga_gray_header
# Name of the DirectXTex 'texconv' command-line tool used for DDS flips/conversions.
TEX_CONV = "texconv.exe"
# Candidate locations: a copy in the current working directory, or anything on PATH.
DEFAULT_LOCAL_TEX_CONV = os.path.abspath(fr".\{TEX_CONV}")
DEFAULT_PATH_TEX_CONV = TEX_CONV
def find_texconv() -> Optional[str]:
    """Locate texconv: prefer one on PATH, then a local copy; None when neither exists."""
    if shutil.which(DEFAULT_PATH_TEX_CONV):
        return DEFAULT_PATH_TEX_CONV
    return DEFAULT_LOCAL_TEX_CONV if exists(DEFAULT_LOCAL_TEX_CONV) else None
class ImagConverter:
    """Writes IMAG chunks out as image files (DDS/TGA), shelling out to the external
    DirectXTex `texconv` tool for DDS flips and format conversions."""

    # Resolved once at import time; None when texconv cannot be located.
    TEXCONV_PATH: Optional[str] = find_texconv()

    @classmethod
    def fix_dow_dds(cls, input_stream: BinaryIO, output_stream: BinaryIO, *, texconv_path: str = None):
        """
        Vertically flips the dds image contained in input_stream and writes the result
        :param input_stream: The dds file stream to read from
        :param output_stream: The dds file stream to write to
        :param texconv_path: If supplied, will use this path to call texconv instead of the class path.
        :raises FileNotFoundError: when no texconv executable can be located.
        """
        texconv_path = texconv_path or cls.TEXCONV_PATH
        if not texconv_path:
            raise FileNotFoundError("No texconv.exe could be found; try specifying texconv_path.")
        elif not exists(texconv_path):
            raise FileNotFoundError(texconv_path)
        try:
            # texconv only operates on files, so round-trip through a temp file.
            # delete=False: texconv must reopen the file (Windows would otherwise hold a lock).
            with NamedTemporaryFile("wb", delete=False) as in_file:
                in_file.write(input_stream.read())
                in_file.close()
                # -vflip: vertical flip; -y: overwrite in place; -o: emit next to the input.
                subprocess.run([texconv_path, "-vflip", "-y", "-o", dirname(in_file.name), in_file.name], stdout=subprocess.DEVNULL)
                with open(in_file.name, "rb") as out_file:
                    output_stream.write(out_file.read())
        finally:
            try:
                os.remove(in_file.name)
            except FileNotFoundError:
                pass

    @classmethod
    def ConvertStream(cls, input_stream: BinaryIO, output_stream: BinaryIO, out_format: str, input_ext: str = None, perform_dds_fix: bool = False, *, texconv_path: str = None):
        """Convert an image stream to `out_format` via texconv, optionally applying the
        DoW vertical-flip fix in the same pass (avoids redoing a temp file).

        :raises FileNotFoundError: when no texconv executable can be located.
        :raises KeyError: for output formats without a mapped extension (only 'png' currently).
        """
        def get_texconv_fmt_ext() -> str:
            # texconv names its output by swapping the extension; map the formats we support.
            lookup = {
                'png': ".PNG",
            }
            return lookup[out_format.lower()]

        input_ext = input_ext or "." + out_format
        texconv_path = texconv_path or cls.TEXCONV_PATH
        # CONSISTENCY FIX: validate up front like fix_dow_dds does; previously a
        # missing texconv fell straight through to subprocess.run([None, ...]) and
        # raised an opaque TypeError.
        if not texconv_path:
            raise FileNotFoundError("No texconv.exe could be found; try specifying texconv_path.")
        try:
            with NamedTemporaryFile("wb", suffix=input_ext, delete=False) as in_file:
                in_file.write(input_stream.read())
                in_file.close()
                args = [texconv_path, "-vflip" if perform_dds_fix else None, "-ft", out_format, "-y", "-o", dirname(in_file.name), in_file.name]
                # Drop the placeholder None when no flip is requested.
                args = [arg for arg in args if arg is not None]
                subprocess.run(args, stdout=subprocess.DEVNULL)
                base, _ = splitext(in_file.name)
                out_name = base + get_texconv_fmt_ext()
                with open(out_name, "rb") as out_file:
                    output_stream.write(out_file.read())
        finally:
            # Locals may be unbound when an early step failed; treat that like a missing file.
            try:
                os.remove(in_file.name)
            except (FileNotFoundError, UnboundLocalError):
                pass
            try:
                os.remove(out_name)
            except (FileNotFoundError, UnboundLocalError):
                pass

    @classmethod
    def Imag2StreamRaw(cls, imag: ImagChunk, stream: BinaryIO, color_tga: bool = True):
        """Write the IMAG payload as-is (DDS or TGA), synthesising the appropriate file header.

        :raises NotImplementedError: for image formats that are neither DXT nor TGA.
        """
        info = imag.attr
        data = imag.data.raw_bytes
        if info.image_format.is_dxt:
            header = get_full_dxt_header(info.image_format.fourCC, info.width, info.height, len(data), info.mips)
            stream.write(DDS_MAGIC)
            stream.write(header)
            stream.write(data)
        elif info.image_format.is_tga:
            # The payload carries no TGA header; build a color or grayscale one.
            if color_tga:
                header = build_dow_tga_color_header(info.width, info.height)
            else:
                header = build_dow_tga_gray_header(info.width, info.height)
            stream.write(header)
            stream.write(data)
        else:
            raise NotImplementedError(info.image_format, info.image_format.is_dxt)

    # Less of a conversion:
    # writes the imag as an image to the stream; `raw` will not perform a DDS fix (or any other fixes).
    @classmethod
    def Imag2Stream(cls, imag: ImagChunk, stream: BinaryIO, out_format: str = None, raw: bool = False, *, texconv_path: str = None, color_tga: bool = True):
        if raw:  # Regardless of type, don't perform any fixes
            cls.Imag2StreamRaw(imag, stream, color_tga=color_tga)
        elif out_format:
            with BytesIO() as temp:
                cls.Imag2StreamRaw(imag, temp, color_tga=color_tga)
                # We have to check needs-fixing, otherwise non-dds images would be dds_fixed.
                perform_dds_fix = not raw and imag.attr.image_format.is_dxt
                temp.seek(0, 0)
                cls.ConvertStream(temp, stream, out_format, imag.attr.image_format.extension, perform_dds_fix, texconv_path=texconv_path)
        else:
            if imag.attr.image_format.is_dxt:
                with BytesIO() as temp:
                    cls.Imag2StreamRaw(imag, temp, color_tga=color_tga)
                    temp.seek(0, 0)
                    cls.fix_dow_dds(temp, stream, texconv_path=texconv_path)
            else:  # TGA, no fixes
                cls.Imag2StreamRaw(imag, stream, color_tga=color_tga)
from __future__ import annotations
import dataclasses
import json
from dataclasses import dataclass
from enum import Enum
from json import JSONEncoder
from typing import TextIO, List, Any, Dict, Optional, Tuple
from .animation import AnimChunk, AnimDataBoneFrameInfo, AnimDataMeshFrameInfo
from .mesh import MslcChunk
from .shared import Byte
from .whm import WhmChunky, RsgmChunkV3, SkelChunk, MsgrChunk
from ....file_formats.mesh_io import Float3, Float2, Short3, Float4
def flip_float3(v: Float3, flip_x: bool = False, flip_y: bool = False, flip_z: bool = False) -> Float3:
    """Negate the selected axes of `v`; returns the input tuple itself when no axis is selected."""
    if not (flip_x or flip_y or flip_z):
        return v
    x, y, z = v
    return (-x if flip_x else x, -y if flip_y else y, -z if flip_z else z)
@dataclass
class SimpleTransform:
    """A position plus quaternion rotation, as exported to JSON."""
    position: Float3
    rotation: Float4
@dataclass
class RawMesh:
    """Engine-agnostic mesh data extracted from an MSLC chunk, ready for JSON export."""
    name: str
    positions: List[Float3]
    normals: List[Float3]
    bones: Dict[int, str]
    # Per-vertex list of (bone index, weight) pairs; None when the mesh is unskinned.
    bone_weights: Optional[List[List[Tuple[Float3, Byte]]]]
    uvs: List[Float2]
    # Keyed by texture path; each value is that sub-mesh's triangle index list.
    sub_meshes: Dict[str, List[Short3]]
    @classmethod
    def convert_from_mslc(cls, chunk: MslcChunk) -> RawMesh:
        """Extract vertex buffers, bone table, weights and sub-meshes from one MSLC chunk."""
        mesh = chunk.data
        name = chunk.header.name
        # DO NOT PERFORM ANY MODIFICATIONS
        # Let importer handle it to keep it in one location
        positions = mesh.vertex_data.positions
        normals = mesh.vertex_data.normals
        # positions = [flip_float3(p, flip_x=True) for p in mesh.vertex_data.positions]
        # normals = [flip_float3(n, flip_x=True) for n in mesh.vertex_data.normals]
        bones = {b.index: b.name for b in mesh.bones}
        bone_weights = None
        if mesh.vertex_data.bone_weights:
            bone_weights = []
            # Each stored entry is (three weights, four byte indices): only three
            # weights are stored on disk; the fourth is inferred as 1 - sum.
            for bwd in mesh.vertex_data.bone_weights:
                w = []
                t = 0
                for i in range(4):
                    bi = bwd[1][i]
                    if bi == 255:
                        # 255 marks an unused bone slot; remaining slots are empty too.
                        break
                    if i == 3:
                        # Fourth weight is implicit: the remainder of the running total.
                        bw = 1.0 - t
                    else:
                        bw = bwd[0][i]
                    t += bw
                    w.append((bi, bw))
                bone_weights.append(w)
        uvs = mesh.vertex_data.uvs
        indexes = {sm.texture_path: sm.triangles for sm in mesh.sub_meshes}
        return RawMesh(name, positions, normals, bones, bone_weights, uvs, indexes)
    @classmethod
    def convert_from_msgr(cls, chunk: MsgrChunk) -> List[RawMesh]:
        """Convert every MSLC child of an MSGR chunk."""
        return [cls.convert_from_mslc(c) for c in chunk.mslc]
@dataclass
class RawBone:
    """A skeleton bone: local transform plus child bones (tree form)."""
    name: str
    transform: SimpleTransform
    children: List[RawBone]
    @classmethod
    def convert_from_skel(cls, chunk: SkelChunk) -> RawBone:
        """Rebuild the bone tree from the SKEL chunk's flat, parent-indexed bone list."""
        # Synthetic root named after the chunk; its transform is intentionally None.
        root = RawBone(chunk.header.name, None, [])
        tree = [RawBone(s.name, SimpleTransform(s.pos, s.quaternion), []) for s in chunk.bones]
        for i, b in enumerate(chunk.bones):
            current = tree[i]
            # parent_index of -1 marks a top-level bone (parented to the synthetic root).
            if b.parent_index == -1:
                parent = root
            else:
                parent = tree[b.parent_index]
            parent.children.append(current)
        return root
def time_to_frame(frame_time: float, frame_count: int) -> int:
    """Map a normalized [0, 1] frame time onto a frame index (uses round(), i.e. banker's rounding)."""
    last_frame = frame_count - 1
    return round(frame_time * last_frame)
@dataclass
class RawAnimBone:
    """Per-bone animation keyframes, keyed by integer frame index."""
    name: str
    pos: Dict[int, Float3]
    rot: Dict[int, Float4]
    stale: bool

    @classmethod
    def convert(cls, data: AnimDataBoneFrameInfo, frame_count: int) -> RawAnimBone:
        """Convert frame-times to frame indices; each value drops its leading element."""
        positions = {time_to_frame(t, frame_count): value[1:] for t, value in data.positions.items()}
        rotations = {time_to_frame(t, frame_count): value[1:] for t, value in data.rotations.items()}
        return cls(data.name, positions, rotations, data.stale)

    @classmethod
    def ignorable(cls, data: AnimDataBoneFrameInfo) -> bool:
        """True when the bone carries no position or rotation keyframes at all."""
        return len(data.positions) + len(data.rotations) == 0
@dataclass
class RawAnimMesh:
    """Per-mesh animation data: visibility keyframes keyed by frame index."""
    name: str
    mode: int
    visibility: Dict[int, float]
    unks: Tuple

    @classmethod
    def convert(cls, data: AnimDataMeshFrameInfo, frame_count: int) -> RawAnimMesh:
        """Convert frame-times to frame indices; each value drops its leading element."""
        vis = {time_to_frame(f, frame_count): v[1:] for f, v in data.visibility.items()}
        # One of those unks is probably 'stale'...? But why mark it stale in the vis, where it should be implied?
        return cls(data.name, data.mode, vis, data.unks)

    @classmethod
    def ignorable(cls, data: AnimDataMeshFrameInfo) -> bool:
        """True when the mesh has no visibility keyframes.

        FIX: the original summed len(data.visibility) twice (a copy-paste from
        RawAnimBone.ignorable, which sums two different dicts); the truth value
        is unchanged by the simplification.
        """
        return len(data.visibility) == 0
@dataclass
class RawAnim:
    """One animation: the keyframe count plus its animated bones and meshes."""
    name: str
    key_frames: int  # used to initialise the animation timeline
    bones: List[RawAnimBone]
    meshes: List[RawAnimMesh]
    # Since it is unknown what `unks` animates, it is ignored here.

    @classmethod
    def convert_from_anim(cls, anim: AnimChunk) -> RawAnim:
        """Convert an ANIM chunk, dropping bones/meshes that carry no keyframes."""
        data = anim.data
        kept_bones = [RawAnimBone.convert(bone, data.key_frames) for bone in data.bones if not RawAnimBone.ignorable(bone)]
        kept_meshes = [RawAnimMesh.convert(mesh, data.key_frames) for mesh in data.meshes if not RawAnimMesh.ignorable(mesh)]
        return cls(anim.header.name, data.key_frames, kept_bones, kept_meshes)

    @classmethod
    def convert_from_anim_list(cls, anims: List[AnimChunk]) -> List[RawAnim]:
        return [cls.convert_from_anim(one) for one in anims]
class SimpleJsonEncoder(JSONEncoder):
    """JSONEncoder that also serialises dataclasses (as dicts) and Enums (as {name: value})."""

    def default(self, o: Any) -> Any:
        if dataclasses.is_dataclass(o):
            return dataclasses.asdict(o)
        if isinstance(o, Enum):
            return {o.name: o.value}
        return super().default(o)
def write_whm(stream: TextIO, whm: WhmChunky, pretty: bool = True):
    """Serialise a WHM chunky to JSON on `stream` (only RsgmChunkV3 is supported).

    Notes from reverse engineering (best place available to record them):
      * Old forum posts (Way Back Machine) mention 'Tread L'/'Tread R' as special
        bones; such meshes likely auto-weight themselves to their special bone.
      * Some objects (vehicles, often) have a skeleton but no bone weights. Watching
        a Baneblade in-game shows the guns are jolted back by animation rather than
        retracting — the conclusion: a skeleton bone is implicitly weighted IFF no
        bone weights are listed AND the mesh name matches a bone name.
      * Semi-related: MARKs seem to be empty objects; could be listed in the OBJ/JSON.
    """
    if not isinstance(whm.rsgm, RsgmChunkV3):
        raise NotImplementedError
    # Computation order matches the original: meshes, skeleton, name, animations.
    meshes = RawMesh.convert_from_msgr(whm.rsgm.msgr)
    skel = RawBone.convert_from_skel(whm.rsgm.skel) if whm.rsgm.skel else None
    name = whm.rsgm.header.name
    anim = RawAnim.convert_from_anim_list(whm.rsgm.anim)
    document = {'name': name, 'skel': skel, 'meshes': meshes, 'animations': anim}
    try:
        json.dump(document, stream, indent=(4 if pretty else None), cls=SimpleJsonEncoder)
    except Exception as err:
        print(err)
        raise
from __future__ import annotations
from dataclasses import dataclass
from io import BytesIO
from typing import BinaryIO, List, Optional, Tuple, Any
from archive_tools.ioutil import has_data
from archive_tools.structx import Struct
from archive_tools.vstruct import VStruct
from relic.chunky import AbstractChunk, ChunkType, GenericDataChunk, FolderChunk
from relic.chunky_formats.convertable import ChunkConverterFactory
from relic.chunky_formats.dow.whm.shared import Short4, BvolChunk, Byte4
from relic.chunky_formats.util import ChunkCollectionX
from relic.file_formats.mesh_io import Float3, Float2, Short3
@dataclass
class MsclHeader:
    """Header preceding the MSLC name table."""
    LAYOUT = Struct("< l b l 2l")
    flag: bytes
    val: int
    name_count: int

    @classmethod
    def unpack(cls, stream: BinaryIO) -> MsclHeader:
        reserved_a, flag, val, reserved_b, name_count = cls.LAYOUT.unpack_stream(stream)
        # Both reserved fields have been zero in all observed files.
        assert reserved_a == 0
        assert reserved_b == 0
        assert flag in [1, 0], (flag, val)
        return cls(flag, val, name_count)
@dataclass
class MslcName:
    """A single entry in the MSLC name table: an ascii name plus an unknown int."""
    LAYOUT = VStruct("vl")
    name: str
    unk_a: int
    @classmethod
    def unpack(cls, stream: BinaryIO) -> MslcName:
        name, unk = cls.LAYOUT.unpack_stream(stream)
        name = name.decode("ascii")
        # NOTE(review): this assert is vacuous — `len(name) + cls.LAYOUT.min_size` is
        # always truthy; a comparison (e.g. against the byte count read) was probably intended.
        assert len(name) + cls.LAYOUT.min_size
        return MslcName(name, unk)
@dataclass
class MslcVertexData:
    """Vertex attribute buffers for one MSLC mesh.

    V_SIZE (the per-vertex byte stride) selects the buffers present:
    32 and 48 carry position/normal/uv; 48 additionally carries bone weights.
    """
    VERTEX_POS_LAYOUT = Struct("< 3f")
    VERTEX_NORM_LAYOUT = Struct("< 3f")
    VERTEX_UV_LAYOUT = Struct("< 2f")
    VERTEX_BONE_WEIGHT_LAYOUT = Struct("< 3f 4B")
    positions: List[Float3]
    normals: List[Float3]
    bone_weights: Optional[List[Tuple[Float3, Byte4]]]
    uvs: List[Float2]

    @property
    def count(self) -> int:
        """Number of vertices (length of the position buffer)."""
        return len(self.positions)

    @classmethod
    def unpack(cls, stream: BinaryIO, vertex_count: int, V_SIZE: int) -> MslcVertexData:
        """Read the vertex buffers in on-disk order: positions, (weights), normals, uvs."""
        has_basic = V_SIZE in (32, 48)
        has_bones = V_SIZE == 48

        positions = [cls.VERTEX_POS_LAYOUT.unpack_stream(stream) for _ in range(vertex_count)] if has_basic else None
        if has_bones:
            raw_weights = [cls.VERTEX_BONE_WEIGHT_LAYOUT.unpack_stream(stream) for _ in range(vertex_count)]
            # Regroup the flat 7-tuple into ((w1, w2, w3), (b1, b2, b3, b4)).
            bone_weights = [((w1, w2, w3), (b1, b2, b3, b4)) for w1, w2, w3, b1, b2, b3, b4 in raw_weights]
        else:
            bone_weights = None
        normals = [cls.VERTEX_NORM_LAYOUT.unpack_stream(stream) for _ in range(vertex_count)] if has_basic else None
        uvs = [cls.VERTEX_UV_LAYOUT.unpack_stream(stream) for _ in range(vertex_count)] if has_basic else None
        return cls(positions, normals, bone_weights, uvs)
@dataclass
class MslcSubmeshData:
    """One textured sub-mesh: texture path, triangle index list, and four trailing shorts."""
    COUNT_LAYOUT = Struct("i")
    NAME_LAYOUT = VStruct("v")
    INDEX_LAYOUT = Struct("H")
    INDEX_TRI_LAYOUT = Struct("3H")
    INDEX_TRAILING_LAYOUT = Struct("4h")
    texture_path: str
    triangles: List[Short3]
    trailing: Short4

    @classmethod
    def unpack(cls, stream: BinaryIO) -> MslcSubmeshData:
        texture_path = cls.NAME_LAYOUT.unpack_stream(stream)[0].decode("ascii")
        index_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
        tri_count = index_count / cls.INDEX_TRI_LAYOUT.args
        # The index buffer must divide into whole triangles.
        assert int(tri_count) == tri_count
        triangles = [cls.INDEX_TRI_LAYOUT.unpack_stream(stream) for _ in range(int(tri_count))]
        trailing = cls.INDEX_TRAILING_LAYOUT.unpack_stream(stream)
        return cls(texture_path, triangles, trailing)

    @property
    def index_count(self) -> int:
        """Total index count (three per triangle)."""
        return self.triangle_count * 3

    @property
    def triangle_count(self) -> int:
        return len(self.triangles)
@dataclass
class MslcBoneInfo:
    """Maps a bone name to its index in the skeleton."""

    LAYOUT = VStruct("vi")

    name: str
    index: int

    @classmethod
    def unpack(cls, stream: BinaryIO) -> MslcBoneInfo:
        """Read one (name, index) bone record from *stream*."""
        raw_name, bone_index = cls.LAYOUT.unpack_stream(stream)
        return cls(raw_name.decode("ascii"), bone_index)
@dataclass
class MslcDataChunk(AbstractChunk):
CHUNK_ID = "DATA"
CHUNK_TYPE = ChunkType.Data
VERSIONS = [2]
# data: bytes
COUNT_LAYOUT = Struct("i")
NAME_LAYOUT = VStruct("v")
# VERTEX_LAYOUT = Struct("32s")
# EXCESS_SIZE = 8
HEADER_LAYOUT = Struct("< i b 4s i")
sub_header: Tuple[Any, ...]
unks: Tuple[Any, ...]
bones: List[MslcBoneInfo]
vertex_data: MslcVertexData
sub_meshes: List[MslcSubmeshData]
unk_a: List[bytes]
unk_b: List[bytes]
unk_c: List[bytes]
UNK2TEX = {}
_EX = []
TEX_SIZES = {
441096, 578440, 824112, 264924, 339144, 84288, 11112, 7620, 5820, 3996, 125616, 112236, 179028, 216024, 256308, 107340, 142692, 148284, 54600, 38496, 29304, 32256, 13236, 29712, 2016, 10500, 58680, 5868, 199452, 58656, 41712, 141456, 138396, 445596, 111672, 12684, 195636, 183672, 116100,
356496, 197976, 176424, 66276, 90804,
126144, 64308, 69996, 204432, 10164, 31716, 43872, 72204, 11808, 9828, 9288, 77268, 29040, 2556, 24204, 63240, 24276, 90980, 53004, 8748, 20700, 14988, 11556, 31356, 17436, 46380, 102300, 389148, 315840, 177696, 156144, 167676, 165888, 167772, 25152, 44820, 9396, 36948,
35664, 56844, 70392, 103752, 54360, 64524, 61272, 197364, 29340, 26316, 15276, 15996, 11868, 95352, 180084, 258120, 184836, 188280, 230604, 10176, 9252, 8328, 6516, 7596, 6876, 7956, 6876, 7956, 8496, 8496, 8496, 8496, 239004, 1836, 9780, 1116, 1116, 1116, 21888, 8052, 2220, 1836,
9780, 53148, 1116, 1116, 1116, 1116, 1116, 5124, 5124, 1116, 1116, 1116, 53148, 1116, 1116, 1116, 53148, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 72180, 5328, 32088, 47664, 37500, 97092, 38292, 5328, 192168,
1116, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 72180, 1116, 1116, 1116, 1116, 1116, 5124, 5124, 1116, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 1116, 131508, 35820, 14268, 9516, 2736, 9516, 1656, 5328, 868,
34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 1116, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 1116, 119616, 59184, 3636, 39708, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116,
9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 288684, 1116, 5124, 5124, 1116, 2220, 2220, 2220, 25488, 25488, 7692, 7692, 7692, 7692, 71196, 17052, 1116, 5124, 5124, 1116, 4428, 4428, 4428, 4428, 21888, 4428, 4428, 21888,
8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 288684, 6924, 9036, 6924, 9036, 1116, 6156, 6156, 238944, 4428, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 182412, 1116, 5124, 5124, 1116, 4428, 4428, 144348, 2220, 2220, 2220, 2220, 1116, 5124, 5124, 1116,
21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 2220, 2220, 2220, 2220, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 2220, 1836, 9780, 144348, 25488, 25488, 7692, 7692, 7692, 7692, 71196, 17052, 11196, 11196, 33852, 162468, 1836, 9780,
144348, 2220, 2220, 2220, 2220, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 88356, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 1116, 1116, 21888, 8052, 2220, 1836, 9780, 34116, 39864, 209988, 53148, 9780, 1836, 1116, 5124, 2220, 8052, 21888, 1116,
1116, 1116, 9780, 1836, 1116, 5124, 2220, 8052, 21888, 19404, 9780, 1836, 2220, 8052, 21888, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 1116, 1116, 21888, 8052, 2220, 1836, 9780, 34116, 1116, 5124, 5124, 1116, 21888, 8052,
2220, 1836, 9780, 1116, 1116, 1116, 21888, 8052, 2220, 1836, 9780, 53148, 1116, 1116, 1116, 1116, 1116, 5124, 5124, 1116, 21888, 8052, 2220, 1836, 9780, 21888, 8052, 2220, 1836, 9780, 72180, 5124, 1116, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888,
1116, 5124, 5124, 1116, 116292, 5484, 5484, 5484, 7596, 4896, 75708, 31548, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 2220, 8052, 21888, 2220, 2220, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 1116, 165600, 1116, 1116, 6588,
22716, 68220, 9780, 1836, 2220, 8052, 21888, 2220, 2220, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 1116, 68220, 9780, 1836, 5124, 1116, 34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 5124, 1116, 44508, 9756, 9756, 8028, 16044,
34116, 9780, 1836, 2220, 8052, 21888, 1116, 1116, 9780, 1836, 2220, 8052, 21888, 1116, 5124, 27084, 2736, 4896, 2736, 4896, 2736, 4896, 2736, 4896, 10116, 10116, 10116, 10116, 36588, 1932, 13560, 13560, 13560, 13560, 4356, 3636, 4716, 4536, 3456, 4716, 4716, 4716, 4716, 3456, 6156,
22356, 4716, 10188, 5796, 5796, 5796, 5796, 176184, 25788, 2916, 25788, 2916, 25788, 2916, 9960, 1116, 1116, 1116, 1116, 6516, 6516, 6516, 6516, 6516, 6516, 37884, 37884, 25788, 2916, 23844, 23124, 23844, 23124, 25572, 25572, 25572, 25572, 23124, 22944, 22944, 23124, 3276, 3276,
3276, 3276, 3276, 3276, 23100, 144648, 5124, 10236, 11472, 11472, 186084, 24660, 12372, 2196, 2196, 2196, 2196, 14040, 1836, 34836, 5076, 16788, 20556, 20556, 20556, 20556, 5076, 20556, 20556, 20556, 20556, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040, 2196, 2196,
2196, 2196, 12372, 24660, 27036, 27036, 27036, 27036, 22404, 30264, 137724, 89508, 44916, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040, 2196, 2196, 2196, 2196, 12372, 24660, 20556, 20556, 20556, 20556, 5076, 9492, 4920, 9492, 4920, 18972, 23292, 13632, 60420, 34236,
20556, 20556, 20556, 20556, 5076, 9492, 4920, 9492, 4920, 18972, 23292, 13632, 60420, 34236, 16788, 5076, 34836, 1836, 14040, 2196, 2196, 2196, 2196, 12372, 24660, 9240, 9240, 9240, 9240, 9240, 9240, 9240, 9240, 99432, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040,
2196, 2196, 2196, 2196, 12372, 24660, 7356, 12636, 12636, 184476, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040, 2196, 2196, 2196, 2196, 12372, 24660, 12636, 49944, 49944, 29580, 29580, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040, 2196, 2196, 2196, 2196,
12372, 24660, 34848, 137364, 8052, 1656, 1656, 8052, 20556, 20556, 20556, 20556, 16788, 5076, 34836, 1836, 14040, 2196, 2196, 2196, 2196, 12372, 24660, 5796, 33972, 11916, 4044, 58008, 12456, 4044, 91308, 24660, 12372, 2196, 2196, 2196, 2196, 14040, 1836, 34836, 5076, 16788, 14040,
1836, 34836, 5076, 16788, 20556, 20556, 20556, 20556, 10140, 10140, 10140, 10140, 3276, 3276, 143196, 5664, 5664, 5664, 5664, 5664, 5664, 5664, 5664, 2916, 8004, 39708, 116880, 24660, 12372, 2196, 2196, 2196, 2196, 123732, 123732, 22092, 22092, 22092, 179700, 128244, 306612, 127740,
123348, 68004, 271224, 274872, 274872, 270756, 250020, 250380, 106440, 106440, 339900, 170556, 170556, 183804, 92472, 5796, 26976, 33132, 46044, 34092, 25644, 45036, 45036, 17136, 315756, 159888, 154788, 65316, 23508, 25512, 152976, 152976, 347724, 19992, 27576, 126120, 23340, 30204,
30204, 9084, 9084, 9084, 9084, 9084, 25656, 78336, 18276, 6156, 12300, 7236, 8136, 5796, 5616, 5976, 5976, 7236, 8136, 15768, 22092, 23184, 23184, 35700, 8532, 23184, 5100, 5100, 25452, 26748, 80112, 14364, 18468, 36252, 36252, 3456, 7668, 51816, 168300, 43860, 33948, 108468, 108468,
87300, 6360, 6360, 135156, 168300, 96432, 47328, 16920, 39984, 88140, 81252, 67260, 58344, 6636, 72108, 79668, 10140, 24540, 24540, 24540, 35340, 31428, 15084, 137628, 84952, 80616, 109836, 115692, 105084, 56532, 8004, 156432, 1296, 1296, 1476, 19044, 3300, 74712, 38736, 31848,
36252, 29952, 156552, 1476, 15948, 6156, 6156, 25260, 19044, 77604, 34044, 290664, 6660, 5124, 5124, 1116, 19044, 198768, 2376, 2376, 2376, 2376, 756, 2916, 14796, 8652, 14796, 1656, 1656, 1656, 1656, 33588, 19920, 23568, 17292, 22272, 20388, 7260, 7260, 5436, 5436, 7284, 1116, 2196,
2196, 2916, 2916, 2916, 2916, 7284, 1116, 2196, 2196, 2916, 2916, 2916, 2916, 1476, 5772, 15456, 15456, 15456, 15456, 17148, 15456, 15456, 15456, 19044, 109560, 117516, 29724, 197496, 12732, 2376, 7464, 2376, 1116, 1116, 1116, 1116, 8652, 8652, 197520, 74460, 218364, 17352, 18492,
10296, 10296, 10296, 54324, 65244, 88092, 6924, 72372, 45516, 246660, 10176, 8328, 10296, 14952, 286056, 8496, 8496, 8496, 174420, 17352, 325404, 10116, 12096, 12096, 149640, 6876, 8316, 10176, 9252, 39528, 28752, 109668, 13776, 1836, 165780, 13800, 13800, 44508, 28752, 39588, 28884,
38892, 13596, 8436, 6972, 9204, 16188, 31632, 5148, 88416, 5280, 25212, 28200, 28752, 189924, 33396, 1548, 3660, 31140, 42588, 27648, 29904, 175572, 14340, 4620, 190008, 110088, 25524, 5172, 37248, 5172, 25524, 5172, 37788, 5172, 31752, 18696, 45888, 53988, 31488, 32400, 7020, 2196,
11820, 2196, 2196, 8436, 6972, 28752, 20424, 105036, 19212, 28236, 30540, 19500, 3636, 1836, 3636, 1836, 43068, 6876, 243684, 15372, 15372, 15372, 15372, 15372, 13836, 53976, 38412, 10188, 1116, 9216, 12456, 9756, 24000, 6876, 43068, 133980, 5124, 39084, 41676, 5124, 34092, 40788,
7104, 1116, 2916, 2916, 2916, 2916, 7104, 1116, 2916, 2916, 2916, 2916, 5436, 6660, 43068, 69948, 24900, 3300, 3300, 9948, 5052, 5052, 3300, 36252, 3300, 4920, 23808, 14844, 17292, 14100, 19188, 15324, 7260, 7260, 7284, 1116, 2196, 2196, 2916, 2916, 2916, 2916, 7284, 1116, 2196,
2196, 2916, 2916, 2916, 2916, 26772, 6900, 8004, 6900, 12708, 75156, 43068, 12660, 6900, 94368, 7284, 1116, 2196, 2196, 2916, 2916, 2916, 2916, 7284, 1116, 2196, 2196, 2916, 2916, 2916, 2916, 38988, 10752, 1836, 1116, 1116, 6876, 115392, 44508, 107508, 5124, 5436, 5124, 170292, 74460,
}
@classmethod
def convert(cls, chunk: GenericDataChunk) -> MslcDataChunk:
try:
assert chunk.header.version in cls.VERSIONS, chunk.header.version
assert len(chunk.raw_bytes) == chunk.header.size
with BytesIO(chunk.raw_bytes) as stream:
rsv0_a, flag, val, rsv0_b = cls.HEADER_LAYOUT.unpack_stream(stream)
assert rsv0_a == 0
assert rsv0_b == 0
header = (flag, val)
bone_info_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
bones = [MslcBoneInfo.unpack(stream) for _ in range(bone_info_count)]
vertex_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
vertex_size_id = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
V_SIZE_TABLE = {
37: 32,
39: 48,
}
V_SIZE = V_SIZE_TABLE[vertex_size_id]
_debug_V_BUFFER_START = stream.tell()
_debug_vbuffer_fullsize = vertex_count * V_SIZE
vertex_data = MslcVertexData.unpack(stream, vertex_count, V_SIZE)
e = stream.read(4)
assert e == b'\x00\x00\x00\x00', e
index_buffer_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
index_buffers = []
_debug_ibuffer_fullsize = 0
for _ in range(index_buffer_count):
sub_mesh = MslcSubmeshData.unpack(stream)
index_buffers.append(sub_mesh)
aaa = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
aaa_data = [stream.read(12) for _ in range(aaa)]
aab = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
aab_data = [stream.read(24) for _ in range(aab)]
aac = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
aac_data = [stream.read(40) for _ in range(aac)]
assert not has_data(stream), stream.read()
return cls(chunk.header, header, (vertex_size_id,), bones, vertex_data, index_buffers, aaa_data, aab_data, aac_data)
except Exception as e:
raise
@dataclass
class MslcChunk(AbstractChunk):
    """An MSLC folder chunk: mesh DATA plus a bounding volume (BVOL)."""

    CHUNK_ID = "MSLC"
    CHUNK_TYPE = ChunkType.Folder
    VERSIONS = [1]

    data: MslcDataChunk
    bvol: BvolChunk

    @classmethod
    def convert(cls, chunk: FolderChunk) -> MslcChunk:
        """Convert a folder chunk's children into a structured MslcChunk."""
        # VERSIONED
        assert chunk.header.version in cls.VERSIONS, chunk.header.version
        collection = ChunkCollectionX.list2col(MslcChunkConverter.convert_many(chunk.chunks))
        data = collection.find(MslcDataChunk)
        bvol = collection.find(BvolChunk)
        # Exactly two children are expected: DATA and BVOL.
        assert len(chunk.chunks) == 2
        return MslcChunk(chunk.header, data, bvol)
def add_mslc_chunk_converter(conv):
    """Register the MSLC child-chunk converters on *conv* and return it."""
    for chunk_class in (MslcDataChunk, BvolChunk):
        conv.register(chunk_class)
    return conv
def generate_mslc_chunk_converter():
    """Build a ChunkConverterFactory pre-loaded with the MSLC converters."""
    return add_mslc_chunk_converter(ChunkConverterFactory())
MslcChunkConverter = generate_mslc_chunk_converter()
from __future__ import annotations
from dataclasses import dataclass
from io import BytesIO
from typing import Dict, Tuple, BinaryIO, List
from archive_tools.ioutil import has_data
from archive_tools.structx import Struct
from archive_tools.vstruct import VStruct
from ....chunky import ChunkType, AbstractChunk, GenericDataChunk, FolderChunk
from ...convertable import ChunkConverterFactory
from ...util import UnimplementedDataChunk, ChunkCollectionX
@dataclass
class AnbvChunk(UnimplementedDataChunk):
    """Unparsed ANBV data chunk; its payload is kept as raw bytes."""

    # Normally always 12 (or 16, forgot which) '\x00' bytes
    # If I had to guess; animation bounding volume
    CHUNK_ID = "ANBV"
    CHUNK_TYPE = ChunkType.Data
@dataclass
class AnimDataBoneFrameInfo:
    """Per-bone keyframe tracks: position keys, rotation keys and a 'stale' flag.

    Per community docs (revora.net 3ds-max tutorial), bones are marked stale so a
    vis file does not block other animations from playing; the same docs describe
    motion groups that let the game pick one of several animations at random on
    spawn (e.g. randomized heads) — a neat way to add model variety.
    """

    name: str
    # Keyed by frame value; each entry is (frame, x, y, z).
    positions: Dict[int, Tuple]
    # Keyed by frame value; each entry is (frame, x, y, z, w) - a quaternion.
    rotations: Dict[int, Tuple]
    # How this would be emulated in Blender is an open question.
    stale: bool

    NAME_LAYOUT = VStruct("v")
    COUNT_LAYOUT = Struct("i")
    POS_KEYFRAME_LAYOUT = Struct("4f")
    ROT_KEYFRAME_LAYOUT = Struct("5f")

    @classmethod
    def unpack(cls, stream: BinaryIO) -> AnimDataBoneFrameInfo:
        """Read one bone track: name, position keys, rotation keys, stale flag."""
        bone_name = cls.NAME_LAYOUT.unpack_stream(stream)[0].decode("ascii")
        position_keys = {}
        for _ in range(cls.COUNT_LAYOUT.unpack_stream(stream)[0]):
            frame, x, y, z = cls.POS_KEYFRAME_LAYOUT.unpack_stream(stream)
            position_keys[frame] = (frame, x, y, z)
        rotation_keys = {}
        for _ in range(cls.COUNT_LAYOUT.unpack_stream(stream)[0]):
            frame, x, y, z, w = cls.ROT_KEYFRAME_LAYOUT.unpack_stream(stream)
            rotation_keys[frame] = (frame, x, y, z, w)
        # The trailing byte is the stale flag; only 0 and 1 are valid.
        flag_byte = stream.read(1)
        assert flag_byte in (b'\00', b'\01'), flag_byte
        return cls(bone_name, position_keys, rotation_keys, flag_byte == b'\01')
@dataclass
class AnimDataMeshFrameInfo:
    """Per-mesh visibility keyframes.

    `mode` selects the layout: mode 2 carries two extra counts and one extra
    (skipped) keyframe; mode 0 does not. Only modes 0 and 2 have been observed.
    """

    NAME_LAYOUT = VStruct("v")
    MESH_UNKS_LAYOUT = Struct("3i")
    COUNT_LAYOUT = Struct("i")
    VISIBILITY_LAYOUT = Struct("2f")

    name: str
    mode: int
    # (unks[1], unks[2], unk2, unk3); the latter two are None in mode 0.
    unks: Tuple[int, int, int, int]
    # Keyed by frame value; each entry is (frame, visibility value).
    visibility: Dict[int, Tuple]

    @classmethod
    def unpack(cls, stream: BinaryIO) -> AnimDataMeshFrameInfo:
        """Read one mesh visibility track from *stream*.

        :raises AssertionError: on an unobserved mode or non-zero unk2.
        """
        name = cls.NAME_LAYOUT.unpack_stream(stream)[0]
        name = name.decode("ascii")
        unks = cls.MESH_UNKS_LAYOUT.unpack_stream(stream)
        mode = unks[0]
        # BUGFIX/cleanup: the assert was wrapped in a no-op
        # `try/except Exception as e: raise`; the wrapper is removed.
        assert mode in [0, 2], mode
        key_frame_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
        if mode == 2:
            key_frame_count -= 1  # Meshes have an extra frame?
            unk2 = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
            assert unk2 == 0
            unk3 = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
        else:
            unk2, unk3 = None, None
        visibility = {}
        for _ in range(key_frame_count):
            frame, value = cls.VISIBILITY_LAYOUT.unpack_stream(stream)
            visibility[frame] = (frame, value)
        return cls(name, mode, (unks[1], unks[2], unk2, unk3), visibility)
@dataclass
class AnimDataUnkFrameInfo:
    """Keyframe tracks for the version-2 extra list (likely cameras or markers).

    Same layout as the bone tracks but without the trailing stale flag.
    """

    name: str
    # Keyed by frame value; each entry is (frame, x, y, z).
    positions: Dict[int, Tuple]
    # Keyed by frame value; each entry is (frame, x, y, z, w) - a quaternion.
    rotations: Dict[int, Tuple]

    NAME_LAYOUT = VStruct("v")
    COUNT_LAYOUT = Struct("i")
    POS_KEYFRAME_LAYOUT = Struct("4f")
    ROT_KEYFRAME_LAYOUT = Struct("5f")

    @classmethod
    def unpack(cls, stream: BinaryIO) -> AnimDataUnkFrameInfo:
        """Read one track: name, position keys, rotation keys."""
        track_name = cls.NAME_LAYOUT.unpack_stream(stream)[0].decode("ascii")
        position_keys = {}
        for _ in range(cls.COUNT_LAYOUT.unpack_stream(stream)[0]):
            frame, x, y, z = cls.POS_KEYFRAME_LAYOUT.unpack_stream(stream)
            position_keys[frame] = (frame, x, y, z)
        rotation_keys = {}
        for _ in range(cls.COUNT_LAYOUT.unpack_stream(stream)[0]):
            frame, x, y, z, w = cls.ROT_KEYFRAME_LAYOUT.unpack_stream(stream)
            rotation_keys[frame] = (frame, x, y, z, w)
        return cls(track_name, position_keys, rotation_keys)
@dataclass
class AnimDataChunk(AbstractChunk):
    """Parsed animation DATA chunk: bone tracks, mesh visibility tracks and, in
    version 2 only, an extra track list (probably cameras or TMK markers —
    they lack the stale flag, so they cannot layer, which fits points in space)."""

    CHUNK_ID = "DATA"
    CHUNK_TYPE = ChunkType.Data
    VERSIONS = [1, 2]  # ig\troops\battle_tank uses #1, basilisk uses #2, may use different anim layout?

    LAYOUT = Struct("i i i")
    COUNT_LAYOUT = Struct("i")

    # Not FPS (values like 37 observed) and frame keys inside the tracks are
    # floats; most likely the animation length in frames.
    key_frames: int
    bones: List[AnimDataBoneFrameInfo]
    meshes: List[AnimDataMeshFrameInfo]
    # None for version-1 chunks.
    unks: List[AnimDataUnkFrameInfo]

    @classmethod
    def convert(cls, chunk: GenericDataChunk) -> AnimDataChunk:
        """Parse the raw DATA payload according to its version.

        :raises AssertionError: on an unsupported version or trailing data.
        """
        version = chunk.header.version
        assert version in cls.VERSIONS, version
        with BytesIO(chunk.raw_bytes) as stream:
            frame_count, unk, bone_count = cls.LAYOUT.unpack_stream(stream)
            bones = [AnimDataBoneFrameInfo.unpack(stream) for _ in range(bone_count)]
            # MESH
            mesh_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
            mesh = [AnimDataMeshFrameInfo.unpack(stream) for _ in range(mesh_count)]
            # The extra track list is missing in V1.
            if version in [2]:
                unk_count = cls.COUNT_LAYOUT.unpack_stream(stream)[0]
                unks = [AnimDataUnkFrameInfo.unpack(stream) for _ in range(unk_count)]
            else:
                unks = None
            # BUGFIX/cleanup: removed a no-op `try/except Exception as e: raise`
            # wrapper around this assert; behavior is unchanged.
            assert not has_data(stream), stream.read()
            return cls(chunk.header, frame_count, bones, mesh, unks)
@dataclass
class AnimChunk(AbstractChunk):
    """An ANIM folder chunk: animation DATA plus an ANBV blob."""

    CHUNK_TYPE = ChunkType.Folder
    CHUNK_ID = "ANIM"
    VERSIONS = [3]

    data: AnimDataChunk
    anbv: AnbvChunk

    @classmethod
    def convert(cls, chunk: FolderChunk) -> AnimChunk:
        """Convert a folder chunk's children into a structured AnimChunk."""
        assert chunk.header.version in cls.VERSIONS, chunk.header.version
        collection = ChunkCollectionX.list2col(AnimChunkConverter.convert_many(chunk.chunks))
        data = collection.find(AnimDataChunk)
        anbv = collection.find(AnbvChunk)
        # Exactly two children are expected: DATA and ANBV.
        assert len(chunk.chunks) == 2
        return AnimChunk(chunk.header, data, anbv)
def add_anim_chunk_converter(conv):
    """Register the ANIM child-chunk converters on *conv* and return it."""
    for chunk_class in (AnimDataChunk, AnbvChunk):
        conv.register(chunk_class)
    return conv
def generate_anim_chunk_converter():
    """Build a ChunkConverterFactory pre-loaded with the ANIM converters."""
    return add_anim_chunk_converter(ChunkConverterFactory())
AnimChunkConverter = generate_anim_chunk_converter()
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import BinaryIO, Dict, Type, Union
from archive_tools.structx import Struct
from archive_tools.vstruct import VStruct
from ..chunky.header import ChunkyVersion
from ...common import VersionLike, VersionError
class ChunkType(Enum):
    """The two chunk container kinds: folders hold chunks, data holds bytes."""

    Folder = "FOLD"
    Data = "DATA"

    @classmethod
    def parse(cls, value: Union[str, bytes]) -> ChunkType:
        """Coerce a 4-byte magic (bytes or str) into a ChunkType.

        :raises ChunkTypeError: if *value* is not ascii or not a known type.
        """
        if isinstance(value, bytes):
            try:
                value = value.decode("ascii")
            except UnicodeDecodeError:
                raise ChunkTypeError(value)
        try:
            return cls(value)
        except ValueError:
            raise ChunkTypeError(value)
class ChunkError(Exception):
    """Base class for all chunk parsing errors."""
    pass
class ChunkTypeError(ChunkError):
    """Raised when a chunk-type magic is neither FOLD nor DATA."""

    def __init__(self, chunk_type: Union[bytes, str] = None, *args):
        super().__init__(*args)
        # The offending value, if known (bytes or str).
        self.chunk_type = chunk_type

    def __str__(self):
        base = f"ChunkType must be {repr(ChunkType.Folder.value)} or {repr(ChunkType.Data.value)}"
        suffix = "!" if not self.chunk_type else f"; got {repr(self.chunk_type)}!"
        return base + suffix
class ChunkNameError(ChunkError):
    """Raised when a chunk name cannot be decoded as ascii."""

    def __init__(self, name: Union[bytes, str] = None, *args):
        super().__init__(*args)
        # The raw, undecodable name, if known.
        self.name = name

    def __str__(self):
        base = "Chunk name was not parsable ascii text"
        return base + ("!" if not self.name else f"; got {repr(self.name)}!")
@dataclass
class ChunkHeader:
    """Version-agnostic chunk header; concrete layouts live in versioned subclasses."""

    type: ChunkType
    id: str
    version: int
    size: int
    name: str

    @property
    def chunky_version(self) -> ChunkyVersion:
        """The chunky file version this header layout belongs to."""
        raise NotImplementedError

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkHeader:
        # Subclass hook: read the version-specific layout.
        raise NotImplementedError

    def _pack(self, stream: BinaryIO) -> int:
        # Subclass hook: write the version-specific layout; returns bytes written.
        raise NotImplementedError

    @classmethod
    def unpack(cls, stream: BinaryIO, chunky_version: ChunkyVersion) -> ChunkHeader:
        """Read a header using the layout registered for *chunky_version*.

        :raises VersionError: if the version has no registered layout.
        """
        class_type = _VERSION_MAP.get(chunky_version)
        if not class_type:
            raise VersionError(chunky_version, list(_VERSION_MAP.keys()))
        return class_type._unpack(stream)

    def pack(self, stream: BinaryIO) -> int:
        """Write this header to *stream*; returns the number of bytes written.

        BUGFIX: previously this called ``self.pack(stream)`` — unconditional
        infinite recursion. It now delegates to the ``_pack`` subclass hook,
        mirroring ``unpack``/``_unpack``.
        """
        return self._pack(stream)

    def copy(self) -> ChunkHeader:
        raise NotImplementedError
# TODO Find a good solution to version in class names
# OH GOD VERSION NAMES IN THE CLASS, I've tried V(#)p(#), V(hex #)(hex #) and they both look ugly
# Sticking to hex since it looks less bad
@dataclass
class ChunkHeaderV0101(ChunkHeader):
    """Chunk header layout for Chunky v1.1 (Dawn of War)."""

    CHUNK_TYPE_MAGIC_LAYOUT = Struct("< 4s")  # Separated so we can raise an error before reading vlen
    LAYOUT = VStruct("< 4s 2l v")

    @property
    def chunky_version(self) -> ChunkyVersion:
        return ChunkyVersion.v0101

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkHeader:
        # Read the 4-byte type magic first so a bad type fails before the
        # variable-length name is read.
        chunk_type = cls.CHUNK_TYPE_MAGIC_LAYOUT.unpack_stream(stream)[0]
        chunk_type = ChunkType.parse(chunk_type)
        chunk_id, version, size, raw_name = cls.LAYOUT.unpack_stream(stream)
        chunk_id = chunk_id.decode("ascii").strip("\x00")
        try:
            name = raw_name.decode("ascii").rstrip("\x00")
        except UnicodeDecodeError as e:
            raise ChunkNameError(raw_name) from e
        return cls(chunk_type, chunk_id, version, size, name)

    def _pack(self, stream: BinaryIO) -> int:
        # NOTE(review): this looks asymmetric with _unpack — it packs five
        # values into LAYOUT, which _unpack reads as only four fields (after a
        # separately-read type magic), and passes self.chunky_version (an enum)
        # where _unpack read the int `version`. Probably untested/broken;
        # confirm before relying on the write path.
        args = self.type.value, self.id, self.chunky_version, self.size, self.name
        return self.LAYOUT.pack_stream(stream, *args)
@dataclass
class ChunkHeaderV0301(ChunkHeader):
    """Chunk header layout for Chunky v3.1 (Dawn of War II); adds two unknown ints."""

    LAYOUT = VStruct("< 4s 4s 3L 2l")  # 2L v 2L")

    # Two unknown values read after the name length.
    unk_a: int
    unk_b: int

    @property
    def chunky_version(self) -> ChunkyVersion:
        return ChunkyVersion.v0301

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkHeader:
        chunk_type, chunk_id, version, size, name_size, unk_a, unk_b = cls.LAYOUT.unpack_stream(stream)
        chunk_type = ChunkType(chunk_type.decode("ascii"))
        chunk_id = chunk_id.decode("ascii").strip("\x00")
        # The name is read separately, using the size from the fixed layout.
        name = stream.read(name_size).decode("ascii").rstrip("\00")
        return cls(chunk_type, chunk_id, version, size, name, *(unk_a, unk_b))

    def _pack(self, stream: BinaryIO) -> int:
        # NOTE(review): asymmetric with _unpack — it packs self.chunky_version
        # (an enum) where _unpack read an int `version`, and packs the name
        # string where _unpack read a name *size* (the name itself was read
        # separately). Probably untested/broken; confirm before relying on it.
        args = self.type.value, self.id, self.chunky_version, self.size, self.name, self.unk_a, self.unk_b
        return self.LAYOUT.pack_stream(stream, *args)
_VERSION_MAP: Dict[VersionLike, Type[ChunkHeader]] = {
ChunkyVersion.v0101: ChunkHeaderV0101,
ChunkyVersion.v0301: ChunkHeaderV0301
}
from __future__ import annotations
from dataclasses import dataclass
from typing import BinaryIO, Dict, Type
from archive_tools.magic import MagicWordIO, MagicWord
from archive_tools.structx import Struct
from relic.common import VersionEnum, Version, VersionLike, VersionError
ChunkyVersionLayout = Struct("< 2L")
class ChunkyVersion(VersionEnum):
    """Known Relic Chunky container versions; Dow/Dow2 are readability aliases."""

    Unsupported = None
    v0101 = Version(1, 1)
    Dow = v0101  # ALIAS for Prettiness
    v0301 = Version(3, 1)
    Dow2 = v0301  # ALIAS for Prettiness
    v4010 = Version(4, 1)

    @classmethod
    def unpack_version(cls, stream: BinaryIO) -> Version:
        """Read a raw (major, minor) pair from *stream* as a plain Version."""
        return Version(*ChunkyVersionLayout.unpack_stream(stream))

    @classmethod
    def pack_version(cls, stream: BinaryIO, version: VersionLike) -> int:
        """Write *version* (enum member or plain Version) as two ints; returns bytes written."""
        if isinstance(version, VersionEnum):
            version = version.value
        return ChunkyVersionLayout.pack_stream(stream, version.major, version.minor)

    @classmethod
    def unpack(cls, stream: BinaryIO) -> ChunkyVersion:
        """Read a version and coerce it to an enum member (fails if unknown)."""
        return ChunkyVersion(cls.unpack_version(stream))

    def pack(self, stream: BinaryIO) -> int:
        """Write this member's version pair; returns bytes written."""
        return self.pack_version(stream, self)
ChunkyMagic = MagicWordIO(Struct("< 12s"), "Relic Chunky".encode("ascii"))
MultiBR_Magic = MagicWord(Struct("< 4s"), "\r\n\x1a\0".encode("ascii")) # I forgot what the exact value was supposed to be (TODO)
@dataclass
class ChunkyHeader:
    """Version-agnostic chunky file header; versioned payloads live in subclasses."""

    @property
    def version(self) -> ChunkyVersion:
        raise NotImplementedError

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkyHeader:
        # Subclass hook: read the version-specific payload.
        raise NotImplementedError

    def _pack(self, stream: BinaryIO) -> int:
        # Subclass hook: write the version-specific payload; returns bytes written.
        raise NotImplementedError

    @classmethod
    def unpack(cls, stream: BinaryIO) -> ChunkyHeader:
        """Read the multi-BR magic, then the version, then the versioned payload.

        :raises VersionError: if the version has no registered header class.
        """
        MultiBR_Magic.assert_magic_word(stream)
        version = ChunkyVersion.unpack(stream)
        header_class = _VERSION_MAP.get(version)
        if not header_class:
            raise VersionError(version, list(_VERSION_MAP.keys()))
        return header_class._unpack(stream)

    def pack(self, stream: BinaryIO) -> int:
        """Write magic + version + versioned payload; returns total bytes written."""
        total = MultiBR_Magic.write_magic_word(stream)
        total += ChunkyVersion.pack_version(stream, self.version)
        return total + self._pack(stream)
@dataclass
class ChunkyHeaderV0101(ChunkyHeader):
    """Chunky header for v1.1: no payload beyond the shared magic and version."""

    @property
    def version(self) -> ChunkyVersion:
        return ChunkyVersion.v0101

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkyHeader:
        # v1.1 carries no version-specific payload to read.
        return cls()

    def _pack(self, stream: BinaryIO) -> int:
        # Nothing version-specific to write.
        return 0
@dataclass
class ChunkyHeaderV0301(ChunkyHeader):
    """Chunky header for v3.1: a fixed constant triple follows the version."""

    LAYOUT = Struct("< 3L")
    CONST = (36, 28, 1)

    @property
    def version(self) -> ChunkyVersion:
        return ChunkyVersion.v0301

    @classmethod
    def _unpack(cls, stream: BinaryIO) -> ChunkyHeader:
        values = cls.LAYOUT.unpack_stream(stream)
        # The payload is always the same constant triple.
        assert values == cls.CONST, (values, cls.CONST)
        return cls()

    def _pack(self, stream: BinaryIO) -> int:
        return self.LAYOUT.pack_stream(stream, *self.CONST)
_VERSION_MAP: Dict[VersionLike, Type[ChunkyHeader]] = {
ChunkyVersion.v0101: ChunkyHeaderV0101,
ChunkyVersion.v0301: ChunkyHeaderV0301
}
import struct

# --- DDS (DirectDraw Surface) constants and layouts ---
DDS_MAGIC = "DDS ".encode("ascii")
# DDS_HEADER: dwSize(=124) + six dwords, reserved block, pixel format, caps, reserved.
_HEADER = struct.Struct("< 7l 44s 32s 16s 4s")
__DDPIXELFORMAT = struct.Struct("< l l 4s 5l")  # 32s
__DDCAP = struct.Struct("< l l 8s")
# FourCC codes for block-compressed formats.
_DXT1 = "DXT1"  # BUGFIX: was the typo "DXY1"; the DDS FourCC for BC1 is "DXT1".
_DXT3 = "DXT3"
_DXT5 = "DXT5"
_DEFAULT_FLAGS = 0x00001007
_dwF_MIPMAP = 0x00020000
_dwF_DEPTH = 0x00800000
_dwF_PITCH = 0x00000008
_dwF_LINEAR = 0x00080000
_ddsF_FOURCC = 0x00000004
# I'm under the assumption that I can specify mipmap and then set count to 0
# I COULD alternatively flag when mips aren't present and then make a different constant
# According to 'http://doc.51windows.net/directx9_sdk/graphics/reference/DDSFileReference/ddsfileformat.htm#surface_format_header'
# Linearsize is the size of the bytes for main image, assuming main image is the data segment,
_DOW_DXT_FLAGS = _DEFAULT_FLAGS | _dwF_MIPMAP | _dwF_LINEAR
_ddscaps_F_TEXTURE = 0x1000
_ddscaps_F_COMPLEX = 0x8
_ddscaps_F_MIPMAP_S = 0x400000
_ddscaps_F_MIPMAP = _ddscaps_F_COMPLEX | _ddscaps_F_MIPMAP_S
# Mipmap requires complex? (According to DirectXTex's dds.h)
_DOW_DDSCAPS_FLAGS = _ddscaps_F_TEXTURE | _ddscaps_F_MIPMAP
# Does not include magic
# Does not include magic
def calculate_dxt_surface_format_header(width: int, height: int, size: int, pixel_format: bytes, dds_caps: bytes,
                                        mips: int = 0) -> bytes:
    """Build the 124-byte DDS surface header (excluding the leading magic word).

    NOTE(review): the DDS spec orders dwHeight before dwWidth; this packs
    width first — confirm against files/callers before changing.
    """
    reserved_44 = b"\x00" * 44
    reserved_4 = b"\x00" * 4
    return _HEADER.pack(124, _DOW_DXT_FLAGS, width, height, size, 0, mips, reserved_44, pixel_format, dds_caps, reserved_4)
def calculate_compressed_dxt_pixel_format(format: str):
    """Build a DDPIXELFORMAT block for a FourCC-compressed format (e.g. "DXT1")."""
    fourcc = format.encode("ASCII")
    # 32 = structure size; only the FourCC field is meaningful for compressed data.
    return __DDPIXELFORMAT.pack(32, _ddsF_FOURCC, fourcc, 0, 0, 0, 0, 0)
def calculate_dxt_ddscaps(ddscaps_flags: int = _DOW_DDSCAPS_FLAGS, seconadry_flags: int = 0):
    """Build a DDSCAPS block from primary and secondary flag words.

    (The `seconadry_flags` parameter name is a typo kept for API compatibility.)
    """
    return __DDCAP.pack(ddscaps_flags, seconadry_flags, b"\x00" * 8)
# DOES NOT INCLUDE DDS MAGIC WORD
def get_full_dxt_header(format: str, width: int, height: int, size: int, mips: int = 0,
                        ddscaps_flags: int = _DOW_DDSCAPS_FLAGS, seconadry_flags: int = 0):
    """Assemble the complete DDS header (pixel format + caps + surface header).

    Does NOT include the DDS magic word.
    """
    pf_block = calculate_compressed_dxt_pixel_format(format)
    caps_block = calculate_dxt_ddscaps(ddscaps_flags, seconadry_flags)
    return calculate_dxt_surface_format_header(width, height, size, pf_block, caps_block, mips)
# --- TGA constants ---
# http://www.paulbourke.net/dataformats/tga/
_TGA_HEADER = struct.Struct("< b b b h h b h h h h b b")
# Image-descriptor byte: low nibble = attribute (alpha) bits per pixel.
_TGA_16_0 = 0x0
_TGA_16_1 = 0x1
_TGA_32 = 0x8
_TGA_24 = 0x0
# Bit 5 = screen origin.
_SCREEN_ORGIN_LOWER = 0x0
_SCREEN_ORGIN_UPPER = 1 << 5
# Bits 6-7 = interleaving: 00 none, 01 even/odd, 10 four-way, 11 reserved.
_NONINTERLAVED = 0x00 << 6
_EvenOddInterlave = 0x01 << 6
_FourWay = 0x02 << 6  # BUGFIX: was 0x10 << 6 (= 1024, overflows the descriptor byte)
_ILLEGAL = 0x03 << 6  # BUGFIX: was 0x11 << 6; spec value is binary 11 (reserved)
# I don't fully understand non-interleaved, but nothing broke when it was set
# I'd imagine that RGB(A) would be interleaved as such, but maybe not, IDK
_DOW_FORMAT = _TGA_32 | _SCREEN_ORGIN_LOWER | _NONINTERLAVED
# SEE TGA spec linked above: image type 2 = uncompressed color, 3 = uncompressed grayscale.
_COLOR = 2
_GRAY = 3
def build_dow_tga_color_header(width: int, height: int):
    """Build an 18-byte TGA header for 32-bit uncompressed color image data."""
    bits_per_pixel = 32
    return _TGA_HEADER.pack(0, 0, _COLOR, 0, 0, 0, 0, 0, width, height, bits_per_pixel, _DOW_FORMAT)
def build_dow_tga_gray_header(width: int, height: int):
    """Build an 18-byte TGA header for 8-bit uncompressed grayscale image data."""
    bits_per_pixel = 8  # size seems roughly 1/4th the size of the color
    return _TGA_HEADER.pack(0, 0, _GRAY, 0, 0, 0, 0, 0, width, height, bits_per_pixel, _DOW_FORMAT)
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from typing import BinaryIO, ClassVar, Any
from relic.core.errors import MismatchError
from serialization_tools.magic import MagicWordIO
from serialization_tools.structx import Struct
class ChunkType(str, Enum):
    """The two chunk container kinds: folders hold child chunks, data holds bytes."""

    Folder = "FOLD"
    Data = "DATA"
class ChunkFourCC:
    """A four-character code (FourCC) identifying a chunk, e.g. "FOLD" or "DATA"."""

    def __init__(self, code: str) -> None:
        """:raises TypeError: if *code* is not exactly four characters long."""
        if len(code) != 4:
            raise TypeError("`code` must be a four character long string!")
        self.code = code

    def __str__(self) -> str:
        return self.code

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.code!r})"

    def __eq__(self, other: Any) -> bool:
        # BUGFIX: previously accessed other.code unconditionally, raising
        # AttributeError when compared against any non-FourCC value.
        if not isinstance(other, ChunkFourCC):
            return NotImplemented
        return self.code == other.code

    def __hash__(self) -> int:
        # BUGFIX: defining __eq__ without __hash__ made instances unhashable;
        # hash on the code so equal FourCCs can be used as dict/set keys.
        return hash(self.code)
@dataclass
class Version:
    """
    A `Chunky Version`
    """

    # The Major Version
    major: int
    # The Minor Version, this is typically `1`
    minor: int = 1

    LAYOUT: ClassVar[Struct] = Struct("<2I")

    def __str__(self) -> str:
        return f"Version {self.major}.{self.minor}"

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Version):
            return self.major == other.major and self.minor == other.minor
        return super().__eq__(other)

    def __hash__(self) -> int:
        # Realistically; Version will always be <256
        # But we could manually set it to something much bigger by accident; and that may cause collisions
        TERM_SIZE_IN_BYTES: int = self.LAYOUT.size // 2
        # BUGFIX: `+` binds tighter than `<<`, so the old expression computed
        # major << (bits + minor) instead of (major << bits) + minor.
        return (self.major << (TERM_SIZE_IN_BYTES * 8)) + self.minor

    @classmethod
    def unpack(cls, stream: BinaryIO) -> Version:
        """Read (major, minor) from *stream* as two little-endian uint32s."""
        layout: Struct = cls.LAYOUT
        args = layout.unpack_stream(stream)
        return cls(*args)

    def pack(self, stream: BinaryIO) -> int:
        """Write (major, minor) to *stream*; returns the number of bytes written."""
        layout: Struct = self.LAYOUT
        args = (self.major, self.minor)
        written: int = layout.pack_stream(stream, *args)
        return written
# Magic word identifying a properly formatted Relic Chunky file.
MagicWord = MagicWordIO(
    Struct("< 16s"), b"Relic Chunky\r\n\x1a\0"
)  # We include \r\n\x1a\0 because it signals a properly formatted file
def _validate_magic_word(self: MagicWordIO, stream: BinaryIO, advance: bool) -> None:
magic = self.read_magic_word(stream, advance)
if magic != self.word:
raise MismatchError("MagicWord", magic, self.word)
@dataclass
class _ChunkLazyInfo:
jump_to: int
size: int
stream: BinaryIO
def read(self) -> bytes:
jump_back = self.stream.tell()
self.stream.seek(self.jump_to)
buffer = self.stream.read(self.size)
if len(buffer) != self.size:
raise MismatchError("Buffer Read Size", len(buffer), self.size)
self.stream.seek(jump_back)
return buffer
__all__ = ["ChunkType", "ChunkFourCC", "MagicWord", "Version"] | /relic-tool-chunky-core-1.1.0.tar.gz/relic-tool-chunky-core-1.1.0/src/relic/chunky/core/definitions.py | 0.934954 | 0.236626 | definitions.py | pypi |
from dataclasses import dataclass
from typing import BinaryIO, Dict, cast
from serialization_tools.structx import Struct
from relic.chunky.core.definitions import ChunkFourCC
from relic.chunky.core.errors import ChunkNameError
from relic.chunky.core.protocols import StreamSerializer
from relic.chunky.core.serialization import (
ChunkTypeSerializer,
chunk_type_serializer,
ChunkFourCCSerializer,
chunk_cc_serializer,
ChunkCollectionHandler, ChunkyFSSerializer
)
from relic.chunky.v1.definitions import version as version_1p1, ChunkHeader
@dataclass
class ChunkHeaderSerializer(StreamSerializer[ChunkHeader]):
    """
    Serializes a V1 chunk header: type, 4CC, then (version, size, name_size)
    via `layout`, followed by the ASCII name buffer.
    """

    chunk_type_serializer: ChunkTypeSerializer
    chunk_cc_serializer: ChunkFourCCSerializer
    layout: Struct  # packs (version, size, name_size) as three uint32s

    def unpack(self, stream: BinaryIO) -> ChunkHeader:
        """Read a chunk header from the stream.

        Raises:
            ChunkNameError: If the name buffer is not valid ASCII.
        """
        chunk_type = self.chunk_type_serializer.unpack(stream)
        chunk_cc = self.chunk_cc_serializer.unpack(stream)
        version, size, name_size = self.layout.unpack_stream(stream)
        name_buffer = stream.read(name_size)
        try:
            name = name_buffer.rstrip(b"\0").decode("ascii")
        except UnicodeDecodeError as exc:
            raise ChunkNameError(name_buffer) from exc
        return ChunkHeader(chunk_type, chunk_cc, version, size, name)

    def pack(self, stream: BinaryIO, packable: ChunkHeader) -> int:
        """Write a chunk header to the stream; mirrors `unpack`'s field order.

        Returns:
            The number of bytes written.
        """
        written = 0
        written += self.chunk_type_serializer.pack(stream, packable.type)
        # BUGFIX: the 4CC was never written, the layout was handed the wrong
        # fields (cc, version, type, name_len) — four values for a 3-field
        # struct, omitting `size` — and `Struct.pack(...)` (which returns
        # bytes, not a count) was used instead of `pack_stream`.
        written += self.chunk_cc_serializer.pack(stream, packable.cc)
        name_buffer = packable.name.encode("ascii")
        written += self.layout.pack_stream(
            stream, packable.version, packable.size, len(name_buffer)
        )
        written += stream.write(name_buffer)
        return written
# Default V1 chunk-header serializer: (version, size, name_size) packed as
# three little-endian uint32s, preceded by the chunk type and 4CC.
chunk_header_serializer = ChunkHeaderSerializer(
    chunk_type_serializer, chunk_cc_serializer, Struct("<3L")
)
class _NoneHeaderSerializer(StreamSerializer[None]):
    """Serializer for a format with no archive-level header; reads and writes nothing."""

    def unpack(self, stream: BinaryIO) -> None:
        # Nothing to read: the V1 format carries no archive header.
        return None

    def pack(self, stream: BinaryIO, packable: None) -> int:
        # Nothing to write; zero bytes written.
        return 0
def _noneHeader2Meta(_: None) -> Dict[str, object]:
return {}
def _noneMeta2Header(_: Dict[str, object]) -> None:
return None
def _chunkHeader2meta(header: ChunkHeader) -> Dict[str, object]:
    """Expose a chunk header's name, version and 4CC as ChunkyFS metadata."""
    meta: Dict[str, object] = {}
    meta["name"] = header.name
    meta["version"] = header.version
    meta["4cc"] = str(header.cc)
    return meta
def _meta2chunkHeader(meta: Dict[str, object]) -> ChunkHeader:
    """Rebuild a partial ChunkHeader (type/size unknown at this point) from ChunkyFS metadata."""
    return ChunkHeader(  # type: ignore
        name=cast(str, meta["name"]),
        cc=ChunkFourCC(cast(str, meta["4cc"])),
        version=cast(int, meta["version"]),
        type=None,
        size=None,
    )
# Wires chunk headers to/from ChunkyFS metadata for the V1 format.
_chunk_collection_handler = ChunkCollectionHandler(
    header_serializer=chunk_header_serializer,
    header2meta=_chunkHeader2meta,
    meta2header=_meta2chunkHeader
)
# Serializer for Chunky v1.1 files; V1 has no archive-level header, hence the
# None-header serializer and the None <-> {} metadata converters.
chunky_fs_serializer = ChunkyFSSerializer(
    version=version_1p1,
    chunk_serializer=_chunk_collection_handler,
    header_serializer=_NoneHeaderSerializer(),
    header2meta=_noneHeader2Meta,
    meta2header=_noneMeta2Header
)
__all__ = [
    "chunky_fs_serializer",
]
from __future__ import annotations
import hashlib
import typing
import zlib
from dataclasses import dataclass
from io import BytesIO
from pathlib import PurePath
from typing import (
BinaryIO,
List,
Dict,
Optional,
Callable,
Tuple,
Iterable,
TypeVar,
Generic,
)
from fs.base import FS
from serialization_tools.size import KiB, MiB
from serialization_tools.structx import Struct
from relic.sga.core.definitions import (
StorageType,
Version,
MagicWord,
_validate_magic_word,
)
from relic.sga.core.errors import (
MD5MismatchError,
VersionMismatchError,
DecompressedSizeMismatch,
)
from relic.sga.core.filesystem import EssenceFS, _EssenceDriveFS, EssenceFSHandler
from relic.sga.core.protocols import StreamSerializer, T
@dataclass
class TocBlock:
    """(offset, count-or-size) pairs locating the four Table-of-Contents sub-blocks."""

    drive_info: Tuple[int, int]
    folder_info: Tuple[int, int]
    file_info: Tuple[int, int]
    name_info: Tuple[int, int]

    @classmethod
    def default(cls) -> TocBlock:
        """Create a placeholder TOC whose four ranges are all (0, 0)."""
        empty = (0, 0)
        return cls(empty, empty, empty, empty)
class TocHeaderSerializer(StreamSerializer[TocBlock]):
    """
    Reads/writes a TocBlock as four (position, count/size) pairs in the fixed
    order: drive, folder, file, name.
    """

    def __init__(self, layout: Struct):
        # `layout` must pack/unpack exactly eight integers.
        self.layout = layout

    def unpack(self, stream: BinaryIO) -> TocBlock:
        """Read a TocBlock from the stream."""
        (
            drive_pos,
            drive_count,
            folder_pos,
            folder_count,
            file_pos,
            file_count,
            name_pos,
            name_count,
        ) = self.layout.unpack_stream(stream)
        return TocBlock(
            (drive_pos, drive_count),
            (folder_pos, folder_count),
            (file_pos, file_count),
            (name_pos, name_count),
        )

    def pack(self, stream: BinaryIO, value: TocBlock) -> int:
        """Write a TocBlock to the stream; returns the number of bytes written."""
        args = (
            value.drive_info[0],
            value.drive_info[1],
            value.folder_info[0],
            value.folder_info[1],
            value.file_info[0],
            value.file_info[1],
            value.name_info[0],
            value.name_info[1],
        )
        packed: int = self.layout.pack_stream(stream, *args)
        return packed
@dataclass
class DriveDef:
    """TOC definition of a 'drive' (a root of the archive's virtual filesystem)."""

    alias: str  # short identifier used to create the drive in the EssenceFS
    name: str
    root_folder: int  # absolute index of the drive's root folder in the folder TOC
    folder_range: Tuple[int, int]  # [start, end) absolute indices into the folder TOC
    file_range: Tuple[int, int]  # [start, end) absolute indices into the file TOC
class DriveDefSerializer(StreamSerializer[DriveDef]):
    """
    Reads/writes a DriveDef. Alias and name are null-padded ASCII; field order is
    (alias, name, folder_start, folder_end, file_start, file_end, root_folder).
    """

    def __init__(self, layout: Struct):
        self.layout = layout

    def unpack(self, stream: BinaryIO) -> DriveDef:
        """Read a DriveDef from the stream, trimming null padding from the strings."""
        encoded_alias: bytes
        encoded_name: bytes
        (
            encoded_alias,
            encoded_name,
            folder_start,
            folder_end,
            file_start,
            file_end,
            root_folder,
        ) = self.layout.unpack_stream(stream)
        alias: str = encoded_alias.rstrip(b"\0").decode("ascii")
        name: str = encoded_name.rstrip(b"\0").decode("ascii")
        folder_range = (folder_start, folder_end)
        file_range = (file_start, file_end)
        return DriveDef(
            alias=alias,
            name=name,
            root_folder=root_folder,
            folder_range=folder_range,
            file_range=file_range,
        )

    def pack(self, stream: BinaryIO, value: DriveDef) -> int:
        """Write a DriveDef to the stream; returns the number of bytes written."""
        alias: bytes = value.alias.encode("ascii")
        name: bytes = value.name.encode("ascii")
        args = (
            alias,
            name,
            value.folder_range[0],
            value.folder_range[1],
            value.file_range[0],
            value.file_range[1],
            value.root_folder,
        )
        packed: int = self.layout.pack_stream(stream, *args)
        return packed
@dataclass
class FolderDef:
    """TOC definition of a folder."""

    name_pos: int  # offset of the folder's name within the NAME block
    folder_range: Tuple[int, int]  # [start, end) of child folders in the folder TOC
    file_range: Tuple[int, int]  # [start, end) of child files in the file TOC
class FolderDefSerializer(StreamSerializer[FolderDef]):
    """
    Reads/writes a FolderDef in the fixed field order:
    (name_pos, folder_start, folder_end, file_start, file_end).
    """

    def __init__(self, layout: Struct):
        self.layout = layout

    def unpack(self, stream: BinaryIO) -> FolderDef:
        """Read a FolderDef from the stream."""
        (
            name_pos,
            folder_start,
            folder_end,
            file_start,
            file_end,
        ) = self.layout.unpack_stream(stream)
        folder_range = (folder_start, folder_end)
        file_range = (file_start, file_end)
        return FolderDef(
            name_pos=name_pos, folder_range=folder_range, file_range=file_range
        )

    def pack(self, stream: BinaryIO, value: FolderDef) -> int:
        """Write a FolderDef to the stream; returns the number of bytes written."""
        args = (
            value.name_pos,
            value.folder_range[0],
            value.folder_range[1],
            value.file_range[0],
            value.file_range[1],
        )
        packed: int = self.layout.pack_stream(stream, *args)
        return packed
@dataclass
class MetaBlock:
    """Archive-level metadata: the archive's name plus its TOC/DATA pointers."""

    name: str
    ptrs: ArchivePtrs

# TMetadata = TypeVar("TMetadata")
# Version-specific MetaBlock subtype handled by a serializer.
TMetaBlock = TypeVar("TMetaBlock", bound=MetaBlock)
# Version-specific extra TOC metadata type (None for versions without it).
TTocMetaBlock = TypeVar("TTocMetaBlock")
@dataclass
class FileDef:
    """TOC definition of a file."""

    name_pos: int  # offset of the file's name within the NAME block
    data_pos: int  # offset of the payload within the DATA block
    length_on_disk: int  # decompressed payload size
    length_in_archive: int  # stored (possibly compressed) payload size
    storage_type: StorageType

# Version-specific FileDef subtype handled by a serializer.
TFileDef = TypeVar("TFileDef", bound=FileDef)
# Converts a version-specific FileDef into FS metadata, and back.
AssembleFileMetaFunc = Callable[[TFileDef], Dict[str, object]]
DisassembleFileMetaFunc = Callable[[Dict[str, object]], TFileDef]
# Converts a version-specific MetaBlock (+ optional extra TOC metadata) into
# archive-level FS metadata, and back.
AssembleMetaFunc = Callable[
    [BinaryIO, TMetaBlock, Optional[TTocMetaBlock]], Dict[str, object]
]
DisassembleMetaFunc = Callable[
    [BinaryIO, Dict[str, object]], Tuple[TMetaBlock, TTocMetaBlock]
]
def _write_data(data: bytes, stream: BinaryIO) -> int:
"""
Returns the index the data was written to.
"""
pos = stream.tell()
stream.write(data)
return pos
def _get_or_write_name(name: str, stream: BinaryIO, lookup: Dict[str, int]) -> int:
if name in lookup:
return lookup[name]
pos = lookup[name] = stream.tell()
enc_name = name.encode("ascii") + b"\0"
stream.write(enc_name)
return pos
@dataclass
class TOCSerializationInfo(Generic[TFileDef]):
    """Bundle of serializers for the four TOC sub-blocks."""

    drive: StreamSerializer[DriveDef]
    folder: StreamSerializer[FolderDef]
    file: StreamSerializer[TFileDef]
    # True when the NAME block's TOC entry stores a *count* of names; False
    # when it stores the block's size in bytes.
    name_toc_is_count: bool

# Namespace under which SGA-specific metadata is stored on the filesystem.
ESSENCE_NAMESPACE = "essence"
class FSAssembler(Generic[TFileDef]):
    """
    A Helper class used to assemble the SGA hierarchy
    """

    def __init__(
        self,
        stream: BinaryIO,
        ptrs: ArchivePtrs,
        toc: TocBlock,
        toc_serialization_info: TOCSerializationInfo[TFileDef],
        build_file_meta: AssembleFileMetaFunc[TFileDef],
    ):
        # Archive stream; TOC entries and file payloads are read from it.
        self.stream: BinaryIO = stream
        # Pointers locating the TOC ('header') and DATA blocks in `stream`.
        self.ptrs: ArchivePtrs = ptrs
        self.toc: TocBlock = toc
        self.toc_serialization_info: TOCSerializationInfo[
            TFileDef
        ] = toc_serialization_info
        # Converts each version-specific FileDef into FS metadata.
        self.build_file_meta: AssembleFileMetaFunc[TFileDef] = build_file_meta
        # NAME-block offset -> decoded name; populated by `assemble`.
        self.names: Dict[int, str] = {}
        # decompress_files: bool = False
        # lazy: bool = False

    def read_toc_part(
        self,
        toc_info: Tuple[int, int],
        serializer: StreamSerializer[T],
    ) -> List[T]:
        """Seek to one TOC sub-block's (offset, count) and unpack `count` entries."""
        self.stream.seek(self.ptrs.header_pos + toc_info[0])
        return [serializer.unpack(self.stream) for _ in range(toc_info[1])]

    def read_toc(
        self,
    ) -> Tuple[List[DriveDef], List[FolderDef], List[TFileDef], Dict[int, str]]:
        """Read all four TOC sub-blocks: drives, folders, files and names."""
        drives = self.read_toc_part(
            self.toc.drive_info, self.toc_serialization_info.drive
        )
        folders = self.read_toc_part(
            self.toc.folder_info, self.toc_serialization_info.folder
        )
        files = self.read_toc_part(self.toc.file_info, self.toc_serialization_info.file)
        # The NAME entry is a count of names or a byte size depending on version.
        names = (
            _read_toc_names_as_count(
                self.stream, self.toc.name_info, self.ptrs.header_pos
            )
            if self.toc_serialization_info.name_toc_is_count
            else _read_toc_names_as_size(
                self.stream, self.toc.name_info, self.ptrs.header_pos
            )
        )
        return drives, folders, files, names

    def assemble_file(self, parent_dir: FS, file_def: TFileDef) -> None:
        """Materialize one file under `parent_dir`: read (and decompress) its payload, then attach its metadata."""
        name = self.names[file_def.name_pos]
        metadata = self.build_file_meta(file_def)
        # Anything not STOREd is assumed compressed and is inflated on read.
        file_compressed = file_def.storage_type != StorageType.STORE
        lazy_info = FileLazyInfo(
            jump_to=self.ptrs.data_pos + file_def.data_pos,
            packed_size=file_def.length_in_archive,
            unpacked_size=file_def.length_on_disk,
            stream=self.stream,
            decompress=file_compressed,  # self.decompress_files,
        )
        data = lazy_info.read(file_compressed)  # self.decompress_files)
        essence_info: Dict[str, object] = {"storage_type": int(file_def.storage_type)}
        if metadata is not None:
            essence_info.update(metadata)
        with parent_dir.open(name, "wb") as file:
            file.write(data)
        info = {ESSENCE_NAMESPACE: essence_info}
        parent_dir.setinfo(name, info)

    def _assemble_container(
        self,
        container: FS,
        file_range: Tuple[int, int],
        folder_range: Tuple[int, int],
        files: List[TFileDef],
        folders: List[FolderDef],
        file_offset: int,
        folder_offset: int,
    ) -> None:
        """Populate `container` with the files/folders in the given TOC ranges.

        The ranges are absolute TOC indices; the offsets rebase them into the
        (possibly sliced) `files`/`folders` lists.
        """
        offsetted_file_range = [
            file_range[0] - file_offset,
            file_range[1] - file_offset,
        ]
        offsetted_folder_range = [
            folder_range[0] - folder_offset,
            folder_range[1] - folder_offset,
        ]
        container_files = files[offsetted_file_range[0] : offsetted_file_range[1]]
        container_folders = folders[
            offsetted_folder_range[0] : offsetted_folder_range[1]
        ]
        for file_def in container_files:
            self.assemble_file(container, file_def)
        for folder_def in container_folders:
            self.assemble_folder(
                container, folder_def, files, folders, file_offset, folder_offset
            )

    def assemble_folder(
        self,
        parent_dir: FS,
        folder_def: FolderDef,
        files: List[TFileDef],
        folders: List[FolderDef],
        file_offset: int,
        folder_offset: int,
    ) -> FS:
        """Create the folder for `folder_def` under `parent_dir` and recursively populate it."""
        raw_folder_name = self.names[folder_def.name_pos]
        # TOC folder names may be full paths; keep only the last path segment.
        folder_name_as_path = PurePath(raw_folder_name)
        folder_name = (
            folder_name_as_path.parts[-1]
            if len(folder_name_as_path.parts) > 0
            else raw_folder_name
        )
        folder = parent_dir.makedir(folder_name)
        self._assemble_container(
            folder,
            folder_def.file_range,
            folder_def.folder_range,
            files,
            folders,
            file_offset,
            folder_offset,
        )
        return folder

    def assemble_drive(
        self,
        essence_fs: EssenceFS,
        drive_def: DriveDef,
        folder_defs: List[FolderDef],
        file_defs: List[TFileDef],
    ) -> FS:
        """Create the drive for `drive_def` on `essence_fs` and populate it from its root folder."""
        local_file_defs = file_defs[drive_def.file_range[0] : drive_def.file_range[1]]
        local_folder_defs = folder_defs[
            drive_def.folder_range[0] : drive_def.folder_range[1]
        ]
        file_offset = drive_def.file_range[0]
        folder_offset = drive_def.folder_range[0]
        # make root folder relative to our folder slice
        drive_folder_index = drive_def.root_folder - folder_offset
        drive_folder_def = local_folder_defs[drive_folder_index]
        drive = essence_fs.create_drive(drive_def.alias)
        self._assemble_container(
            drive,
            drive_folder_def.file_range,
            drive_folder_def.folder_range,
            local_file_defs,
            local_folder_defs,
            file_offset,
            folder_offset,
        )
        return drive

    def assemble(self, fs: EssenceFS) -> None:
        """Read the TOC and build every drive into `fs`."""
        drive_defs, folder_defs, file_defs, names = self.read_toc()
        self.names.update(names)
        for drive_def in drive_defs:
            self.assemble_drive(fs, drive_def, folder_defs, file_defs)
class FSDisassembler(Generic[TFileDef]):
    """
    Helper that flattens an EssenceFS back into TOC definition lists plus the
    NAME and DATA blocks, prior to writing an archive.
    """

    def __init__(
        self,
        fs: EssenceFS,
        toc_stream: BinaryIO,
        data_stream: BinaryIO,
        name_stream: BinaryIO,
        toc_serialization_info: TOCSerializationInfo[TFileDef],
        meta2def: DisassembleFileMetaFunc[TFileDef],
    ):
        # The filesystem being disassembled.
        self.fs = fs
        """A stream containing the TOC Block"""
        self.toc_stream = toc_stream
        """A stream containing the DATA Block"""
        self.data_stream = data_stream
        """A stream containing the NAME Block"""
        self.name_stream = name_stream
        """A collection containing serializers for DriveDef, FolderDef, FileDef, and a flag to determine whether the NAME Block uses 'size in bytes ~ SIZE' or 'number of elements ~ COUNT'"""
        self.toc_serialization_info = toc_serialization_info
        """A function which converts FileMetadata to a FileDef"""
        self.meta2def = meta2def
        """A collection of file definitions laid out sequentially (by folder). This is populated and used inside the assembler."""
        self.flat_files: List[TFileDef] = []
        """A collection of folder definitions laid out sequentially (by drive/parent folder). This is populated and used inside the assembler."""
        self.flat_folders: List[FolderDef] = []
        """A collection of drive definitions), ordered arbitrarily. This is populated and used inside the assembler."""
        self.flat_drives: List[DriveDef] = []
        """A lookup table to find names already written to the NAME block; contains the position of the desired name in the NAME block."""
        self.flat_names: Dict[str, int] = {}

    def disassemble_file(self, container_fs: FS, file_name: str) -> TFileDef:
        """Write one file's payload to the DATA block and its name to the NAME block, returning its filled-in FileDef."""
        with container_fs.open(file_name, "rb") as handle:
            data = handle.read()
        metadata = dict(container_fs.getinfo(file_name, ["essence"]).raw["essence"])
        file_def: TFileDef = self.meta2def(metadata)
        storage_type = StorageType(metadata["storage_type"])
        if storage_type == StorageType.STORE:
            store_data = data
        elif storage_type in [
            StorageType.BUFFER_COMPRESS,
            StorageType.STREAM_COMPRESS,
        ]:
            store_data = zlib.compress(data)  # TODO process in chunks for large files
        else:
            raise NotImplementedError
        file_def.storage_type = storage_type
        file_def.length_on_disk = len(data)
        file_def.length_in_archive = len(store_data)
        file_def.name_pos = _get_or_write_name(
            file_name, self.name_stream, self.flat_names
        )
        file_def.data_pos = _write_data(store_data, self.data_stream)
        return file_def

    def flatten_file_collection(self, container_fs: FS) -> Tuple[int, int]:
        """Disassemble every file directly inside `container_fs`; returns its [start, end) range in `flat_files`."""
        subfile_start = len(self.flat_files)
        subfile_defs = [
            self.disassemble_file(container_fs, file_info.name)
            for file_info in container_fs.scandir("/")
            if not file_info.is_dir
        ]
        self.flat_files.extend(subfile_defs)
        subfile_end = len(self.flat_files)
        return subfile_start, subfile_end

    def flatten_folder_collection(self, container_fs: FS, path: str) -> Tuple[int, int]:
        """Disassemble every sub-folder of `container_fs`; returns its [start, end) range in `flat_folders`."""
        # Create temporary None folders to ensure a continuous range of child folders; BEFORE entering any child folders
        subfolder_start = len(self.flat_folders)
        folders = [
            file_info.name
            for file_info in container_fs.scandir("/")
            if file_info.is_dir
        ]
        self.flat_folders.extend([None] * len(folders))  # type:ignore
        subfolder_end = len(self.flat_folders)
        # Enter subfolders, and add them to the flat array
        subfolder_defs = [
            self.disassemble_folder(container_fs.opendir(folder), f"{path}/{folder}")
            for folder in folders
        ]
        # Replace the reserved None placeholders with the real definitions.
        self.flat_folders[subfolder_start:subfolder_end] = subfolder_defs
        return subfolder_start, subfolder_end

    def disassemble_folder(self, folder_fs: FS, path: str) -> FolderDef:
        """Flatten one folder: its files, then its sub-folders, then its name."""
        folder_def = FolderDef(None, None, None)  # type: ignore
        # Subfiles
        subfile_range = self.flatten_file_collection(folder_fs)
        # Subfolders
        # # Since Relic typically uses the first folder as the root folder; I will try to preserve that parent folders come before their child folders
        subfolder_range = self.flatten_folder_collection(folder_fs, path)
        folder_name = str(path).split(":", 1)[-1]  # Strip 'alias:' from path
        folder_def.name_pos = _get_or_write_name(
            folder_name, self.name_stream, self.flat_names
        )
        folder_def.file_range = subfile_range
        folder_def.folder_range = subfolder_range
        return folder_def

    def disassemble_drive(self, drive: _EssenceDriveFS, alias: str) -> DriveDef:
        """Flatten one drive: creates its root FolderDef and records the drive's folder/file TOC ranges."""
        name = ""
        drive_folder_def = FolderDef(None, None, None)  # type: ignore
        root_folder = len(self.flat_folders)
        folder_start = len(self.flat_folders)
        file_start = len(self.flat_files)
        # The root folder is appended before its children so parents precede children.
        self.flat_folders.append(drive_folder_def)
        drive_folder_def.name_pos = _get_or_write_name(
            name, self.name_stream, self.flat_names
        )
        drive_folder_def.file_range = self.flatten_file_collection(drive)
        drive_folder_def.folder_range = self.flatten_folder_collection(drive, name)
        folder_end = len(self.flat_folders)
        file_end = len(self.flat_files)
        drive_def = DriveDef(
            alias,
            name,
            root_folder,
            folder_range=(folder_start, folder_end),
            file_range=(file_start, file_end),
        )
        return drive_def

    def write_toc(self) -> TocBlock:
        """
        Writes TOC data to the stream.

        The TocHeader returned is relative to the toc stream's start, does not include the TocHeader itself.
        """
        # Normally, this is drive -> folder -> file -> names
        # But the TOC can handle an arbitrary order (due to ptrs); so we only do this to match their style
        drive_offset = self.toc_stream.tell()
        for drive_def in self.flat_drives:
            self.toc_serialization_info.drive.pack(self.toc_stream, drive_def)
        folder_offset = self.toc_stream.tell()
        for folder_def in self.flat_folders:
            self.toc_serialization_info.folder.pack(self.toc_stream, folder_def)
        file_offset = self.toc_stream.tell()
        for file_def in self.flat_files:
            self.toc_serialization_info.file.pack(self.toc_stream, file_def)
        name_offset = self.toc_stream.tell()
        name_size = self.name_stream.tell()
        self.name_stream.seek(0)
        _chunked_copy(self.name_stream, self.toc_stream, chunk_size=64 * KiB)
        return TocBlock(
            drive_info=(drive_offset, len(self.flat_drives)),
            folder_info=(folder_offset, len(self.flat_folders)),
            file_info=(file_offset, len(self.flat_files)),
            name_info=(
                name_offset,
                len(self.flat_names)
                if self.toc_serialization_info.name_toc_is_count
                else name_size,
            ),
        )

    def disassemble(self) -> TocBlock:
        """Flatten every drive in the FS, then write and return the TOC."""
        for name, drive_fs in self.fs.iterate_fs():
            drive_fs = typing.cast(_EssenceDriveFS, drive_fs)
            drive_def = self.disassemble_drive(drive_fs, name)
            self.flat_drives.append(drive_def)
        return self.write_toc()
def _read_toc_names_as_count(
stream: BinaryIO, toc_info: Tuple[int, int], header_pos: int, buffer_size: int = 256
) -> Dict[int, str]:
NULL = 0
NULL_CHAR = b"\0"
stream.seek(header_pos + toc_info[0])
names: Dict[int, str] = {}
running_buffer = bytearray()
offset = 0
while len(names) < toc_info[1]:
buffer = stream.read(buffer_size)
if len(buffer) == 0:
raise Exception("Ran out of data!") # TODO, proper exception
terminal_null = buffer[-1] == NULL
parts = buffer.split(NULL_CHAR)
if len(parts) > 1:
parts[0] = running_buffer + parts[0]
running_buffer.clear()
if not terminal_null:
running_buffer.extend(parts[-1])
parts = parts[:-1] # drop empty or partial
else:
if not terminal_null:
running_buffer.extend(parts[0])
offset += len(buffer)
continue
remaining = toc_info[1] - len(names)
available = min(len(parts), remaining)
for _ in range(available):
name = parts[_]
names[offset] = name.decode("ascii")
offset += len(name) + 1
return names
def _read_toc_names_as_size(
stream: BinaryIO, toc_info: Tuple[int, int], header_pos: int
) -> Dict[int, str]:
stream.seek(header_pos + toc_info[0])
name_buffer = stream.read(toc_info[1])
parts = name_buffer.split(b"\0")
names: Dict[int, str] = {}
offset = 0
for part in parts:
names[offset] = part.decode("ascii")
offset += len(part) + 1
return names
def _chunked_read(
stream: BinaryIO, size: Optional[int] = None, chunk_size: Optional[int] = None
) -> Iterable[bytes]:
if size is None and chunk_size is None:
yield stream.read()
elif size is None and chunk_size is not None:
while True:
buffer = stream.read(chunk_size)
yield buffer
if len(buffer) != chunk_size:
break
elif size is not None and chunk_size is None:
yield stream.read(size)
elif size is not None and chunk_size is not None:
chunks = size // chunk_size
for _ in range(chunks):
yield stream.read(chunk_size)
total_read = chunk_size * chunks
if total_read < size:
yield stream.read(size - total_read)
else:
raise Exception("Something impossible happened!")
def _chunked_copy(
    in_stream: BinaryIO,
    out_stream: BinaryIO,
    size: Optional[int] = None,
    chunk_size: Optional[int] = None,
) -> None:
    """Copy `in_stream` to `out_stream` chunk by chunk (see `_chunked_read` for the size/chunk_size modes)."""
    write = out_stream.write
    for block in _chunked_read(in_stream, size, chunk_size):
        write(block)
@dataclass
class Md5ChecksumHelper:
    """Computes/validates an MD5 digest over a slice of a stream, optionally seeded with an 'eigen' value."""

    expected: Optional[bytes]  # digest to compare against in `validate`
    stream: Optional[BinaryIO]  # default stream for `read`/`validate`
    start: int  # absolute offset the digest starts at
    size: Optional[int] = None  # bytes to digest; None = through end of stream
    eigen: Optional[bytes] = None  # optional seed fed to MD5 before the data

    def read(self, stream: Optional[BinaryIO] = None) -> bytes:
        """Compute and return the raw 16-byte digest; uses `self.stream` unless one is given.

        Raises:
            IOError: If neither a stream argument nor `self.stream` is available.
        """
        stream = self.stream if stream is None else stream
        if stream is None:
            raise IOError("No Stream Provided!")
        stream.seek(self.start)
        md5 = hashlib.md5(self.eigen) if self.eigen is not None else hashlib.md5()
        # Safer for large files to read chunked
        for chunk in _chunked_read(stream, self.size, 256 * KiB):
            md5.update(chunk)
        md5_str = md5.hexdigest()
        return bytes.fromhex(md5_str)

    def validate(self, stream: Optional[BinaryIO] = None) -> None:
        """Raise MD5MismatchError when the computed digest differs from `expected`."""
        result = self.read(stream)
        if self.expected != result:
            raise MD5MismatchError(result, self.expected)
def _fix_toc(toc: TocBlock, cur_toc_start: int, desired_toc_start: int) -> None:
def _fix(info: Tuple[int, int]) -> Tuple[int, int]:
return info[0] + (cur_toc_start - desired_toc_start), info[1]
toc.folder_info = _fix(toc.folder_info)
toc.file_info = _fix(toc.file_info)
toc.drive_info = _fix(toc.drive_info)
toc.name_info = _fix(toc.name_info)
class EssenceFSSerializer(
    EssenceFSHandler, Generic[TFileDef, TMetaBlock, TTocMetaBlock]
):
    """
    Reads/writes a complete SGA archive between a binary stream and an
    EssenceFS, delegating every version-specific layout to the serializers and
    converter functions supplied at construction.
    """

    # Would use a dataclass; but I also want to be able to override defaults in parent dataclasses
    def __init__(
        self,
        version: Version,
        meta_serializer: StreamSerializer[TMetaBlock],
        toc_serializer: StreamSerializer[TocBlock],
        toc_meta_serializer: Optional[StreamSerializer[TTocMetaBlock]],
        toc_serialization_info: TOCSerializationInfo[TFileDef],
        assemble_meta: AssembleMetaFunc[TMetaBlock, TTocMetaBlock],
        disassemble_meta: DisassembleMetaFunc[TMetaBlock, TTocMetaBlock],
        build_file_meta: AssembleFileMetaFunc[TFileDef],
        gen_empty_meta: Callable[[], TMetaBlock],
        finalize_meta: Callable[[BinaryIO, TMetaBlock], None],
        meta2def: Callable[[Dict[str, object]], TFileDef],
    ):
        self.version = version
        self.meta_serializer = meta_serializer
        self.toc_serializer = toc_serializer
        self.toc_meta_serializer = toc_meta_serializer
        self.toc_serialization_info = toc_serialization_info
        self.assemble_meta = assemble_meta
        self.disassemble_meta = disassemble_meta
        self.build_file_meta = build_file_meta
        self.gen_empty_meta = gen_empty_meta
        self.finalize_meta = finalize_meta
        self.meta2def = meta2def

    def read(self, stream: BinaryIO) -> EssenceFS:
        """Read an SGA archive from `stream` into a new EssenceFS.

        Raises:
            VersionMismatchError: If the stream's version differs from this serializer's.
        """
        # Magic & Version; skippable so that we can check for a valid file and read the version elsewhere
        _validate_magic_word(MagicWord, stream, advance=True)
        stream_version = Version.unpack(stream)
        if stream_version != self.version:
            raise VersionMismatchError(stream_version, self.version)
        meta_block = self.meta_serializer.unpack(stream)
        stream.seek(meta_block.ptrs.header_pos)
        toc_block = self.toc_serializer.unpack(stream)
        # Additional TOC information is not present in earlier versions
        toc_meta_block = (
            self.toc_meta_serializer.unpack(stream)
            if self.toc_meta_serializer is not None
            else None
        )
        name, metadata = meta_block.name, self.assemble_meta(
            stream, meta_block, toc_meta_block
        )
        assembler: FSAssembler[TFileDef] = FSAssembler(
            stream=stream,
            ptrs=meta_block.ptrs,
            toc=toc_block,
            toc_serialization_info=self.toc_serialization_info,
            # decompress_files=decompress,
            build_file_meta=self.build_file_meta,
            # lazy=lazy,
        )
        essence_fs = EssenceFS()
        assembler.assemble(essence_fs)
        # Archive-level info (name, version, version-specific meta) is stored
        # on the filesystem under the 'essence' namespace.
        essence_info: Dict[str, object] = {
            "name": name,
            "version": {"major": stream_version.major, "minor": stream_version.minor},
        }
        if metadata is not None:
            essence_info.update(metadata)
        essence_fs.setmeta(essence_info, ESSENCE_NAMESPACE)
        return essence_fs

    def write(self, stream: BinaryIO, essence_fs: EssenceFS) -> int:
        """Write `essence_fs` to `stream` as an SGA archive; returns the number of bytes written."""
        archive_metadata: Dict[str, object] = typing.cast(
            Dict[str, object], essence_fs.getmeta("essence")
        )
        archive_name: str = typing.cast(str, archive_metadata["name"])
        # IDK why I write to a temp stream; maybe to preserve dest stream in case of errors?
        with BytesIO() as temp_stream:
            MagicWord.write_magic_word(temp_stream)
            self.version.pack(temp_stream)
            with BytesIO() as data_stream:
                with BytesIO() as toc_stream:
                    with BytesIO() as name_stream:
                        disassembler = FSDisassembler(
                            fs=essence_fs,
                            toc_stream=toc_stream,
                            data_stream=data_stream,
                            name_stream=name_stream,
                            toc_serialization_info=self.toc_serialization_info,
                            meta2def=self.meta2def,
                        )
                        partial_toc = disassembler.disassemble()
                        partial_meta, toc_meta = self.disassemble_meta(
                            temp_stream, archive_metadata
                        )
                        # we need to come back with the correct data
                        meta_writeback = temp_stream.tell()
                        empty_meta = self.gen_empty_meta()
                        self.meta_serializer.pack(temp_stream, empty_meta)
                        # the start of the toc stream in the current stream
                        toc_start = temp_stream.tell()
                        toc_writeback = toc_start
                        self.toc_serializer.pack(temp_stream, TocBlock.default())
                        if self.toc_meta_serializer:
                            self.toc_meta_serializer.pack(temp_stream, toc_meta)
                        toc_rel_start = temp_stream.tell()
                        toc_stream.seek(0)
                        _chunked_copy(toc_stream, temp_stream, chunk_size=64 * KiB)
                        toc_end = temp_stream.tell()  # The end of the TOC block;
                        toc_size = toc_end - toc_start
                        data_start = temp_stream.tell()
                        data_stream.seek(0)
                        _chunked_copy(data_stream, temp_stream, chunk_size=1 * MiB)
                        data_size = data_stream.tell()
                        partial_meta.name = archive_name
                        partial_meta.ptrs = ArchivePtrs(
                            toc_start, toc_size, data_start, data_size
                        )
                        # The TOC's internal offsets were written relative to the
                        # copied payload (toc_rel_start); rebase them so they are
                        # relative to header_pos (toc_start).
                        _fix_toc(partial_toc, toc_rel_start, toc_start)
                        temp_stream.seek(toc_writeback)
                        self.toc_serializer.pack(temp_stream, partial_toc)
                        if self.finalize_meta is not None:
                            self.finalize_meta(temp_stream, partial_meta)
                        temp_stream.seek(meta_writeback)
                        self.meta_serializer.pack(temp_stream, partial_meta)
                        temp_stream.seek(0)
                        _chunked_copy(temp_stream, stream, chunk_size=16 * MiB)
                        return temp_stream.tell()
# Archives have 7 blocks:
# MagicBlock
# Contains "_ARCHIVE" (8 byte long ASCII string)
# Contains Version (UINT16, UINT16 tuple)
# MetaBlock
# Several Metadata sections
# PTR Block
# TOC Block
# FileBlock
# FolderBlock
# DriveBlock
# NameBlock
# DataBlock
@dataclass
class FileLazyInfo:
    """
    Deferred read of a file's payload inside the archive's DATA block.
    """

    jump_to: int  # absolute offset of the payload in the archive stream
    packed_size: int  # stored (possibly compressed) payload size
    unpacked_size: int  # decompressed payload size
    stream: BinaryIO
    decompress: bool  # default decompression behaviour for `read`

    def read(self, decompress: Optional[bool] = None) -> bytes:
        """
        Read the payload, optionally decompressing it.

        Args:
            decompress: Overrides the instance's `decompress` flag when not None.

        Returns:
            The payload bytes; decompressed when requested and the file is
            actually compressed (`packed_size != unpacked_size` means stored
            compressed; equal sizes mean STOREd, so nothing to inflate).

        Raises:
            DecompressedSizeMismatch: If the decompressed size differs from `unpacked_size`.
        """
        if decompress is None:
            decompress = self.decompress
        jump_back = self.stream.tell()
        try:
            self.stream.seek(self.jump_to)
            in_buffer = self.stream.read(self.packed_size)
        finally:
            # BUGFIX: always restore the caller's stream position, even when
            # the read raises; previously an exception left the stream moved.
            self.stream.seek(jump_back)
        if decompress and self.packed_size != self.unpacked_size:
            out_buffer = zlib.decompress(in_buffer)
            if len(out_buffer) != self.unpacked_size:
                raise DecompressedSizeMismatch(len(out_buffer), self.unpacked_size)
            return out_buffer
        return in_buffer
@dataclass
class ArchivePtrs:
    """
    Offsets and sizes locating the TOC ('header') block and the DATA block
    within the archive stream.
    """

    header_pos: int
    header_size: int
    data_pos: int
    data_size: Optional[int] = None

    @classmethod
    def default(cls) -> ArchivePtrs:
        """
        Build an all-zero placeholder instance, valid until proper data is supplied.
        """
        return cls(header_pos=0, header_size=0, data_pos=0, data_size=0)
# Public API of this module.
__all__ = [
    "TocBlock",
    "TocHeaderSerializer",
    "DriveDef",
    "DriveDefSerializer",
    "FolderDef",
    "FolderDefSerializer",
    "MetaBlock",
    "TMetaBlock",
    "TTocMetaBlock",
    "FileDef",
    "TFileDef",
    "AssembleFileMetaFunc",
    "DisassembleFileMetaFunc",
    "AssembleMetaFunc",
    "DisassembleMetaFunc",
    "TOCSerializationInfo",
    "FSAssembler",
    "FSDisassembler",
    "Md5ChecksumHelper",
    "EssenceFSSerializer",
    "FileLazyInfo",
    "ArchivePtrs",
]
from __future__ import annotations
from dataclasses import dataclass
from typing import BinaryIO, Dict, Tuple, cast
from serialization_tools.structx import Struct
from relic.sga.core import serialization as _s
from relic.sga.core.definitions import StorageType
from relic.sga.core.filesystem import registry
from relic.sga.core.protocols import StreamSerializer
from relic.sga.core.serialization import (
FileDef,
ArchivePtrs,
TocBlock,
TOCSerializationInfo,
)
from relic.sga.v2.definitions import version
class FileDefSerializer(StreamSerializer[FileDef]):
    """
    Serializes File information using the V2 format.
    """

    # V2's on-disk integer codes for each StorageType.
    STORAGE2INT: Dict[StorageType, int] = {
        StorageType.STORE: 0,
        StorageType.BUFFER_COMPRESS: 16,  # 0x10
        StorageType.STREAM_COMPRESS: 32,  # 0x20
    }
    INT2STORAGE: Dict[int, StorageType] = {
        value: key for key, value in STORAGE2INT.items()
    }  # reverse the dictionary

    def __init__(self, layout: Struct):
        self.layout = layout

    def unpack(self, stream: BinaryIO) -> FileDef:
        """Unpacks a File Definition from the stream."""
        storage_type_val: int
        (
            name_pos,
            storage_type_val,
            data_pos,
            length_in_archive,
            length_on_disk,
        ) = self.layout.unpack_stream(stream)
        storage_type: StorageType = self.INT2STORAGE[storage_type_val]
        return FileDef(
            name_pos=name_pos,
            data_pos=data_pos,
            length_on_disk=length_on_disk,
            length_in_archive=length_in_archive,
            storage_type=storage_type,
        )

    def pack(self, stream: BinaryIO, value: FileDef) -> int:
        """Packs a File Definition into the stream; returns the number of bytes written."""
        storage_type = self.STORAGE2INT[value.storage_type]
        args = (
            value.name_pos,
            storage_type,
            value.data_pos,
            value.length_in_archive,
            value.length_on_disk,
        )
        packed: int = self.layout.pack_stream(stream, *args)
        return packed
@dataclass
class MetaBlock(_s.MetaBlock):
    """
    Container for header information used by V2
    """

    name: str  # archive name stored in the header
    ptrs: ArchivePtrs  # pointers to the TOC (header) and DATA blocks
    file_md5: bytes  # MD5 seeded with FILE_MD5_EIGEN (see recalculate_md5)
    header_md5: bytes  # MD5 seeded with HEADER_MD5_EIGEN (see recalculate_md5)

    @classmethod
    def default(cls) -> MetaBlock:
        """Returns a Default, 'garbage' instance which can be used as a placeholder for write-backs."""
        default_md5: bytes = b"default hash. "
        return cls(
            "Default Meta Block", ArchivePtrs.default(), default_md5, default_md5
        )
@dataclass
class ArchiveHeaderSerializer(StreamSerializer[MetaBlock]):
    """
    Serializer to convert header information to it's dataclass; ArchiveHeader
    """

    layout: Struct
    # Archive names are stored UTF-16 little-endian, null-padded (see rstrip below).
    ENCODING = "utf-16-le"

    def unpack(self, stream: BinaryIO) -> MetaBlock:
        """Unpacks a MetaBlock from the stream."""
        (
            file_md5,
            encoded_name,
            header_md5,
            header_size,
            data_pos,
        ) = self.layout.unpack_stream(stream)
        # The header block starts immediately after this struct.
        header_pos = stream.tell()
        name = encoded_name.decode(self.ENCODING).rstrip("\0")
        ptrs = ArchivePtrs(header_pos, header_size, data_pos)
        return MetaBlock(name, ptrs, file_md5=file_md5, header_md5=header_md5)

    def pack(self, stream: BinaryIO, value: MetaBlock) -> int:
        """Packs a MetaBlock into the stream; returns the number of bytes written."""
        encoded_name = value.name.encode(self.ENCODING)
        args = (
            value.file_md5,
            encoded_name,
            value.header_md5,
            value.ptrs.header_size,
            value.ptrs.data_pos,
        )
        written: int = self.layout.pack_stream(stream, *args)
        return written
# Eigen constants passed to Md5ChecksumHelper when recomputing the file-data
# and header checksums respectively (see `recalculate_md5` below).
FILE_MD5_EIGEN = b"E01519D6-2DB7-4640-AF54-0A23319C56C3"
HEADER_MD5_EIGEN = b"DFC9AF62-FC1B-4180-BC27-11CCE87D3EFF"
def assemble_meta(_: BinaryIO, header: MetaBlock, __: None) -> Dict[str, object]:
    """Flatten the meta-block's checksums into a plain dict the FS can store."""
    return {
        "file_md5": header.file_md5.hex(),
        "header_md5": header.header_md5.hex(),
    }
def disassemble_meta(
    _: BinaryIO, metadata: Dict[str, object]
) -> Tuple[MetaBlock, None]:
    """Rebuild a MetaBlock from the metadata dict produced by `assemble_meta`."""
    header_md5 = bytes.fromhex(cast(str, metadata["header_md5"]))
    file_md5 = bytes.fromhex(cast(str, metadata["file_md5"]))
    # Only the two checksums live in the metadata dict; name and pointers are
    # not stored there, so they are left as None placeholders.
    block = MetaBlock(None, None, header_md5=header_md5, file_md5=file_md5)  # type: ignore
    return block, None
def recalculate_md5(stream: BinaryIO, meta: MetaBlock) -> None:
    """Recompute `meta.file_md5` and `meta.header_md5` from the stream contents.

    Both checksums start at the header position; the header checksum is bounded
    by `header_size`, while the file checksum is given no size (presumably it
    hashes to the end of the stream — confirm in Md5ChecksumHelper).
    """
    meta.file_md5 = _s.Md5ChecksumHelper(
        expected=None,
        stream=stream,
        start=meta.ptrs.header_pos,
        eigen=FILE_MD5_EIGEN,
    ).read()
    meta.header_md5 = _s.Md5ChecksumHelper(
        expected=None,
        stream=stream,
        start=meta.ptrs.header_pos,
        size=meta.ptrs.header_size,
        eigen=HEADER_MD5_EIGEN,
    ).read()
def meta2def(meta: Dict[str, object]) -> FileDef:
    """
    Build a File Definition from a metadata dict.

    V2.0 only stores 'storage_type'; every other field is left as None and
    should be overridden later in the pipeline.
    """
    storage = meta["storage_type"]
    return FileDef(None, None, None, None, storage)  # type: ignore
class EssenceFSSerializer(_s.EssenceFSSerializer[FileDef, MetaBlock, None]):
    """
    Serializer to read/write an SGA file to/from a stream from/to a SGA File System
    """

    def __init__(
        self,
        toc_serializer: StreamSerializer[TocBlock],
        meta_serializer: StreamSerializer[MetaBlock],
        toc_serialization_info: TOCSerializationInfo[FileDef],
    ):
        # Wire the V2-specific helpers into the generic serializer machinery.
        config = dict(
            version=version,
            meta_serializer=meta_serializer,
            toc_serializer=toc_serializer,
            toc_meta_serializer=None,  # no TOC-level meta serializer is used
            toc_serialization_info=toc_serialization_info,
            assemble_meta=assemble_meta,
            disassemble_meta=disassemble_meta,
            build_file_meta=lambda _: {},  # per-file metadata starts empty
            gen_empty_meta=MetaBlock.default,
            finalize_meta=recalculate_md5,
            meta2def=meta2def,
        )
        super().__init__(**config)
# Binary layouts for the V2 on-disk records, paired with their serializers.
_folder_layout = Struct("<I 4H")
_folder_serializer = _s.FolderDefSerializer(_folder_layout)
_drive_layout = Struct("<64s 64s 5H")
_drive_serializer = _s.DriveDefSerializer(_drive_layout)
_file_layout = Struct("<5I")
_file_serializer = FileDefSerializer(_file_layout)
_toc_layout = Struct("<IH IH IH IH")
_toc_header_serializer = _s.TocHeaderSerializer(_toc_layout)
_meta_header_layout = Struct("<16s 128s 16s 2I")
_meta_header_serializer = ArchiveHeaderSerializer(_meta_header_layout)

# Module-level singleton serializer for V2 archives.
essence_fs_serializer = EssenceFSSerializer(
    meta_serializer=_meta_header_serializer,
    toc_serializer=_toc_header_serializer,
    toc_serialization_info=TOCSerializationInfo(
        file=_file_serializer,
        drive=_drive_serializer,
        folder=_folder_serializer,
        name_toc_is_count=True,
    ),
)
# Register the singleton with the module registry.
registry.auto_register(essence_fs_serializer)
# Public API of this module.
__all__ = [
    "FileDefSerializer",
    "MetaBlock",
    "ArchiveHeaderSerializer",
    # "ArchiveSerializer",
    # "archive_serializer",
    "essence_fs_serializer",
] | /relic_tool_sga_v2-1.0.0-py3-none-any.whl/relic/sga/v2/serialization.py | 0.911185 | 0.22946 | serialization.py | pypi
import requests
from typing import Dict, List, Final
from .base import API_BASE_URL
# Endpoint URLs for the /output section of the API, derived from the base URL.
API_OUTPUT_URL: Final[str] = API_BASE_URL + "/output"
API_OUTPUT_TYPE_DICT_URL: Final[str] = API_OUTPUT_URL + "/type_dict.json"
API_OUTPUT_WFM_ITEMS_CATEGORIZED_URL: Final[str] = (
    API_OUTPUT_URL + "/wfm_items_categorized.json"
)
def get_output_type_dict() -> Dict[str, List[str]]:
    """Get the list of types from the API.

    :return: The list of types from the API.
    :rtype: Dict[str, List[str]]
    :raises requests.HTTPError: If there is an error with the request.
    :raises requests.Timeout: If the server does not respond in time.

    Usage::

        >>> from relics_run_api import get_output_type_dict
        >>> get_output_type_dict()
        {
            "Relics": [
                "Lith K8 Relic",
                "Axi G10 Relic",
                "Lith H1 Relic",
                ...
            ],
            "Arcanes": [
                "Arcane Intention",
                "Arcane Phantasm",
                "Arcane Detoxifier",
                ...
            ],
            ...
        }
    """
    # Fetch the type dictionary. The annotation was `Final[Response]`, but
    # `Response` was never imported; `requests.Response` is the real type.
    # A timeout is set because requests has no default and would hang forever
    # on an unresponsive server.
    response: requests.Response = requests.get(API_OUTPUT_TYPE_DICT_URL, timeout=30)
    # Raise requests.HTTPError for 4xx/5xx status codes.
    response.raise_for_status()
    # Return the decoded JSON payload.
    return response.json()
def get_output_wfm_items_categorized() -> Dict[str, dict]:
    """Get the list of items from the API.

    :return: The list of items from the API.
    :rtype: Dict[str, dict]
    :raises requests.HTTPError: If there is an error with the request.

    Usage::

        >>> from relics_run_api import get_output_wfm_items_categorized
        >>> get_output_wfm_items_categorized()
        {
            "ArcaneHelmets": {
                "Arcane Aura Helmet": {
                    "item_name": "Arcane Aura Helmet",
                    "url_name": "arcane_aura_trinity_helmet",
                    "thumb": "items/images/en/thumbs/arcane_aura_trinity_helmet.d2b86ccdef9653830055ab389ef0d577.128x128.png",
                    "id": "54aaf1eee779890a8654131d"
                },
                ...
            },
            "Arcanes": {
                "Akimbo Slip Shot": {
                    "item_name": "Akimbo Slip Shot",
                    "url_name": "akimbo_slip_shot",
                    "thumb": "items/images/en/thumbs/akimbo_slip_shot.9cb48cdcc739b5306bb4e2a8b3868e72.128x128.png",
                    "id": "649322b07ec190215a693094"
                },
                ...
            },
            ...
        }
    """
    # Get the list of items from the API. Annotation fixed: `Response` was
    # never imported, so `Final[Response]` referenced an undefined name.
    # NOTE(review): no timeout is set; requests.get can block indefinitely —
    # consider adding one.
    response: requests.Response = requests.get(API_OUTPUT_WFM_ITEMS_CATEGORIZED_URL)
    # Check for errors
    response.raise_for_status()
    # Return the list of items
    return response.json() | /relics_run_api-0.1.1.tar.gz/relics_run_api-0.1.1/relics_run_api/output.py | 0.797281 | 0.227899 | output.py | pypi
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.