code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import random
import numpy
from echonest.remix import video, audio
usage = """
Usage:
python vdissoc.py inputFilenameOrUrl outputFilename [variation]
variation is the number of near candidates chosen from. [default=4]
Example:
python vdissoc.py 'http://www.youtube.com/watch?v=Es7mk19wMrk' Seventh.mp4
"""
def main(infile, outfile, choices=4):
if infile.startswith("http://"):
av = video.loadavfromyoutube(infile)
else:
av = video.loadav(infile)
meter = av.audio.analysis.time_signature['value']
sections = av.audio.analysis.sections
output = audio.AudioQuantumList()
for section in sections:
beats = []
bars = section.children()
for bar in bars:
beats.extend(bar.children())
if not bars or not beats:
continue
beat_array = []
for m in range(meter):
metered_beats = []
for b in beats:
if beats.index(b) % meter == m:
metered_beats.append(b)
beat_array.append(metered_beats)
# Always start with the first beat
output.append(beat_array[0][0]);
for x in range(1, len(bars) * meter):
meter_index = x % meter
next_candidates = beat_array[meter_index]
def sorting_function(chunk, target_chunk=output[-1]):
timbre = chunk.mean_timbre()
target_timbre = target_chunk.mean_timbre()
timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
return timbre_distance
next_candidates = sorted(next_candidates, key=sorting_function)
next_index = random.randint(0, min(choices, len(next_candidates) - 1))
output.append(next_candidates[next_index])
out = video.getpieces(av, output)
out.save(outfile)
if __name__ == '__main__':
import sys
try:
inputFilename = sys.argv[1]
outputFilename = sys.argv[2]
if len(sys.argv) > 3:
variation = int(sys.argv[3])
else:
variation = 4
except:
print usage
sys.exit(-1)
main(inputFilename, outputFilename, variation) | /remix-2.4.0.tar.gz/remix-2.4.0/examples/videx/vdissoc.py | 0.504639 | 0.259386 | vdissoc.py | pypi |
import random
import numpy
import echonest.remix.audio as audio
usage = """
Usage:
python step.py inputFilename outputFilename [variation [length]]
variation is the number of near candidates chosen from. [default=4]
length is the number of bars in the final product. [default=40]
Example:
python step.py Discipline.mp3 Undisciplined.mp3 4 100
"""
def main(infile, outfile, choices=4, bars=40):
audiofile = audio.LocalAudioFile(infile)
meter = audiofile.analysis.time_signature['value']
fade_in = audiofile.analysis.end_of_fade_in
fade_out = audiofile.analysis.start_of_fade_out
beats = []
for b in audiofile.analysis.beats:
if b.start > fade_in or b.end < fade_out:
beats.append(b)
output = audio.AudioQuantumList()
beat_array = []
for m in range(meter):
metered_beats = []
for b in beats:
if beats.index(b) % meter == m:
metered_beats.append(b)
beat_array.append(metered_beats)
# Always start with the first beat
output.append(beat_array[0][0]);
for x in range(1, bars * meter):
meter_index = x % meter
next_candidates = beat_array[meter_index]
def sorting_function(chunk, target_chunk=output[-1]):
timbre = chunk.mean_pitches()
target_timbre = target_chunk.mean_pitches()
timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
return timbre_distance
next_candidates = sorted(next_candidates, key=sorting_function)
next_index = random.randint(0, min(choices, len(next_candidates) -1 ))
output.append(next_candidates[next_index])
out = audio.getpieces(audiofile, output)
out.encode(outfile)
if __name__ == '__main__':
import sys
try:
inputFilename = sys.argv[1]
outputFilename = sys.argv[2]
if len(sys.argv) > 3:
variation = int(sys.argv[3])
else:
variation = 4
if len(sys.argv) > 4:
length = int(sys.argv[4])
else:
length = 40
except:
print usage
sys.exit(-1)
main(inputFilename, outputFilename, variation, length) | /remix-2.4.0.tar.gz/remix-2.4.0/examples/step/step-by-pitch.py | 0.426322 | 0.295999 | step-by-pitch.py | pypi |
import random
import numpy
import echonest.remix.audio as audio
usage = """
Usage:
python step.py inputFilename outputFilename [variation [length]]
variation is the number of near candidates chosen from. [default=4]
length is the number of bars in the final product. [default=40]
Example:
python step.py Discipline.mp3 Undisciplined.mp3 4 100
"""
def main(infile, outfile, choices=4, bars=40):
audiofile = audio.LocalAudioFile(infile)
meter = audiofile.analysis.time_signature['value']
fade_in = audiofile.analysis.end_of_fade_in
fade_out = audiofile.analysis.start_of_fade_out
beats = []
for b in audiofile.analysis.beats:
if b.start > fade_in or b.end < fade_out:
beats.append(b)
output = audio.AudioQuantumList()
beat_array = []
for m in range(meter):
metered_beats = []
for b in beats:
if beats.index(b) % meter == m:
metered_beats.append(b)
beat_array.append(metered_beats)
# Always start with the first beat
output.append(beat_array[0][0]);
for x in range(1, bars * meter):
meter_index = x % meter
next_candidates = beat_array[meter_index]
def sorting_function(chunk, target_chunk=output[-1]):
timbre = chunk.mean_timbre()
target_timbre = target_chunk.mean_timbre()
timbre_distance = numpy.linalg.norm(numpy.array(timbre) - numpy.array(target_timbre))
return timbre_distance
next_candidates = sorted(next_candidates, key=sorting_function)
next_index = random.randint(0, min(choices, len(next_candidates) - 1))
output.append(next_candidates[next_index])
out = audio.getpieces(audiofile, output)
out.encode(outfile)
if __name__ == '__main__':
import sys
try:
inputFilename = sys.argv[1]
outputFilename = sys.argv[2]
if len(sys.argv) > 3:
variation = int(sys.argv[3])
else:
variation = 4
if len(sys.argv) > 4:
length = int(sys.argv[4])
else:
length = 40
except:
print usage
sys.exit(-1)
main(inputFilename, outputFilename, variation, length) | /remix-2.4.0.tar.gz/remix-2.4.0/examples/step/step.py | 0.427038 | 0.30013 | step.py | pypi |
# ReMixT
ReMixT is a tool for joint inference of clone specific segment and breakpoint copy number in whole genome sequencing data. The input for the tool is a set of segments, a set of breakpoints predicted from the sequencing data, and normal and tumour bam files. Where multiple tumour samples are available, they can be analyzed jointly for additional benefit.
## How to cite
If you find ReMixT useful, please consider citing our [genome biology article](https://doi.org/10.1186/s13059-017-1267-2).
## Installation
Conda is a prerequisite, install [anaconda python](https://store.continuum.io/cshop/anaconda/) from the continuum website.
### Installing from pip
The recommended method of installation for ReMixT is using `pip`.
pip install remixt
You will also need to `shapeit` and `samtools` on your path. They can be installed using conda:
conda install samtools
conda install -c dranew shapeit
### Installing from conda
The conda distribution is now out of date. However, to use conda, add my channel, and the bioconda channel, and install ReMixT as follows.
conda config --add channels https://conda.anaconda.org/dranew
conda config --add channels 'bioconda'
conda install remixt
### Installing from source
#### Clone Source Code
To install the code, first clone from bitbucket. A recursive clone is preferred to pull in all submodules.
git clone --recursive git@bitbucket.org:dranew/remixt.git
#### Dependencies
To install from source you will need several dependencies. A list of dependencies can be found in the `conda` `yaml` file in the repo at `conda/remixt/meta.yaml`.
#### Build executables and install
To build executables and install the ReMixT code as a python package run the following command in the ReMixT repo:
python setup.py install
## Setup ReMixT
### Reference genome
Download and setup of the reference genome is automated. The default is hg19. Select a directory on your system that will contain the reference data, herein referred to as `$ref_data_dir`. The `$ref_data_dir` directory will be used in many of the subsequent scripts when running destruct.
Download the reference data and build the required indexes:
remixt create_ref_data $ref_data_dir
### Mappability file
Additionally, ReMixT requires a mappability file to be generated. We have provided a workflow for generating a mappability file based on `bwa` alignments, for other aligners, you may want to create your own mappability workflow, see `remixt/mappability/bwa/workflow.py` as an example.
To create a mappability file for `bwa`, run:
remixt mappability_bwa $ref_data_dir
Note that this workflow will take a considerable amount of time and it is recommended you run this part of ReMixT setup on a cluster or multicore machine.
For parallelism options see the section [Parallelism using pypeliner](#markdown-header-parallelism-using-pypeliner).
## Running ReMixT
### Input Data
ReMixT takes multiple bam files as input. Bam files should be multiple samples from the same patient, with one bam sequenced from a normal sample from that patient.
Additionally, ReMixT takes a list of predicted breakpoints detected by paired end sequencing as an additional input.
#### Breakpoint Prediction Input Format
The predicted breakpoints should be provided in a tab separated file with the following columns:
* `prediction_id`
* `chromosome_1`
* `strand_1`
* `position_1`
* `chromosome_2`
* `strand_2`
* `position_2`
The first line should be the column names, which should be identical to the above list. Each subsequent line is a breakpoint prediction. The `prediction_id` should be unique to each breakpoint prediction. The `chromosome_`, `strand_` and `position_` columns give the position and orientation of each end of the breakpoint. The values for `strand_` should be either `+` or `-`. A value of `+` means that sequence to the right of `chromosome_`, `position_` is preserved in the tumour chromosome containing the breakpoint. Conversely, a value of `-` means that sequence to the left of `chromosome_`, `position_` is preserved in the tumour chromosome containing the breakpoint.
The following table may assist in understanding the strand of a break-end. Note that an inversion event produces two breakpoints, the strand configurations for both are shown. Additionally, for inter-chromosomal events, any strand configuration is possible.
| Structural Variation | Strand of Leftmost Break-End | Strand of Rightmost Break-End |
| ------------------------ | ---------------------------- | ----------------------------- |
| Deletion | + | - |
| Duplication | - | + |
| Inversion (Breakpoint A) | + | + |
| Inversion (Breakpoint B) | - | - |
### ReMixT Command Line
Running ReMixT involves invoking a single command, `remixt run`. The result of ReMixT is an [hdf5](https://www.hdfgroup.org) file storing [pandas](http://pandas.pydata.org) tables.
Suppose we have the following list of inputs:
* Normal sample with ID `123N` and bam filename `$normal_bam`
* Tumour sample with ID `123A` and bam filename `$tumour_a_bam`
* Tumour sample with ID `123B` and bam filename `$tumour_b_bam`
* Breakpoint table in TSV format with filename `$breakpoints`
Additionally, ReMixT will generate the following outputs:
* Results as HDF5 file storing pandas tables with filename `$results_h5`
* Temporary files and logs stored in directory `$remixt_tmp_dir` (directory created if it doesnt exist)
Given the above inputs and outputs run ReMixT as follows:
remixt run $ref_data_dir $raw_data_dir $breakpoints \
--normal_sample_id 123N \
--normal_bam_file $normal_bam \
--tumour_sample_ids 123A 123B \
--tumour_bam_files $tumour_a_bam $tumour_b_bam \
--results_files $results_h5
--tmpdir $remixt_tmp_dir
Note that ReMixT creates multiple jobs and many parts of ReMixT are massively parallelizable, thus it is recommended you run ReMixT on a cluster or multicore machine. For parallelism options see the section [Parallelism using pypeliner](#markdown-header-parallelism-using-pypeliner).
### Output File Formats
The main output file is an HDF5 store containing pandas dataframes. These can be extracted in python or viewed using the ReMixT viewer. Important tables include:
* `stats`: statistics for each restart
* `solutions/solution_{idx}/cn`: segment copy number table for solution `idx`
* `solutions/solution_{idx}/brk_cn`: breakpoint copy number table for solution `idx`
* `solutions/solution_{idx}/h`: haploid depths for solution `idx`
#### Statistics
ReMixT uses optimal restarts and model selection by BIC. The statistics table contains one row per restart, sorted by BIC. The table contains the following columns:
* `idx`: the solution index, used to refer to `solutions/solution_{idx}/*` tables.
* `bic`: the bic of this solution
* `log_posterior`: log posterior of the HMM
* `log_posterior_graph`: log posterior of the genome graph model
* `num_clones`: number of clones including normal
* `num_segments`: number of segments
* `h_converged`: whether haploid depths estimation converged
* `h_em_iter`: number of iterations for convergence of h
* `graph_opt_iter`: number of iterations for convergence of genome graph copy number
* `decreased_log_posterior`: whether the genome graph optimization stopped due to a move that decreased the log posterior
#### Segment Copy Number
The segment copy number table adds additional columns to the segment counts table described above, including but not limited to:
* `major_1`
* `minor_1`
* `major_2`
* `minor_2`
The columns refer to the major and minor copy number in tumour clone 1 and 2.
#### Breakpoints Copy Number
The breakpoint copy number table contains the following columns:
* `prediction_id`
* `cn_1`
* `cn_2`
The `prediction_id` column matches the column of the same name in the input breakpoints file, and specifies for which breakpoint prediction the copy number is being provided. The `cn_1` and `cn_2` columns provide the clone specific copy number for tumour clone 1 and 2 respectively.
#### Haploid Depths
The haploid depths is a vector of `M` depths for each of the `M` clones including the normal. To recover cell mixture proportions, simply normalize `h`.
### Extracting Tables as TSV files
If preferred, it is possible to extract copy number and metadata in TSV and YaML format. For results file `$results_h5`, extract segment copy number, breakpoint copy number and meta data to files `$cn_table`, `$brk_cn_table`, `$meta_data` respectively as follows:
remixt write_results \
$results_h5 $cn_table $brk_cn_table $meta_data
### ReMixT Viewer
There is an experimental viewer for ReMixT at `tools/remixt_viewer_app.py`. Bokeh '>0.10.0' is required. To use the viewer app, organize your patient sample results files as `./patient_*/sample_*.h5`. From the directory containing patient subdirectories, run the bokeh server:
bokeh-server --script $REMIXT_DIR/tools/remixt_viewer_app.py
Then navigate to `http://127.0.0.1:5006/remixt`.
## Test Dataset for ReMixT
A test dataset is provided for providing the ability to run a quick analysis of a small dataset to ensure remixt is working correctly.
We will assume that the `REMIXT_DIR` environment variable points to a clone of the ReMixT source code. Additionally, create a directory, and set the environment variable `WORK_DIR` to the location of that directory.
First use the `remixt create_ref_data` sub-command to create a reference dataset. Specify a config, and use the example config that restricts to chromosome 15.
remixt create_ref_data $WORK_DIR/ref_data \
--config $REMIXT_DIR/examples/chromosome_15_config.yaml
Use `wget` to retrieve a precomputed mappability file.
wget http://remixttestdata.s3.amazonaws.com/hg19.100.bwa.mappability.h5 --directory-prefix $WORK_DIR/ref_data/
Use `wget` to retrieve the example bam files and their indices for chromosome 15, and the breakpoints file with chromosome 15 breakpoints.
wget http://remixttestdata.s3.amazonaws.com/HCC1395_chr15.bam --directory-prefix $WORK_DIR/
wget http://remixttestdata.s3.amazonaws.com/HCC1395_chr15.bam.bai --directory-prefix $WORK_DIR/
wget http://remixttestdata.s3.amazonaws.com/HCC1395BL_chr15.bam --directory-prefix $WORK_DIR/
wget http://remixttestdata.s3.amazonaws.com/HCC1395BL_chr15.bam.bai --directory-prefix $WORK_DIR/
wget http://remixttestdata.s3.amazonaws.com/HCC1395_breakpoints.tsv --directory-prefix $WORK_DIR/
Use the `remixt run` sub-command to run a remixt analysis.
remixt run $WORK_DIR/ref_data $WORK_DIR/raw_data $WORK_DIR/HCC1395_breakpoints.tsv \
--config $REMIXT_DIR/examples/chromosome_15_config.yaml \
--tmpdir $WORK_DIR/tmp_remixt \
--tumour_sample_ids HCC1395 \
--tumour_bam_files $WORK_DIR/HCC1395_chr15.bam \
--normal_sample_id HCC1395BL \
--normal_bam_file $WORK_DIR/HCC1395BL_chr15.bam \
--loglevel DEBUG \
--submit local \
--results_files $WORK_DIR/HCC1395.h5
Use the `remixt write_results` sub-command to write out tables of results and a yaml file containing inferred parameters and other meta data.
remixt write_results $WORK_DIR/HCC1395.h5 \
$WORK_DIR/HCC1395_cn.tsv \
$WORK_DIR/HCC1395_brk_cn.tsv \
$WORK_DIR/HCC1395_info.yaml
Finally, create a visualization of the solutions using the `remixt visualize_solutions` sub-command.
remixt visualize_solutions $WORK_DIR/HCC1395.h5 \
$WORK_DIR/HCC1395.html
## Parallelism Using Pypeliner
ReMixT uses the pypeliner python library for parallelism. Several of the scripts described above will complete more quickly on a multi-core machine or on a cluster.
To run a script in multicore mode, using a maximum of 4 cpus, add the following command line option:
--maxjobs 4
To run a script on a cluster with qsub/qstat, add the following command line option:
--submit asyncqsub
Often a call to qsub requires specific command line parameters to request the correct queue, and importantly to request the correct amount of memory. To allow correct calls to qsub, use the `--nativespec` command line option, and use the placeholder `{mem}` which will be replaced by the amount of memory (in gigabytes) required for each job launched with qsub. For example, to use qsub, and request queue `all.q` and set the `mem_free` to the required memory, add the following command line options:
--submit asyncqsub --nativespec "-q all.q -l mem_free={mem}G"
# Build
## Docker builds
To build a docker image, for instance version v0.5.13, run the following docker command:
docker build --build-arg app_version=v0.5.13 -t amcpherson/remixt:v0.5.13 .
docker push amcpherson/remixt:v0.5.13
## Pip build
To build with pip and distribute to pypi, use the following commands:
python setup.py build_ext --force sdist
twine upload --repository pypi dist/*
# License
ReMixT is released under the [MIT License](http://www.opensource.org/licenses/MIT).
| /remixt-0.5.23.tar.gz/remixt-0.5.23/README.md | 0.887546 | 0.808672 | README.md | pypi |
import pickle
import re
from urllib.request import urlopen
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.feature_extraction.text import CountVectorizer
nltk.download('stopwords')
class Preprocessing:
"""This class preprocesses the data"""
def __init__(self):
"""Initialize the preprocessing module"""
self.porter_stemmer = PorterStemmer()
self.count_vectorizer = None
all_stopwords = stopwords.words('english')
all_stopwords.remove('not')
self.stopwords = all_stopwords
def preprocess_dataset(self, dataset):
"""Preprocess the entire dataset"""
corpus = []
for i in range(0, dataset.shape[0]):
corpus.append(self.preprocess_review(dataset['Review'][i]))
return corpus
def preprocess_review(self, review):
"""Preprocess a single review"""
review = re.sub('[^a-zA-Z]', ' ', review)
review = review.lower()
review = review.split()
review = [self.porter_stemmer.stem(word) for word in review \
if not word in set(self.stopwords)]
review = ' '.join(review)
return review
def fit_transform(self, corpus):
"""Fit a corpus and transform it into a BoW representation"""
self.count_vectorizer = CountVectorizer(max_features = 1420)
return self.count_vectorizer.fit_transform(corpus).toarray()
def transform(self, corpus):
"""Transform a corpus into a BoW representation"""
if self.count_vectorizer is None:
raise Exception("Vectorizer is not initialized. \
Please call fit_transform or load from PKL or URL first.")
return self.count_vectorizer.transform(corpus).toarray()
def vectorizer_from_pkl(self, cv_file):
"""Load the vectorizer from a PKL file"""
with open(cv_file, "rb") as file:
self.count_vectorizer = pickle.load(file)
def vectorizer_from_url(self, url):
"""Load the vectorizer from a URL"""
with urlopen(url) as file:
self.count_vectorizer = pickle.load(file)
def save_vectorizer(self, filename):
"""Save the vectorizer to a file"""
with open(filename, "wb") as file:
pickle.dump(self.count_vectorizer, file) | /remla23_team10_preprocessing-2.0.1-py3-none-any.whl/restaurant_preprocessing/preprocessing.py | 0.5083 | 0.213869 | preprocessing.py | pypi |
# remme-core-cli
[](https://github.com/Remmeauth/remme-core-cli/releases)
[](https://pypi.python.org/pypi/remme-core-cli/)
[](https://travis-ci.com/Remmeauth/remme-core-cli)
[](https://codecov.io/gh/Remmeauth/remme-core-cli)
[](https://pepy.tech/project/remme-core-cli)
[](https://pypi.python.org/pypi/remme-core-cli/)
[](https://pypi.python.org/pypi/remme-core-cli/)
* [Getting started](#getting-started)
* [Requirements](#getting-started-requirements)
* [Ubuntu 16.04 & 18.04](#ubuntu-1604--1804)
* [MacOS](#macos)
* [Installation](#installation)
* [Usage](#usage)
* [Configuration file](#configuration-file)
* [Service](#service)
* [Account](#account)
* [Node Account](#node-account)
* [Block](#block)
* [Atomic Swap](#atomic-swap)
* [Batch](#batch)
* [Node](#node)
* [Masternode](#masternode)
* [Public key](#public-key)
* [State](#state)
* [Transaction](#transaction)
* [Receipt](#receipt)
* [Development](#development)
* [Requirements](#development-requirements)
* [Docker](#docker)
* [Production](#production)
* [Contributing](#contributing)
* [Request pull request's review](#request-pull-requests-review)
## Getting started
<h3 id="getting-started-requirements">Requirements</h4>
#### Ubuntu 16.04 & 18.04
If you have `16.04` version, install system requirements with the following terminal commands:
```bash
$ apt-get update && apt-get install -y software-properties-common && add-apt-repository ppa:deadsnakes/ppa -y && \
apt-get install -y build-essential automake libtool pkg-config \
libffi-dev libssl-dev libxml2-dev libxslt1-dev libjpeg8-dev zlib1g-dev
```
If `18.04`, then use the following terminal commands:
```bash
$ apt-get update && apt-get install -y software-properties-common && add-apt-repository ppa:deadsnakes/ppa -y && \
apt-get install -y build-essential automake libtool pkg-config libsecp256k1-dev \
libffi-dev libssl-dev libxml2-dev libxslt1-dev libjpeg8-dev zlib1g-dev
```
Now, for both of versions, install `Python 3.6` (also, we support 3.7):
```bash
$ apt-get update && apt-get install -y python3.6 python3.6-dev python3-pip python3-setuptools python3.6-venv
```
And make it as default `python3` with the following command:
```bash
$ rm /usr/bin/python3 && sudo ln -s /usr/bin/python3.6 /usr/bin/python3
```
#### MacOS
Install `Python 3.7` (also, we support 3.6):
```
$ brew install python3
```
Install system requirements with the following terminal command:
```bash
$ brew install automake pkg-config libtool libffi gmp
```
## Installation
Install the package from the [PyPi](https://pypi.org/project/remme-core-cli) through [pip](https://github.com/pypa/pip):
```bash
$ pip3 install remme-core-cli
```
## Usage
You can use the following list of the addresses of the nodes to execute commands to:
- `node-genesis-testnet.remme.io`,
- `node-6-testnet.remme.io`,
- `node-1-testnet.remme.io`.
Also, you can use the following IP-addresses (development servers):
- `159.89.104.9`,
- `165.22.75.163`.
They work based on a bit different codebase. So, if you have errors using a domain name, use IP-address instead. But, keep in mind that development servers don't consist in the public test network.
### Configuration file
*Disclaimer!* Configuration file is supported only on Unix operating systems and isn't supported on Windows.
Using the command line interface, you will have an option to declare the `node URL` to send commands to as illustrated below:
```bash
$ remme account get-balance \
--address=1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf \
--node-url=node-genesis-testnet.remme.io
```
You shouldn't declare `node URL` every time when you execute a command, use configuration file instead. Configuration file
is required to be named `.remme-core-cli.yml` and located in the home directory (`~/`).
The configuration file have an optional section to declare `node URL` to send commands to:
```bash
node-url: node-genesis-testnet.remme.io
```
Try it out by downloading the example of the configuration file to the home directory.
```bash
$ curl -L https://git.io/fj3Mi > ~/.remme-core-cli.yml
```
### Service
Get the version of the package — ``remme --version``:
```bash
$ remme --version
remme, version 0.1.0
```
Get all possible package's commands — ``remme --help``:
```bash
$ remme --help
Usage: remme [OPTIONS] COMMAND [ARGS]...
Command line interface for PyPi version checking.
Options:
--version Show the version and exit.
--help Show this message and exit.
...
```
### Account
Get balance of the account by its address — ``remme account get-balance``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------------ |
| address | String | Yes | Account address to get a balance by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme account get-balance \
--address=1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"balance": 368440.0
}
}
```
Transfer tokens to address — ``remme account transfer-tokens``:
| Arguments | Type | Required | Description |
| :---------: | :-----: | :------: | ---------------------------------------------- |
| private-key | String | Yes | Account's private key to transfer tokens from. |
| address-to | String | Yes | Account address to transfer tokens to. |
| amount | Integer | Yes | Amount to transfer. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme account transfer-tokens \
--private-key=1067b42e24b4c533706f7c6e62278773c8ec7bf9e78bf570e9feb58ba8274acc \
--address-to=112007d71fa7e120c60fb392a64fd69de891a60c667d9ea9e5d9d9d617263be6c20202 \
--amount=1000 \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"batch_identifier": "aac64d7b10be4b93b8c345b5eca1dc870c6b3905485e48a0ca5f58928a88a42b7a404abb4f1027e973314cca95379b1ef375358ad1661d0964c1ded4c212810f"
}
}
```
### Node account
Get information about the node account by its address — ``remme node-account get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | -------------------------------------------------------------- |
| address | String | Yes | Node account address to get information about node account by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node-account get \
--address=1168290a2cbbce30382d9420fd5f8b0ec75e953e5c695365b1c22862dce713fa1e48ca \
--node-url=node-1-testnet.remme.io
{
"result": {
"balance": "0.0000",
"last_defrost_timestamp": "0",
"min": true,
"node_state": "OPENED",
"reputation": {
"frozen": "250000.4100",
"unfrozen": "51071032.5900"
},
"shares": [
{
"block_num": "552",
"block_timestamp": "1556178213",
"defrost_months": 0,
"frozen_share": "5440",
"reward": "0"
},
],
},
}
```
Transfer tokens to address — ``remme node-account transfer-tokens``:
| Arguments | Type | Required | Description |
| :---------: | :-----: | :------: | ---------------------------------------------- |
| private-key | String | Yes | Account's private key to transfer tokens from. |
| address-to | String | Yes | Account address to transfer tokens to. |
| amount | Integer | Yes | Amount to transfer. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node-account transfer-tokens \
--private-key=7ae575740dcdae8e704ff461ab89ad42505e06abbbae8ea68e18387e537b7462 \
--address-to=1168292465adcaffeea284f89330dcc013533c8c285089b75466a958733f4f3fc9174d \
--amount=100 \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"batch_identifier": "aac64d7b10be4b93b8c345b5eca1dc870c6b3905485e48a0ca5f58928a88a42b7a404abb4f1027e973314cca95379b1ef375358ad1661d0964c1ded4c212810f"
}
}
```
Transfer available tokens from frozen to unfrozen reputation's balances (executable only on the machine which runs the node) — `remme node-account transfer-tokens-from-frozen-to-unfrozen`.
```bash
$ remme node-account transfer-tokens-from-frozen-to-unfrozen
{
"result": {
"batch_id": "045c2b7c43a7ca7c3dc60e92714c03265572a726d1fae631c39a404eaf97770e3f6a7a8c35c86f6361afb2e4f12b4a17d71a66a19158b62f30531ab32b62f06f"
}
}
```
### Block
Get a list of blocks — ``remme block get-list``:
| Arguments | Type | Required | Description |
| :-------: | :-----: | :------: | -------------------------------------------------- |
| ids | String | No | Identifiers to get a list of blocks by. |
| limit | Integer | No | Maximum amount of blocks to return. |
| head | Integer | No | Block identifier to get a list of transactions to. |
| ids-only | Bool | No | The flag to get a list of blocks' identifiers. |
| reverse | Bool | No | Parameter to reverse result. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme block get-list \
--ids='fe56a16dab009cc96e7125c647b6c71eb1063818cf8dece283b125423ecb184f7f1e61802bf66382da904698413f80831031f8a1b29150260c3fa4db537fdf4c,
56100bf24eed12d2f72fe3c3ccf75fe2f53d87c224d9dda6fb98a1411070b06a40fcf97fccc61cb9c88442953af6ae50344ad7773f1becc6bae108443c18c551' \
--head=fe56a16dab009cc96e7125c647b6c71eb1063818cf8dece283b125423ecb184f7f1e61802bf66382da904698413f80831031f8a1b29150260c3fa4db537fdf4c \
--limit=2 \
--reverse \
--node-url=node-genesis-testnet.remme.io
{
"result": [
{
"batches": [
{
"header": {
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"transaction_ids": [
"6593d21046519022ba32c98e934d7dfc81e8b4edf6c064dbf70feb13db4310873ec00816bce8660cafd4fa2a8c80d0147d63cf616c624babd03142c694272017"
]
},
"header_signature": "fa2d1a209ad04fd2ad7fb5183976e647cc47b4c08e2e578097afc2566a0284e760eb3f2ff8f72f290765211d4da3341f23091cc7a16805025a17c04a90818a44",
"trace": false,
"transactions": [
{
"header": {
"batcher_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"dependencies": [],
"family_name": "block_info",
"family_version": "1.0",
"inputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"nonce": "",
"outputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"payload_sha512": "1b2cdc6ecfb575b926abea76b44e6988617e945e0f3d84b7624ee228cf35252a7cd186eabe5126c5f967ff54d0b1001e2c07716a7d9e00b5710e836400a913d5",
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135"
},
"header_signature": "6593d21046519022ba32c98e934d7dfc81e8b4edf6c064dbf70feb13db4310873ec00816bce8660cafd4fa2a8c80d0147d63cf616c624babd03142c694272017",
"payload": "CtMCCLwBEoABOWI4Y2NhODk3Nzk2NDJiYWEyMGMwZWUyZjEzOWVlMGNlMWNjYjEwMjY5OTVjNDY3NDYzZDEzOTI0ZDg3YTg3NjNlODMzOWI2YzIyMzNmMTZiY2I5ZDVjNjEwMzVmNzAzY2FiNjBiNzQxMGJlMjJkZjkzNWEyYWE4YmIzNGE1NTcaQjAyZDFmYmRhNTBkYmNkMGQzYzI4NmE2YTlmYTcxYWE3Y2UyZDk3MTU5YjkwZGRkNDYzZTA4MTY0MjJkNjIxZTEzNSKAAWZkNzgwY2UwNzY0MGJhNDExMjI0ODY5MTU4MWE1OTU4NDVmZTc2MmJmM2ZlYjQ5Yjg0Mzk3NGFhZTU3ODQ3OGM2YmY1MTg3MzllY2RjNDlkNzAxOTM4M2QzYmQ5ZTNhYTZmYTBhZjgzODRiNDQ5MThmMGJmZjM3NDAyYjUxMGIyKMzfgeYF"
}
]
},
...
],
"header": {
"batch_ids": [
"fa2d1a209ad04fd2ad7fb5183976e647cc47b4c08e2e578097afc2566a0284e760eb3f2ff8f72f290765211d4da3341f23091cc7a16805025a17c04a90818a44",
"661492181b838636b11ee347312bf5346b4231e0510c5c5bec27ea999ea389a66a1264696ea53e3b30e29b03192154bed8d160f035706da4f0da7f0be107a2b2"
],
"block_num": "189",
"consensus": "RGV2bW9kZdG76dVw7Q7VRgkNr6HxHnnJxNwI+iySmLepFZPJXvDa",
"previous_block_id": "fd780ce07640ba4112248691581a595845fe762bf3feb49b843974aae578478c6bf518739ecdc49d7019383d3bd9e3aa6fa0af8384b44918f0bff37402b510b2",
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"state_root_hash": "693d08c1520c9c1b2dba54ae147bf689f6209f74e304f8ed44e1ec818a08072e"
},
"header_signature": "fe56a16dab009cc96e7125c647b6c71eb1063818cf8dece283b125423ecb184f7f1e61802bf66382da904698413f80831031f8a1b29150260c3fa4db537fdf4c"
},
...
]
}
```
Get a list of blocks' identifiers (can be combined with other parameters like `--limit`):
```bash
$ remme block get-list --ids-only --node-url=node-6-testnet.remme.io
{
"result": [
"b757c74fbcd57ae12577b71490878affb6b688434c2e20170138760e72e937ca1bb3d6773e2ef37b5151ed74dcb663114a181072e0870e7a4d452c58659a6dbb",
"585f23725d1236e90e2b961b0c0c1404aba0ba5a96e4d85cd2f048b1d61b027669153e3618c84fc09a8041f8e149b97d50a89ee7761d0458cd57c63d5f354cbd",
...
]
}
```
Get information about the block by its identifier — ``remme block get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------------------------------ |
| id | String | Yes | Identifier of the block to fetch information about by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme block get \
--id=4a7897650db9863aca34874778e6c5802f86c3df0e22b39cfea730bc83654357037a422f8ef51ac85a9bc61d2484bd0f37be10cfc861588c41dc6f1bbfd92cde \
--node-url=node-6-testnet.remme.io
{
"result": {
"batches": [
{
"header": {
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"transaction_ids": [
"ce8dd0946326072eb4c70818d7d0df32ebd80b3a24525306ff92e8caa8c886ee571d8ba9f01c73c2c4aaab7960c0ef88865ace6dd9274dd378649f5b9da7c820"
]
},
"header_signature": "b684d527666cce92ea57d8e14d467ee3cec5515759e1d0a78df65dbcd2a5ff993f95c8efac7c35a6380cbce81941119e98b72956278e663b9fa04e396bb7849f",
"trace": false,
"transactions": [
{
"header": {
"batcher_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"dependencies": [],
"family_name": "block_info",
"family_version": "1.0",
"inputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"nonce": "",
"outputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"payload_sha512": "ef5953af5e24047f92cea476c6706da72b6207ac89077cb314d6d518a1293433955c0a5012c52c4acb34e2220ac8fcc33f83b33ab847631f0471f10dcdf0a54f",
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135"
},
"header_signature": "ce8dd0946326072eb4c70818d7d0df32ebd80b3a24525306ff92e8caa8c886ee571d8ba9f01c73c2c4aaab7960c0ef88865ace6dd9274dd378649f5b9da7c820",
"payload": "CtICCAESgAExNTJmM2JlOTFkODIzODUzOGE4MzA3N2VjOGNkNWQxZDkzNzc2N2MwOTMwZWVhNjFiNTkxNTFiMGRmYTdjNWExNzlhNjZmMTc2Y2UyM2MxNGE2N2Q4NDUxY2VjMjg1MmM4ZmY2MGZlOWU4OTYzYzNlZDExNWJkNjA3ODg5OGRhMBpCMDJkMWZiZGE1MGRiY2QwZDNjMjg2YTZhOWZhNzFhYTdjZTJkOTcxNTliOTBkZGQ0NjNlMDgxNjQyMmQ2MjFlMTM1IoABNGFlNmYzOWY0ZDZlNWJiNDhmYzA0Y2Y0MGJhNzEwMTNmYzA0NGZlNTdjOWE3Njg3ZjRlMTNkZjhjZDQ4ODQ1OTA4YTAxNjAzOTRlN2RjNjRjNDc5YTg0YzVkYmYwZmUzYzVlZTZkNmIxMDhlNzZjODYyNzQ4NzkxMWZjNjgxYWUokIr35QU="
}
]
},
{
"header": {
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"transaction_ids": [
"e112670497e184e7b3d7fab962440fe4be7e905ce7c73712a1a7ca9c65fba00b23fcf62cc640944bdac3c7ab1414d5d5c6fe3edf2f755d3dbca982b3d83394e2"
]
},
"header_signature": "cd11713211c6eb2fe4adc0e44925c1f82e9300e0b8827bd3c73d8be10e61cd2b1e8da810078845ca1665b4adf7f691ad731ab4cea0fc994c55a8863b30220c6e",
"trace": false,
"transactions": [
{
"header": {
"batcher_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"dependencies": [],
"family_name": "account",
"family_version": "0.1",
"inputs": [
"112007d71fa7e120c60fb392a64fd69de891a60c667d9ea9e5d9d9d617263be6c20202",
"112007a90f66c661b32625f17e27177034a6d2cb552f89cba8c78868705ae276897df6"
],
"nonce": "7d5445ee5559645bd72db237a0b448bec64c33c70be214e974da7ad0f523278cbb0c77c4a690ff751b68c318437ece2aef6eb29518a41c5ec8037218ed6fbf0d",
"outputs": [
"112007d71fa7e120c60fb392a64fd69de891a60c667d9ea9e5d9d9d617263be6c20202",
"112007a90f66c661b32625f17e27177034a6d2cb552f89cba8c78868705ae276897df6"
],
"payload_sha512": "bb0e5d9898c92b9b922a4de677ed6cab106ed5c90e975941cd5d1e22ce6f0d397b812c7152796b410a9cfe1d3fd4af080c6ee88c9548fc8393e7a55cae596b8c",
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135"
},
"header_signature": "e112670497e184e7b3d7fab962440fe4be7e905ce7c73712a1a7ca9c65fba00b23fcf62cc640944bdac3c7ab1414d5d5c6fe3edf2f755d3dbca982b3d83394e2",
"payload": "EksSRjExMjAwN2Q3MWZhN2UxMjBjNjBmYjM5MmE2NGZkNjlkZTg5MWE2MGM2NjdkOWVhOWU1ZDlkOWQ2MTcyNjNiZTZjMjAyMDIY6Ac="
}
]
}
],
"header": {
"batch_ids": [
"b684d527666cce92ea57d8e14d467ee3cec5515759e1d0a78df65dbcd2a5ff993f95c8efac7c35a6380cbce81941119e98b72956278e663b9fa04e396bb7849f",
"cd11713211c6eb2fe4adc0e44925c1f82e9300e0b8827bd3c73d8be10e61cd2b1e8da810078845ca1665b4adf7f691ad731ab4cea0fc994c55a8863b30220c6e"
],
"block_num": "2",
"consensus": "RGV2bW9kZVrz+4RUt+Xyzhofvok/lkMcK3ZtAh/zcO/6gbPJPLPw",
"previous_block_id": "4ae6f39f4d6e5bb48fc04cf40ba71013fc044fe57c9a7687f4e13df8cd48845908a0160394e7dc64c479a84c5dbf0fe3c5ee6d6b108e76c8627487911fc681ae",
"signer_public_key": "02d1fbda50dbcd0d3c286a6a9fa71aa7ce2d97159b90ddd463e0816422d621e135",
"state_root_hash": "54eeacdf8fe3262862782110d4396b60f4b8c3863ff1b1b208fa996b6bb24a0f"
},
"header_signature": "4a7897650db9863aca34874778e6c5802f86c3df0e22b39cfea730bc83654357037a422f8ef51ac85a9bc61d2484bd0f37be10cfc861588c41dc6f1bbfd92cde"
}
}
```
### Atomic Swap
Get public key of atomic swap — ``remme atomic-swap get-public-key``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme atomic-swap get-public-key --node-url=node-6-testnet.remme.io
{
"result": {
"public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad"
}
}
```
Get information about atomic swap by its identifier — ``remme atomic-swap get-info``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------------------------- |
| id | String | Yes | Swap identifier to get information about swap by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme atomic-swap get-info \
--id=033402fe1346742486b15a3a9966eb5249271025fc7fb0b37ed3fdb4bcce6808 \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"information": {
"amount": "10.0000",
"created_at": 1556803765,
"email_address_encrypted_optional": "",
"is_initiator": false,
"receiver_address": "112007484def48e1c6b77cf784aeabcac51222e48ae14f3821697f4040247ba01558b1",
"secret_key": "",
"secret_lock": "0728356568862f9da0825aa45ae9d3642d64a6a732ad70b8857b2823dbf2a0b8",
"sender_address": "1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf",
"sender_address_non_local": "0xe6ca0e7c974f06471759e9a05d18b538c5ced11e",
"state": "OPENED",
"swap_id": "033402fe1346742486b15a3a9966eb5249271025fc7fb0b37ed3fdb4bcce6808"
}
}
}
```
### Batch
Get a batch by identifier — ``remme batch get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| id | String | Yes | Identifier to get a batch by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme batch get \
--id=61a02b6428342c4ac2bb0d9d253d48fd229d9b0a1344b2c114f22f127e7bfaeb3e2be19574fbd48776b71bbdb728ee1eedab2c2a4f0b951251899470318cee9d \
--node-url=node-6-testnet.remme.io
{
"result": {
"header": {
"signer_public_key": "029de6b8d982a714b5e781e266a4f1e0ef88ba1ef6bd4a96e7b7f21da164d84cda",
"transaction_ids": [
"73b913d679d7ec5ccd6658909b71ebdbdef5d01ea510c620639f519812efa76e66710d1d2f932f6e23775f907e5ed6c41d80b1fe227dd3316ac82452d20487c8"
]
},
"header_signature": "61a02b6428342c4ac2bb0d9d253d48fd229d9b0a1344b2c114f22f127e7bfaeb3e2be19574fbd48776b71bbdb728ee1eedab2c2a4f0b951251899470318cee9d",
"trace": false,
"transactions": [
{
"header": {
"batcher_public_key": "029de6b8d982a714b5e781e266a4f1e0ef88ba1ef6bd4a96e7b7f21da164d84cda",
"dependencies": [],
"family_name": "block_info",
"family_version": "1.0",
"inputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"nonce": "",
"outputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"payload_sha512": "0104c4f12d1bc53ee1d14a71a036305cfc2b82b41cee52cc6d7b9d0905d5fa0aa0db8d01e28531676b319552ce2a33e719386cb3eb5b938d8996abfa64bd3488",
"signer_public_key": "029de6b8d982a714b5e781e266a4f1e0ef88ba1ef6bd4a96e7b7f21da164d84cda"
},
"header_signature": "73b913d679d7ec5ccd6658909b71ebdbdef5d01ea510c620639f519812efa76e66710d1d2f932f6e23775f907e5ed6c41d80b1fe227dd3316ac82452d20487c8",
"payload": "CtMCCNkzEoABMDk1YmM0MjQ4YjU4NjYzMTllNGE5YWQ2YzZkMWFkNGI3MDA5OTFmNmJjMGVjMGRlN2UwNGJhMTAxNGYxYTU3ZTI4OWE1MmE0MjVhNzc3ZTg3YjgzMzFjMjVkNmU4NTIwNmY1ZGZmNjk1ZGFiMTI0Yzc3YjQ2OWNhMzhhNDFjY2QaQjAyZjU3OWQ3NzU0ZTg3YmYwZTRlZDJlNTBmYjEzNmI4ZTM1NTg2OGU0ODMwODkwZTE0MjRlOWZmZGVhZjZiZTE2MyKAATQzMjA5ODdiYzU3YTJjMmZlYTMzNWFkM2UxZTFmNGU0NDk3YTJhYmM2MmFhYzdlZDIwY2RmZmY5NWFhY2JiMDgyNGU2ZTBmMGFiNGI4MmQxOGExOWEyYTVmMDE3OGE2Mjk0MDIyNjhmODExNzAyZmUxZTk0MzFmZmExMGEyNWI2KMTIreYF"
}
]
}
}
```
Get a batch status by its identifier — ``remme batch get-status``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------------ |
| id | String | Yes | Identifier to get a batch status by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme batch get-status \
--id=61a02b6428342c4ac2bb0d9d253d48fd229d9b0a1344b2c114f22f127e7bfaeb3e2be19574fbd48776b71bbdb728ee1eedab2c2a4f0b951251899470318cee9d \
--node-url=node-6-testnet.remme.io
{
"result": "COMMITTED"
}
```
Get a list of batches — ``remme batch get-list``:
| Arguments | Type | Required | Description |
| :-------: | :-----: | :------: | -------------------------------------------------------- |
| ids | String | No | Identifiers to get a list of batches by. |
| start | String | No | Batch identifier to get a list of batches starting from. |
| limit | Integer | No | Maximum amount of batches to return. |
| head | String | No | Block identifier to get a list of batches from. |
| reverse | Bool | No | Parameter to reverse result. |
| ids-only | Bool | No | The flag to get a list of batches' identifiers. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme batch get-list \
--ids='6bd3382e3deef34d0bc63a7b450c88c7ae00152f5168c7b4dc4357feff6d52175209919cd0710441fa2768f4c12adf97143440ef8414bb5144b9459d78ff3e0e, 7a5daba99d5757adc997ea6a0b1b83263b3c16604dbd83c0153dc01c9fd780af4b570338c2ec60e086b1db58a4397a4dc661d6c93b0a7250fe75642e15b26e81' \
--start=6bd3382e3deef34d0bc63a7b450c88c7ae00152f5168c7b4dc4357feff6d52175209919cd0710441fa2768f4c12adf97143440ef8414bb5144b9459d78ff3e0e \
--limit=2 \
--head=57a7944497ca41f424932ae6b70897e7086652ab98450d4aba6a02a2d891501460947812a41028b8041f087066df6dc7e1100c4b0e5cc94bb58b002f6950eb02 \
--reverse \
--node-url=node-6-testnet.remme.io
{
"result": [
{
"header": {
"signer_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad",
"transaction_ids": [
"376efc69c217a0b9deb545348ca32664ce61b3e35706252d1d0374bdb93b10e62abc35fc16a3d19f0d8346ddbadc1c0974af6b4364f98ffea66de72cfb11b238"
]
},
"header_signature": "ed0fc04a114e87ae7d2046db667bb82cf5a9bbab9b51024c4192b569a997785260ea5f4ad55ac4e2a167a04d50806b00f35b2a553bb4072bb5a36be7ba49b9be",
"trace": false,
"transactions": [
{
"header": {
"batcher_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad",
"dependencies": [],
"family_name": "block_info",
"family_version": "1.0",
"inputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"nonce": "",
"outputs": [
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00"
],
"payload_sha512": "7b11153de66545d8c8847004425f9c5815483636688e79fd2bfbb6d979218fbeb7ccdcb244241d8d52ea38a1b1d62c5d178cf74c3c7b5f496936059c616163e2",
"signer_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad"
},
"header_signature": "376efc69c217a0b9deb545348ca32664ce61b3e35706252d1d0374bdb93b10e62abc35fc16a3d19f0d8346ddbadc1c0974af6b4364f98ffea66de72cfb11b238",
"payload": "CtMCCLwbEoABYmZiNzhkNGQxMWQyZjQzOWRlZjkzNTc2Y2YyN2M0NGVhZTNmYmMyM2Q2ODAwNmUyNGRlYmJmZGYxZWRiNmQ4MDY5MDExYzYxNWZjNjk4NGMxM2EzZDJjMDMyYzFhZTY2NWYzNmZjZTUxOWVjZTdlOGI2YmFjMGMxYWRlMTgxYWYaQjAzNzM4ZGYzZjRhYzM2MjFiYThlODk0MTNkM2ZmNGFkMDM2YzNhMGE0ZGJiMTY0YjY5NTg4NWFhYjZhYWI2MTRhZCKAAThmMjJkYjUyNTUyYzQ3MjE4ZTc0ZmE4OGExZTU2NGJhZTE1YjYwMmY0ZTI3ZTZiYTYwOWI0NzM4YjY0ZTllZTYzYzcwMjM4MjI3ZWU0NTU1OTVhNjMzYTIzOWU5ZGZiMWNiMGMxNWI1MzVhZGJkYTZmMGE2Yjk3MmU3ZWU3MWQyKLvr4eYF"
}
]
}
]
}
```
Get a list of batches' identifiers (can be combined with other parameters like `--limit`):
```bash
$ remme batch get-list --ids-only --node-url=node-6-testnet.remme.io
{
"result": [
"6bd3382e3deef34d0bc63a7b450c88c7ae00152f5168c7b4dc4357feff6d52175209919cd0710441fa2768f4c12adf97143440ef8414bb5144b9459d78ff3e0e",
"7a5daba99d5757adc997ea6a0b1b83263b3c16604dbd83c0153dc01c9fd780af4b570338c2ec60e086b1db58a4397a4dc661d6c93b0a7250fe75642e15b26e81",
...
]
}
```
### Node
Get the node configurations — ``remme node get-configs``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node get-configs --node-url=node-genesis-testnet.remme.io
{
"result": {
"configurations": {
"node_address": "1168296ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf",
"node_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad"
}
}
}
```
Get the node's peers — ``remme node get-peers``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node get-peers --node-url=node-genesis-testnet.remme.io
{
"result": {
"peers": [
"tcp://node-22-testnet.remme.io:8800",
"tcp://node-9-testnet.remme.io:8800",
"tcp://node-29-testnet.remme.io:8800"
]
}
}
```
Get node information — ``remme node get-info``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node get-info --node-url=node-27-testnet.remme.io
{
"result": {
"information": {
"is_synced": true,
"peer_count": 3
}
}
}
```
Open the node to participate in the network (executable only on the machine which runs the node) — ``remme node open``:
```bash
$ remme node open
{
"result": {
"batch_id": "b877a10ddc0ef7f28b0b4a075cbab580b5f7be4dc4063e282a87ce812105316569ccba6c554176c36174bb62025181dc7bb9d83cba57d90dd27c04c043261c9c"
}
}
```
Get the initial stake of the node — ``remme node get-initial-stake``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------- |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme node get-initial-stake --node-url=node-27-testnet.remme.io
{
"result": 250000
}
```
### Masternode
Open the masternode (executable only on the machine which runs the node) — ``remme masternode open``:
| Arguments | Type | Required | Description |
| :-------: | :-------: | :--------: | ----------------------------------------------------------- |
| amount | Integer | Yes | Starting amount of tokens to put to the masternode account. |
```bash
$ remme masternode open --amount=300000
{
"result": {
"batch_id": "b877a10ddc0ef7f28b0b4a075cbab580b5f7be4dc4063e282a87ce812105316569ccba6c554176c36174bb62025181dc7bb9d83cba57d90dd27c04c043261c9c"
}
}
```
Close the masternode (executable only on the machine which runs the node) — ``remme masternode close``:
```bash
$ remme masternode close
{
"result": {
"batch_id": "ae0ad8d5379beb28211cdc3f4d70a7ef66852eb815241cb201425897fc470e727c34e67ea77525ac696633afd27cca88227df52493889edcbb6fb840b4c93326"
}
}
```
Set the masternode betting behavior (executable only on the machine which runs the node) — ``remme masternode set-bet``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ---------------------------------------------------------------- |
| bet | String | Yes | Bet to set to the masternode account. |
| | | | Possible values are `min`, `max`, or an integer value (e.g. 20). |
```bash
$ remme masternode set-bet --bet=max
{
"result": {
"batch_id": "a58c23ba6b346aeb3c7186754e436eb23162a5250384667a6c3ce70f7f02e19c42e8ca31f871e4aea333849b8ea752321882977499b1df098832a8296b0c6e9a"
}
}
```
### Public key
Get a list of the addresses of the public keys by account address — ``remme public-key get-list``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | --------------------------------------------------------------------- |
| address | String | Yes | Account address to get a list of the addresses of the public keys by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme public-key get-list \
--address=1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"addresses": [
"a23be10b3aad1b4a98f338c71d6dcdb2aa2f296c7e31fb400615e335dc10dd1d4f62bf",
"a23be14b362514d624c1985277005327f6fc40413fb090eee6fccb673a32c9809060ff"
]
}
}
```
Get information about public key by its address — ``remme public-key get-info``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ---------------------------------------------------------- |
| address | String | Yes | Public key address to get information about public key by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme public-key get-info \
--address=a23be17addad8eeb5177a395ea47eb54b4a646f8c570f4a2ecc0b1d2f6241c6845181b \
--node-url=node-genesis-testnet.remme.io
{
"result": {
"information": {
"address": "a23be10d215132aee9377cfe26b6d301d32da070a799c227fb4701103e5626d48cd6ba",
"entity_hash": "1edd6d5b1c722a83e03b17180b888d89ec4c079a0044f074b7c8bb2720cad8ba4e97a80c7edbd24c1824f5312dfd8a0877453394a63410b52c1f16e1d60ef754",
"entity_hash_signature": "1322ca51fb6d33e44d2b6c028eb668b5712a5277bbdea089112203e8e950d1c7d02d446291865a2f5fca4c6767fb84583e53205df850f1fc05ea6f22c736635f425b0159881f7f998da52378bf08353d87d2a2c226a7ababea9a245e69be06d54c573a42c3be907ca49589a67b5e9cc4d8ed12cea8546b2df531fd9620f4dc71869d8fa0bfcbef239d9a6e2e3bf12bcac4fd562b22ff408d7b077b75d8e59af0348264a7c9e7e61b4c5f844636a0fbbcfae61955efdf10323a992ea2a1734eb0ee7952519b00e696a02e7460771b0e0887e011b709e88abfda896b68150c08dcf6b4bf7c70f996f6031c13311056ab935ce1fdf63d3f19b5a3ca6ae604c4f12b",
"is_revoked": false,
"is_valid": true,
"owner_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad",
"public_key": "30820122300d06092a864886f70d01010105000382010f003082010a02820101008b29cc5ec32dab21b48b63faf2fd00f88879b9d4286c3cde6218d19263ea8226fce499039968c5f9736149e298bbc56680b516f2d83507d88fb95771445ca3c59bcdbb31bb5993a4e5dfcd2c4bc86328ec76e95e2f4582f9cac8223a2f16a2b14c4358b6fb105e37baf9daa9bd5b708ab204d8015a1ce782e28024eae1801151616c90a3b1aa1916d5b8dd021b3aa4cec77450660841f8619a7234c6199d01ccd43b1d6ff7fa5f50bf80bc06b682b126bdca0753a6830b7a95afca79442ec64fd09ddcc34627dcbdad0c5e66317db98d0e1c24c3f992b83f4b0f97e2b0300a2cb51e33eccf060f26b4e19a88f15216f8c17be5f5e023a1f260f7c93a2a4523ed0203010001",
"type": "rsa",
"valid_from": 1556118334,
"valid_to": 1587222334
}
}
}
```
### State
Get a state by its address — ``remme state get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ---------------------------------- |
| address | String | Yes | Account address to get a state by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme state get \
--address=000000a87cb5eafdcca6a8cde0fb0dec1400c5ab274474a6aa82c12840f169a04216b7 \
--node-url=node-6-testnet.remme.io
{
"result": {
"state": {
"data": "CmwKJnNhd3Rvb3RoLnNldHRpbmdzLnZvdGUuYXV0aG9yaXplZF9rZXlzEkIwMmE2NTc5NmYyNDkwOTFjMzA4NzYxNGI0ZDljMjkyYjAwYjhlYmE1ODBkMDQ1YWMyZmQ3ODEyMjRiODdiNmYxM2U=",
"head": "95d78133eb98628d5ff17c7d1972b9ab03e50fceeb8e199d98cb52078550f5473bb001e57c116238697bdc1958eaf6d5f096f7b66974e1ea46b9c9da694be9d9"
}
}
}
```
Get a list of states — ``remme state get-list``:
| Arguments | Type | Required | Description |
| :-------: | :-----: | :------: | ----------------------------------------------------- |
| address | String | No | Account address to get a list of states by. |
| start | String | No | Account address to get a list of states starting from.|
| limit | Integer | No | Maximum amount of transactions to return. |
| head | String | No | Block identifier to get a list of states to. |
| reverse | Bool | No | Parameter to reverse result. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme state get-list \
--address=00001d0024b20fbe284cdaca250b30f40c30c3999e2cafbace268f2f26d9d493a4d09b \
--start=00001d0024b20fbe284cdaca250b30f40c30c3999e2cafbace268f2f26d9d493a4d09b \
--limit=1 \
--head=d3b9c12f76bf33ed0fb70df9f0ab9af9b3e29a6c9cf3e446fb2d799bdae07a92721cc52a0f3c683a972d562abae6a041d09a90c4157fce9bd305036e1cb15149 \
--reverse \
--node-url=node-6-testnet.remme.io
{
"result": [
{
"address": "00001d0024b20fbe284cdaca250b30f40c30c3999e2cafbace268f2f26d9d493a4d09b",
"data": "CmkKH25vZGVfYWNjb3VudF9wZXJtaXNzaW9uc19wb2xpY3kSRggBEkIwMzczOGRmM2Y0YWMzNjIxYmE4ZTg5NDEzZDNmZjRhZDAzNmMzYTBhNGRiYjE2NGI2OTU4ODVhYWI2YWFiNjE0YWQ="
}
]
}
```
### Transaction
Get a list of transactions — ``remme transaction get-list``:
| Arguments | Type | Required | Description |
| :---------: | :-----: | :------: | -------------------------------------------------------------- |
| ids | String | No | Identifiers to get a list of transactions by. |
| start | String | No | Transaction identifier to get a list transaction starting from. |
| limit | Integer | No | Maximum amount of transactions to return. |
| head | String | No | Block identifier to get a list of transactions from. |
| reverse | Bool | No | Parameter to reverse result. |
| ids-only | Bool | No | The flag to get a list of transactions' identifiers. |
| family-name | String | No | List of transactions by its family name. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme transaction get-list \
--ids='568a1094e574747c757c1f5028d9b929105984e509c4f2f3cb76e5f46f03ca4c3681ca0eeca86a4bd4bb5a3eaaa52fd73b08ebc5d5d85fbb1957b064f8b71972,
d9b891d3efdd51cd47156ad2083bf5cabd5b35bb2ebe66813996d1a0f783e58721bbc50917ff284a40696f24058ef1e22e48600abf37d500ace78eadf7f4ecff' \
--start=568a1094e574747c757c1f5028d9b929105984e509c4f2f3cb76e5f46f03ca4c3681ca0eeca86a4bd4bb5a3eaaa52fd73b08ebc5d5d85fbb1957b064f8b71972 \
--limit=2 \
--head=39566f24561727f5ab2d19eb23612f1a38ff5f0cf9491caa0275261706a7cf8b080d38da0a38fa5b1cbef0cced889fdf6da679cc616a9711380f76b33e53efdf \
--reverse \
--family-name=account \
--node-url=node-6-testnet.remme.io
{
"result": {
"data": [
{
"header": {
"batcher_public_key": "03d4613540ce29cd1f5f28ea9169a5cb5853bd53dede635903af9383bc9ffaf079",
"dependencies": [],
"family_name": "account",
"family_version": "0.1",
"inputs": [
"112007db16c75019f59423da4de3cd5c79609989d7dc1697c9975307ea846e1d4af91f",
"1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf"
],
"nonce": "99ccdbcfeb008e2c8407870b7033117e316b4b12df4173f3e2ffd510676e524a77ac64a0b65e6c7889a797fbd4e4462830548f455497e2362dde1bbf35d5372f",
"outputs": [
"112007db16c75019f59423da4de3cd5c79609989d7dc1697c9975307ea846e1d4af91f",
"1120076ecf036e857f42129b58303bcf1e03723764a1702cbe98529802aad8514ee3cf"
],
"payload_sha512": "1f0313cb9cd67559c1d33d61104882b3ebca80dfcd091d5ae3b0ee99bd27723af591551dfeea43be05e2b24a2f9a54adc6c357b60fc5c5720b161c5ff9d10ae1",
"signer_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad"
},
"header_signature": "d9b891d3efdd51cd47156ad2083bf5cabd5b35bb2ebe66813996d1a0f783e58721bbc50917ff284a40696f24058ef1e22e48600abf37d500ace78eadf7f4ecff",
"payload": "CAASTQgAEkYxMTIwMDdkYjE2Yzc1MDE5ZjU5NDIzZGE0ZGUzY2Q1Yzc5NjA5OTg5ZDdkYzE2OTdjOTk3NTMwN2VhODQ2ZTFkNGFmOTFmGOgH"
}
],
"head": "39566f24561727f5ab2d19eb23612f1a38ff5f0cf9491caa0275261706a7cf8b080d38da0a38fa5b1cbef0cced889fdf6da679cc616a9711380f76b33e53efdf",
"paging": {
"limit": 2,
"next": "",
"start": "568a1094e574747c757c1f5028d9b929105984e509c4f2f3cb76e5f46f03ca4c3681ca0eeca86a4bd4bb5a3eaaa52fd73b08ebc5d5d85fbb1957b064f8b71972"
}
}
}
```
Get a list of transactions' identifiers (can be combined with other parameters like `--limit`):
```bash
$ remme transaction get-list --ids-only --node-url=node-6-testnet.remme.io
{
"result": [
"eb662acc48d313c9bba4a72359b0462d607bba8fc66aeb3d169d02fafd21849b6bf8bea8396b54b6fc907e1cce2a386f76bd19889d0f3e496b45b8440b161ebc",
"206a3767f368c1db9d07b273f80d4824d201ae61b9ced8a6aeedac58032c5557544ac622d5e3fd59f6d9873d97af1c6114d0131b4b1a191cbba7d5a8aa5a3caf",
"63ed3259b6067525ae241a12f66b5be1e1502cdbd6f475b139bf94cf4ba842643577835fcef0482d25190243b8dfab3a1f9913f7fd0edc425ad0c19333d8bd4b",
...
]
}
```
Get a transaction by identifier — ``remme transaction get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | --------------------------------- |
| id | String | Yes | Identifier to get transaction by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme transaction get \
--id=64d032fbaae9bc59f9e5484ec6f52cbceef567923456039a26a1cfb8bc9ee2431ac2b5de43efce28ef11820a3734dab9fa56db57a1b2fbdc2323036cceeab6ab \
--node-url=node-6-testnet.remme.io
{
"result": {
"data": {
"header": {
"batcher_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad",
"dependencies": [],
"family_name": "consensus_account",
"family_version": "0.1",
"inputs": [
"116829",
"112007",
"0000007ca83d6bbb759da9cde0fb0dec1400c54773f137ea7cfe91e3b0c44298fc1c14",
"0000007ca83d6bbb759da9cde0fb0dec1400c5034223fb6c3e825ee3b0c44298fc1c14",
"0000007ca83d6bbb759da9cde0fb0dec1400c5e64de9aa6a37ac92e3b0c44298fc1c14",
"00b10c0100000000000000000000000000000000000000000000000000000000000000",
"00b10c00",
"fd0e4f0000000000000000000000000000000000000000000000000000000000000000"
],
"nonce": "b8baa6c54ab9463590627c18fb9c10ed",
"outputs": [
"116829",
"112007",
"fd0e4f0000000000000000000000000000000000000000000000000000000000000000"
],
"payload_sha512": "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e",
"signer_public_key": "03738df3f4ac3621ba8e89413d3ff4ad036c3a0a4dbb164b695885aab6aab614ad"
},
"header_signature": "64d032fbaae9bc59f9e5484ec6f52cbceef567923456039a26a1cfb8bc9ee2431ac2b5de43efce28ef11820a3734dab9fa56db57a1b2fbdc2323036cceeab6ab",
"payload": ""
}
}
}
```
### Receipt
Get a list of the transaction's receipts by identifiers — ``remme receipt get``:
| Arguments | Type | Required | Description |
| :-------: | :----: | :------: | ------------------------------------------------------- |
| ids | String | True | Identifiers to get a list of transaction's receipts by. |
| node-url | String | No | Node URL to apply a command to. |
```bash
$ remme receipt get \
--ids='e79a883581c184787360de8607c5f970cdeeaa684af3e50d8532aa9dd07afa8e7fc92f0dc509b41b9695e795704bdd50455bebd1ed327a5330710ba40698b492,
6593d21046519022ba32c98e934d7dfc81e8b4edf6c064dbf70feb13db4310873ec00816bce8660cafd4fa2a8c80d0147d63cf616c624babd03142c694272017' \
--node-url='159.89.104.9'
{
"result": [
{
"data": [],
"events": [],
"id": "e79a883581c184787360de8607c5f970cdeeaa684af3e50d8532aa9dd07afa8e7fc92f0dc509b41b9695e795704bdd50455bebd1ed327a5330710ba40698b492",
"state_changes": [
{
"address": "00b10c0100000000000000000000000000000000000000000000000000000000000000",
"type": "SET",
"value": "CL0BGIACIKwC"
},
{
"address": "00b10c00000000000000000000000000000000000000000000000000000000000000bd",
"type": "SET",
"value": "CL0BEoABZmQ3ODBjZTA3NjQwYmE0MTEyMjQ4NjkxNTgxYTU5NTg0NWZlNzYyYmYzZmViNDliODQzOTc0YWFlNTc4NDc4YzZiZjUxODczOWVjZGM0OWQ3MDE5MzgzZDNiZDllM2FhNmZhMGFmODM4NGI0NDkxOGYwYmZmMzc0MDJiNTEwYjIaQjAyZDFmYmRhNTBkYmNkMGQzYzI4NmE2YTlmYTcxYWE3Y2UyZDk3MTU5YjkwZGRkNDYzZTA4MTY0MjJkNjIxZTEzNSKAAWZlNTZhMTZkYWIwMDljYzk2ZTcxMjVjNjQ3YjZjNzFlYjEwNjM4MThjZjhkZWNlMjgzYjEyNTQyM2VjYjE4NGY3ZjFlNjE4MDJiZjY2MzgyZGE5MDQ2OTg0MTNmODA4MzEwMzFmOGExYjI5MTUwMjYwYzNmYTRkYjUzN2ZkZjRjKIzggeYF"
}
]
},
...
]
}
```
## Development
<h3 id="development-requirements">Requirements</h4>
- Docker — https://www.docker.com. Install it with the [following reference](https://docs.docker.com/install).
### Docker
Clone the project and move to project folder:
```bash
$ git clone https://github.com/Remmeauth/remme-core-cli && cd remme-core-cli
```
If you already worked with the project, you can clean it's container and images with the following command:
```bash
$ docker rm remme-core-cli -f || true && docker rmi remme-core-cli -f || true
```
Run the ``Docker container`` with the project source code in the background mode:
```bash
$ docker build -t remme-core-cli . -f Dockerfile.development
$ docker run -d --network host -v $PWD:/remme-core-cli --name remme-core-cli remme-core-cli
```
Enter the container bash:
```bash
$ docker exec -it remme-core-cli bash
```
And now being in the container, you can develop the project. For instance, run tests and linters:
```bash
$ coverage run -m pytest -vv tests
$ coverage report -m && coverage xml
$ flake8 cli && flake8 tests/
$ bash <(curl -s https://linters.io/isort-diff) cli tests
```
When you have developed new functionality, check it with the following command. This command creates the ``Python package``
from source code instead of installing it from the ``PyPi``.
```bash
$ pip3 uninstall -y remme-core-cli && rm -rf dist/ remme_core_cli.egg-info && \
python3 setup.py sdist && pip3 install dist/*.tar.gz
```
So after this command, you are free to execute the command line interface as if you installed in through ``pip3 install``:
```bash
$ remme --version
```
With the commands above you could test your features as if user will use it on own.
```bash
$ docker rm $(docker ps -a -q) -f
$ docker rmi $(docker images -q) -f
```
## Production
To build the package and upload it to [PypI](https://pypi.org) to be accessible through [pip](https://github.com/pypa/pip),
use the following commands. [Twine](https://twine.readthedocs.io/en/latest/) requires the username and password of the
account package is going to be uploaded to.
```build
$ python3 setup.py sdist
$ twine upload dist/*
username: remme
password: ********
```
## Contributing
Clone the project and install requirements:
```bash
$ git clone https://github.com/Remmeauth/remme-core-cli && cd remme-core-cli
$ pip3 install -r requirements.txt -r requirements-dev.txt -r requirements-tests.txt
```
When you make changes, ensure your code:
* pass [the checkers](https://github.com/Remmeauth/remme-core-cli/blob/develop/.travis.yml#L16),
* is covered by tests using [pytest](https://docs.pytest.org/en/latest),
* follow team [code style](https://github.com/dmytrostriletskyi/nimble-python-code-style-guide).
If you are new for the contribution, please read:
* Read about pull requests — https://help.github.com/en/articles/about-pull-requests
* Read how to provide pull request — https://help.github.com/en/articles/creating-a-pull-request-from-a-fork
* Also the useful article about how to contribute — https://akrabat.com/the-beginners-guide-to-contributing-to-a-github-project/
### Request pull request's review
If you want to your pull request to be review, ensure you:
1. [Branch isn't out-of-date with the base branch](https://habrastorage.org/webt/ux/gi/wm/uxgiwmnft08fubvjfd6d-8pw2wq.png).
2. [Have written the description of the pull request and have added at least 2 reviewers](https://camo.githubusercontent.com/55c309334a8b61a4848a6ef25f9b0fb3751ae5e9/68747470733a2f2f686162726173746f726167652e6f72672f776562742f74312f70792f63752f7431707963753162786a736c796f6a6c707935306d7862357969652e706e67).
3. [Continuous integration has been passed](https://habrastorage.org/webt/oz/fl/-n/ozfl-nl-jynrh7ofz8yuz9_gapy.png).
| /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/README.md | 0.683631 | 0.744796 | README.md | pypi |
import asyncio
from accessify import implements
from aiohttp_json_rpc import RpcGenericServerDefinedError
from cli.transaction.interfaces import TransactionInterface
loop = asyncio.get_event_loop()
@implements(TransactionInterface)
class Transaction:
"""
Implements transaction.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get_list(self, ids, start, limit, head, reverse, family_name):
"""
Get a list of transactions.
Arguments:
ids (list, optional): identifiers to get a list of transactions by.
start (string, optional): transaction identifier to get a list transaction starting from.
limit (int, optional): maximum amount of transactions to return.
head (string, optional): block identifier to get a list of transactions from.
reverse (bool, optional): parameter to reverse result.
family_name (string, optional): list of a transactions by its family name.
"""
try:
transactions = loop.run_until_complete(
self.service.blockchain_info.get_transactions(query={
'ids': ids,
'start': start,
'limit': limit,
'head': head,
'family_name': family_name,
'reverse': reverse,
}),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return transactions, None
def get_list_ids(self, ids, start, limit, head, reverse, family_name):
"""
Get a list of transactions' identifiers.
A list of transactions' identifiers could be filtered by transactions' identifiers,
start, limit, head, reverse, family_name.
Arguments:
ids (list, optional): identifiers to get a list of blocks by.
start (string, optional): transaction identifier to get a list transaction starting from.
limit (int, optional): maximum amount of blocks to return.
head (string, optional): block identifier to get a list of transactions to.
reverse (bool, optional): parameter to reverse result.
family_name (string, optional): list of a transactions by its family name.
"""
transactions, errors = self.get_list(
ids=ids, start=start, head=head, limit=limit, reverse=reverse, family_name=family_name,
)
if errors is not None:
return None, errors
transactions_identifiers = []
for transaction in transactions.get('data'):
transaction_identifier = transaction.get('header_signature')
transactions_identifiers.append(transaction_identifier)
return transactions_identifiers, None
def get(self, transaction_id):
"""
Get a transaction.
Arguments:
transaction_id (string, required): transaction identifier.
"""
try:
transaction = loop.run_until_complete(
self.service.blockchain_info.get_transaction_by_id(transaction_id=transaction_id),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return transaction, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/transaction/service.py | 0.836621 | 0.263422 | service.py | pypi |
import asyncio
from accessify import implements
from aiohttp_json_rpc import RpcGenericServerDefinedError
from cli.block.interfaces import BlockInterface
loop = asyncio.get_event_loop()
@implements(BlockInterface)
class Block:
"""
Implements block.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get(self, identifier):
"""
Get a block by its identifier.
"""
try:
block = loop.run_until_complete(
self.service.blockchain_info.get_block_by_id(block_id=identifier),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return block.get('data'), None
def get_list(self, ids, head, limit, reverse):
"""
Get a list of blocks.
A list of blocks could be filtered by blocks identifiers, limit, head, reverse.
Arguments:
ids (list, optional): identifiers to get a list of blocks by.
limit (int, optional): maximum amount of blocks to return.
head (string, optional): block identifier to get a list of transactions to.
reverse (bool, optional): parameter to reverse result.
"""
try:
blocks = loop.run_until_complete(
self.service.blockchain_info.get_blocks(query={
'ids': ids,
'limit': limit,
'head': head,
'reverse': reverse,
}),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return blocks.get('data'), None
def get_list_ids(self, ids, head, limit, reverse):
"""
Get a list of blocks identifiers.
A list of blocks identifiers could be filtered by blocks identifiers, limit, head, reverse.
Arguments:
ids (list, optional): identifiers to get a list of blocks by.
limit (int, optional): maximum amount of blocks to return.
head (string, optional): block identifier to get a list of transactions to.
reverse (bool, optional): parameter to reverse result.
"""
blocks, errors = self.get_list(ids=ids, head=head, limit=limit, reverse=reverse)
if errors is not None:
return None, errors
blocks_identifiers = []
for block in blocks:
block_identifier = block.get('header_signature')
blocks_identifiers.append(block_identifier)
return blocks_identifiers, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/block/service.py | 0.741206 | 0.18248 | service.py | pypi |
import asyncio
from accessify import implements
from aiohttp_json_rpc import RpcGenericServerDefinedError
from cli.node_account.interfaces import NodeAccountInterface
loop = asyncio.get_event_loop()
@implements(NodeAccountInterface)
class NodeAccount:
"""
Implements node account.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get(self, address):
"""
Get information about the node account by its address.
Arguments:
address (str, required): node account address to get information about node account by.
"""
try:
node_account_information = loop.run_until_complete(
self.service.node_management.get_node_account(node_account_address=address),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return node_account_information.node_account_response, None
def transfer_tokens(self, address_to, amount):
"""
Transfer tokens to address.
"""
try:
transaction = loop.run_until_complete(self.service.token.transfer(address_to=address_to, amount=amount))
except Exception as error:
return None, str(error)
return {
'batch_id': transaction.batch_id,
}, None
def transfer_tokens_from_frozen_to_unfrozen(self):
"""
Transfer available tokens from frozen to unfrozen reputation's balances.
"""
try:
transfer_transaction = loop.run_until_complete(
self.service.token.transfer_from_frozen_to_unfrozen(),
)
except Exception as error:
return None, str(error)
return {
'batch_identifier': transfer_transaction.batch_id,
}, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/node_account/service.py | 0.67104 | 0.19046 | service.py | pypi |
import asyncio
from accessify import implements
from cli.node.interfaces import NodeInterface
loop = asyncio.get_event_loop()
@implements(NodeInterface)
class Node:
"""
Implements node.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get_configs(self):
"""
Get the node configurations.
"""
try:
node_configurations = loop.run_until_complete(self.service.node_management.get_node_config())
except Exception as error:
return None, str(error)
return {
'configurations': node_configurations.data,
}, None
def get_peers(self):
"""
Get the node's peers.
"""
try:
node_peers = loop.run_until_complete(self.service.blockchain_info.get_peers())
except Exception as error:
return None, str(error)
return {
'peers': node_peers,
}, None
def get_info(self):
"""
Get information about synchronization and peer count of the node form.
"""
try:
node_information = loop.run_until_complete(self.service.node_management.get_node_info())
except Exception as error:
return None, str(error)
return {
'information': node_information.data,
}, None
def get_initial_stake(self):
"""
Get the initial stake of the node.
"""
try:
node_initial_stake = loop.run_until_complete(self.service.node_management.get_initial_stake())
except Exception as error:
return None, str(error)
return node_initial_stake, None
def open(self):
"""
Open the node to participate in the network.
"""
try:
open_node = loop.run_until_complete(self.service.node_management.open_node())
except Exception as error:
return None, str(error)
return {
'batch_id': open_node.batch_id,
}, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/node/service.py | 0.563018 | 0.187356 | service.py | pypi |
import asyncio
from accessify import implements
from aiohttp_json_rpc import RpcGenericServerDefinedError
from cli.state.interfaces import StateInterface
loop = asyncio.get_event_loop()
@implements(StateInterface)
class State:
"""
Implements state.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get(self, address):
"""
Get a state by its address.
"""
try:
state = loop.run_until_complete(
self.service.blockchain_info.get_state_by_address(address=address),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return {
'state': state,
}, None
def get_list(self, address, start, limit, head, reverse):
"""
Get a list of states.
A list of states could be filtered by account address, start address, limit, head identifier, reverse.
Arguments:
address (string, optional): account address to get a state by.
start (string, optional): account address to get a list of states starting from.
limit (int, optional): maximum amount of states to return.
head (string, optional): block identifier to get a list of states to.
reverse (bool, optional): parameter to reverse result.
"""
try:
states = loop.run_until_complete(
self.service.blockchain_info.get_states(query={
'address': address,
'start': start,
'limit': limit,
'head': head,
'reverse': reverse,
}),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return states.get('data'), None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/state/service.py | 0.702122 | 0.323567 | service.py | pypi |
import re
from marshmallow import (
ValidationError,
fields,
)
from cli.constants import (
ADDRESS_REGEXP,
BATCH_IDENTIFIER_REGEXP,
BET_TYPES,
BLOCK_IDENTIFIER_REGEXP,
DOMAIN_NAME_REGEXP,
FAMILY_NAMES,
PRIVATE_KEY_REGEXP,
PUBLIC_KEY_ADDRESS_REGEXP,
SWAP_IDENTIFIER_REGEXP,
TRANSACTION_IDENTIFIER_REGEXP,
)
class AccountAddressField(fields.Field):
"""
Implements validation of the account address.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (account address) that was passed to field.
"""
address = value
if re.match(pattern=ADDRESS_REGEXP, string=address) is None:
raise ValidationError(f'The following address `{address}` is invalid.')
return address
class FamilyNameField(fields.Field):
"""
Implements validation of the family name.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (family name) that was passed to field.
"""
family_name = value
if family_name not in FAMILY_NAMES:
raise ValidationError(f'The following family name `{family_name}` is invalid.')
return family_name
class TransactionIdentifiersListField(fields.Field):
"""
Implements validation of the list of the identifiers.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (list of the identifiers) that was passed to field.
"""
validated_identifiers = []
for identifier in value.split(','):
identifier = identifier.strip()
if re.match(pattern=TRANSACTION_IDENTIFIER_REGEXP, string=identifier) is None:
raise ValidationError(f'The following identifier `{identifier}` is invalid.')
validated_identifiers.append(identifier)
return validated_identifiers
class TransactionIdentifierField(fields.Field):
"""
Implements validation of the identifier.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (identifier) that was passed to field.
"""
transaction_identifier = value
if re.match(pattern=TRANSACTION_IDENTIFIER_REGEXP, string=transaction_identifier) is None:
raise ValidationError(f'The following identifier `{transaction_identifier}` is invalid.')
return transaction_identifier
class BatchIdentifiersListField(fields.Field):
"""
Implements validation of the list of the identifiers.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (list of the identifiers) that was passed to field.
"""
validated_identifiers = []
for identifier in value.split(','):
identifier = identifier.strip()
if re.match(pattern=BATCH_IDENTIFIER_REGEXP, string=identifier) is None:
raise ValidationError(f'The following identifier `{identifier}` is invalid.')
validated_identifiers.append(identifier)
return validated_identifiers
class BatchIdentifierField(fields.Field):
"""
Implements validation of the identifier.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (batch identifier) that was passed to field.
"""
batch_identifier = value
if re.match(pattern=BATCH_IDENTIFIER_REGEXP, string=batch_identifier) is None:
raise ValidationError(f'The following identifier `{batch_identifier}` is invalid.')
return batch_identifier
class NodeUrlField(fields.Field):
"""
Implements validation of the node URL.
If node URL is localhost, it means client didn't passed any URL, so nothing to validate.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (node URL) that was passed to field.
"""
node_url = value
if node_url == 'localhost' or node_url == '127.0.0.1':
return node_url
if 'http' in node_url or 'https' in node_url:
raise ValidationError(f'Pass the following node URL `{node_url}` without protocol (http, https, etc.).')
if re.match(pattern=DOMAIN_NAME_REGEXP, string=node_url) is None:
raise ValidationError(f'The following node URL `{node_url}` is invalid.')
return node_url
class PrivateKeyField(fields.Field):
"""
Implements validation of the private key.
"""
def _deserialize(self, value, attr, data, **kwargs):
"""
Validate data (private key) that was passed to field.
"""
private_key = value
if re.match(pattern=PRIVATE_KEY_REGEXP, string=private_key) is None:
raise ValidationError(f'The following private key `{private_key}` is invalid.')
return private_key
class PublicKeyAddressField(fields.Field):
"""
Implements validation of the public key address.
"""
def _deserialize(self, value, attr, data, **kwargs):
"""
Validate data (public key address) that was passed to field.
"""
public_key_address = value
if re.match(pattern=PUBLIC_KEY_ADDRESS_REGEXP, string=public_key_address) is None:
raise ValidationError(f'The following public key address `{public_key_address}` is invalid.')
return public_key_address
class SwapIdentifierField(fields.Field):
"""
Implements validation of the swap identifier.
"""
def _deserialize(self, value, attr, data, **kwargs):
"""
Validate data (swap identifier) that was passed to field.
"""
swap_identifier = value
if re.match(pattern=SWAP_IDENTIFIER_REGEXP, string=swap_identifier) is None:
raise ValidationError(f'The following swap identifier `{swap_identifier}` is invalid.')
return swap_identifier
class BlockIdentifierField(fields.Field):
"""
Implements validation of the block identifier.
"""
def _deserialize(self, value, attr, data, **kwargs):
"""
Validate data (block identifier) that was passed to field.
"""
block_identifier = value
if re.match(pattern=BLOCK_IDENTIFIER_REGEXP, string=block_identifier) is None:
raise ValidationError(f'The following block identifier `{block_identifier}` is invalid.')
return block_identifier
class BlockIdentifiersListField(fields.Field):
"""
Implements validation of the list of block identifiers.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (list of block identifiers) that was passed to field.
"""
block_identifiers = value
block_validated_identifiers = []
for identifier in block_identifiers.split(','):
identifier = identifier.strip()
if re.match(pattern=BLOCK_IDENTIFIER_REGEXP, string=identifier) is None:
raise ValidationError(f'The following block identifier `{identifier}` is invalid.')
block_validated_identifiers.append(identifier)
return block_validated_identifiers
class BetField(fields.Field):
"""
Implements validation of the bet.
"""
def _deserialize(self, value, attr, obj, **kwargs):
"""
Validate data (bet) that was passed to field.
Valid bet is `min` or `max` as strings, or an integer positive value.
"""
bet = value
if bet in BET_TYPES:
return bet
if bet.isdigit():
return int(bet)
raise ValidationError(f'The following bet `{bet}` is invalid.') | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/generic/forms/fields.py | 0.712532 | 0.198045 | fields.py | pypi |
import asyncio
from accessify import implements
from cli.errors import NotSupportedBetError
from cli.masternode.interfaces import MasternodeInterface
loop = asyncio.get_event_loop()
@implements(MasternodeInterface)
class Masternode:
"""
Implements masternode.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def open(self, amount):
"""
Open the masternode with starting amount.
"""
try:
open_masternode = loop.run_until_complete(
self.service.node_management.open_master_node(amount=amount),
)
except Exception as error:
return None, str(error)
return {
'batch_id': open_masternode.batch_id,
}, None
def close(self):
"""
Close the masternode.
"""
try:
close_masternode = loop.run_until_complete(
self.service.node_management.close_master_node(),
)
except Exception as error:
return None, str(error)
return {
'batch_id': close_masternode.batch_id,
}, None
def set_bet(self, bet):
"""
Set the masternode betting behavior.
Arguments:
bet (string or integer, required): type of bet to set to the masternode account. Valid bet is
`min` or `max` as strings, or an integer value (e.g. 20).
"""
if isinstance(bet, str):
bet = bet.upper()
if isinstance(bet, int):
if bet == 0:
raise NotSupportedBetError(
f'The following bet `{bet}` is not supported, the minimum bet is integer 1.',
)
try:
masternode_bet = loop.run_until_complete(
self.service.node_management.set_bet(bet_type=bet),
)
except Exception as error:
return None, str(error)
return {
'batch_id': masternode_bet.batch_id,
}, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/masternode/service.py | 0.625552 | 0.312777 | service.py | pypi |
import asyncio
from accessify import implements
from aiohttp_json_rpc import RpcGenericServerDefinedError
from cli.batch.interfaces import BatchInterface
loop = asyncio.get_event_loop()
@implements(BatchInterface)
class Batch:
"""
Implements batch.
"""
def __init__(self, service):
"""
Constructor.
Arguments:
service: object to interact with Remme core API.
"""
self.service = service
def get(self, id):
"""
Get a batch by its identifier.
Arguments:
id (string, required): batch identifier.
"""
try:
batch = loop.run_until_complete(
self.service.blockchain_info.get_batch_by_id(batch_id=id),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return batch.get('data'), None
def get_status(self, id):
"""
Get a batch status by its identifier.
Arguments:
id (string, required): batch identifier.
"""
try:
batch_status = loop.run_until_complete(self.service.blockchain_info.get_batch_status(batch_id=id))
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return batch_status, None
def get_list(self, ids, start, limit, head, reverse):
"""
Get a list of batches.
Arguments:
ids (list, optional): identifiers to get a list of batches by.
start (string, optional): batch identifier to get a list of batches starting from.
limit (int, optional): maximum amount of batches to return.
head (string, optional): block identifier to get a list of batches from.
reverse (bool, optional): parameter to reverse result.
"""
try:
batches = loop.run_until_complete(
self.service.blockchain_info.get_batches(query={
'ids': ids,
'start': start,
'limit': limit,
'head': head,
'reverse': reverse,
}),
)
except RpcGenericServerDefinedError as error:
return None, str(error.message)
except Exception as error:
return None, str(error)
return batches.get('data'), None
def get_list_ids(self, ids, start, limit, head, reverse):
"""
Get a list of batches' identifiers.
Arguments:
ids (list, optional): identifiers to get a list of batches by.
start (string, optional): batch identifier to get a list of batches starting from.
limit (int, optional): maximum amount of batches to return.
head (string, optional): block identifier to get a list of batches from.
reverse (bool, optional): parameter to reverse result.
"""
batches, errors = self.get_list(ids=ids, start=start, head=head, limit=limit, reverse=reverse)
if errors is not None:
return None, errors
batch_identifiers = []
for batch in batches:
batch_identifier = batch.get('header_signature')
batch_identifiers.append(batch_identifier)
return batch_identifiers, None | /remme-core-cli-0.6.0.tar.gz/remme-core-cli-0.6.0/cli/batch/service.py | 0.824002 | 0.183575 | service.py | pypi |
import os
import time
from typing import List
import csv
from .domain import Image, Dataset, AnnotationSet, class_encodings, Annotation, AnnotatedImage
from .api import API
from .endpoints import frontend
from .viewer import factory
class SDK:
"""
Creates sdk object, and checks connection to server
Args:
server: server host name, e.g. ``http://localhost:8123/``
email: user credentials
password: user credentials
viewer: allows to choose between browser, electron and jupyter viewer.
To be able change viewer, you can use :func:`set_viewer` function. See example.
Example::
import remo
remo.set_viewer('browser')
"""
def __init__(self, server: str, email: str, password: str, viewer: str = 'browser'):
self.api = API(server, email, password)
self.viewer = None
self.set_viewer(viewer)
def set_public_url(self, public_url: str):
self.api.set_public_url(public_url)
def set_viewer(self, viewer: str):
"""
Allows to choose one of available viewers
Args:
viewer: choose between 'browser', 'electron' and 'jupyter' viewer
"""
self.viewer = factory(viewer)
def create_dataset(
self,
name: str,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
class_encoding=None,
wait_for_complete=True
) -> Dataset:
"""
Creates a new dataset in Remo and optionally populate it with images and annotations.
To add annotations, you need to specify an annotation task.
Args:
name: name of the dataset.
local_files: list of files or directories.
These files will be linked.
Folders will be recursively scanned for image files: ``jpg``, ``png``, ``tif``.
paths_to_upload: list of files or directories.
These files will be copied. Supported files: images, annotations and archives.
- image files: ``jpg``, ``png``, ``tif``.
- annotation files: ``json``, ``xml``, ``csv``.
- archive files: ``zip``, ``tar``, ``gzip``.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: specifies annotation task. See also: :class:`remo.task`.
class_encoding: specifies how to convert class labels in annotation files to classes.
See also: :class:`remo.class_encodings`.
wait_for_complete: blocks function until upload data completes
Returns:
:class:`remo.Dataset`
"""
json_data = self.api.create_dataset(name)
ds = Dataset(**json_data)
ds.add_data(
local_files, paths_to_upload, urls, annotation_task=annotation_task, class_encoding=class_encoding,
wait_for_complete=wait_for_complete
)
ds.fetch()
return ds
def add_data_to_dataset(
self,
dataset_id: int,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
folder_id: int = None,
annotation_set_id: int = None,
class_encoding=None,
wait_for_complete=True
) -> dict:
"""
Adds images and/or annotations to an existing dataset.
Use ``local files`` to link (rather than copy) images. Use ``paths_to_upload`` if you want to copy image files or archive files. Use ``urls`` to download from the web images, annotations or archives.
Adding images: support for ``jpg``,``jpeg``, ``png``, ``tif``
Adding annotations: to add annotations, you need to specify the annotation task and make sure the specific file format is one of those supported. See documentation here: https://remo.ai/docs/annotation-formats/
Adding archive files: support for ``zip``, ``tar``, ``gzip``
Args:
dataset_id: id of the dataset to add data to
local_files: list of files or directories containing annotations and image files
Remo will create smaller copies of your images for quick previews but it will point at the original files to show original resolutions images.
Folders will be recursively scanned for image files.
paths_to_upload: list of files or directories containing images, annotations and archives.
These files will be copied inside .remo folder.
Folders will be recursively scanned for image files.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: annotation tasks tell remo how to parse annotations. See also: :class:`remo.task`.
folder_id: specifies target virtual folder in the remo dataset. If None, it adds to the root level.
annotation_set_id: specifies target annotation set in the dataset. If None: if no annotation set exists, one will be automatically created. If exactly one annotation set already exists, it will add annotations to that annotation set, provided the task matches.
class_encoding: specifies how to convert labels in annotation files to readable labels. If None, Remo will try to interpret the encoding automatically - which for standard words, means they will be read as they are.
See also: :class:`remo.class_encodings`.
wait_for_complete: blocks function until upload data completes
Returns:
Dictionary with results for linking files, upload files and upload urls::
{
'files_link_result': ...,
'files_upload_result': ...,
'urls_upload_result': ...
}
"""
kwargs = {
'annotation_task': annotation_task,
'folder_id': folder_id,
'annotation_set_id': annotation_set_id,
}
# logic to deal with the case where we are trying to upload annotations without specifying the annotation set id
if annotation_task and (not annotation_set_id):
annotation_sets = self.list_annotation_sets(dataset_id)
if len(annotation_sets) > 1:
raise Exception(
'Define which annotation set you want to use. Dataset {} has {} annotation sets. '
'You can see them with my_dataset.annotation_sets()'.format(dataset_id, len(annotation_sets))
)
elif len(annotation_sets) == 1:
kwargs['annotation_set_id'] = annotation_sets[0].id
# check values
if local_files:
self._raise_value_error(local_files, 'local_files', list, 'list of paths')
if paths_to_upload:
self._raise_value_error(paths_to_upload, 'paths_to_upload', list, 'list of paths')
if urls:
self._raise_value_error(urls, 'urls', list, 'list of URLs')
session_id = self.api.create_new_upload_session(dataset_id)
kwargs['session_id'] = session_id
if local_files:
encoding = class_encodings.for_linking(class_encoding)
self.api.upload_local_files(
dataset_id, local_files, class_encoding=encoding, **kwargs
)
if paths_to_upload:
encoding = class_encodings.for_upload(class_encoding)
self.api.bulk_upload_files(
dataset_id, paths_to_upload, class_encoding=encoding, **kwargs
)
if urls:
encoding = class_encodings.for_linking(class_encoding)
self.api.upload_urls(
dataset_id, urls, class_encoding=encoding, **kwargs
)
self.api.complete_upload_session(session_id)
if not wait_for_complete:
return {'session_id': session_id}
return self._report_processing_data_progress(session_id)
def _report_processing_data_progress(self, session_id: str):
"""
Reports progress for upload session
Args:
session_id: upload session id
Returns:
session status
"""
def format_msg(msg, *args, max_length=100):
msg = msg.format(*args)
return '{} {}'.format(msg, ' ' * (max_length - len(msg)))
def print_session_errors(data, key='error'):
for err in data:
msg = err[key] if 'value' not in err else '{}: {}'.format(err['value'], err[key])
print(msg)
def print_session_warnings(data):
print_session_errors(data, key='warning')
def print_file_errors(data, key='errors'):
errors = data.get(key, [])
for err in errors:
filename, errs = err['filename'], err[key]
msg = errs[0] if len(errs) == 1 else '\n * ' + '\n * '.join(errs)
msg = '{}: {}'.format(filename, msg)
print(msg)
def print_file_warnings(data):
print_file_errors(data, key='warnings')
last_msg = ''
while True:
session = self.api.get_upload_session_status(session_id)
if not session:
raise Exception('Something went wrong, got empty session from server')
status = session.get('status')
substatus = session.get('substatus')
uploaded = session['uploaded']['total']
if status == 'not complete':
msg = format_msg('Acquiring data - {} files, {}', uploaded['items'], uploaded['human_size'])
if msg != last_msg:
print(msg, end='\r')
last_msg = msg
elif status == 'pending':
msg = format_msg('Acquiring data - completed')
if msg != last_msg:
print(msg)
last_msg = msg
elif status == 'in progress':
msg = 'Processing data'
if substatus:
msg = '\r{}'.format(substatus)
else:
msg = '{}'.format(msg)
msg = format_msg(msg)
if msg != last_msg:
print(msg, end=' ')
last_msg = msg
elif status in ('done', 'failed'):
print(format_msg('Processing data - completed'))
msg = 'Data upload completed' if status == 'done' else 'Data upload completed with some errors:'
print(msg)
if status == 'failed':
print_session_errors(session.get('errors', []))
print_file_errors(session['images'])
print_file_errors(session['annotations'])
if session.get('warnings', []) or session['images'].get('warnings', []) or session['annotations'].get('warnings', []):
print('With some warnings:')
print_session_warnings(session.get('warnings', []))
print_file_warnings(session['images'])
print_file_warnings(session['annotations'])
return session
time.sleep(1)
@staticmethod
def _raise_value_error(value, value_name, expected_type, expected_description):
if not isinstance(value, expected_type):
raise ValueError(
'Parameter "{}" should be a {}, but instead is a {}.'.format(
value_name, expected_description, type(value)
)
)
def list_datasets(self) -> List[Dataset]:
"""
Lists the available datasets
Returns:
List[:class:`remo.Dataset`]
"""
json_data = self.api.list_datasets()
return [Dataset(**ds_item) for ds_item in json_data.get('results', [])]
def get_dataset(self, dataset_id: int) -> Dataset:
"""
Retrieves a dataset with given dataset id.
Args:
dataset_id: dataset id
Returns:
:class:`remo.Dataset`
"""
json_data = self.api.get_dataset(dataset_id)
if json_data.get('detail') == "Not found.":
raise Exception(
"Dataset ID {} not found. "
"You can check your existing datasets with `remo.list_datasets()`".format(dataset_id)
)
return Dataset(**json_data)
def delete_dataset(self, dataset_id: int):
"""
Deletes dataset
Args:
dataset_id: dataset id
"""
self.api.delete_dataset(dataset_id)
def list_annotation_sets(self, dataset_id: int) -> List[AnnotationSet]:
"""
Returns a list of AnnotationSet containing all the AnnotationSets of a given dataset
Args:
dataset_id: dataset id
Returns:
List[:class:`remo.AnnotationSet`]
"""
result = self.api.list_annotation_sets(dataset_id)
return [
AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
updated_at=annotation_set['updated_at'],
task=annotation_set['task']['name'],
dataset_id=dataset_id,
top3_classes=annotation_set['statistics']['top3_classes'],
total_images=annotation_set['statistics']['annotated_images_count'],
total_classes=annotation_set['statistics']['total_classes'],
total_annotation_objects=annotation_set['statistics']['total_annotation_objects'],
)
for annotation_set in result.get('results', [])
]
def get_annotation_set(self, annotation_set_id: int) -> AnnotationSet:
"""
Retrieves annotation set
Args:
annotation_set_id: annotation set id
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.api.get_annotation_set(annotation_set_id)
if 'detail' in annotation_set:
raise Exception(
'Annotation set with ID = {} not found. '
'You can check the list of annotation sets in your dataset using dataset.annotation_sets()'.format(annotation_set_id)
)
return AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
updated_at=annotation_set['updated_at'],
task=annotation_set['task']['name'],
dataset_id=annotation_set['dataset']['id'],
total_classes=len(annotation_set['classes']),
)
def _export_annotations(
self,
annotation_set_id: int,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags : bool = True,
filter_by_tags: list = None
) -> bytes:
"""
Exports annotations in a Binary format for a given annotation set.
To export to file, use export_annotations_to_file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags.
Args:
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv']. Default: 'json'
append_path: if True, appends the path to the filename (e.g. local path). Default: True
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default: 'pixel'
export_tags: if True, exports the tags to a CSV file. Default: True
filter_by_tags: allows to export annotations only for images containing certain image tags. It can be of type List[str] or str. Default: None
Returns:
annotation file content
"""
return self.api.export_annotations(
annotation_set_id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
full_path=append_path,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
def export_annotations_to_file(
self,
output_file: str,
annotation_set_id: int,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags: bool = True,
filter_by_tags: list = None
):
"""
Exports annotations in a given format and saves it to a file.
If export_tags = True, output_file needs to be a .zip file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags
Example::
# Download and unzip this sample dataset: https://s-3.s3-eu-west-1.amazonaws.com/dogs_dataset.json
dogs_dataset = remo.create_dataset(name = 'dogs_dataset',
local_files = ['dogs_dataset.json'],
annotation_task = 'Instance Segmentation')
dogs_dataset.export_annotations_to_file(output_file = './dogs_dataset_train.json',
annotation_format = 'coco',
append_path = False,
export_tags = False,
filter_by_tags = 'train')
Args:
output_file: output file to save. Includes file extension and can include file path. If export_tags = True, output_file needs to be a .zip file
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv']. Default: 'json'
append_path: if True, appends the path to the filename (e.g. local path). Default: True
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default: 'pixel'
export_tags: if True, exports also all the tags to a CSV file. Default: True
filter_by_tags: allows to export annotations only for images containing certain image tags. It can be of type List[str] or str. Default: None
"""
_, file_extension = os.path.splitext(output_file)
if (export_tags and file_extension is not '.zip'):
raise Exception("If export_tags = True, output_file needs to be a ZIP file. \nChange {} to be .zip or set export_tags = False".format(output_file))
content = self._export_annotations(
annotation_set_id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
append_path=append_path,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
self._save_to_file(content, output_file)
def _save_to_file(self, content: bytes, output_file: str):
output_file = self._resolve_path(output_file)
dir_path = os.path.dirname(output_file)
os.makedirs(dir_path, exist_ok=True)
with open(output_file, 'wb') as out_file:
out_file.write(content)
@staticmethod
def _resolve_path(path: str):
if path.startswith('~'):
path = os.path.expanduser(path)
return os.path.realpath(os.path.abspath(path))
def get_annotation_info(self, dataset_id: int, annotation_set_id: int, image_id: int) -> list:
"""
Returns current annotations for the image
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
image_id: image id
Returns:
annotations info - list of annotation objects or classes
"""
resp = self.api.get_annotation_info(dataset_id, annotation_set_id, image_id)
return resp.get('annotation_info', [])
def list_image_annotations(
self, dataset_id: int,
annotation_set_id: int,
image_id: int
) -> List[Annotation]:
"""
Returns annotations for a given image
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
image_id: image id
Returns:
List[:class:`remo.Annotation`]
"""
annotation_items = self.get_annotation_info(dataset_id, annotation_set_id, image_id)
img = self.get_image(image_id)
if not img:
return None
annotations = []
for item in annotation_items:
annotation = Annotation(img_filename=img.name)
if 'lower' in item:
annotation.classes = item.get('name')
else:
classes = [cls.get('name') for cls in item.get('classes', [])]
annotation.classes = classes
points = item.get('coordinates')
if len(points) == 2:
bbox = [points[0]['x'], points[0]['y'], points[1]['x'], points[1]['y']]
annotation.bbox = bbox
elif len(points) > 2:
segment = []
for p in points:
segment.append(p['x'])
segment.append(p['y'])
annotation.segment = segment
annotations.append(annotation)
return annotations
def list_annotations(self, dataset_id: int, annotation_set_id: int) -> List[Annotation]:
"""
Returns all annotations for a given annotation set
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
images = self.list_dataset_images(dataset_id)
annotations = []
for img in images:
annotations += self.list_image_annotations(dataset_id, annotation_set_id, img.id)
return annotations
def create_annotation_set(
self, annotation_task: str, dataset_id: int, name: str, classes: List[str] = []
) -> AnnotationSet:
"""
Creates a new annotation set within the given dataset
Args:
annotation_task: specified task for the annotation set. See also: :class:`remo.task`
dataset_id: dataset id
name: name of the annotation set
classes: list of classes. Default is no classes
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.api.create_annotation_set(annotation_task, dataset_id, name, classes)
if 'error' in annotation_set:
raise Exception(
'Error while creating an annotation set. Message:\n{}'.format(annotation_set['error'])
)
return AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
task=annotation_set['task'],
dataset_id=annotation_set['dataset_id'],
total_classes=len(annotation_set['classes']),
)
def add_annotations_to_image(self, annotation_set_id: int, image_id: int, annotations: List[Annotation]):
"""
Adds annotation to a given image
#TODO: check instance segmentation
Args:
annotation_set_id: annotation set id
image_id: image id
annotations: Annotation object
"""
annotation_set = self.get_annotation_set(annotation_set_id)
dataset_id = annotation_set.dataset_id
annotation_info = self.get_annotation_info(dataset_id, annotation_set_id, image_id)
object_id = len(annotation_info)
objects = []
classes = []
for item in annotations:
if item.bbox:
objects.append(
{
"name": "OBJ " + str(object_id),
"coordinates": [
{"x": item.bbox.xmin, "y": item.bbox.ymin},
{"x": item.bbox.xmax, "y": item.bbox.ymax},
],
"auto_created": False,
"position_number": object_id,
"classes": [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
],
"objectId": object_id,
"isHidden": False,
}
)
object_id += 1
elif item.segment:
objects.append(
{
"name": "OBJ " + str(object_id),
"coordinates": item.segment.points,
"auto_created": False,
"position_number": object_id,
"classes": [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
],
"objectId": object_id,
"isHidden": False,
}
)
object_id += 1
else:
classes += [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
]
return self.api.add_annotation(
dataset_id, annotation_set_id, image_id, annotation_info, classes=classes, objects=objects
)
def list_annotation_set_classes(self, annotation_set_id: int) -> List[str]:
"""
List classes within the annotation set
Args:
annotation_set_id: annotation set id
Returns:
list of classes
"""
classes_with_ids = self.api.list_annotation_set_classes(annotation_set_id)
return [item.get('name') for item in classes_with_ids]
def list_dataset_images(self, dataset_id: int, limit: int = None, offset: int = None) -> List[Image]:
"""
Returns a list of images within a dataset with given dataset_id
Args:
dataset_id: dataset id
limit: limits result images
offset: specifies offset
Returns:
List[:class:`remo.Image`]
"""
json_data = self.api.list_dataset_images(dataset_id, limit=limit, offset=offset)
if 'error' in json_data:
raise Exception(
'Failed to get all images for dataset ID = {}. Error message:\n: {}'.format(
dataset_id, json_data.get('error')
)
)
images = json_data.get('results', [])
return [Image(**img) for img in images]
def get_image_content(self, url: str) -> bytes:
"""
Get image file content by url
Args:
url: image url
Returns:
image binary data
"""
return self.api.get_image_content(url)
def get_image(self, image_id: int) -> Image:
"""
Retrieves image by a given image id
Args:
image_id: image id
Returns:
:class:`remo.Image`
"""
json_data = self.api.get_image(image_id)
if 'error' in json_data:
raise Exception(
'Failed to get image by ID = {}. Error message:\n: {}'.format(
image_id, json_data.get('error')
)
)
return Image(**json_data)
def search_images(
self,
dataset_id: int,
annotation_sets_id: int = None,
classes: str = None, classes_not: str = None,
tags: str = None, tags_not: str = None,
image_name_contains: str = None,
limit: int = None,
) -> List[AnnotatedImage]:
"""
Search images by classes and tags
Examples::
remo.search_images(dataset_id=1, classes = ["dog","person"])
remo.search_images(dataset_id=1, image_name_contains = "pic2")
Args:
dataset_id: the ID of the dataset to search into
annotation_sets_id: the annotation sets ID to search into (can be multiple, e.g. [1, 2]). No need to specify it if the dataset has only one annotation set
classes: string or list of strings - search for images which have objects of all the given classes
classes_not: string or list of strings - search for images excluding those that have objects of all the given classes
tags: string or list of strings - search for images having all the given tags
tags_not: string or list of strings - search for images excluding those that have all the given tags
image_name_contains: search for images whose name contains the given string
limit: limits number of search results (by default returns all results)
Returns:
List[:class:`remo.AnnotatedImage`]
"""
if not isinstance(dataset_id, int):
raise Exception("Enter a valid dataset_id to search into")
if any((classes, classes_not, tags, tags)) and not annotation_sets_id:
# logic to deal with the case where we are trying to upload annotations without specifying the annotation set id
# we have this as a method in Dataset class (default_annotation_set). We might want to move the whole logic as a method of the SDK object
annotation_sets = self.list_annotation_sets(dataset_id)
if len(annotation_sets) > 1:
raise Exception(
'Define which annotation set you want to use. Dataset {} has {} annotation sets. '
'You can see them with my_dataset.annotation_sets()'.format(dataset_id, len(annotation_sets))
)
elif len(annotation_sets) == 1:
annotation_sets_id = annotation_sets[0].id
json_data = self.api.search_images(
dataset_id,
annotation_sets=annotation_sets_id,
classes=classes, classes_not=classes_not,
tags=tags, tags_not=tags_not,
image_name_contains=image_name_contains,
limit=limit)
result = []
for entry in json_data:
img_json = entry.get('image', {})
annotations_json = img_json.get('annotations', [])
result.append(AnnotatedImage(Image(dataset_id=dataset_id, **img_json), annotations_json))
return result
def view_search(self):
"""
Opens browser in search page
"""
return self._view(frontend.search)
def view_image(self, image_id: int, dataset_id: int):
"""
Opens browser on the image view for given image
Args:
image_id: image id
dataset_id: dataset id
"""
img = self.get_image(image_id)
if not img:
return
if img.dataset_id != dataset_id:
raise Exception('Image ID = {} not found in dataset ID: {}'.format(image_id, dataset_id))
return self._view(frontend.image_view.format(image_id, dataset_id))
def open_ui(self):
"""
Opens the main page of Remo
"""
return self._view(frontend.datasets)
def view_dataset(self, id: int):
"""
Opens browser for the given dataset
Args:
id: dataset id
"""
return self._view(frontend.datasets, id)
def view_annotation_tool(self, id: int):
"""
Opens browser in annotation view for the given annotation set
Args:
id: annotation set id
"""
return self._view(frontend.annotation.format(id))
def view_annotate_image(self, annotation_set_id: int, image_id: int):
"""
Opens browser on the annotation tool for giving image
Args:
annotation_set_id: annotation set id
image_id: image id
"""
return self._view(frontend.annotate_image.format(annotation_set_id, image_id))
def view_annotation_stats(self, annotation_set_id: int):
"""
Opens browser in annotation set insights page
Args:
annotation_set_id: annotation set id
"""
return self._view(frontend.annotation_set_insights.format(annotation_set_id))
def _view(self, url, *args, **kwargs):
return self.viewer.browse(self.api.public_url(url, *args, **kwargs))
def generate_annotations_from_folders(self,
path_to_data_folder: str,
output_file_path : str = './annotations.csv',
append_path : bool = True):
"""
Creates a CSV annotation file associating images with labels, starting from folders named with labels (a common folder structure for Image Classification tasks). The CSV file is saved in the same input directory where images are stored.
Example of data structure for a dog / cat dataset:
- cats_and_dogs
- dog
- img1.jpg
- img2.jpg
- ...
- cat
- img199.jpg
- img200.jpg
- ...
Example::
# Download and unzip this sample dataset: s-3.s3-eu-west-1.amazonaws.com/cats_and_dogs.zip
data_path = "cats_and_dogs"
remo.generate_annotations_from_folders(path_to_data_folder=data_path)
Args:
path_to_data_folder: path to the source folder where data is stored
output_file_path: location and filename where to store the file. Default: './annotations.csv'
append_path: if True, file paths are appended to filenames in the output file, otherwise the filename alone is used. Default : True
Returns:
output_file_path: string, path to the generated CSV annotation file. Format: 'file_name', 'class_name'
"""
classes = [d.name for d in os.scandir(path_to_data_folder) if d.is_dir()]
im_dict = {}
for class_name in classes:
class_path = os.path.join(path_to_data_folder, class_name)
im_list = os.listdir(class_path)
if append_path:
for im in im_list:
im_dict[os.path.abspath(os.path.join(class_path, im))] = class_name
else:
for im in im_list:
im_dict[os.path.basename(im)] = class_name
with open(output_file_path, 'w', newline='') as csvfile:
fieldnames = ["file_name", "class_name"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key in im_dict:
writer.writerow({'file_name': key, 'class_name': im_dict[key]})
return output_file_path
def generate_image_tags(self, tags_dictionary : dict,
output_file_path : str = './images_tags.csv',
append_path : bool = True):
"""
Creates a CSV annotation file associating tags to images, as defined in the tags_dictionary.
The CSV file is saved in the current working directory.
Example of a dictionary: {'train': ['img1.jpg', 'img2.jpg'],'test': ['img3.jpg', 'img4.jpg'],'val': ['img5.jpg', 'img6.jpg']}
Example::
# Download and unzip this sample dataset: https://s-3.s3-eu-west-1.amazonaws.com/small_flowers.zip
import glob
import os
import random
im_list = [os.path.basename(i) for i in glob.glob(str('./small_flowers/images')+'/**/*.jpg', recursive=True)])
im_list = random.sample(im_list, len(im_list))
tags_dict = {'train' : im_list[0:121], 'test' : im_list[121:131], 'valid' : im_list[131:141]}
remo.generate_image_tags(tags_dict)
Args:
tags_dictionary: dictionary where each key is a tags and the value is a List of image filenames (or foder paths containing images) to which we want to assign the tags.
output_file_path: location and filename where to store the file. Default: './images_tags.csv'
append_path: if absolute path to images is required. Default: True
Returns:
output_file_path: string, path to the generated CSV tags file. Format: 'file_name', 'tag'
"""
split_dict = {}
if append_path:
for tag in tags_dictionary:
for _ in tags_dictionary[tag]:
if os.path.isdir(_):
im_list = os.listdir(_)
for im in im_list:
split_dict[im] = tag
else:
split_dict[_] = tag
else:
for tag in tags_dictionary:
for _ in tags_dictionary[tag]:
if os.path.isdir(_):
im_list = os.listdir(_)
for im in im_list:
split_dict[os.path.basename(im)] = tag
else:
split_dict[os.path.basename(_)] = tag
with open(output_file_path, 'w', newline='') as csvfile:
fieldnames = ["file_name", "tag"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for key in split_dict:
writer.writerow({'file_name': key, 'tag' : split_dict[key]})
return output_file_path | /remo_python-0.1.14-py3-none-any.whl/remo/sdk.py | 0.857097 | 0.322139 | sdk.py | pypi |
import csv
import os
import tempfile
from typing import List, TypeVar
from .domain.task import *
Annotation = TypeVar('Annotation')
def check_annotation_task(expected_task, actual_task):
if expected_task is not actual_task:
raise Exception(
"Expected annotation task '{}', but received annotation for '{}'".format(
expected_task, actual_task
)
)
class SimpleCSVBase:
task = None
headers = None
@staticmethod
def inline_classes(classes):
if isinstance(classes, list):
return ';'.join(classes)
if isinstance(classes, str):
return classes
return ''
def validate_annotation_task(self, annotations: List[Annotation]):
for annotation in annotations:
check_annotation_task(self.task, annotation.task)
def prepare_data(self, annotations: List[Annotation]) -> List[List[str]]:
self.validate_annotation_task(annotations)
return [self.headers, *self._csv_data(annotations)]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return []
class SimpleCSVForObjectDetection(SimpleCSVBase):
task = object_detection
headers = ["file_name", "class_name", "xmin", "ymin", "xmax", "ymax"]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [
[annotation.img_filename, self.inline_classes(annotation.classes), *annotation.coordinates]
for annotation in annotations
]
class SimpleCSVForInstanceSegmentation(SimpleCSVBase):
task = instance_segmentation
headers = ["file_name", "class_name", "coordinates"]
@staticmethod
def inline_coordinates(coordinates):
return '; '.join(map(str, coordinates))
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [
[
annotation.img_filename,
self.inline_classes(annotation.classes),
self.inline_coordinates(annotation.coordinates),
]
for annotation in annotations
]
class SimpleCSVForImageClassification(SimpleCSVBase):
task = image_classification
headers = ["file_name", "class_name"]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [[annotation.img_filename, self.inline_classes(annotation.classes)] for annotation in annotations]
csv_makers = {
SimpleCSVForObjectDetection.task: SimpleCSVForObjectDetection(),
SimpleCSVForInstanceSegmentation.task: SimpleCSVForInstanceSegmentation(),
SimpleCSVForImageClassification.task: SimpleCSVForImageClassification()
}
def prepare_annotations_for_upload(annotations: List[Annotation], annotation_task):
csv_maker = csv_makers.get(annotation_task)
if not csv_maker:
raise Exception(
"Annotation task '{}' not recognised. "
"Supported annotation tasks are 'instance_segmentation', 'object_detection' and "
"'image_classification'".format(annotation_task)
)
return csv_maker.prepare_data(annotations)
def create_tempfile(annotations: List[Annotation]) -> (str, List[str]):
fd, temp_path = tempfile.mkstemp(suffix='.csv')
annotation_task = annotations[0].task
prepared_data = prepare_annotations_for_upload(annotations, annotation_task)
# getting a list of classes. Skipping the first row as it contains the csv header
list_of_classes = [row[1] for row in prepared_data[1:]]
classes = set()
if isinstance(list_of_classes, list) and list_of_classes:
for item in list_of_classes:
if isinstance(item, list):
classes.union(item)
else:
classes.add(item)
list_of_classes = list(classes)
with os.fdopen(fd, 'w') as temp:
writer = csv.writer(temp)
writer.writerows(prepared_data)
return temp_path, list_of_classes
def parse_csv_obj_det(file_path) -> List[Annotation]:
"""
Args
file_path: path to annotations
Example:
# file_name,class_name,coordinates
# ILSVRC2012_val_00000003.JPEG,N01751748, 10 20 30 40
# ILSVRC2012_val_00000003.JPEG,N01751748, 10 20 30 40
Returns:
List[:class:`remo.Annotation`]
"""
annotations = []
with open(file_path, 'r') as f:
csv_file = csv.reader(f, delimiter=',')
for row in csv_file:
file_name, class_name, coordinates = row
# convert coordinates to list of integers
bbox = [int(val) for val in coordinates.split(' ')]
annotation = Annotation(img_filename=file_name, classes=class_name)
annotation.bbox = bbox
annotations.append(annotation)
return annotations | /remo_python-0.1.14-py3-none-any.whl/remo/annotation_utils.py | 0.639398 | 0.430387 | annotation_utils.py | pypi |
import os
from typing import List, TypeVar
from .annotation import Annotation
from .image import Image
from remo.annotation_utils import create_tempfile
AnnotationSet = TypeVar('AnnotationSet')
class Dataset:
"""
Remo dataset
Args:
id: dataset id
name: dataset name
quantity: number of images
"""
def __init__(self, id: int = None, name: str = None, quantity: int = 0, **kwargs):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.n_images = quantity
def __str__(self):
return "Dataset {id:2d} - {name:5s} - {n_images:,} images".format(
id=self.id, name="'{}'".format(self.name), n_images=self.n_images
)
def __repr__(self):
return self.__str__()
def info(self):
"""
Prints basic info about the dataset:
- Dataset name
- Dataset ID
- Number of images contained in the dataset
- Number of annotation sets contained in the dataset
"""
info = """Dataset name: {name}
Dataset ID: {id}
Images: {n_images}
Annotation Sets: {n_annotation_sets}""".format(
id=self.id, name=self.name, n_images=self.n_images, n_annotation_sets=len(self.annotation_sets())
)
print(info)
def add_data(
self,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
folder_id: int = None,
annotation_set_id: int = None,
class_encoding=None,
wait_for_complete=True,
) -> dict:
"""
Adds images and/or annotations to the dataset.
Use the parameters as follows:
- Use ``local files`` to link (rather than copy) images.
- Use ``paths_to_upload`` if you want to copy image files or archive files.
- Use ``urls`` to download from the web images, annotations or archives.
In terms of supported formats:
- Adding images: support for ``jpg``, ``jpeg``, ``png``, ``tif``
- Adding annotations: to add annotations, you need to specify the annotation task and make sure the specific file format is one of those supported. See documentation here: https://remo.ai/docs/annotation-formats/
- Adding archive files: support for ``zip``, ``tar``, ``gzip``
Example::
! wget 'https://s-3.s3-eu-west-1.amazonaws.com/open-images.zip'
! unzip open-images.zip
urls = ['https://s-3.s3-eu-west-1.amazonaws.com/open-images.zip']
my_dataset = remo.create_dataset(name = 'D1')
my_dataset.add_data(local_files=['./open-images'], annotation_task = 'Object detection')
Args:
dataset_id: id of the dataset to add data to
local_files: list of files or directories containing annotations and image files
Remo will create smaller copies of your images for quick previews but it will point at the original files to show original resolutions images.
Folders will be recursively scanned for image files.
paths_to_upload: list of files or directories containing images, annotations and archives.
These files will be copied inside .remo folder.
Folders will be recursively scanned for image files.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: annotation tasks tell remo how to parse annotations. See also: :class:`remo.task`.
folder_id: specifies target virtual folder in the remo dataset. If None, it adds to the root level.
annotation_set_id: specifies target annotation set in the dataset. If None, it adds to the default annotation set.
class_encoding: specifies how to convert labels in annotation files to readable labels. If None, Remo will try to interpret the encoding automatically - which for standard words, means they will be read as they are.
See also: :class:`remo.class_encodings`.
wait_for_complete: if True, the function waits for upload data to complete
Returns:
Dictionary with results for linking files, upload files and upload urls::
{
'files_link_result': ...,
'files_upload_result': ...,
'urls_upload_result': ...
}
"""
if annotation_set_id:
annotation_set = self.get_annotation_set(annotation_set_id)
if not annotation_set:
raise Exception('Annotation set ID = {} not found'.format(annotation_set_id))
return self.sdk.add_data_to_dataset(
self.id,
local_files=local_files,
paths_to_upload=paths_to_upload,
urls=urls,
annotation_task=annotation_task,
folder_id=folder_id,
annotation_set_id=annotation_set_id,
class_encoding=class_encoding,
wait_for_complete=wait_for_complete,
)
def fetch(self):
"""
Updates dataset information from server
"""
dataset = self.sdk.get_dataset(self.id)
self.__dict__.update(dataset.__dict__)
def annotation_sets(self) -> List[AnnotationSet]:
"""
Lists the annotation sets within the dataset.
Returns:
List[:class:`remo.AnnotationSet`]
"""
return self.sdk.list_annotation_sets(self.id)
def add_annotations(
self,
annotations: List[Annotation],
annotation_set_id: int = None,
create_new_annotation_set: bool = False,
):
"""
Fast upload of annotations to the Dataset.
If annotation_set_id is not provided, annotations will be added to:
- the only annotation set present, if the Dataset has exactly one Annotation Set and the tasks match
- a new annotation set, if the Dataset doesn't have any Annotation Sets or if create_new_annotation_set = True
Otherwise, annotations will be added to the Annotation Set specified by annotation_set_id.
Example::
urls = ['https://remo-scripts.s3-eu-west-1.amazonaws.com/open_images_sample_dataset.zip']
my_dataset = remo.create_dataset(name = 'D1', urls = urls)
image_name = '000a1249af2bc5f0.jpg'
annotations = []
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Human hand'
annotation.bbox=[227, 284, 678, 674]
annotations.append(annotation)
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Fashion accessory'
annotation.bbox=[496, 322, 544,370]
annotations.append(annotation)
my_dataset.add_annotations(annotations)
Args:
annotations: list of Annotation objects
annotation_set_id: annotation set id
create_new_annotation_set: if True, a new annotation set will be created
"""
if annotation_set_id and create_new_annotation_set:
raise Exception(
"You passed an annotation set but also set create_new_annotation_set = True. You can't have both."
)
if annotation_set_id:
annotation_set = self.get_annotation_set(annotation_set_id)
else:
annotation_sets = self.annotation_sets()
if len(annotation_sets) > 0:
annotation_set = self.get_annotation_set()
annotation_set_id = annotation_set.id
temp_path, list_of_classes = create_tempfile(annotations)
if create_new_annotation_set or (not annotation_set_id):
n_annotation_sets = len(self.annotation_sets())
self.create_annotation_set(
annotation_task=annotations[0].task,
name='my_ann_set_{}'.format(n_annotation_sets + 1),
classes=list_of_classes,
paths_to_files=temp_path,
)
else:
self.add_data(
annotation_task=annotation_set.task,
annotation_set_id=annotation_set.id,
paths_to_upload=[temp_path],
)
# TODO ALR: removing the temp_path doesn't work on Windows, hence the try except as a temp fix
try:
os.remove(temp_path)
except:
pass
def _export_annotations(
self,
annotation_set_id: int = None,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags: bool = True,
filter_by_tags: list = None
) -> bytes:
"""
Export annotations in Binary format, for a given annotation set.
To export to file, use export_annotations_to_file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags.
Args:
annotation_set_id: annotation set id, by default will be used default_annotation_set
annotation_format: can be one of ['json', 'coco', 'csv']. Default: 'json'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default: 'pixel'
append_path: if True, it appends the image path to the filename, otherwise it uses just the filename. Default: True
export_tags: if True, it also exports tags to a separate CSV file. Default: True
filter_by_tags: allows to export annotations only for images containing certain image tags. It can be of type List[str] or str. Default: None
Returns:
annotation file content
"""
annotation_set = self.get_annotation_set(annotation_set_id)
return annotation_set._export_annotations(
annotation_format=annotation_format,
export_coordinates=export_coordinates,
append_path=append_path,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
def export_annotations_to_file(
self,
output_file: str,
annotation_set_id: int = None,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags: bool = True,
filter_by_tags: list = None
):
"""
Exports annotations for a given annotation set in a given format and saves it to a file.
If export_tags = True, output_file needs to be a .zip file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags.
Example::
# Download and unzip this sample dataset: https://s-3.s3-eu-west-1.amazonaws.com/dogs_dataset.json
dogs_dataset = remo.create_dataset(name = 'dogs_dataset',
local_files = ['dogs_dataset.json'],
annotation_task = 'Instance Segmentation')
dogs_dataset.export_annotations_to_file(output_file = './dogs_dataset_train.json',
annotation_format = 'coco',
append_path = False,
export_tags = False,
filter_by_tags = 'train')
Args:
output_file: output file to save. Includes file extension and can include file path. If export_tags = True, output_file needs to be a .zip file
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv']. Default: 'json'
append_path: if True, it appends the image path to the filename, otherwise it uses just the filename. Default: True
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default: 'pixel'
export_tags: if True, it also exports tags to a separate CSV file. Default: True
filter_by_tags: allows to export annotations only for images containing certain image tags. It can be of type List[str] or str. Default: None
"""
annotation_set = self.get_annotation_set(annotation_set_id)
self.sdk.export_annotations_to_file(
output_file,
annotation_set.id,
annotation_format=annotation_format,
append_path=append_path,
export_coordinates=export_coordinates,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
def list_image_annotations(self, annotation_set_id: int, image_id: int) -> List[Annotation]:
"""
Retrieves annotations for a given image
Args:
annotation_set_id: annotation set id
image_id: image id
Returns:
List[:class:`remo.Annotation`]
"""
return self.sdk.list_image_annotations(self.id, annotation_set_id, image_id)
def create_annotation_set(
self,
annotation_task: str,
name: str,
classes: List[str] = [],
paths_to_files: List[str] = None,
) -> AnnotationSet:
"""
Creates a new annotation set within the dataset
If paths_to_files is provided, it populates it with the given annotations.
The first created annotation set for the given dataset, is considered the default one.
Args:
annotation_task: annotation task. See also: :class:`remo.task`
name: annotation set name
classes: list of classes to prepopulate the annotation set. Example: ['Cat', 'Dog']. Default is no classes
paths_to_files: list of paths to files or directories containing files to be uploaded. Useful to upload annotatations while creating an annotation set. Default: None
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.sdk.create_annotation_set(annotation_task, self.id, name, classes)
if annotation_set and paths_to_files:
self.add_data(
paths_to_upload=paths_to_files,
annotation_task=annotation_task,
annotation_set_id=annotation_set.id,
)
annotation_set = self.sdk.get_annotation_set(annotation_set.id)
return annotation_set
def get_annotation_set(self, annotation_set_id: int = None) -> AnnotationSet:
"""
Retrieves annotation set with given id.
If no annotation set id is passed:
- if the dataset has only one annotation set, it returns that one
- if the dataset has multiple annotation sets, it raises an error
Args:
annotation_set_id: annotation set id
Returns:
:class:`remo.AnnotationSet`
"""
if not annotation_set_id:
return self.default_annotation_set()
annotation_set = self.sdk.get_annotation_set(annotation_set_id)
if annotation_set and annotation_set.dataset_id == self.id:
return annotation_set
else:
raise Exception(
'Annotation set with ID = {} is not part of dataset {}. You can check the list of annotation sets in your dataset using dataset.annotation_sets()'.format(
annotation_set_id, self.__str__()
)
)
def default_annotation_set(self) -> AnnotationSet:
"""
If the dataset has only one annotation set, it returns that annotation set.
Otherwise, it raises an exception.
"""
annotation_sets = self.annotation_sets()
if len(annotation_sets) > 1:
raise Exception(
'Define which annotation set you want to use. '
+ self.__str__()
+ ' has '
+ str(len(annotation_sets))
+ ' annotation sets. You can see them with `my_dataset.annotation_sets()`'
)
elif len(annotation_sets) == 0:
raise Exception(
self.__str__()
+ " doesn't have any annotations. You can check the list of annotation sets with `my_dataset.annotation_sets()`"
)
return annotation_sets[0]
def get_annotation_statistics(self, annotation_set_id: int = None):
"""
Retrieves annotation statistics of a given annotation set. If annotation_set_id is not provided, it retrieves the statistics of all the available annotation sets within the dataset.
Returns:
list of dictionaries with fields annotation set id, name, num of images, num of classes, num of objects, top3 classes, release and update dates
"""
# TODO: ALR - Improve output formatting
statistics = []
for ann_set in self.annotation_sets():
if (annotation_set_id is None) or (annotation_set_id == ann_set.id):
stat = {
'AnnotationSet ID': ann_set.id,
'AnnotationSet name': ann_set.name,
'n_images': ann_set.total_images,
'n_classes': ann_set.total_classes,
'n_objects': ann_set.total_annotation_objects,
'top_3_classes': ann_set.top3_classes,
'creation_date': ann_set.released_at,
'last_modified_date': ann_set.updated_at,
}
statistics.append(stat)
return statistics
def classes(self, annotation_set_id: int = None) -> List[str]:
"""
Lists all the classes within the dataset
Args:
annotation_set_id: annotation set id. If not specified the default annotation set is considered.
Returns:
List of classes
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.classes()
def annotations(self, annotation_set_id: int = None) -> List[Annotation]:
"""
Returns all annotations for a given annotation set.
If no annotation set is specified, the default annotation set will be used
Args:
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return self.sdk.list_annotations(self.id, annotation_set.id)
print('ERROR: annotation set was not defined.')
def images(self, limit: int = None, offset: int = None) -> List[Image]:
"""
Lists images within the dataset
Args:
limit: the number of images to be listed
offset: specifies offset
Returns:
List[:class:`remo.Image`]
Example::
my_dataset.images()
"""
return self.sdk.list_dataset_images(self.id, limit=limit, offset=offset)
def image(self, img_filename=None, img_id=None) -> Image:
"""
Returns the :class:`remo.Image` with matching img_filename or img_id.
Pass either img_filename or img_id.
Args:
img_filename: filename of the Image to retrieve
img_id: id of the the Image to retrieve
Returns:
:class:`remo.Image`
"""
# TODO ALR: do we need to raise an error if no image is found?
# TODO ALR: we have a sdk.get_image by img_id. Should we implement get_image by img_name in the server for faster processing?
if (img_filename) and (img_id):
raise Exception("You passed both img_filename and img_id. Pass only one of the two")
if img_filename:
list_of_images = self.images()
for i_image in list_of_images:
if i_image.name == img_filename:
return i_image
elif img_id:
return self.sdk.get_image(img_id)
def delete(self):
"""
Deletes dataset
"""
self.sdk.delete_dataset(self.id)
def search_images(self, annotation_sets_id: int = None,
classes: str = None,
classes_not: str = None,
tags: str = None,
tags_not: str = None,
image_name_contains: str = None,
limit: int = None):
"""
Search images by filename, classes and tags
Examples::
my_dataset.search_images(classes = ["dog","person"])
my_dataset.search_images(image_name_contains = "pic2")
Args:
annotation_sets_id: the annotation sets ID to search into (can be multiple, e.g. [1, 2]). No need to specify it if the dataset has only one annotation set
classes: string or list of strings - search for images which have objects of all the given classes
classes_not: string or list of strings - search for images excluding those that have objects of all the given classes
tags: string or list of strings - search for images having all the given tags
tags_not: string or list of strings - search for images excluding those that have all the given tags
image_name_contains: search for images whose name contains the given string
limit: limits number of search results (by default returns all results)
Returns:
List[:class:`remo.AnnotatedImage`]
"""
return self.sdk.search_images(dataset_id = self.id,
annotation_sets_id = annotation_sets_id,
classes = classes,
classes_not = classes_not,
tags = tags,
tags_not = tags_not,
image_name_contains = image_name_contains,
limit = limit)
def view(self):
"""
Opens browser on dataset page
"""
# print('self.sdk', self.sdk, type(self.sdk))
return self.sdk.view_dataset(self.id)
def view_annotate(self, annotation_set_id: int = None):
"""
Opens browser on the annotation tool for the given annotation set
Args:
annotation_set_id: annotation set id. If the dataset has only one annotation set, there is no need to specify the annotation_set_id.
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.view()
else:
print('ERROR: annotation set was not defined.')
def view_annotation_stats(self, annotation_set_id: int = None):
"""
Opens browser on annotation set insights page
Args:
annotation_set_id: annotation set id. If the dataset has only one annotation set, there is no need to specify the annotation_set_id.
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.view_stats()
else:
print('ERROR: annotation set was not defined.')
def view_image(self, image_id: int):
"""
Opens browser on image view page for the given image
Args:
image_id: image id
"""
return self.sdk.view_image(image_id, self.id) | /remo_python-0.1.14-py3-none-any.whl/remo/domain/dataset.py | 0.829734 | 0.340225 | dataset.py | pypi |
from typing import List, TypeVar
from .annotation import Annotation
from remo.annotation_utils import create_tempfile
Dataset = TypeVar('Dataset')
class AnnotationSet:
"""
Remo annotation set
Args:
id: annotation set id
name: annotation set name
task: annotation task. See also: :class:`remo.task`
dataset_id: dataset id
total_classes: total annotation classes
updated_at: date, when annotation set was last updated
released_at: annotation set release date
total_images: total number of images
top3_classes: top 3 classes in annotation set
total_annotation_objects: total number of annotation objects in annotation set
"""
def __init__(
self,
id: int = None,
name: str = None,
task: str = None,
dataset_id: int = None,
total_classes=None,
updated_at=None,
released_at=None,
total_images: int = None,
top3_classes=None,
total_annotation_objects: int = None,
**kwargs
):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.task = task
self.dataset_id = dataset_id
self.total_classes = total_classes
self.updated_at = updated_at
self.released_at = released_at
self.total_images = total_images
self.top3_classes = top3_classes
self.total_annotation_objects = total_annotation_objects
def __str__(self):
return "Annotation set {id} - '{name}', task: {task}, #classes: {total_classes}".format(
id=self.id, name=self.name, task=self.task, total_classes=self.total_classes
)
def __repr__(self):
return self.__str__()
def add_annotations(self, annotations: List[Annotation]):
"""
Upload of annotations to the annotation set.
Example::
urls = ['https://remo-scripts.s3-eu-west-1.amazonaws.com/open_images_sample_dataset.zip']
ds = remo.create_dataset(name = 'D1', urls = urls)
ann_set = ds.create_annotation_set(annotation_task = 'Object Detection', name = 'test_set')
image_name = '000a1249af2bc5f0.jpg'
annotations = []
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Human hand'
annotation.bbox=[227, 284, 678, 674]
annotations.append(annotation)
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Fashion accessory'
annotation.bbox=[496, 322, 544,370]
annotations.append(annotation)
ann_set.add_annotations(annotations)
Args:
annotations: list of Annotation objects
"""
temp_path, _ = create_tempfile(annotations)
self.sdk.add_data_to_dataset(
dataset_id = self.dataset_id,
paths_to_upload=[temp_path],
annotation_task=self.task,
annotation_set_id=self.id
)
def add_image_annotation(self, image_id: int, annotation: Annotation):
"""
Adds new annotation to the image
Args:
image_id: image id
annotation: annotation data
"""
self.sdk.add_annotations_to_image(self.id, image_id, annotation)
def _export_annotations(
self,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags: bool = True,
filter_by_tags: list = None
):
"""
Exports annotations for the annotation set in Binary format.
To export to file, use export_annotations_to_file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags.
Args:
annotation_format: choose format from this list ['json', 'coco', 'csv']
append_path: if True, appends the path to the filename (e.g. local path). Default: True
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default='pixel'
export_tags: exports the tags to a CSV file, it can be one of [True, False]. Default=True
filter_by_tags: allows to export annotations only for images containing certain image tags. It can be of type List[str] or str. Default: None
Returns:
annotation file content
"""
return self.sdk._export_annotations(
self.id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
append_path=append_path,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
def export_annotations_to_file(
self,
output_file: str,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
append_path: bool = True,
export_tags: bool = True,
filter_by_tags: list = None
):
"""
Exports annotations in a given format and saves it to a file.
If export_tags = True, output_file needs to be a .zip file.
It offers some convenient export options, including:
- Methods to append the full_path to image filenames,
- Choose between coordinates in pixels or percentages,
- Export tags to a separate file
- Export annotations filtered by user-determined tags.
Args:
output_file: output file to save. Includes file extension and can include file path. If export_tags = True, output_file needs to be a .zip file
annotation_format: can be one of ['json', 'coco', 'csv']. Default='json'
append_path: if True, appends the path to the filename (e.g. local path). Default: True
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent']. Default='pixel'
export_tags: exports the tags to a CSV file, it can be one of [True, False. Default=True
filter_by_tags: allows to filter results by tags, can be list or str
"""
self.sdk.export_annotations_to_file(
output_file,
self.id,
annotation_format=annotation_format,
append_path=append_path,
export_coordinates=export_coordinates,
export_tags=export_tags,
filter_by_tags=filter_by_tags
)
def classes(self) -> List[str]:
"""
List classes within the annotation set
Returns:
List of classes
"""
return self.sdk.list_annotation_set_classes(self.id)
def view(self):
"""
Opens browser on the annotation tool page for this annotation set
"""
return self.sdk.view_annotation_tool(self.id)
def view_stats(self):
"""
Opens browser on annotation set insights page
"""
return self.sdk.view_annotation_stats(self.id) | /remo_python-0.1.14-py3-none-any.whl/remo/domain/annotation_set.py | 0.897894 | 0.238628 | annotation_set.py | pypi |
from typing import List
from .task import *
class Bbox:
"""
Represents coordinates of a bounding box annotation. Used in object detection.
Args:
xmin: X min
ymin: Y min
xmax: X max
ymax: Y max
"""
task = object_detection
type = 'Bounding Box'
def __init__(self, xmin: int, ymin: int, xmax: int, ymax: int):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.coordinates = [xmin, ymin, xmax, ymax]
class Segment:
"""
Represents coordinates of a segment annotation. Used in instance segmentation.
Args:
points: list of segment coordinates ``[x0, y0, x1, y1, ..., xN, yN]``
"""
task = instance_segmentation
type = 'Polygon'
def __init__(self, points: List[int]):
self.points = [{'x': x, 'y': y} for x, y in zip(points[::2], points[1::2])]
self.coordinates = points
class Annotation:
"""
Represents a single annotation object. This can be:
- list of classes only: to assign classes to an image for image classification tasks
- bounding box and list of classes: to create a bounding box annotation object and assign it a list of classes
- segment and list of classes: to create a polygon annotation object and assign it a list of classes
Args:
img_filename: file name of the image the annotation refers to
classes: class or list of classes to add to the whole image or the object
object: the specific annotation object to add
Examples:
to create a bounding box:
annotation = Annotation('image.png', 'Dog')
annotation.bbox = [1, 23, 3, 2]
to create a polygon:
annotation = Annotation('image.png', 'Dog')
annotation.segment = [1, 23, 3, 2, 1, 2, 1, 2]
"""
def __init__(self, img_filename: str = None, classes=None, object=None):
if object and (
not isinstance(object, Bbox) and not isinstance(object, Segment)
):
raise Exception('Expected object type Annotation.Bbox or Annotation.Segment')
self.img_filename = img_filename
self.classes = classes
self.object = object
def __str__(self):
return "Annotation: {classes} (type:{ann_type}, file:{filename})".format(
classes=self.classes, ann_type=self.type, filename=self.img_filename
)
def __repr__(self):
return self.__str__()
@property
def classes(self):
if isinstance(self.__classes, list):
return self.__classes
elif isinstance(self.__classes, str):
self.__classes = [self.classes]
return self.__classes
return []
@classes.setter
def classes(self, classes):
if isinstance(classes, list):
self.__classes = classes
elif isinstance(classes, str):
self.__classes = [classes]
else:
self.__classes = []
@property
def type(self):
if not self.object:
return None
return self.object.type
@property
def coordinates(self):
if not self.object:
return None
return self.object.coordinates
@property
def task(self):
if not self.object:
return image_classification
return self.object.task
@property
def bbox(self):
if isinstance(self.object, Bbox):
return self.object
return None
@bbox.setter
def bbox(self, values: List[int]):
if len(values) != 4:
raise Exception('Bounding box expects 4 values: xmin, ymin, xmax, ymax')
self.object = Bbox(*values)
@property
def segment(self):
if isinstance(self.object, Segment):
return self.object
return None
@segment.setter
def segment(self, points: List[int]):
if not points:
raise Exception('Segment coordinates cannot be an empty list.')
if len(points) % 2 == 1:
raise Exception(
'Segment coordinates need to be an even number of elements indicating (x, y) coordinates of each point.'
)
self.object = Segment(points) | /remo_python-0.1.14-py3-none-any.whl/remo/domain/annotation.py | 0.968812 | 0.62601 | annotation.py | pypi |
import os
import shutil
from typing import TypeVar, List
Annotation = TypeVar('Annotation')
AnnotationSet = TypeVar('AnnotationSet')
class Image:
"""
Remo image
Args:
id: image id
name: image file name
dataset_id: dataset id
path: local path, if available
url: image remo internal URL
size: file size in bytes
width: image width in pixels
height: image height in pixels
upload_date: upload date
"""
__fields = ['id', 'name', 'dataset_id', 'path', 'url', 'size', 'width', 'height', 'upload_date']
def __init__(
self,
id: int = None,
name: str = None,
dataset_id: int = None,
path: str = None,
url: str = None,
size: int = None,
width: int = None,
height: int = None,
upload_date: str = None,
**kwargs
):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.dataset_id = dataset_id
self.path = path
self.url = url
self.size = size
self.width = width
self.height = height
self.upload_date = upload_date
def __str__(self):
return 'Image: {} - {}'.format(self.id, self.name)
def __repr__(self):
return self.__str__()
def fetch_details(self):
"""
Fetch the latest image details from the database and updating all fields
"""
img = self.sdk.get_image(self.id)
self.update_fields(img)
def update_fields(self, img):
"""
Update all fields of the image based on the passed Image instamce
"""
for field in self.__fields:
current_value = getattr(self, field)
new_value = getattr(img, field, current_value)
setattr(self, field, new_value)
def get_content(self) -> bytes:
"""
Retrieves image file content
Returns:
image binary data
"""
if not self.url:
raise Exception("ERROR: image url is not set")
return self.sdk.get_image_content(self.url)
def save_to(self, dir_path: str):
"""
Save image to giving directory
Args:
dir_path: path to the directory
"""
dir_path = self.sdk._resolve_path(dir_path)
os.makedirs(dir_path, exist_ok=True)
file_path = os.path.join(dir_path, self.name)
if self.path:
shutil.copy(self.path, file_path)
return
img_content = self.get_content()
if not img_content:
return
self.sdk._save_to_file(img_content, file_path)
def list_annotation_sets(self) -> List[AnnotationSet]:
"""
Lists annotations sets
Returns:
List[:class:`remo.AnnotationSet`]
"""
return self.sdk.list_annotation_sets(self.dataset_id)
# TODO: fix it
def list_annotations(self, annotation_set_id: int) -> List[Annotation]:
"""
Retrieves image annotations from giving annotation set
Args:
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
return self.sdk.list_image_annotations(self.dataset_id, annotation_set_id, self.id)
def get_annotation_set(self, annotation_set_id: int = None) -> AnnotationSet:
ds = self.sdk.get_dataset(self.dataset_id)
return ds.get_annotation_set(annotation_set_id)
def add_annotation(self, annotation: Annotation, annotation_set_id: int = None):
"""
Adds new annotation to the image
Args:
annotation_set_id: annotation set id
annotation: annotation data
"""
if not annotation_set_id:
annotation_set = self.get_annotation_set()
annotation_set_id = annotation_set.id
if annotation_set_id:
self.sdk.add_annotations_to_image(annotation_set_id, self.id, annotation)
else:
print('ERROR: annotation set not defined')
def view(self):
"""
Opens browser on image view for the image
"""
return self.sdk.view_image(self.id, self.dataset_id)
def view_annotate(self, annotation_set_id: int):
"""
Opens browser on the annotation tool for giving annotation set
Args:
annotation_set_id: annotation set id
"""
return self.sdk.view_annotate_image(annotation_set_id, self.id)
class AnnotatedImage(Image):
"""Image with raw json annotations"""
def __init__(self, image: Image, annotations: list):
super().__init__()
self.update_fields(image)
self.annotations = annotations | /remo_python-0.1.14-py3-none-any.whl/remo/domain/image.py | 0.740831 | 0.23741 | image.py | pypi |
import os
import time
from typing import List
from .domain import Image, Dataset, AnnotationSet, class_encodings, Annotation
from .api import API
from .endpoints import frontend
from .viewer import factory
class SDK:
"""
Creates sdk object, and checks connection to server
Args:
server: server host name, e.g. ``http://localhost:8123/``
email: user credentials
password: user credentials
viewer: allows to choose between browser, electron and jupyter viewer.
To be able change viewer, you can use :func:`set_viewer` function. See example.
Example::
import remo
remo.set_viewer('browser')
"""
def __init__(self, server: str, email: str, password: str, viewer: str = 'browser'):
self.api = API(server, email, password)
self.viewer = None
self.set_viewer(viewer)
def set_viewer(self, viewer: str):
"""
Allows to choose one of available viewers
Args:
viewer: choose between 'browser', 'electron' and 'jupyter' viewer
"""
self.viewer = factory(viewer)
def create_dataset(
self,
name: str,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
class_encoding=None,
wait_for_complete=True
) -> Dataset:
"""
Creates a new dataset in Remo and optionally populate it with images and annotations.
To add annotations, you need to specify an annotation task.
Args:
name: name of the dataset.
local_files: list of files or directories.
These files will be linked.
Folders will be recursively scanned for image files: ``jpg``, ``png``, ``tif``.
paths_to_upload: list of files or directories.
These files will be copied. Supported files: images, annotations and archives.
- image files: ``jpg``, ``png``, ``tif``.
- annotation files: ``json``, ``xml``, ``csv``.
- archive files: ``zip``, ``tar``, ``gzip``.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: specifies annotation task. See also: :class:`remo.task`.
class_encoding: specifies how to convert class labels in annotation files to classes.
See also: :class:`remo.class_encodings`.
wait_for_complete: blocks function until upload data completes
Returns:
:class:`remo.Dataset`
"""
json_data = self.api.create_dataset(name)
ds = Dataset(**json_data)
ds.add_data(
local_files, paths_to_upload, urls, annotation_task=annotation_task, class_encoding=class_encoding,
wait_for_complete=wait_for_complete
)
ds.fetch()
return ds
def add_data_to_dataset(
self,
dataset_id: int,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
folder_id: int = None,
annotation_set_id: int = None,
class_encoding=None,
wait_for_complete=True
) -> dict:
"""
Adds images and/or annotations to an existing dataset.
Use ``local files`` to link (rather than copy) images. Use ``paths_to_upload`` if you want to copy image files or archive files. Use ``urls`` to download from the web images, annotations or archives.
Adding images: support for ``jpg``,``jpeg``, ``png``, ``tif``
Adding annotations: to add annotations, you need to specify the annotation task and make sure the specific file format is one of those supported. See documentation here: https://remo.ai/docs/annotation-formats/
Adding archive files: support for ``zip``, ``tar``, ``gzip``
Args:
dataset_id: id of the dataset to add data to
local_files: list of files or directories containing annotations and image files
Remo will create smaller copies of your images for quick previews but it will point at the original files to show original resolutions images.
Folders will be recursively scanned for image files.
paths_to_upload: list of files or directories containing images, annotations and archives.
These files will be copied inside .remo folder.
Folders will be recursively scanned for image files.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: annotation tasks tell remo how to parse annotations. See also: :class:`remo.task`.
folder_id: specifies target virtual folder in the remo dataset. If None, it adds to the root level.
annotation_set_id: specifies target annotation set in the dataset. If None: if no annotation set exists, one will be automatically created. If exactly one annotation set already exists, it will add annotations to that annotation set, provided the task matches.
class_encoding: specifies how to convert labels in annotation files to readable labels. If None, Remo will try to interpret the encoding automatically - which for standard words, means they will be read as they are.
See also: :class:`remo.class_encodings`.
wait_for_complete: blocks function until upload data completes
Returns:
Dictionary with results for linking files, upload files and upload urls::
{
'files_link_result': ...,
'files_upload_result': ...,
'urls_upload_result': ...
}
"""
kwargs = {
'annotation_task': annotation_task,
'folder_id': folder_id,
'annotation_set_id': annotation_set_id,
}
# logic to deal with the case where we are trying to upload annotations without specifying the annotation set id
if annotation_task and (not annotation_set_id):
annotation_sets = self.list_annotation_sets(dataset_id)
if len(annotation_sets) > 1:
raise Exception(
'Define which annotation set you want to use. Dataset {} has {} annotation sets. '
'You can see them with my_dataset.annotation_sets()'.format(dataset_id, len(annotation_sets))
)
elif len(annotation_sets) == 1:
kwargs['annotation_set_id'] = annotation_sets[0].id
# check values
if local_files:
self._raise_value_error(local_files, 'local_files', list, 'list of paths')
if paths_to_upload:
self._raise_value_error(paths_to_upload, 'paths_to_upload', list, 'list of paths')
if urls:
self._raise_value_error(urls, 'urls', list, 'list of URLs')
session_id = self.api.create_new_upload_session(dataset_id)
kwargs['session_id'] = session_id
if local_files:
encoding = class_encodings.for_linking(class_encoding)
self.api.upload_local_files(
dataset_id, local_files, class_encoding=encoding, **kwargs
)
if paths_to_upload:
encoding = class_encodings.for_upload(class_encoding)
self.api.bulk_upload_files(
dataset_id, paths_to_upload, class_encoding=encoding, **kwargs
)
if urls:
encoding = class_encodings.for_linking(class_encoding)
self.api.upload_urls(
dataset_id, urls, class_encoding=encoding, **kwargs
)
self.api.complete_upload_session(session_id)
if not wait_for_complete:
return {'session_id': session_id}
return self._report_processing_data_progress(session_id)
def _report_processing_data_progress(self, session_id: str):
"""
Reports progress for upload session
Args:
session_id: upload session id
Returns:
session status
"""
def format_msg(msg, *args, max_length=100):
msg = msg.format(*args)
return '{} {}'.format(msg, ' ' * (max_length - len(msg)))
last_msg = ''
while True:
session = self.api.get_upload_session_status(session_id)
status = session.get('status')
substatus = session.get('substatus')
uploaded = session['uploaded']['total']
if status == 'not complete':
msg = format_msg('Acquiring data - {} files, {}', uploaded['items'], uploaded['human_size'])
if msg != last_msg:
print(msg, end='\r')
last_msg = msg
elif status == 'pending':
msg = format_msg('Acquiring data - completed')
if msg != last_msg:
print(msg)
last_msg = msg
elif status == 'in progress':
msg = 'Processing data'
if substatus:
msg = '{} - {}'.format(msg, substatus)
msg = format_msg(msg)
if msg != last_msg:
print(msg, end='\r')
last_msg = msg
elif status in ('done', 'failed'):
print(format_msg('Processing data - completed'))
msg = 'Data upload completed' if status == 'done' else 'Data upload completed with some errors:'
print(msg)
if status == 'failed':
errors = session.get('errors', [])
for err in errors:
msg = err['error'] if 'value' not in err else '{}: {}'.format(err['value'], err['error'])
print(msg)
errors = session['images'].get('errors', [])
for err in errors:
filename, errs = err['filename'], err['errors']
msg = errs[0] if len(errs) == 1 else '\n * ' + '\n * '.join(errs)
msg = '{}: {}'.format(filename, msg)
print(msg)
errors = session['annotations'].get('errors', [])
for err in errors:
filename, errs = err['filename'], err['errors']
msg = errs[0] if len(errs) == 1 else '\n * ' + '\n * '.join(errs)
msg = '{}: {}'.format(filename, msg)
print(msg)
return session
time.sleep(1)
@staticmethod
def _raise_value_error(value, value_name, expected_type, expected_description):
if not isinstance(value, expected_type):
raise ValueError(
'Parameter "{}" should be a {}, but instead is a {}.'.format(
value_name, expected_description, type(value)
)
)
def list_datasets(self) -> List[Dataset]:
"""
Lists the available datasets
Returns:
List[:class:`remo.Dataset`]
"""
json_data = self.api.list_datasets()
return [Dataset(**ds_item) for ds_item in json_data.get('results', [])]
def get_dataset(self, dataset_id: int) -> Dataset:
"""
Retrieves a dataset with given dataset id.
Args:
dataset_id: dataset id
Returns:
:class:`remo.Dataset`
"""
json_data = self.api.get_dataset(dataset_id)
if json_data.get('detail') == "Not found.":
raise Exception(
"Dataset ID {} not found. "
"You can check your existing datasets with `remo.list_datasets()`".format(dataset_id)
)
return Dataset(**json_data)
def delete_dataset(self, dataset_id: int):
"""
Deletes dataset
Args:
dataset_id: dataset id
"""
self.api.delete_dataset(dataset_id)
def list_annotation_sets(self, dataset_id: int) -> List[AnnotationSet]:
"""
Returns a list of AnnotationSet containing all the AnnotationSets of a given dataset
Args:
dataset_id: dataset id
Returns:
List[:class:`remo.AnnotationSet`]
"""
result = self.api.list_annotation_sets(dataset_id)
return [
AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
updated_at=annotation_set['updated_at'],
task=annotation_set['task']['name'],
dataset_id=dataset_id,
top3_classes=annotation_set['statistics']['top3_classes'],
total_images=annotation_set['statistics']['annotated_images_count'],
total_classes=annotation_set['statistics']['total_classes'],
total_annotation_objects=annotation_set['statistics']['total_annotation_objects'],
)
for annotation_set in result.get('results', [])
]
def get_annotation_set(self, annotation_set_id: int) -> AnnotationSet:
"""
Retrieves annotation set
Args:
annotation_set_id: annotation set id
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.api.get_annotation_set(annotation_set_id)
if 'detail' in annotation_set:
raise Exception(
'Annotation set with ID = {} not found. '
'You can check the list of annotation sets in your dataset using dataset.annotation_sets()'.format(annotation_set_id)
)
return AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
updated_at=annotation_set['updated_at'],
task=annotation_set['task']['name'],
dataset_id=annotation_set['dataset']['id'],
total_classes=len(annotation_set['classes']),
)
def export_annotations(
self,
annotation_set_id: int,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
full_path: str = 'true',
) -> bytes:
"""
Exports annotations in given format
Args:
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv'], default='json'
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
Returns:
annotation file content
"""
return self.api.export_annotations(
annotation_set_id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
full_path=full_path,
)
def export_annotations_to_file(
self,
output_file: str,
annotation_set_id: int,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
full_path: str = 'true',
):
"""
Exports annotations in given format
Args:
output_file: output file to save
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv'], default='json'
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
"""
content = self.export_annotations(
annotation_set_id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
full_path=full_path,
)
self._save_to_file(content, output_file)
def _save_to_file(self, content: bytes, output_file: str):
output_file = self._resolve_path(output_file)
dir_path = os.path.dirname(output_file)
os.makedirs(dir_path, exist_ok=True)
with open(output_file, 'wb') as out_file:
out_file.write(content)
@staticmethod
def _resolve_path(path: str):
if path.startswith('~'):
path = os.path.expanduser(path)
return os.path.realpath(os.path.abspath(path))
def get_annotation_info(self, dataset_id: int, annotation_set_id: int, image_id: int) -> list:
"""
Returns current annotations for the image
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
image_id: image id
Returns:
annotations info - list of annotation objects or classes
"""
resp = self.api.get_annotation_info(dataset_id, annotation_set_id, image_id)
return resp.get('annotation_info', [])
def list_image_annotations(
self, dataset_id: int, annotation_set_id: int, image_id: int
) -> List[Annotation]:
"""
Returns annotations for a given image
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
image_id: image id
Returns:
List[:class:`remo.Annotation`]
"""
annotation_items = self.get_annotation_info(dataset_id, annotation_set_id, image_id)
img = self.get_image(image_id)
if not img:
return None
annotations = []
for item in annotation_items:
annotation = Annotation(img_filename=img.name)
if 'lower' in item:
annotation.classes = item.get('name')
else:
classes = [cls.get('name') for cls in item.get('classes', [])]
annotation.classes = classes
points = item.get('coordinates')
if len(points) == 2:
bbox = [points[0]['x'], points[0]['y'], points[1]['x'], points[1]['y']]
annotation.bbox = bbox
elif len(points) > 2:
segment = []
for p in points:
segment.append(p['x'])
segment.append(p['y'])
annotation.segment = segment
annotations.append(annotation)
return annotations
def list_annotations(self, dataset_id: int, annotation_set_id: int) -> List[Annotation]:
"""
Returns all annotations for a given annotation set
Args:
dataset_id: dataset id
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
images = self.list_dataset_images(dataset_id)
annotations = []
for img in images:
annotations += self.list_image_annotations(dataset_id, annotation_set_id, img.id)
return annotations
def create_annotation_set(
self, annotation_task: str, dataset_id: int, name: str, classes: List[str] = []
) -> AnnotationSet:
"""
Creates a new annotation set within the given dataset
Args:
annotation_task: specified task for the annotation set. See also: :class:`remo.task`
dataset_id: dataset id
name: name of the annotation set
classes: list of classes. Default is no classes
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.api.create_annotation_set(annotation_task, dataset_id, name, classes)
if 'error' in annotation_set:
raise Exception(
'Error while creating an annotation set. Message:\n{}'.format(annotation_set['error'])
)
return AnnotationSet(
id=annotation_set['id'],
name=annotation_set['name'],
task=annotation_set['task'],
dataset_id=annotation_set['dataset_id'],
total_classes=len(annotation_set['classes']),
)
def add_annotations_to_image(self, annotation_set_id: int, image_id: int, annotations: List[Annotation]):
"""
Adds annotation to a given image
#TODO: check instance segmentation
Args:
annotation_set_id: annotation set id
image_id: image id
annotations: Annotation object
"""
annotation_set = self.get_annotation_set(annotation_set_id)
dataset_id = annotation_set.dataset_id
annotation_info = self.get_annotation_info(dataset_id, annotation_set_id, image_id)
object_id = len(annotation_info)
objects = []
classes = []
for item in annotations:
if item.bbox:
objects.append(
{
"name": "OBJ " + str(object_id),
"coordinates": [
{"x": item.bbox.xmin, "y": item.bbox.ymin},
{"x": item.bbox.xmax, "y": item.bbox.ymax},
],
"auto_created": False,
"position_number": object_id,
"classes": [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
],
"objectId": object_id,
"isHidden": False,
}
)
object_id += 1
elif item.segment:
objects.append(
{
"name": "OBJ " + str(object_id),
"coordinates": item.segment.points,
"auto_created": False,
"position_number": object_id,
"classes": [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
],
"objectId": object_id,
"isHidden": False,
}
)
object_id += 1
else:
classes += [
{"name": cls, "lower": cls.lower(), "questionable": False} for cls in item.classes
]
return self.api.add_annotation(
dataset_id, annotation_set_id, image_id, annotation_info, classes=classes, objects=objects
)
def list_annotation_set_classes(self, annotation_set_id: int) -> List[str]:
"""
List classes within the annotation set
Args:
annotation_set_id: annotation set id
Returns:
list of classes
"""
classes_with_ids = self.api.list_annotation_set_classes(annotation_set_id)
return [item.get('name') for item in classes_with_ids]
def list_dataset_images(self, dataset_id: int, limit: int = None, offset: int = None) -> List[Image]:
"""
Returns a list of images within a dataset with given dataset_id
Args:
dataset_id: dataset id
limit: limits result images
offset: specifies offset
Returns:
List[:class:`remo.Image`]
"""
json_data = self.api.list_dataset_images(dataset_id, limit=limit, offset=offset)
if 'error' in json_data:
raise Exception(
'Failed to get all images for dataset ID = {}. Error message:\n: {}'.format(
dataset_id, json_data.get('error')
)
)
images = json_data.get('results', [])
return [Image(**img) for img in images]
def get_image_content(self, url: str) -> bytes:
"""
Get image file content by url
Args:
url: image url
Returns:
image binary data
"""
return self.api.get_image_content(url)
def get_image(self, image_id: int) -> Image:
"""
Retrieves image by a given image id
Args:
image_id: image id
Returns:
:class:`remo.Image`
"""
json_data = self.api.get_image(image_id)
if 'error' in json_data:
raise Exception(
'Failed to get image by ID = {}. Error message:\n: {}'.format(
image_id, json_data.get('error')
)
)
return Image(**json_data)
def search_images(
self, classes=None, task: str = None, dataset_id: int = None, limit: int = None,
):
"""
Search images by class and annotation task
Args:
classes: string or list of strings - search for images which match all given classes
task: name of the annotation task to filter dataset
dataset_id: narrows search result to given dataset
limit: limits number of search results
Returns:
image_id, dataset_id, name, annotations
"""
# TODO: check this function
return self.api.search_images(classes, task, dataset_id, limit)
def view_search(self):
"""
Opens browser in search page
"""
return self._view(frontend.search)
def view_image(self, image_id: int, dataset_id: int):
"""
Opens browser on the image view for given image
Args:
image_id: image id
dataset_id: dataset id
"""
img = self.get_image(image_id)
if not img:
return
if img.dataset_id != dataset_id:
raise Exception('Image ID = {} not found in dataset ID: {}'.format(image_id, dataset_id))
return self._view(frontend.image_view.format(image_id, dataset_id))
def open_ui(self):
"""
Opens the main page of Remo
"""
return self._view(frontend.datasets)
def view_dataset(self, id: int):
"""
Opens browser for the given dataset
Args:
id: dataset id
"""
return self._view(frontend.datasets, id)
def view_annotation_tool(self, id: int):
"""
Opens browser in annotation view for the given annotation set
Args:
id: annotation set id
"""
return self._view(frontend.annotation.format(id))
def view_annotate_image(self, annotation_set_id: int, image_id: int):
"""
Opens browser on the annotation tool for giving image
Args:
annotation_set_id: annotation set id
image_id: image id
"""
return self._view(frontend.annotate_image.format(annotation_set_id, image_id))
def view_annotation_stats(self, annotation_set_id: int):
"""
Opens browser in annotation set insights page
Args:
annotation_set_id: annotation set id
"""
return self._view(frontend.annotation_set_insights.format(annotation_set_id))
def _view(self, url, *args, **kwargs):
return self.viewer.browse(self.api.url(url, *args, **kwargs)) | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/sdk.py | 0.856182 | 0.323727 | sdk.py | pypi |
import csv
import os
import tempfile
from typing import List, TypeVar
from .domain.task import *
Annotation = TypeVar('Annotation')
def check_annotation_task(expected_task, actual_task):
if expected_task is not actual_task:
raise Exception(
"Expected annotation task '{}', but received annotation for '{}'".format(
expected_task, actual_task
)
)
class SimpleCSVBase:
task = None
headers = None
@staticmethod
def inline_classes(classes):
if isinstance(classes, list):
return ';'.join(classes)
if isinstance(classes, str):
return classes
return ''
def validate_annotation_task(self, annotations: List[Annotation]):
for annotation in annotations:
check_annotation_task(self.task, annotation.task)
def prepare_data(self, annotations: List[Annotation]) -> List[List[str]]:
self.validate_annotation_task(annotations)
return [self.headers, *self._csv_data(annotations)]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return []
class SimpleCSVForObjectDetection(SimpleCSVBase):
task = object_detection
headers = ["file_name", "class_name", "xmin", "ymin", "xmax", "ymax"]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [
[annotation.img_filename, self.inline_classes(annotation.classes), *annotation.coordinates]
for annotation in annotations
]
class SimpleCSVForInstanceSegmentation(SimpleCSVBase):
task = instance_segmentation
headers = ["file_name", "class_name", "coordinates"]
@staticmethod
def inline_coordinates(coordinates):
return '; '.join(map(str, coordinates))
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [
[
annotation.img_filename,
self.inline_classes(annotation.classes),
self.inline_coordinates(annotation.coordinates),
]
for annotation in annotations
]
class SimpleCSVForImageClassification(SimpleCSVBase):
task = image_classification
headers = ["file_name", "class_name"]
def _csv_data(self, annotations: List[Annotation]) -> List[List[str]]:
return [[annotation.img_filename, self.inline_classes(annotation.classes)] for annotation in annotations]
csv_makers = {
SimpleCSVForObjectDetection.task: SimpleCSVForObjectDetection(),
SimpleCSVForInstanceSegmentation.task: SimpleCSVForInstanceSegmentation(),
SimpleCSVForImageClassification.task: SimpleCSVForImageClassification()
}
def prepare_annotations_for_upload(annotations: List[Annotation], annotation_task):
csv_maker = csv_makers.get(annotation_task)
if not csv_maker:
raise Exception(
"Annotation task '{}' not recognised. "
"Supported annotation tasks are 'instance_segmentation', 'object_detection' and "
"'image_classification'".format(annotation_task)
)
return csv_maker.prepare_data(annotations)
def create_tempfile(annotations: List[Annotation]) -> (str, List[str]):
fd, temp_path = tempfile.mkstemp(suffix='.csv')
annotation_task = annotations[0].task
prepared_data = prepare_annotations_for_upload(annotations, annotation_task)
# getting a list of classes. Skipping the first row as it contains the csv header
list_of_classes = [row[1] for row in prepared_data[1:]]
classes = set()
if isinstance(list_of_classes, list) and list_of_classes:
for item in list_of_classes:
if isinstance(item, list):
classes.union(item)
else:
classes.add(item)
list_of_classes = list(classes)
with os.fdopen(fd, 'w') as temp:
writer = csv.writer(temp)
writer.writerows(prepared_data)
return temp_path, list_of_classes
def parse_csv_obj_det(file_path) -> List[Annotation]:
"""
Args
file_path: path to annotations
Example:
# file_name,class_name,coordinates
# ILSVRC2012_val_00000003.JPEG,N01751748, 10 20 30 40
# ILSVRC2012_val_00000003.JPEG,N01751748, 10 20 30 40
Returns:
List[:class:`remo.Annotation`]
"""
annotations = []
with open(file_path, 'r') as f:
csv_file = csv.reader(f, delimiter=',')
for row in csv_file:
file_name, class_name, coordinates = row
# convert coordinates to list of integers
bbox = [int(val) for val in coordinates.split(' ')]
annotation = Annotation(img_filename=file_name, classes=class_name)
annotation.bbox = bbox
annotations.append(annotation)
return annotations | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/annotation_utils.py | 0.639398 | 0.430387 | annotation_utils.py | pypi |
import os
from typing import List, TypeVar, Callable
from .annotation import Annotation
from .image import Image
from remo.annotation_utils import create_tempfile
AnnotationSet = TypeVar('AnnotationSet')
class Dataset:
"""
Remo dataset
Args:
id: dataset id
name: dataset name
quantity: number of images
"""
def __init__(self, id: int = None, name: str = None, quantity: int = 0, **kwargs):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.n_images = quantity
def __str__(self):
return "Dataset {id} - '{name}'".format(id=self.id, name=self.name)
def __repr__(self):
return self.__str__()
def add_data(
self,
local_files: List[str] = None,
paths_to_upload: List[str] = None,
urls: List[str] = None,
annotation_task: str = None,
folder_id: int = None,
annotation_set_id: int = None,
class_encoding=None,
wait_for_complete=True
) -> dict:
"""
Adds images and/or annotations to the dataset.
Use the parameters as follows:
- Use ``local files`` to link (rather than copy) images.
- Use ``paths_to_upload`` if you want to copy image files or archive files.
- Use ``urls`` to download from the web images, annotations or archives.
In terms of supported formats:
- Adding images: support for ``jpg``, ``jpeg``, ``png``, ``tif``
- Adding annotations: to add annotations, you need to specify the annotation task and make sure the specific file format is one of those supported. See documentation here: https://remo.ai/docs/annotation-formats/
- Adding archive files: support for ``zip``, ``tar``, ``gzip``
Example::
urls = ['https://remo-scripts.s3-eu-west-1.amazonaws.com/open_images_sample_dataset.zip']
my_dataset = remo.create_dataset(name = 'D1', urls = urls)
my_dataset.add_data(local_files=annotation_files, annotation_task = 'Object detection')
Args:
dataset_id: id of the dataset to add data to
local_files: list of files or directories containing annotations and image files
Remo will create smaller copies of your images for quick previews but it will point at the original files to show original resolutions images.
Folders will be recursively scanned for image files.
paths_to_upload: list of files or directories containing images, annotations and archives.
These files will be copied inside .remo folder.
Folders will be recursively scanned for image files.
Unpacked archive will be scanned for images, annotations and nested archives.
urls: list of urls pointing to downloadable target, which can be image, annotation file or archive.
annotation_task: annotation tasks tell remo how to parse annotations. See also: :class:`remo.task`.
folder_id: specifies target virtual folder in the remo dataset. If None, it adds to the root level.
annotation_set_id: specifies target annotation set in the dataset. If None, it adds to the default annotation set.
class_encoding: specifies how to convert labels in annotation files to readable labels. If None, Remo will try to interpret the encoding automatically - which for standard words, means they will be read as they are.
See also: :class:`remo.class_encodings`.
wait_for_complete: if True, the function waits for upload data to complete
Returns:
Dictionary with results for linking files, upload files and upload urls::
{
'files_link_result': ...,
'files_upload_result': ...,
'urls_upload_result': ...
}
"""
if annotation_set_id:
annotation_set = self.get_annotation_set(annotation_set_id)
if not annotation_set:
raise Exception('Annotation set ID = {} not found'.format(annotation_set_id))
return self.sdk.add_data_to_dataset(
self.id,
local_files=local_files,
paths_to_upload=paths_to_upload,
urls=urls,
annotation_task=annotation_task,
folder_id=folder_id,
annotation_set_id=annotation_set_id,
class_encoding=class_encoding,
wait_for_complete=wait_for_complete
)
def fetch(self):
"""
Updates dataset information from server
"""
dataset = self.sdk.get_dataset(self.id)
self.__dict__.update(dataset.__dict__)
def annotation_sets(self) -> List[AnnotationSet]:
"""
Lists the annotation sets within the dataset.
Returns:
List[:class:`remo.AnnotationSet`]
"""
return self.sdk.list_annotation_sets(self.id)
def add_annotations(self, annotations: List[Annotation], annotation_set_id: int = None, create_new_annotation_set: bool = False):
"""
Fast upload of annotations to the Dataset.
If annotation_set_id is not provided, annotations will be added to:
- the only annotation set present, if the Dataset has exactly one Annotation Set and the tasks match
- a new annotation set, if the Dataset doesn't have any Annotation Sets or if ceate_new_annotation_set = True
Otherwise, annotations will be added to the Annotation Set specified by annotation_set_id.
Example::
urls = ['https://remo-scripts.s3-eu-west-1.amazonaws.com/open_images_sample_dataset.zip']
my_dataset = remo.create_dataset(name = 'D1', urls = urls)
image_name = '000a1249af2bc5f0.jpg'
annotations = []
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Human hand'
annotation.bbox=[227, 284, 678, 674]
annotations.append(annotation)
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Fashion accessory'
annotation.bbox=[496, 322, 544,370]
annotations.append(annotation)
my_dataset.add_annotations(annotations)
Args:
annotations: list of Annotation objects
(optional) annotation_set_id: annotation set id
(optional) create_new_annotation_set: if True, a new annotation set will be created
"""
if annotation_set_id and create_new_annotation_set:
raise Exception("You passed an annotation set but also set create_new_annotation_set = True. You can't have both.")
if annotation_set_id:
annotation_set = self.get_annotation_set(annotation_set_id)
else:
annotation_sets = self.annotation_sets()
if len(annotation_sets)>0:
annotation_set = self.get_annotation_set()
annotation_set_id = annotation_set.id
temp_path, list_of_classes = create_tempfile(annotations)
if create_new_annotation_set or (not annotation_set_id):
n_annotation_sets = len(self.annotation_sets())
self.create_annotation_set(annotation_task=annotations[0].task, name='my_ann_set_{}'.format(n_annotation_sets+1),
classes = list_of_classes, path_to_annotation_file = temp_path)
else:
self.add_data(annotation_task = annotation_set.task, annotation_set_id =annotation_set.id,
paths_to_upload = [temp_path])
#TODO ALR: removing the temp_path doesn't work on Windows, hence the try except as a temp fix
try:
os.remove(temp_path)
except:
pass
def export_annotations(
self,
annotation_set_id: int = None,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
full_path: str = 'true',
) -> bytes:
"""
Export annotations for a given annotation set
Args:
annotation_set_id: annotation set id, by default will be used default_annotation_set
annotation_format: can be one of ['json', 'coco', 'csv'], default='json'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
Returns:
annotation file content
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.export_annotations(
annotation_format=annotation_format,
export_coordinates=export_coordinates,
full_path=full_path,
)
print('ERROR: annotation set not defined')
def export_annotations_to_file(
self,
output_file: str,
annotation_set_id: int = None,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
full_path: str = 'true',
):
"""
Exports annotations in given format and save to output file
Args:
output_file: output file to save
annotation_set_id: annotation set id
annotation_format: can be one of ['json', 'coco', 'csv'], default='json'
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
self.sdk.export_annotations_to_file(
output_file,
annotation_set_id,
annotation_format=annotation_format,
full_path=full_path,
export_coordinates=export_coordinates,
)
else:
print('ERROR: annotation set not defined')
def list_image_annotations(self, annotation_set_id: int, image_id: int) -> List[Annotation]:
"""
Retrieves annotations for a given image
Args:
annotation_set_id: annotation set id
image_id: image id
Returns:
List[:class:`remo.Annotation`]
"""
return self.sdk.list_image_annotations(self.id, annotation_set_id, image_id)
def create_annotation_set(
self, annotation_task: str, name: str, classes: List[str] = [], path_to_annotation_file: str = None
) -> AnnotationSet:
"""
Creates a new annotation set within the dataset
If path_to_annotation_file is provided, it populates it with the given annotations.
The first created annotation set for the given dataset, is considered the default one.
Args:
annotation_task: annotation task. See also: :class:`remo.task`
name: annotation set name
classes: list of classes to prepopulate the annotation set. Example: ['Cat', 'Dog']. Default is no classes
path_to_annotation_file: path to .csv annotation file
Returns:
:class:`remo.AnnotationSet`
"""
annotation_set = self.sdk.create_annotation_set(annotation_task, self.id, name, classes)
if annotation_set and path_to_annotation_file:
self.add_data(
paths_to_upload=[path_to_annotation_file],
annotation_task=annotation_task,
annotation_set_id=annotation_set.id,
)
annotation_set = self.sdk.get_annotation_set(annotation_set.id)
return annotation_set
def get_annotation_set(self, annotation_set_id: int = None) -> AnnotationSet:
"""
Retrieves annotation set with given id.
If no annotation set id is passed, it returns the default annotation set.
Args:
annotation_set_id: annotation set id
Returns:
:class:`remo.AnnotationSet`
"""
if not annotation_set_id:
return self.default_annotation_set()
annotation_set = self.sdk.get_annotation_set(annotation_set_id)
if annotation_set and annotation_set.dataset_id == self.id:
return annotation_set
else:
raise Exception('Annotation set with ID = {} is not part of dataset {}. You can check the list of annotation sets in your dataset using dataset.annotation_sets()'.format(annotation_set_id, self.__str__()))
def default_annotation_set(self) -> AnnotationSet:
"""
If the dataset has only one annotation set, it returns that annotation set.
Otherwise, it raises an exception.
"""
annotation_sets = self.annotation_sets()
if len(annotation_sets)>1:
raise Exception('Define which annotation set you want to use. ' + self.__str__() + ' has ' + str(len(annotation_sets)) + ' annotation sets. You can see them with `my_dataset.annotation_sets()`')
elif len(annotation_sets) ==0:
raise Exception(self.__str__() + " doesn't have any annotations. You can check the list of annotation sets with `my_dataset.annotation_sets()`")
return annotation_sets[0]
def get_annotation_statistics(self, annotation_set_id: int = None):
"""
Prints annotation statistics of all the available annotation sets within the dataset
Returns:
list of dictionaries with fields annotation set id, name, num of images, num of classes, num of objects, top3 classes, release and update dates
"""
# TODO: ALR - Improve output formatting
# TODO: ALR - Optional annotation set id as input
statistics = []
for ann_set in self.annotation_sets():
if (annotation_set_id is None) or (annotation_set_id == ann_set.id):
stat = {
'AnnotationSet ID': ann_set.id,
'AnnotationSet name': ann_set.name,
'n_images': ann_set.total_images,
'n_classes': ann_set.total_classes,
'n_objects': ann_set.total_annotation_objects,
'top_3_classes': ann_set.top3_classes,
'creation_date': ann_set.released_at,
'last_modified_date': ann_set.updated_at,
}
statistics.append(stat)
return statistics
def classes(self, annotation_set_id: int = None) -> List[str]:
"""
Lists all the classes within the dataset
# ALR TODO: if user doens't specify id, and there is >1 sets, get_annotation_set raises an error. Wouldn't it be better if the error was raised from inside this calss?
Args:
annotation_set_id: annotation set id. If not specified the default annotation set is considered.
Returns:
List of classes
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.classes()
def annotations(self, annotation_set_id: int = None) -> List[Annotation]:
"""
Returns all annotations for a given annotation set.
If no annotation set is specified, the default annotation set will be used
Args:
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return self.sdk.list_annotations(self.id, annotation_set.id)
print('ERROR: annotation set was not defined.')
def images(self, limit: int = None, offset: int = None) -> List[Image]:
"""
Lists images within the dataset
Args:
limit: the number of images to be listed
offset: specifies offset
Returns:
List[:class:`remo.Image`]
Example::
my_dataset.images()
"""
return self.sdk.list_dataset_images(self.id, limit=limit, offset=offset)
def image(self, img_filename = None, img_id = None) -> Image:
"""
Returns the :class:`remo.Image` with matching img_filename or img_id.
Pass either img_filename or img_id.
Args:
img_filename: filename of the Image to retrieve
img_id: id of the the Image to retrieve
Returns:
:class:`remo.Image`
"""
#TODO ALR: do we need to raise an error if no image is found?
#TODO ALR: we have a sdk.get_image by img_id. Should we implement get_image by img_name in the server for faster processing?
if (img_filename) and (img_id):
raise Exception("You passed both img_filename and img_id. Pass only one of the two")
if img_filename:
list_of_images = self.images()
for i_image in list_of_images:
if i_image.name == img_filename:
return i_image
elif img_id:
return self.sdk.get_image(img_id)
def delete(self):
"""
Deletes dataset
"""
self.sdk.delete_dataset(self.id)
def search(self, classes=None, task: str = None):
"""
Given a list of classes and annotation task, it returns a list of all the images with mathcing annotations
Args:
classes: string or list of strings - search for images which match all given classes
task: annotation task. See also: :class:`remo.task`
Returns:
subset of the dataset
"""
# TODO: add implementation
return self.sdk.search_images(classes, task, self.id)
def view(self):
"""
Opens browser on dataset page
"""
# print('self.sdk', self.sdk, type(self.sdk))
return self.sdk.view_dataset(self.id)
def view_annotate(self, annotation_set_id: int = None):
"""
Opens browser on the annotation tool for the given annotation set
Args:
annotation_set_id: annotation set id. If not specified, default one be used.
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.view()
else:
print('ERROR: annotation set was not defined.')
def view_annotation_stats(self, annotation_set_id: int = None):
"""
Opens browser on annotation set insights page
Args:
annotation_set_id: annotation set id. If not specified, default one be used.
"""
annotation_set = self.get_annotation_set(annotation_set_id)
if annotation_set:
return annotation_set.view_stats()
else:
print('ERROR: annotation set was not defined.')
def view_image(self, image_id: int):
"""
Opens browser on image view page for the given image
Args:
image_id: image id
"""
return self.sdk.view_image(image_id, self.id) | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/domain/dataset.py | 0.800809 | 0.323514 | dataset.py | pypi |
from typing import List, TypeVar
from .annotation import Annotation
from remo.annotation_utils import create_tempfile
Dataset = TypeVar('Dataset')
class AnnotationSet:
"""
Remo annotation set
Args:
id: annotation set id
name: annotation set name
task: annotation task. See also: :class:`remo.task`
dataset_id: dataset id
total_classes: total annotation classes
updated_at: date, when annotation set was last updated
released_at: annotation set release date
total_images: total number of images
top3_classes: top 3 classes in annotation set
total_annotation_objects: total number of annotation objects in annotation set
"""
def __init__(
self,
id: int = None,
name: str = None,
task: str = None,
dataset_id: int = None,
total_classes=None,
updated_at=None,
released_at=None,
total_images: int = None,
top3_classes=None,
total_annotation_objects: int = None,
**kwargs
):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.task = task
self.dataset_id = dataset_id
self.total_classes = total_classes
self.updated_at = updated_at
self.released_at = released_at
self.total_images = total_images
self.top3_classes = top3_classes
self.total_annotation_objects = total_annotation_objects
def __str__(self):
return "Annotation set {id} - '{name}', task: {task}, #classes: {total_classes}".format(
id=self.id, name=self.name, task=self.task, total_classes=self.total_classes
)
def __repr__(self):
return self.__str__()
def add_annotations(self, annotations: List[Annotation]):
"""
Upload of annotations to the annotation set.
Example::
urls = ['https://remo-scripts.s3-eu-west-1.amazonaws.com/open_images_sample_dataset.zip']
ds = remo.create_dataset(name = 'D1', urls = urls)
ann_set = ds.create_annotation_set(annotation_task = 'Object Detection', name = 'test_set')
image_name = '000a1249af2bc5f0.jpg'
annotations = []
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Human hand'
annotation.bbox=[227, 284, 678, 674]
annotations.append(annotation)
annotation = remo.Annotation()
annotation.img_filename = image_name
annotation.classes='Fashion accessory'
annotation.bbox=[496, 322, 544,370]
annotations.append(annotation)
ann_set.add_annotations(annotations)
Args:
annotations: list of Annotation objects
"""
temp_path, list_of_classes = create_tempfile(annotations)
self.sdk.add_data_to_dataset(
dataset_id = self.dataset_id,
paths_to_upload=[temp_path],
annotation_task=self.task,
annotation_set_id=self.id
)
def add_image_annotation(self, image_id: int, annotation: Annotation):
"""
Adds new annotation to the image
Args:
image_id: image id
annotation: annotation data
"""
self.sdk.add_annotation(self.id, image_id, annotation)
def export_annotations(
self, annotation_format: str = 'json', export_coordinates: str = 'pixel', full_path: str = 'true'
):
"""
Exports annotations in a given format
Args:
annotation_format: choose format from this list ['json', 'coco', 'csv']
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
Returns:
annotation file content
"""
return self.sdk.export_annotations(
self.id,
annotation_format=annotation_format,
export_coordinates=export_coordinates,
full_path=full_path,
)
def export_annotations_to_file(
self,
output_file: str,
annotation_format: str = 'json',
export_coordinates: str = 'pixel',
full_path: str = 'true',
):
"""
Exports annotations in given format and save to output file
Args:
output_file: output file to save
annotation_format: can be one of ['json', 'coco', 'csv'], default='json'
full_path: uses full image path (e.g. local path), can be one of ['true', 'false'], default='false'
export_coordinates: converts output values to percentage or pixels, can be one of ['pixel', 'percent'], default='pixel'
"""
self.sdk.export_annotations_to_file(
output_file,
self.id,
annotation_format=annotation_format,
full_path=full_path,
export_coordinates=export_coordinates,
)
def classes(self) -> List[str]:
"""
List classes within the annotation set
Returns:
List of classes
"""
return self.sdk.list_annotation_set_classes(self.id)
def view(self):
"""
Opens browser on the annotation tool page for this annotation set
"""
return self.sdk.view_annotation_tool(self.id)
def view_stats(self):
"""
Opens browser on annotation set insights page
"""
return self.sdk.view_annotation_stats(self.id) | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/domain/annotation_set.py | 0.910714 | 0.231679 | annotation_set.py | pypi |
from typing import List
from .task import *
class Bbox:
"""
Represents coordinates of a bounding box annotation. Used in object detection.
Args:
xmin: X min
ymin: Y min
xmax: X max
ymax: Y max
"""
task = object_detection
type = 'Bounding Box'
def __init__(self, xmin: int, ymin: int, xmax: int, ymax: int):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.coordinates = [xmin, ymin, xmax, ymax]
class Segment:
"""
Represents coordinates of a segment annotation. Used in instance segmentation.
Args:
points: list of segment coordinates ``[x0, y0, x1, y1, ..., xN, yN]``
"""
task = instance_segmentation
type = 'Polygon'
def __init__(self, points: List[int]):
self.points = [{'x': x, 'y': y} for x, y in zip(points[::2], points[1::2])]
self.coordinates = points
class Annotation:
"""
Represents a single annotation object. This can be:
- list of classes only: to assign classes to an image for image classification tasks
- bounding box and list of classes: to create a bounding box annotation object and assign it a list of classes
- segment and list of classes: to create a polygon annotation object and assign it a list of classes
Args:
img_filename: file name of the image the annotation refers to
classes: class or list of classes to add to the whole image or the object
object: the specific annotation object to add
Examples:
to create a bounding box:
annotation = Annotation('image.png', 'Dog')
annotation.bbox = [1, 23, 3, 2]
to create a polygon:
annotation = Annotation('image.png', 'Dog')
annotation.segment = [1, 23, 3, 2, 1, 2, 1, 2]
"""
def __init__(self, img_filename: str = None, classes=None, object=None):
if object and (
not isinstance(object, Bbox) and not isinstance(object, Segment)
):
raise Exception('Expected object type Annotation.Bbox or Annotation.Segment')
self.img_filename = img_filename
self.classes = classes
self.object = object
def __str__(self):
return "Annotation: {classes} (type:{ann_type}, file:{filename})".format(
classes=self.classes, ann_type=self.type, filename=self.img_filename
)
def __repr__(self):
return self.__str__()
@property
def classes(self):
if isinstance(self.__classes, list):
return self.__classes
elif isinstance(self.__classes, str):
self.__classes = [self.classes]
return self.__classes
return []
@classes.setter
def classes(self, classes):
if isinstance(classes, list):
self.__classes = classes
elif isinstance(classes, str):
self.__classes = [classes]
else:
self.__classes = []
@property
def type(self):
if not self.object:
return None
return self.object.type
@property
def coordinates(self):
if not self.object:
return None
return self.object.coordinates
@property
def task(self):
if not self.object:
return image_classification
return self.object.task
@property
def bbox(self):
if isinstance(self.object, Bbox):
return self.object
return None
@bbox.setter
def bbox(self, values: List[int]):
if len(values) != 4:
raise Exception('Bounding box expects 4 values: xmin, ymin, xmax, ymax')
self.object = Bbox(*values)
@property
def segment(self):
if isinstance(self.object, Segment):
return self.object
return None
@segment.setter
def segment(self, points: List[int]):
if not points:
raise Exception('Segment coordinates cannot be an empty list.')
if len(points) % 2 == 1:
raise Exception(
'Segment coordinates need to be an even number of elements indicating (x, y) coordinates of each point.'
)
self.object = Segment(points) | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/domain/annotation.py | 0.968812 | 0.62601 | annotation.py | pypi |
import os
import shutil
from typing import TypeVar, List
Annotation = TypeVar('Annotation')
AnnotationSet = TypeVar('AnnotationSet')
class Image:
"""
Remo image
Args:
id: image id
name: image file name
dataset_id: dataset id
path: local path, if available
url: image remo internal URL
size: file size in bytes
width: image width in pixels
height: image height in pixels
upload_date: upload date
"""
def __init__(
self,
id: int = None,
name: str = None,
dataset_id: int = None,
path: str = None,
url: str = None,
size: int = None,
width: int = None,
height: int = None,
upload_date: str = None,
**kwargs
):
from remo import _sdk
self.sdk = _sdk
self.id = id
self.name = name
self.dataset_id = dataset_id
self.path = path
self.url = url
self.size = size
self.width = width
self.height = height
self.upload_date = upload_date
def __str__(self):
return 'Image: {} - {}'.format(self.id, self.name)
def __repr__(self):
return self.__str__()
def get_content(self) -> bytes:
"""
Retrieves image file content
Returns:
image binary data
"""
if not self.url:
print('ERROR: image url is not set')
return
return self.sdk.get_image_content(self.url)
def save_to(self, dir_path: str):
"""
Save image to giving directory
Args:
dir_path: path to the directory
"""
dir_path = self.sdk._resolve_path(dir_path)
os.makedirs(dir_path, exist_ok=True)
file_path = os.path.join(dir_path, self.name)
if self.path:
shutil.copy(self.path, file_path)
return
img_content = self.get_content()
if not img_content:
return
self.sdk._save_to_file(img_content, file_path)
def list_annotation_sets(self) -> List[AnnotationSet]:
"""
Lists annotations sets
Returns:
List[:class:`remo.AnnotationSet`]
"""
return self.sdk.list_annotation_sets(self.dataset_id)
def annotations(self, annotation_set_id: int) -> List[Annotation]:
"""
Retrieves image annotations from giving annotation set
Args:
annotation_set_id: annotation set id
Returns:
List[:class:`remo.Annotation`]
"""
return self.sdk.list_image_annotations(self.dataset_id, annotation_set_id, self.id)
def get_annotation_set(self, annotation_set_id: int = None) -> AnnotationSet:
ds = self.sdk.get_dataset(self.dataset_id)
return ds.get_annotation_set(annotation_set_id)
def add_annotation(self, annotation: Annotation, annotation_set_id: int = None):
"""
Adds new annotation to the image
Args:
annotation_set_id: annotation set id
annotation: annotation data
"""
if not annotation_set_id:
annotation_set = self.get_annotation_set()
annotation_set_id = annotation_set.id
if annotation_set_id:
self.sdk.add_annotations_to_image(annotation_set_id, self.id, annotation)
else:
print('ERROR: annotation set not defined')
def view(self):
"""
Opens browser on image view for the image
"""
return self.sdk.view_image(self.id, self.dataset_id)
def view_annotate(self, annotation_set_id: int):
"""
Opens browser on the annotation tool for giving annotation set
Args:
annotation_set_id: annotation set id
"""
return self.sdk.view_annotate_image(annotation_set_id, self.id) | /remo-sdk-0.1.1.tar.gz/remo-sdk-0.1.1/remo/domain/image.py | 0.876225 | 0.197309 | image.py | pypi |
import re
from datetime import datetime
class DataTools:
pass
def digits_only(txt):
"""This function extracts the digits only from a string
Args:
txt: the string to extract the digits from it
Returns:
the extracted numbers if they exist.
if no numbers are found it returns None
examples:
> digits_only("a12bc45")
> 1245
> digits_only("abc")
> None
"""
digits = ""
for i in txt:
if i.isnumeric():
digits += i
if len(digits) > 0:
return digits
else:
return None
def email_format_check(email):
"""This function checks if a given string has the correct email format or not
Args:
email: the string that represents the email
Returns:
True/False
Examples:
> email_format_check("someemail@domain.com")
> True
> email_format_check("someemail.com@")
> False
> email_format_check("someemail@domain.com@")
> False
"""
mail_pattern = re.compile(r".*@.*\.\w*$")
match = re.match(mail_pattern, email)
if match != None:
return True
else:
return False
def days_between(date1, date2, unit= "years"):
"""This fucntion returns the difference between two dates the input date should be dd/mm/yyyy the output number of days/months/years between the two dates
Args:
date1: the first date "dd/mm/yyyy"
date2: the second date "dd/mm/yyyy"
unit: the difference between the two dates in days/months/years
Returns:
the difference between the two dates
returns None if the dates are in wrong format
Examples:
> days_between("01/11/2020","31/12/2020","days")
> 60
> days_between("01/11/2020","31/12/2020","years")
> 0.164
"""
try:
date1 = datetime.strptime(date1, "%d/%m/%Y")
date2 = datetime.strptime(date2, "%d/%m/%Y")
diff = abs((date2 - date1).days)
# converting days to years
fin_diff = 0.0
if unit=="years" or unit=="y":
fin_diff = round(diff/365,3)
if unit=="months" or unit=="m":
fin_diff = round(diff/30,3)
if unit=="days" or unit=="d":
fin_diff = diff
return fin_diff
except ValueError:
"The date format is not correct. It should be dd/mm/yyyy"
return None
def list_to_dict(lst):
"""
This function enumerates a list and return enumerated list in form of a dictionary
Args:
list: the list to be convetrted to dictionary
Returns:
dictionary: the enumarted list values as keys and numbers as values in a dictionary
example:
> list_to_dict(['a','b','c'])
> {'a':0,'b':1,'c':2}
"""
try:
dictionary = {}
for value, key in enumerate(lst):
dictionary[key] = value
return dictionary
except TypeError:
"The entered value is not a list"
return None | /remo-tools-0.1.tar.gz/remo-tools-0.1/remo_tools/data_tools.py | 0.573678 | 0.436562 | data_tools.py | pypi |
# Changelog
Here we list the history of changes in Remo across the various releases.
## Coming up in the next releases
This is a general plan on what we are working on next. If you have any feedback or preference, we would love to hear them in our discuss forum.
**1. Improved dataset experience**
Including a list view for images details and folders to organise your images.
**2. Faster processing**
- Improved uploading of thousands of annotations at once
**3. Increase supported formats and tasks**
- DICOM
- Pose Estimation or Semantic Segmentation up next, if we have enough requests
---
## v0.6.0 - 5 Feb 21
Thanks to the support from our friends at [Apeel Sciences](https://www.apeel.com/) who have sponsored the initiative, we are introducing a settings section for dataset, featuring option to sort classes in the annotation tool page (with the ability to add more settings in the future).
For the rest, we issuing a bug fix to enable export annotations files with images filepath when using Remo from the browser (thanks to Matt who reported it).
---
## v0.5.9 - 15 Jan 21
ure
Issuing a bug fix to shortcuts behaviour within the annotation tool.
After annotating a picture, the class-hotkey binding used to change on each picture to match the sorting of objects count. We now keep the binding constant within an annotation set.
---
## v0.5.8 - 09 Jan 21
**Bug fixes**
- Fixed uploading CSV files on Windows
- Fixed on hover message on the annotation tool
- Fixed Team page view
---
## v0.5.7 - 20 Nov 20
We are finally introducing a Docker installation for Remo! We are also releasing some changes that make Remo more flexible and a paid Team version.
**Features**
- Installation via Docker is now supported. Among other things, this allows to have a PostgreSQL server running remotely for increased reliability
- Releasing a new pip package for remo-python. Main improvement: search function inside a dataset now works
- Improved documentation across the board
- We have removed limitations to the IP address where you can serve Remo
- Paid Team version of Remo is now available
**Bug fixes**
- Deleting specific images at times failed. This is now fixed
---
## v0.5.4 - 24 Oct 20
Fixed a bug in the dataset page where scrolling with a filter applied did not display all the pictures correctly. Thanks to Richard for reporting it!
---
## v0.5.3 - 23 Oct 20
This release includes some minor optimization and a bug fix. Specifically:
* We optimized load time to count images in annotation export form
* Fixed a bug where we couldn't delete a dataset that had some broken images inside (this issue was affecting our demo too)
---
## v0.5.2 - 6 Oct 20
We are issuing a fix for the registration process we introduced in v0.5, and introducing the ability to export annotations based on filtering by image tag
**Features**
* Now you can export annotations for a selection on images based on filters by image tags
**Bug fixes**
* Fixed upload instance segmentation annotations in CSV format
* Fixed token validation
---
## v0.5.0 - 30 Sep 20
In this major release, we are introducing a breakdown by tags on annotation statistics, and our user registration module. We have also greatly reduced the size of the wheel, which is now about 14 MB (30% of its previous size).
**Features**
* For an annotation set, you can now break down statistics by image tags. Among other things, this allows to easily visualize the distribution of objects and images in train/test splits
* We have added a user registration module. You may now verify your email and register your free token to enjoy unrestricted access to remo.
**Optimizations**
* Reduced the size of distribution pip package from 50 MB to 14 MB
---
## v0.4.24 - 18 Sep 20
This release contains fixes to our notification system.
**Bug fixes**
* Cancel on the upload sessions persisted on the view after being closed.
* When starting an upload, there was a case when the upload statistics arent displayed and the page is cropped from the top.
---
## v0.4.23 - 16 Sep 20
In this release, we are adding persistance of active upload sessions and a shortcut to delete objects in the annotation tool (thanks to user Om for the feedback).
**Features**
* On annotation tool, you can now use the Delete or Backspace key to delete the selected annotation object, without a Delete confirmation form
**Optimizations**
* We are improving the notification system experience. Notifications are now persistent, so you will not lose any of active upload session details in case you close the tab or reload the page. And you will be able to watch status of all active uploads
* In case of failure to access our online demo, we have added a fail-safe view that allows you to access the demo with a button click
**Bug fixes**
* On dataset view, classes and tags from different annotation sets now have colours matching that of the corresponding annotation set
* Dropdown to select annotation sets was overflowing from the page
---
## v0.4.20 - 4 Sep 20
Releasing some minor UI optimization.
---
## v0.4.19 - 2 Sep 20
Adding a tutorial for Object Detection in PyTorch, and fixing a bug that prompted the user for login in browser
* Added PyTorch Object Detection tutorial
* Bug fix: login issue
---
## v0.4.18 - 28 Aug 20
We are moving the discuss forum to [discuss.remo.ai](https://discuss.remo.ai) and doing some small optimization.
**Improvements**
* Added keyboard left/right navigation on Image view
* Optimized screen for Create annotation set page
**Bug fixes**
* Fixed issue where Remo would lose connection to the db when laptop went in sleep or hibernate mode
* Fixed some UI issues around display of boxes, labels and tags on Image view and dataset page
---
## v0.4.17 - 24 Aug 20
Quite a big release, as we are introducing a number of improvements and features.
We are now supporting [Remo on Google Colab](https://remo.ai/docs/colab/), we are showing how to integrate Remo with PyTorch, and we are further improving on the Datasets upload and browsing experience.
**Features and Improvements**
* [Remo on Google Colab](https://remo.ai/docs/colab/): now you can embed and run Remo entirely on Colab servers, including backing up data on GDrive.
* Remo allows upload of annotations with file paths in the filename
* Remo-python: we improved on the library and added a PyTorch image classification tutorial
* Faster upload of tags: according to our benchmarking, it has come down from 25s to 3.5s for 1k tags
* Adding upload data status: we are now showing ETA and progress % while uploading data.
* Introducing a sticky header inside a Dataset, to change filters and image size while scrolling down on the dataset
* Optimized layout for smaller screens (including Colab and Jupyter Notebook)
* Reframed the feedback modal, with more focus on feature request
**Bug fixes**
* UI on upload data: we added a file picker to link local data on Electron view
* In case of notebooks, the "add data" button was missing on dataset view
---
## v0.4.16 - 07 Aug 20
In this release, we did some optimization around search filters and annotations saving. We are also introducing a visual display for tags in dataset view.
**Improvements**
* Faster saving of annotations
* Faster dropdown menus in dataset filters (executing on the backend now)
* Visualizing tags in dataset view
* Pop up asking for confirmation when deleting an annotation object
---
## v0.4.15 - 31 Jul 20
We are introducing filters in datasets. You can filter by class, tag, image names and task. You will also be able to carry over the filters to image view.
**Features**
* Dataset filters: filter by class, tag, image names and task
* Tags can now be exported as a separate csv file
**Bug fixes**
* Duplicating a dataset resulted in some naming conflicts
* Issues with uploading tags from the Python library
---
## v0.4.14 - 24 Jul 20
Another round of small incremental improvements:
* Based on user feedback, we are not allowing to have multiple datasets with the same name (if upgrading remo, existing dataset with duplicated name will be renamed)
* Some UI fixes: improvement to object pop up in annotation tool, better handling of long annotation set names inside a Dataset, more explicit loading behaviour when remo is loading a large set of annotations
---
## v0.4.13 - 20 Jul 20
Two main changes in this release:
* Fixing a bug we introduced in a recent release - some times new annotations were not saved properly if you were quickly drawing annotations and changing images
* Renaming remo-sdk to remo-python
---
## v0.4.12 - 17 Jul 20
In this release we worked on some improvements to the annotation experience and implemented a warning notification when adding duplicate images.
**Improvements**
* Design tweaks on the classes and object menus of Image View and Annotation tool
* Annotation tool: not allowing to create objects without class
* Images uploading: Warning notification when uploading a duplicated image within a dataset
* Increased test coverage on the Front End and Backend
---
## v0.4.9 - 9 Jul 20
We are improving on the search experience from within an image, fixed some issues around PostgreSQL installation and did some minor UX improvements.
We can now search by filename and we have an autocomplete menu to speed up the search.
**Improvements**
* Image View filters: search by filename and autocomplete for faster filtering
* Datasets: made it easier to export annotations, responding to users feedback
* Annotation tool: redesigned the On Hold / To Do button
**Bug fixes**
* Postgres-10 install failed on ubuntu 20.04 as reported by user Marco
* Remo init failed on Mac with different existing version of Postgres installed
* Remo init failed when a previous config file was present and corrupted
* Remo creates new tables in dataset with local user role instead of remo role
---
## v0.4.8 - 28 Jun 20
* Fixed postgres installation issue
---
## v0.4.7 - 26 Jun 20
Most of the work on this release has been around annotations upload and export.
We are adding the ability to upload and export tags, and some nice convenience functions for annotation exporting.
* ability to add and export tags
* export annotations: functionality to include or exclude images with no annotations
* export annotations: option to append paths to filenames
* more clear counting on annotation statistics in Annotations Tab: now displaying annotated as the count of images actual having annotations
* removing truncation for long filename on dataset page
---
## v0.4.5 - 19 Jun 20
We are introducing a check for the latest version on command line, particularly useful given we are iterating quite fast.
We also fixed some small reported bugs:
* installation issues with PostgresSQL on Windows
* installation script to use pip3 instead of pip when python 2 is installed
* sort by TODO in annotation page occasionally not working
---
## v0.4.4 - 09 Jun 20
Cosmetic changes to address links behaviour and formatting in documentation and discuss forum
---
## v0.4.3 - 04 Jun 20
Some small changes:
* Fixed bug with upload folder when using electron
* Added license page
---
## v0.4.2 - 03 Jun 20
Some quick fixes post the bigger release:
* CSS on loading bar
* demo access occasional login fail
* handling of tags: now it's case insensitive
---
## v0.4.1 - 29 May 20
**Main Changes**
Most of the work has been about introducing a notification system to inform on progress for images and annotation uploading and parsing, including a detailed breakdown of any error. Nothing fancy, but a great improvement to reliability of the app.
We also expanded the Command Line Interface, allowing for more options (such as kill remo, delete datasets), and simplified the connection between the sdk and the remo server.
For the rest, we completed a series of smaller UX-driven changes to improve usability (including showing selected annotation sizes) and fixed few reported bugs.
**Breakdown**
Bug fixes:
* Delete all images from dataset causes error
* Delete an image and re-upload it: should be allowed
* Up-to-date count of images after fast deleting
* Fix download latest available electron app
Changes:
* Notification system for data upload. You can now monitor the upload of data and get a breakdown of errors that occur.
* Expanded Command Line Interface suit of options
* SDK: rework the connection with remo app and introduce clear option to connect to a remote remo server. "import remo" in conda now does not launch the server
* Annotation tool, delete object: ask for confirmation
* Image view and Annotation tool - objects height and width information
---
## v0.3.42 - 19 May 20
Quick fix to handle directories with space within their name
* Edited installation scripts to handle folder names containing a space e.g. 'C:/remo ai/script'
---
## v0.3.41 - 8 May 20
**Main Changes**
Switched to PostgresSQL for database management, instead of SQLlite. This makes the whole app more responsive and reliable.
For the rest, we implemented a number of small fixes aimed at making making remo more robust
**Breakdown**
Bug fixes:
* Fixed annotation statistics inconsistencies for image classification
* Fixed sending feedback form
* Fixed autologin in browser, and in electron after user changes password
* Fixed rename annotation set
* Fixed export annotation form - missing annotation set name
* Fixed Windows installation in conda env - pip failed to install package
* Fixed duplicate annotation objects
Changes:
* Added support for PostgresSQL as main database
* Improved duplicate annotation set flow
* After annotations uploaded - images marked as annotated
* Improved create annotation set flow
* Improved save annotations behaviour
* Improved data uploading and parsing - moved it to a separate process, which allows to use remo while long uploads are in progress
* Added ability to bulk delete annotations for an image in annotation tool
* Added ability to mark image as TODO in annotation tool
* Improved description of installation steps. Also we are now asking user for explicit permission to install PostgresSQL and additional packages
| /remo-0.6.1-py3-none-any.whl/remo_app/CHANGELOG.md | 0.795579 | 0.758645 | CHANGELOG.md | pypi |
remodel
=======
[](https://travis-ci.org/linkyndy/remodel)
Very simple yet powerful and extensible Object Document Mapper for RethinkDB, written in Python.
## It is plain simple!
```python
from remodel.models import Model
class User(Model):
pass
```
That's really everything you need to do to set up a model!
> Don't forget to turn on your RethinkDB server and to create your tables (check the examples below for a helper that does just that!).
## Features
- schemaless;
- `dict` interface;
- full support for relations;
- indexes;
- convention over configuration;
- lazy-loading;
- caching;
- thoroughly tested;
## Installation
```bash
pip install remodel
```
## Examples
### Basic CRUD operations
```python
class Order(Model):
pass
# Create
my_order = Order.create(customer='Andrei', shop='GitHub')
# Update
my_order['total'] = 100
my_order.save()
# Read
saved_order = Order.get(customer='Andrei')
# Delete
saved_order.delete()
```
### Creating tables
```python
from remodel.models import Model
from remodel.helpers import create_tables, create_indexes
class Party(Model):
has_many = ('Guest',)
class Guest(Model):
belongs_to = ('Party',)
# Creates all database tables defined by models
create_tables()
# Creates all table indexes based on model relations
create_indexes()
```
### Configuring database connection
Setups are widely different, so here's how you need to configure remodel in order to connect to your RethinkDB database:
```python
from remodel.connection import pool
pool.configure(host='localhost', port=28015, auth_key=None, user='admin', password='', db='test')
```
### Relations
#### Has one / Belongs to
```python
class User(Model):
has_one = ('Profile',)
class Profile(Model):
belongs_to = ('User',)
andrei = User.create(name='Andrei')
profile = Profile.create(user=andrei, network='GitHub', username='linkyndy')
print profile['user']['name'] # prints Andrei
```
#### Has many / Belongs to
```python
class Country(Model):
has_many = ('City',)
class City(Model):
belongs_to = ('Country',)
romania = Country.create(name='Romania')
romania['cities'].add(City(name='Timisoara'), City(name='Bucharest'))
print romania['cities'].count() # prints 2
```
#### Has and belongs to many
```python
class Post(Model):
has_and_belongs_to_many = ('Tag',)
class Tag(Model):
has_and_belongs_to_many = ('Post',)
my_post = Post.create(name='My first post')
personal_tag = Tag.create(name='personal')
public_tag = Tag.create(name='public')
my_post['tags'].add(personal_tag, public_tag)
print my_post['tags'].count() # prints 2
```
#### Has many through
```python
class Recipe(Model):
has_many = ('SpecificSpice',)
class Chef(Model):
has_many = ('SpecificSpice',)
class SpecificSpice(Model):
belongs_to = ('Recipe', 'Chef')
quattro_formaggi = Recipe.create(name='Pizza Quattro Formaggi')
andrei = Chef.create(name='Andrei')
andreis_special_quattro_formaggi = SpecificSpice.create(chef=andrei, recipe=quattro_formaggi, oregano=True, love=True)
print andreis_special_quatro_formaggi['love'] # prints True
```
### Callbacks
```python
from remodel.models import Model
class Shirt(Model):
def after_init(self):
self.wash()
def wash(self):
print 'Gotta wash a shirt after creating it...'
```
or
```python
from remodel.models import Model, after_save
class Prize(Model):
@after_save
def brag(self):
print 'I just won a prize!'
```
### Custom table name
```python
class Child(Model):
table_name = 'kids'
print Child.table_name # prints 'kids'
```
### Custom model queries
```python
import rethinkdb as r
class Celebrity(Model):
pass
Celebrity.create(name='george clooney')
Celebrity.create(name='kate winslet')
upper = Celebrity.map({'name': r.row['name'].upcase()}).run()
print list(upper) # prints [{u'name': u'GEORGE CLOONEY'}, {u'name': u'KATE WINSLET'}]
```
### Custom instance methods
```python
class Child(Model):
def is_minor(self):
if 'age' in self:
return self['age'] < 18
jack = Child.create(name='Jack', age=15)
jack.is_minor() # prints True
```
### Custom class methods
```python
from remodel.object_handler import ObjectHandler, ObjectSet
class TripObjectHandler(ObjectHandler):
def in_europe(self):
return ObjectSet(self, self.query.filter({'continent': 'Europe'}))
class Trip(Model):
object_handler = TripObjectHandler
Trip.create(continent='Europe', city='Paris')
Trip.create(continent='Asia', city='Shanghai')
Trip.create(continent='Europe', city='Dublin')
print len(Trip.in_europe()) # prints 2
```
### Viewing object fields
```python
class Train(Model):
pass
train = Train.create(nr=12345, destination='Paris', has_restaurant=True, classes=[1, 2])
print train.fields.as_dict()
# prints {u'classes': [1, 2], u'nr': 12345, u'destination': u'Paris', u'has_restaurant': True, u'id': u'd9b8d57f-5d67-4ff7-acf8-cbf7fdd65581'}
```
## Concepts
### Relations
Remodel supports various types of relationships:
- has one
- belongs to
- has many
- has and belongs to many
- has many through
#### Defining relations
Related models are passed as tuples in a model's definition. All other aspects, such as foreign keys, indexes, lazy relation loading and relation cache are magically handled for you.
If you need precise definition for your related models, you can pass a configuration tuple instead of the string name of your related model:
```python
class Artist(Model):
has_many = (('Song', 'songs', 'id', 'song_id'), 'Concert')
# Tuple definition: (<related model name>, <related objects accessor field>, <model key>, <related model key>)
```
> One important thing to notice is that reverse relationships are **not automatically ensured** if only one end of the relationship is defined. This means that if ``Artist has_many Song``, ``Song belongs_to Artist`` is not automatically enforced unless explicitly defined.
#### Using relations
Assigning `has_one` and `belongs_to` objects doesn't mean that they are persisted. You need to manually call `save()` on them; assuming `Profile belongs_to User`:
```python
profile['user'] = User(...)
profile.save()
```
On the other side, assigning `has_many` and `has_and_belongs_to_many` objects automatically persist them, so there is no need for you to call `save()` on them; assuming `Shop has_many Product`:
```python
shop.add(product1, produt2)
# No need to call save() on products!
```
> Note that certain assignments of related objects can not be performed unless one (or both) of the objects is saved. You can not save a `GiftSize` with a `Gift` attached without saving the `Gift` object first (when having a `GiftSize belongs_to Gift`).
## Documentation
Can be found at https://github.com/linkyndy/remodel/wiki.
## Motivation
The main reason for Remodel's existence was the need of a light-weight ODM for RethinkDB, one that doesn't force you to ensure a document schema, one that provides a familiar interface and one that gracefully handles relations between models.
## Status
Remodel is under active development and it is not _yet_ production-ready.
## How to contribute?
Any contribution is **highly** appreciated! See [CONTRIBUTING.md](CONTRIBUTING.md) for more details.
## License
See [LICENSE](LICENSE)
| /remodel-1.0.0.tar.gz/remodel-1.0.0/README.md | 0.434941 | 0.826081 | README.md | pypi |
# REMoDNaV - Robust Eye Movement Detection for Natural Viewing
[](https://ci.appveyor.com/project/mih/remodnav/branch/master) [](https://codecov.io/github/psychoinformatics-de/remodnav?branch=master) [](https://opensource.org/licenses/MIT) [](https://GitHub.com/psychoinformatics-de/remodnav/releases/) [](https://pypi.python.org/pypi/remodnav/) [](https://zenodo.org/badge/latestdoi/147316247)
REMoDNaV is a velocity based eye movement event detection algorithm that is based on, but
extends the adaptive Nyström & Holmqvist algorithm (Nyström & Holmqvist, 2010).
It is built to be suitable for both static and dynamic stimulation, and is
capable of detecting saccades, post-saccadic oscillations, fixations, and smooth
pursuit events. REMoDNaV is especially suitable for data without a trial structure
and performs robustly on data with temporally varying noise level.
## Support
All bugs, concerns and enhancement requests for this software can be submitted here:
https://github.com/psychoinformatics-de/remodnav
If you have a problem or would like to ask a question about how to use REMoDNaV,
please [submit a question to
NeuroStars.org](https://neurostars.org/new-topic?body=-%20Please%20describe%20the%20problem.%0A-%20What%20steps%20will%20reproduce%20the%20problem%3F%0A-%20What%20version%20of%20REMoDNaV%20are%20you%20using%3F%20On%20what%20operating%20system%20%3F%0A-%20Please%20provide%20any%20additional%20information%20below.%0A-%20Have%20you%20had%20any%20luck%20using%20REMoDNaV%20before%3F%20%28Sometimes%20we%20get%20tired%20of%20reading%20bug%20reports%20all%20day%20and%20a%20lil'%20positive%20end%20note%20does%20wonders%29&tags=remodnav)
with a ``remodnav`` tag. NeuroStars.org is a platform similar to StackOverflow
but dedicated to neuroinformatics.
Any previous REMoDNaV questions can be found here:
http://neurostars.org/tags/remodnav/
## Installation via pip
Install the latest version of `remodnav` from
[PyPi](https://pypi.org/project/remodnav). It is recommended to use
a dedicated [virtualenv](https://virtualenv.pypa.io):
# create and enter a new virtual environment (optional)
virtualenv --python=python3 ~/env/remodnav
. ~/env/remodnav/bin/activate
# install from PyPi
pip install remodnav
## Example usage
**required (positional) arguments:**
REMoDNaV is easiest to use from the command line.
To get REMoDNaV up and running, supply the following required information in a
command line call:
- ``infile``: Data file with eye gaze recordings to process. The first two columns
in this file must contain x and y coordinates, while each line is a timepoint
(no header). The file is read with NumPy's ``recfromcsv`` and may be compressed.
The columns are expected to be seperated by tabulators (``\t``).
- ``outfile``: Output file name. This file will contain information on all detected
eye movement events in BIDS events.tsv format.
- ``px2deg``: Factor to convert pixel coordinates to visual degrees, i.e. the visual
angle of a single pixel. Pixels are assumed to be square. This will typically be a
rather small value.
Note: you can compute this factor from *screensize*,
*viewing distance* and *screen resolution* with the following formula:
``degrees(atan2(.5 * screen_size, viewing_distance)) / (.5 * screen_resolution)``
- ``sampling rate``: Sampling rate of the data in Hertz. Only data with dense regular
sampling are supported.
Exemplary command line call:
remodnav "inputs/raw_eyegaze/sub-01/ses-movie/func/sub-01_ses-movie_task-movie_run-1_recording-eyegaze_physio.tsv.gz" \
"sub-01/sub-01_task-movie_run-1_events.tsv" 0.0185581232561 1000.0
**optional parameters:**
REMoDNaV comes with many configurable parameters. These parameters have sensible default values,
but they can be changed by the user within the command line call.
Further descriptions of these parameters can be found in the corresponding [publication](https://link.springer.com/article/10.3758/s13428-020-01428-x).
| Parameter | Unit | Description |
| -------------------------- | ------ | ---------------------------------------------------------------------------------------- |
| ``--min-blink-duration``| sec | missing data windows shorter than this duration will not be considered for ``dilate nan``|
| ``--dilate-nan``| sec | duration for which to replace data by missing data markers on either side of a signal-loss window. |
| ``--median-filter-length``| sec | smoothing median-filter size (for initial data chunking only).|
| ``--savgol-length``| sec | size of Savitzky-Golay filter for noise reduction. |
| ``--savgol-polyord``| | polynomial order of Savitzky-Golay filter for noise reduction. |
| ``--max-vel``| deg/sec | maximum velocity threshold, will issue warning if exceeded to inform about potentially inappropriate filter settings. |
| ``--min-saccade_duration``| sec | minimum duration of a saccade event candidate. |
| ``--max-pso_duration``| sec | maximum duration of a post-saccadic oscillation (glissade) candidate. |
| ``--min-fixation_duration``| sec | minimum duration of a fixation event candidate. |
| ``--min-pursuit_duration``| sec | minimum duration of a pursuit event candidate. |
| ``--min-intersaccade_duration``| sec | no saccade detection is performed in windows shorter than twice this value, plus minimum saccade and PSO duration. |
| ``--noise-factor`` | | adaptive saccade onset threshold velocity is the median absolute deviation of velocities in the window of interest, times this factor (peak velocity threshold is twice the onset velocity); increase for noisy data to reduce false positives (Nyström and Holmqvist, 2010, equivalent: 3.0). |
| ``--velthresh-startvelocity``| deg/sec | start value for adaptive velocity threshold algorithm (Nyström and Holmqvist, 2010), should be larger than any conceivable minimum saccade velocity. |
| ``--max-initial-saccade-freq``| Hz | maximum saccade frequency for initial detection of major saccades, initial data chunking is stopped if this frequency is reached (should be smaller than an expected (natural) saccade frequency in a particular context).|
| ``--saccade-context-window-length``| sec | size of a window centered on any velocity peak for adaptive determination of saccade velocity thresholds (for initial data chunking only). |
| ``--lowpass-cutoff-freq``| Hz | cut-off frequency of a Butterworth low-pass filter applied to determine drift velocities in a pursuit event candidate. |
| ``--pursuit-velthresh``| deg/sec | fixed drift velocity threshold to distinguish periods of pursuit from periods of fixation. |
Thus, to change the default value of any parameter(s), it is sufficient to include the parameter(s) and
the desired value(s) into the command line call:
remodnav "inputs/raw_eyegaze/sub-01/ses-movie/func/sub-01_ses-movie_task-movie_run-1_recording-eyegaze_physio.tsv.gz" \
"sub-01/sub-01_task-movie_run-1_events.tsv" 0.0185581232561 1000.0 --min-blink-duration 0.05
## Citation
Dar, A. H., Wagner, A. S. & Hanke, M. (2019). [REMoDNaV: Robust Eye Movement Detection for Natural Viewing](https://doi.org/10.1101/619254). *bioRxiv*. DOI: ``10.1101/619254``
*(first two authors contributed equally)*
## License
MIT/Expat
## Contributing
Contributions in the form of issue reports, bug fixes, feature extensions are always
welcome.
## References
Nyström, M., & Holmqvist, K. (2010). [An adaptive algorithm for fixation, saccade, and
glissade detection in eyetracking data](https://doi.org/10.3758/BRM.42.1.188).
Behavior research methods, 42(1), 188-204. DOI: ``10.3758/BRM.42.1.188``
| /remodnav-1.1.tar.gz/remodnav-1.1/README.md | 0.732496 | 0.753376 | README.md | pypi |
try:
from accpac import *
except ImportError:
pass
from remote_actions import get_logger
from remote_actions.handlers import FormHandler
from remote_actions.pollers.errors import (
PollerWorkflowSaveError,
PollerValidationError, )
class WorkflowApprovalFormHandler(FormHandler):
"""Handle a workflow approval form.
This handler validates that a workflow approval form has
all the required fields and progresses to the next step based
on the user action.
:param form: the form to handle.
:type form: dict
"""
type = 'workflow_approval'
def __init__(self, form):
super(WorkflowApprovalFormHandler, self).__init__(form)
self.log = get_logger("WorkflowApprovalFormHandler")
self.app = None
self.wiid = None
self.steps = None
self.stepname = None
self.result_action = None
self._validated = False
def cleanup(self):
pass
def validate(self):
"""Validate a completed workflow approval self.form.
Sets:
- self.stepname: next workflow step
- self.wiid: workflow instance id
- self.app: app parameters for this form
:returns: validated form
:rtype: dict
:raises: PollerValidationError
"""
self.app = self.form.get('app', {})
self.wiid = self.app.get('wiid')
# forms must have a valid workflow instance id
if not self.wiid:
raise PollerValidationError(
"action {} has no workflow ID set.".format(
self.form.get('code', 'unset')))
# and a mapping of form actions to steps.
self.steps = self.app.get('steps', {})
if not self.steps:
raise PollerValidationError(
"action {} has no steps set.".format(
self.form.get('code', 'unset')))
# an action must have been performed and recorded (i.e. Approve)
self.result_action = self.form.get('result', {}).pop('action', None)
if not self.result_action:
raise PollerValidationError(
"No form actions for remote action {}".format(
self.form.get('code', 'unset')))
# there must be a step to progress to for that action.
self.stepname = self.steps.get(self.result_action)
if not self.stepname:
raise PollerValidationError(
"action {} has no stepname for {}.".format(
self.form.get('code', 'unset'),
self.result_action))
# all is well.
self._validated = True
return self.form
def apply(self):
"""Apply a workflow validation form.
Applies a workflow validation form by setting the result
values in the workflow instance values and progressing the
workflow to the next step.
:returns: True
:raises: PollerError
"""
if not self._validated:
raise PollerValidationError(".validate() must be called on the "
"form before .apply().")
# Validate sets self.app, self.wiid, self.stepname
wi = WorkflowInstance()
_r = wi.loadInstance(self.wiid)
if _r != 0:
raise PollerValidationError(
"failed to load Workflow Instance {}".format(self.wiid))
# Copy all keys from the result into the workflow values.
result = self.form.get('result', {})
self.log.debug("[{}] - result: {}.".format(self.form['code'], result))
for (key, value) in result.items():
wi.setValue(key, value)
# If the RUNUSER result key is set, use it to change the
# user executing the action.
runuser = result.get("RUNUSER", user)
wi.viworkih.put("RUNUSER", runuser)
upd = wi.viworkih.update()
if upd != 0:
self.log.error("[{}] - error setting RUNUSER field to {} in "
"workflow header. .update() returned '{}'.".format(
self.form['code'], runuser, upd))
# Progress the workflow to the next step.
self.log.info('[{}] - progressing WIID {} to STEP {} as USER {}.'.format(
self.form['code'], self.wiid, self.stepname, runuser))
r = wi.progressTo(self.stepname)
if r != 0:
raise PollerWorkflowSaveError(
"Failed to progress to STEP {} for WIID {}".format(
self.stepname, self.wiid))
# First call succeeds only if the workflow has no params
if wi.save() == 0:
return True
elif wi.parameters != None:
# Secondcall succeeds for workflows with params
if wi.save == 0:
return True
# In any other case, the save has truly failed.
raise PollerWorkflowSaveError(
"Failed to save WIID {} after progress to STEP {}".format(
self.wiid, self.stepname,)) | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/handlers/workflow_approval.py | 0.686685 | 0.178562 | workflow_approval.py | pypi |
try:
from accpac import *
except ImportError:
pass
import base64
from remote_actions.services.fleeting_forms import (
create_workflow_approval_form, )
from remote_actions import (
resolve_users,
render_title_and_content_for,
parse_action_parameter,
get_logger, )
VERSION = '6.0.18'
form_controls = [
{
'name': 'RUNUSER',
'type': 'text',
'label': 'Sage User',
'required': True,
'disabled': True
},
{
'name': 'APPROVALAMOUNT',
'type': 'float',
'label': 'Amount',
'help_text': 'Enter the amount to approve',
'required': True,
},
{
'name': 'APPROVALCOMMENT',
'type': 'text',
'label': 'Comments',
'help_text': 'Enter your comment',
'required': True,
}
]
def workflow(e):
"""Execute the workflow step.
This function is invoked by the workflow engine. It is called
with ``accpac.WorkflowArgs`` and must return ``0`` on success and
``1`` on failed.
:param e: the workflow arguments for this action.
:type e: ``accpac.WorkflowArgs``
:returns: 0/1
:rtype: int
"""
wiid = e.wi.viworkih.get("WIID")
if not wiid:
error("Failed to get workflow ID.")
return 1
logger = get_logger(
"SendRemoteActionCommentAmountFormEmail wiid({})".format(
wiid))
# Parse the actions from P4 into a { label: nextstep, } data structure
action_param = e.resolve(e.p4)
try:
actions = parse_action_parameter(action_param)
except (IndexError, ValueError):
showMessageBox("The actions (P4) must be a comma-separated list "
"of label=nextstep pairs, "
"eg. 'Approve=Approved+RTP,Reject=Rejected'" )
logger.exception("P4 invalid {}".format(action_param))
return 1
# Create the form, setting the initial value for the approval amount
try:
title, content = render_title_and_content_for(e.resolve(e.p3), e)
approval_amount = e.resolve("{TOVALUE}").replace(",", "")
form = create_workflow_approval_form(
e.wi.viworkih.get("WIID"),
form_controls,
title[:120],
content[:5000],
actions,
APPROVALAMOUNT=approval_amount)
except Exception as exc:
showMessageBox("Failed to create approval form: {}".format(exc))
logger.exception("failed to create form: {}".format(exc))
return 1
# Get the url for the form.
url = form.get('url')
if not url:
error("Unable to get approval form URL.")
return 1
# And set it in the workflow for troubleshooting and posterity
e.wi.setValue("FORMURL", url)
# Resolve all users, groups, and emails from P2
users = resolve_users(e.resolve(e.p2))
# For each user identified, send an email with a custom link that sets
# RUNUSER.
sent_emails = 0
for (username, email_address) in users:
email = Email()
email.setTo(email_address)
email_template = e.resolve(e.p1)
if not email.load(email_template):
error("Unable to load message template {}.".format(email_template))
return 1
# Build a custom URL for the user that defaults the runuser field.
b64_username = base64.urlsafe_b64encode(username.encode())
user_url = "{}?RUNUSER=b64:{}&".format(url, b64_username.decode())
# And interpolate it into the template
email.replace("FORMURL", user_url)
# Do all the remaining interpolation for Workflow, View, and Globals
# to build the subject and body.
email.replace("", e.wi.getView())
email.setSubject(ReplaceFields(e.resolve(email.subject)))
if email.textBody != None:
email.setText(ReplaceFields(e.resolve(email.textBody)))
if email.htmlBody != None:
email.setHtml(ReplaceFields(e.resolve(email.htmlBody)))
logger.debug("sending email {} to {} with url {} for wiid {}.".format(
email_template, email_address, user_url, wiid))
# Send the email.
if email.send() == 0:
sent_emails += 1
else:
logger.error("failed to send email to {}.".format(email_address))
# If at least one email has been sent, the action is considered successful
# see https://bitbucket.org/cbinckly/remote_actions/issues/6
if not sent_emails:
logger.error("no emails sent successfully.")
error("Could not send any emails. "
"Sending approval form email step failed. "
"Check the remote actions log for more details.")
return 1
else:
if sent_emails < len(users):
message = "{} of {} emails failed to send.".format(
len(users) - sent_emails, len(users))
warning(message)
logger.warn(message)
# Success.
return 0 | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/REMOTEACTION/workflow/SendRemoteActionCommentAmountFormEmail.py | 0.542136 | 0.171581 | SendRemoteActionCommentAmountFormEmail.py | pypi |
try:
from accpac import *
except ImportError:
pass
import base64
from remote_actions.services.fleeting_forms import (
create_workflow_approval_form, )
from remote_actions import (
resolve_users,
render_title_and_content_for,
parse_action_parameter,
get_logger, )
VERSION = '6.0.18'
form_controls = [
{
'name': 'RUNUSER',
'type': 'text',
'label': 'Sage User',
'required': True,
'disabled': True
},
{
'name': 'APPROVALCOMMENT',
'type': 'text',
'label': 'Comments',
'help_text': 'Enter your comment',
'required': True,
}
]
def workflow(e):
"""Execute the workflow step.
This function is invoked by the workflow engine. It is called
with ``accpac.WorkflowArgs`` and must return ``0`` on success and
``1`` on failed.
:param e: the workflow arguments for this action.
:type e: ``accpac.WorkflowArgs``
:returns: 0/1
:rtype: int
"""
wiid = e.wi.viworkih.get("WIID")
if not wiid:
error("Failed to get workflow ID.")
return 1
logger = get_logger("SendRemoteActionCommentFormEmail wiid({})".format(
wiid))
# Parse the actions from P4 into a { label: nextstep, } data structure
action_param = e.resolve(e.p4)
try:
actions = parse_action_parameter(action_param)
except (IndexError, ValueError):
showMessageBox("The actions (P4) must be a comma-separated list "
"of label=nextstep pairs, "
"eg. 'Approve=Approved+RTP,Reject=Rejected'" )
logger.exception("P4 invalid {}".format(action_param))
return 1
# Create the form, setting the initial value for the credit limit.
try:
title, content = render_title_and_content_for(e.resolve(e.p3), e)
form = create_workflow_approval_form(
e.wi.viworkih.get("WIID"),
form_controls,
title[:120],
content[:5000],
actions, )
except Exception as exc:
showMessageBox("Failed to create approval form: {}".format(exc))
logger.exception("failed to create form: {}".format(exc))
return 1
# Get the url for the form.
url = form.get('url')
if not url:
error("Unable to get approval form URL.")
return 1
# And set it in the workflow for troubleshooting and posterity
e.wi.setValue("FORMURL", url)
# Resolve all users, groups, and emails from P2
users = resolve_users(e.resolve(e.p2))
# For each user identified, send an email with a custom link that sets
# RUNUSER.
sent_emails = 0
for (username, email_address) in users:
email = Email()
email.setTo(email_address)
email_template = e.resolve(e.p1)
if not email.load(email_template):
error("Unable to load message template {}.".format(email_template))
return 1
# Build a custom URL for the user that defaults the runuser field.
b64_username = base64.urlsafe_b64encode(username.encode())
user_url = "{}?RUNUSER=b64:{}&".format(url, b64_username.decode())
# And interpolate it into the template
email.replace("FORMURL", user_url)
# Do all the remaining interpolation for Workflow, View, and Globals
# to build the subject and body.
email.replace("", e.wi.getView())
email.setSubject(ReplaceFields(e.resolve(email.subject)))
if email.textBody != None:
email.setText(ReplaceFields(e.resolve(email.textBody)))
if email.htmlBody != None:
email.setHtml(ReplaceFields(e.resolve(email.htmlBody)))
logger.debug("sending email {} to {} with url {} for wiid {}.".format(
email_template, email_address, user_url, wiid))
# Send the email.
if email.send() == 0:
sent_emails += 1
else:
logger.error("failed to send email to {}.".format(email_address))
# If at least one email has been sent, the action is considered successful
# see https://bitbucket.org/cbinckly/remote_actions/issues/6
if not sent_emails:
logger.error("no emails sent successfully.")
error("Could not send any emails. "
"Sending approval form email step failed. "
"Check the remote actions log for more details.")
return 1
else:
if sent_emails < len(users):
message = "{} of {} emails failed to send.".format(
len(users) - sent_emails, len(users))
warning(message)
logger.warn(message)
# Success.
return 0 | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/REMOTEACTION/workflow/SendRemoteActionCommentFormEmail.py | 0.487795 | 0.186354 | SendRemoteActionCommentFormEmail.py | pypi |
try:
from accpac import *
except ImportError:
pass
import base64
from remote_actions.services.fleeting_forms import (
create_workflow_approval_form, )
from remote_actions import (
resolve_users,
render_title_and_content_for,
parse_action_parameter,
get_logger, )
VERSION = '6.0.18'
form_controls = [
{
'name': 'RUNUSER',
'type': 'text',
'label': 'Sage User',
'required': True,
'disabled': True
},
]
def workflow(e):
"""Execute the workflow step.
This function is invoked by the workflow engine. It is called
with ``accpac.WorkflowArgs`` and must return ``0`` on success and
``1`` on failed.
:param e: the workflow arguments for this action.
:type e: ``accpac.WorkflowArgs``
:returns: 0/1
:rtype: int
"""
wiid = e.wi.viworkih.get("WIID")
if not wiid:
error("Failed to get workflow ID.")
return 1
logger = get_logger("SendRemoteActionFormEmail wiid({})".format(wiid))
# Parse the actions from P4 into a { label: nextstep, } data structure
action_param = e.resolve(e.p4)
try:
actions = parse_action_parameter(action_param)
except (IndexError, ValueError):
showMessageBox("The actions (P4) must be a comma-separated list "
"of label=nextstep pairs, "
"eg. 'Approve=Approved+RTP,Reject=Rejected'" )
logger.exception("P4 invalid {}".format(action_param))
return 1
# Create the form, setting the initial value for the credit limit.
try:
title, content = render_title_and_content_for(e.resolve(e.p3), e)
form = create_workflow_approval_form(
wiid,
form_controls,
title[:120],
content[:5000],
actions, )
except Exception as exc:
showMessageBox("Failed to create approval form: {}".format(exc))
logger.exception("failed to create form: {}".format(exc))
return 1
# Get the url for the form.
url = form.get('url')
if not url:
error("Unable to get approval form URL.")
logger.error("failed to get approval form url from {}.".format(form))
return 1
# And set it in the workflow for troubleshooting and posterity
e.wi.setValue("FORMURL", url)
# Resolve all users, groups, and emails from P2
users = resolve_users(e.resolve(e.p2))
# For each user identified, send an email with a custom link that sets
# RUNUSER.
sent_emails = 0
for (username, email_address) in users:
email = Email()
email.setTo(email_address)
email_template = e.resolve(e.p1)
if not email.load(email_template):
error("Unable to load message template {}.".format(email_template))
return 1
# Build a custom URL for the user that defaults the runuser field.
b64_username = base64.urlsafe_b64encode(username.encode())
user_url = "{}?RUNUSER=b64:{}&".format(url, b64_username.decode())
# And interpolate it into the template
email.replace("FORMURL", user_url)
# Do all the remaining interpolation for Workflow, View, and Globals
# to build the subject and body.
email.replace("", e.wi.getView())
email.setSubject(ReplaceFields(e.resolve(email.subject)))
if email.textBody != None:
email.setText(ReplaceFields(e.resolve(email.textBody)))
if email.htmlBody != None:
email.setHtml(ReplaceFields(e.resolve(email.htmlBody)))
logger.debug("sending email {} to {} with url {} for wiid {}.".format(
email_template, email_address, user_url, wiid))
# Send the email.
if email.send() == 0:
sent_emails += 1
else:
logger.error("failed to send email to {}.".format(email_address))
# If at least one email has been sent, the action is considered successful
# see https://bitbucket.org/cbinckly/remote_actions/issues/6
if not sent_emails:
logger.error("no emails sent successfully.")
error("Could not send any emails. "
"Sending approval form email step failed. "
"Check the remote actions log for more details.")
return 1
else:
if sent_emails < len(users):
message = "{} of {} emails failed to send.".format(
len(users) - sent_emails, len(users))
warning(message)
logger.warn(message)
# Success.
return 0 | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/REMOTEACTION/workflow/SendRemoteActionFormEmail.py | 0.548674 | 0.2194 | SendRemoteActionFormEmail.py | pypi |
from accpac import *
from pathlib import Path
REPORT_WAIT_RETRIES = 5
REPORT_WAIT_SLEEP = 2
def workflow(e):
"""Execute the workflow step.
This function is invoked by the workflow engine. It generates a new A/P
Invoice Batch Listing and sets the ARINVRPTPTH workflow variable.
:param e: the workflow arguments for this action.
:type e: ``accpac.WorkflowArgs``
:returns: 0/1
:rtype: int
"""
batch_number = e.wi.getViewKey()
filename = "{}-AP_Invoice_Batch_Listing.pdf".format(batch_number)
report_dir = Path(getOrgPath(), "ap_invoices")
if not report_dir.exists():
report_dir.mkdir()
report_path = report_dir / filename
if generate_ap_invoice_batch_report(report_path, batch_number):
e.wi.setValue("APINVRPTPATH", str(report_path))
return 0
return 1
def generate_ap_invoice_batch_report(report_path, cntbtch):
"""Generate an A/P Batch Listing Report.
:param report_path: Path to write the report file to.
:type report_path: pathlib.Path or str
:returns: report path on success, else ""
:rtype: str
"""
report = Report()
report.reportName = "APIBTCLZ"
report.destination = "file"
report.printDirectory = str(report_path)
try:
report.setParameter("FROMBATCH", cntbtch)
report.setParameter("TOBATCH", cntbtch)
report.setParameter("FROMDATE", "19990101")
report.setParameter("TODATE", "20510311")
report.setParameter("SHOWJOB", "1")
report.setParameter("TAXDETAIL", "1")
report.setParameter("SCHED", "Y")
report.setParameter("SHOWCMTS", "1")
report.setParameter("SWRET", "1")
report.setParameter("RETDETAIL", "1")
report.setParameter("INCLPRNDBTCH", "1")
report.setParameter("ENTERED", "1")
report.setParameter("IMPORTED", "2")
report.setParameter("GENERATED", "3")
report.setParameter("RECURRING", "4")
report.setParameter("EXTERNAL", "5")
report.setParameter("RETAINAGE", "6")
report.setParameter("OPEN", "1")
report.setParameter("READYPOST", "7")
report.setParameter("POSTED", "3")
report.setParameter("BATCHTYPE", "Entered, Imported, Generated, Recurring, External, Retainage")
report.setParameter("BATCHSTATUS", "Open, Ready To Post, Posted")
report.setParameter("FCURNDEC", "2")
report.setParameter("MULTCURN", "N")
report.setParameter("SWPMACTIVE", "1")
report.setParameter("CONTRACT", "Contract")
report.setParameter("PROJECT", "Project")
report.setParameter("CATEGORY", "Category")
report.setParameter("OPTFLDS?", "Y")
report.setParameter("SHOWRCWHT", "1")
report.print(None)
except Exception as err:
_debug("Report generation exception: {}".format(err))
return ""
tries = 0
while tries < REPORT_WAIT_RETRIES:
if report_path.exists():
return report_path
tries += 1
time.sleep(REPORT_WAIT_SLEEP)
return "" | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/REMOTEACTION/workflow/GenerateAPInvoiceBatchReport.py | 0.617397 | 0.166675 | GenerateAPInvoiceBatchReport.py | pypi |
try:
from accpac import *
except ImportError:
pass
from pathlib import Path
from remote_actions import (resolve_users, get_logger, )
from remote_actions.services.adobe_sign import (
AdobeSignClient,
add_workflow_agreement,
add_agreement_docs)
VERSION = '6.0.18'
def parse_action_parameter(value):
"""Parse the action parameter (P4) of a workflow action.
P4 for the workflow actions defines the button labels and progress to steps
in a separated key value pair string format::
<label>=<next step>,<next step>;<label>=<next step>;
Approve=Approved1,Approved+RTP;Rejected=Rejected
:param value: the action parameter as input into the template.
:type value: string
:returns: label to next step name mappings
:rtype: collections.OrderedDict
"""
actions = {}
steps = value.split(';')
for step in steps:
label, next_step = step.split('=')
if not next_step:
raise ValueError("The step to proceed to must be set.")
if ',' in next_step:
next_step = next_step.split(',')
actions[label] = next_step
return actions
def workflow(e):
"""Execute the workflow step.
This function is invoked by the workflow engine. It takes all the steps
required to start a new Adobe Sign Agreement:
1. Upload the required documents.
2. Create new agreement.
After the agreement is created Adobe Sign will automatically notify
signers.
:param e: the workflow arguments for this action.
:type e: ``accpac.WorkflowArgs``
:returns: 0/1
:rtype: int
"""
wiid = e.wi.viworkih.get("WIID")
if not wiid:
error("Failed to get workflow ID.")
return 1
logger = get_logger("CreateAdobeSignAgreement wiid({})".format(wiid))
# Parse the actions from P4 into a { label: nextstep, } data structure
action_param = e.resolve(e.p4)
try:
actions = parse_action_parameter(action_param)
except (IndexError, ValueError):
showMessageBox("The actions (P4) must be a ;-separated list "
"of label=nextstep pairs, e.g."
"'Approve=Approved+RTP;Rejected=Rejected;Error=Error'")
logger.exception("P4 invalid {}".format(action_param))
return 1
logger.info("parsed actions {} from {}".format(actions, action_param))
# Get a new client and setup the token.
ac = AdobeSignClient()
if not ac.setup_token():
showMessageBox("Failed to setup connection. Check the Adobe Sign "
"Connect utility in Remote Actions.")
logger.error("failed to setup connection.")
return 1
# Upload the documents
doc_param = e.resolve(e.p1)
docs = [Path(d) for d in doc_param.split(",")]
tdids = []
for doc in docs:
logger.debug("Uploading document {}.".format(doc))
tdids.append(ac.upload_document(doc))
# Create the agreement
users = resolve_users(e.resolve(e.p2))
user_emails = [email for _, email in users if email]
logger.debug("Creating new agreement '{}' for docs {} and "
"signers {}".format(
e.resolve(e.p3),
", ".join([t[0:10] for t in tdids]),
", ".join(user_emails)))
aid = ac.create_agreement(e.resolve(e.p3), tdids, user_emails,
externalId={"id": wiid}, )
signed_action = actions.get("Signed")
if isinstance(signed_action, list):
signed_action = ",".join(signed_action)
add_workflow_agreement(
wiid, aid, signed_action,
actions.get('Rejected', ''), actions.get('Error', ""))
add_agreement_docs(aid, tdids)
logger.debug("Created new agreement {}.".format(aid))
return 0 | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/REMOTEACTION/workflow/CreateAdobeSignAgreement.py | 0.752922 | 0.335895 | CreateAdobeSignAgreement.py | pypi |
try:
from accpac import *
except ImportError:
pass
from datetime import datetime, timedelta
from pathlib import Path
import requests
from remote_actions import get_token
def create_form(type_, wiid, form_controls,
title, content, actions, **initials):
"""Create a form.
Any arguments passed in the keyword arguments (``initials``) will
be treated as initial values for fields of that name.
For example, if a form contained a field name ``APPROVALCOMMENT``,
it can be defaulted to "Comments are required" by using the following
call to create_form::
create_form(wiid, title, content,
APPROVALCOMMENT="Comments are required.")
:param type_: the form type, determines the handler used by the poller.
:type type_: str
:param wiid: workflow instance ID
:type wiid: int
:param form_controls: list of control definitions for the form.
:type form_controls: dict
:param title: the title to display above the form
:type title: str
:param content: the instructions to display above the form.
:type content: str
:param actions: a map of button labels to next steps.
:type actions: { label: stepname, label2: stepname2, ...}
:param initials: key value pairs of initial field values.
:type initials: ``str=object``
:returns: form dictionary
:rtype: dict
:raises Exception: on API create failure.
"""
app = {
'wiid': wiid,
'steps': actions,
'type': type_,
'org': org,
}
template = {
'title': title,
'content': content,
'actions': [{'label': a} for a in actions.keys()],
'form_controls': form_controls,
}
for field in template['form_controls']:
if field['name'] in initials:
field['initial'] = initials[field['name']]
return FormClient().create(template=template, app=app)
def create_workflow_approval_form(wiid, form_controls, title, content,
actions, **initials):
"""Create a workflow approval form.
Any arguments passed in the keyword arguments (``initials``) will
be treated as initial values for fields of that name.
For example, if a form contained a field name ``APPROVALCOMMENT``,
it can be defaulted to "Comments are required" by using the following
call to create_form::
create_form(wiid, title, content,
APPROVALCOMMENT="Comments are required.")
:param wiid: workflow instance ID
:type wiid: int
:param form_controls: list of control definitions for the form.
:type form_controls: dict
:param title: the title to display above the form
:type title: str
:param content: the instructions to display above the form.
:type content: str
:param actions: a map of button labels to next steps.
:type actions: { label: stepname, label2: stepname2, ...}
:param initials: key value pairs of initial field values.
:type initials: ``str=object``
:returns: form dictionary
:rtype: dict
:raises Exception: on API create failure.
"""
return create_form('workflow_approval', wiid, form_controls,
title, content, actions, **initials)
class FleetingClientBadRequestError(Exception):
def __init__(self, message, response):
self.message = message
self.response = response
self._responses = []
def response_to_str(self, response, parent_key=""):
if not isinstance(response, dict):
return response
# Find leaves
for key, value in response.items():
if not parent_key:
local_key = key
else:
local_key = ".".join([parent_key, key])
if isinstance(value, dict):
self.response_to_str(value, local_key)
else:
self._responses.append("{}: {}".format(local_key, value))
return self._responses
def __str__(self):
# collect keys
return "The API request was invalid:\n{}".format(
"\n".join(self._responses))
class FleetingClient():
"""Abstract base class for all Fleeting API clients.
Handles authentication and URL generation, which is common to all
clients.
:param token: override the company token in the database
:type token: str (UUID4 format)
"""
API_ROOT = "https://fleetingforms.io"
MODEL_ROOT = "forms"
TRAILING_SLASH = True
def __init__(self, token=None):
self.__token = token
@property
def headers(self):
"""Headers for authentication to a namespace.
Make it easy to get namespace authentication headers for this client.
:returns: authentication headers for use with requests.
:rtype: dict
"""
return {'X-FLEETING-TOKEN': self.token}
@property
def token(self):
"""Get the token."""
if not self.__token:
self.__token = get_token()
return self.__token
def url_for(self, action='create', _id=None):
"""Get the URL for an action type.
Supported actions are ``create``, ``retrieve``, ``list``, ``delete``.
:param action: the action the url is required form.
:type action: str
:param _id: the id of the form to retrieve or delete.
:type _id: int
:returns: url for the action and _id.
:rtype: str
:raises Exception: Unsupported action if action not supported.
"""
if action in ['create', 'list']:
url = "/".join([self.API_ROOT, self.MODEL_ROOT])
elif action in ['get', 'delete']:
url ="/".join([self.API_ROOT, self.MODEL_ROOT, str(_id)])
else:
raise Exception("Unsupported URL action: {}".format(action))
if self.TRAILING_SLASH:
url += "/"
return url
class FormClient(FleetingClient):
"""The form client class is used to interact with the fleetingforms.io api.
:param namespace_token: the unique token for the user namespace
:type token: str (uuid4 format)
The client supports standard ReSTful actions against the API. To walk
through the lifecycle of a form as seen from the api:
.. code-block:: python
# Instantiate a new client
client = FormClient()
# And define a minimalist form with two buttons.
form_template = {
'title': 'Approval Request for More Eggs',
'content': 'Can we buy more eggs?',
'form_controls': [],
'actions': [{'label': 'Yes!'}, {'label': 'No.'}],
}
# Create the form using a POST request to the API.
form = client.create(form_template)
_id = form['id']
# print an integer ID unique to the form
print(form['id'])
# print the unique URL for the form
print(form['url'])
# Retrieve the form to see if the form has been opened
form = client.get(_id)
# Check to see if the opened_on field has a datetime
if form['opened_on']:
print("The form was opened on {}".format(form['opened_on']))
# Get a list of all the forms defined in the namespace:
forms = client.list()
for form in forms:
print("Form {} at URL {}".format(form['_id'], form['url']))
# Delete a form
deleted = client.delete(_id)
"""
MODEL_ROOT = "forms"
def create(self, template={}, auth={}, app={}):
"""Create a new form.
:param template: the form template.
:type template: dict
:param auth: the form authentication parameters.
:type auth: dict
:param app: the form appentication parameters.
:type app: dict
:returns: form dictionary
:rtype: dict
:raises Exception: API failure.
"""
payload = {
'template': template,
'auth': auth,
'app': app,
}
try:
resp = requests.post(self.url_for('create'),
json=payload,
headers=self.headers)
if resp.status_code == 201:
rj = resp.json()
elif resp.status_code == 400:
raise FleetingClientBadRequestError(
"Invalid HTTP request.",
resp.json())
else:
raise Exception("HTTP create request status {}".format(
resp.status_code))
except Exception as e:
raise
return rj
def get(self, _id):
"""Retrieve a specific form from the service.
:param _id: the ``id`` of the form to retrieve
:type _id: int
:returns: form dictionary
:rtype: dict
:raises Exception: API failure.
"""
try:
resp = requests.get(self.url_for('get', _id), headers=self.headers)
if resp.status_code == 200:
return resp.json()
except Exception as e:
pass
return None
def list(self, status=None, offset=0, limit=20, simple=False, _all=False):
"""List all the forms in this namespace.
:param status: get forms with status.
:type status: str
:param offset: get forms offset from beginning.
:type offset: int
:param limit: max number of forms.
:type limit: int
:param simple: use the simple serializer?
:type simple: bool
:param _all: Get all results from offset to end, ignoring limit.
:type _all: bool
:returns: a list of form dictionaries
:rtype: [{'id': 1, }, ...]
:raises Exception: API failure.
"""
params = {}
if status:
params['status'] = status
if simple:
params['simple'] = simple
limit = min([limit, 20])
params['limit'] = limit
results = []
while True:
params['offset'] = offset + len(results)
try:
resp = requests.get(
self.url_for('list'),
headers=self.headers,
params=params)
if resp.status_code == 200:
result = resp.json()
if not _all:
return result
results.extend(result)
if len(result) < limit:
return results
else:
break
except Exception as e:
raise
return results
def delete(self, _id):
"""Delete a form from the service.
:param _id: the ``id`` of the form to delete
:type _id: int
:returns: True if deleted, else False
:rtype: bool
:raises Exception: API failure.
"""
try:
resp = requests.delete(self.url_for('delete', _id),
headers=self.headers)
if resp.status_code == 204:
return True
else:
raise Exception("Failed to delete: {}".format(resp.text))
except Exception as e:
raise Exception("Failed to delete: {}".format(e))
return False
class NamespaceClient(FleetingClient):
"""The namespace client is used to interact with the fleetingforms.io api.
:param namespace_token: the unique token for the user namespace
:type token: str (uuid4 format)
The client supports standard ReSTful actions against the API.
.. code-block:: python
# Instantiate a new client
client = FormClient()
# And define a minimalist form with two buttons.
form_template = {
'title': 'Approval Request for More Eggs',
'content': 'Can we buy more eggs?',
'form_controls': [],
'actions': [{'label': 'Yes!'}, {'label': 'No.'}],
}
# Create the form using a POST request to the API.
form = client.create(form_template)
_id = form['id']
# print an integer ID unique to the form
print(form['id'])
# print the unique URL for the form
print(form['url'])
# Retrieve the form to see if the form has been opened
form = client.get(_id)
# Check to see if the opened_on field has a datetime
if form['opened_on']:
print("The form was opened on {}".format(form['opened_on']))
# Get a list of all the forms defined in the namespace:
forms = client.list()
for form in forms:
print("Form {} at URL {}".format(form['_id'], form['url']))
# Delete a form
deleted = client.delete(_id)
"""
MODEL_ROOT = "namespaces"
def get(self, _id):
"""Retrieve a specific form from the service.
:param _id: the ``id`` of the form to retrieve
:type _id: int
:returns: form dictionary
:rtype: dict
:raises Exception: API failure.
"""
try:
resp = requests.get(self.url_for('get', _id), headers=self.headers)
if resp.status_code == 200:
return resp.json()
except Exception as e:
pass
return None
def list(self):
"""List all the namespaces belonging to this token.
:returns: a list of form dictionaries
:rtype: [{'id': 1, }, ...]
:raises Exception: API failure.
"""
try:
resp = requests.get(self.url_for('list'), headers=self.headers)
if resp.status_code == 200:
return resp.json()
except Exception as e:
raise
return []
def update(self, _id, data={}):
"""Update a namespace's settings.
:param _id: the ``id`` of the form to delete
:type _id: int
:param data: update payload
:type data: dict
:returns: True if updated, else False
:rtype: bool
:raises Exception: API failure.
"""
try:
resp = requests.put(self.url_for('get', _id),
json=data,
headers=self.headers)
if resp.status_code == 200:
return True
else:
raise Exception("Failed to update: {}".format(resp.text))
except Exception as e:
raise Exception("Failed to update: {}".format(e))
return False | /remote_actions-6.0.18.tar.gz/remote_actions-6.0.18/remote_actions/services/fleeting_forms.py | 0.793186 | 0.292755 | fleeting_forms.py | pypi |
import os
import re
import unicodedata
"""
This code has been copied over from the werkzeug project.
It is licensed under the BSD 3-Clause license
"""
_entity_re = re.compile(r"&([^;]+);")
_filename_ascii_strip_re = re.compile(r"[^A-Za-z0-9_.-]")
_windows_device_files = (
"CON",
"AUX",
"COM1",
"COM2",
"COM3",
"COM4",
"LPT1",
"LPT2",
"LPT3",
"PRN",
"NUL",
)
def secure_filename(filename: str) -> str:
r"""Pass it a filename and it will return a secure version of it. This
filename can then safely be stored on a regular file system and passed
to :func:`os.path.join`. The filename returned is an ASCII only string
for maximum portability.
On windows systems the function also makes sure that the file is not
named after one of the special device files.
>>> secure_filename("My cool movie.mov")
'My_cool_movie.mov'
>>> secure_filename("../../../etc/passwd")
'etc_passwd'
>>> secure_filename('i contain cool \xfcml\xe4uts.txt')
'i_contain_cool_umlauts.txt'
The function might return an empty filename. It's your responsibility
to ensure that the filename is unique and that you abort or
generate a random filename if the function returned an empty one.
:param filename: the filename to secure
"""
filename = unicodedata.normalize("NFKD", filename)
filename = filename.encode("ascii", "ignore").decode("ascii")
for sep in os.path.sep, os.path.altsep:
if sep:
filename = filename.replace(sep, " ")
filename = str(_filename_ascii_strip_re.sub("", "_".join(filename.split()))).strip(
"._"
)
# on nt a couple of special files are present in each folder. We
# have to ensure that the target file is not such a filename. In
# this case we prepend an underline
if (
os.name == "nt"
and filename
and filename.split(".")[0].upper() in _windows_device_files
):
filename = f"_{filename}"
return filename | /remote_builder-0.2.tar.gz/remote_builder-0.2/remote_builder/server/utils.py | 0.485844 | 0.283881 | utils.py | pypi |
import abc
import logging
log = logging.getLogger(__name__)
class Action(abc.ABC):
"""
A callable that does something with a :class:`~.message.Message`
Instances will be called with :class:`~.message.Message` instances.
The :attr:`remote` attribute will be set to a :class:`Remote` before
calling.
"""
def __init__(self):
self.remote = None
@abc.abstractmethod
def __call__(self, msg) -> 'Iterable[Action]':
"""
When called, an :class:`Action` should return an iterable of other
:class:`Action` s that will be applied to the ``msg`` being processed.
"""
pass
class Stop(Action):
"""
Stop processing any futher :class:`Action` for the current
:class:`~.message.Message`
"""
def __call__(self, msg):
raise StopIteration()
class Move(Action):
def __init__(self, destination: tuple[str]):
"""
Move the :class:`~.message.Message` to ``destination`` directory.
:param tuple[str] destination: the destination directory on the server
"""
super().__init__()
self.destination = destination
def __call__(self, msg):
target_dir = self.destination
log.info(f'Moving {msg.dir_}/{msg.Subject} to {target_dir}')
self.remote.move_message(msg, target_dir)
return []
class ChangeFlags(Action):
def __init__(self, add=set(), remove=set()):
"""
Add or remove flags from a :class:`~.message.Message`.
:param set[bytes] add: Set of flags to add
:param set[bytes] remove: Set of flags to remove
The intersection of `add` and `remove` must be empty
"""
super().__init__()
if add & remove:
raise ValueError("Add and remove sets must not intersect")
self.add = add
self.remove = remove
def __call__(self, msg):
new_flags = msg.flags
log.info(f'Set {msg.dir_}/{msg.Subject} +({self.add})-({self.remove})')
if self.add:
new_flags = msg.remote.add_flags(msg.uid, self.add)
if self.remove:
new_flags = msg.remote.remove_flags(msg.uid, self.remove)
msg._flags = new_flags
log.info(f'{msg.dir_}/{msg.Subject} =({msg.flags})')
return [] | /remote_email_filtering-0.2.1.tar.gz/remote_email_filtering-0.2.1/src/remote_email_filtering/action.py | 0.84699 | 0.235713 | action.py | pypi |
# Remote
[](https://github.com/remote-cli/remote/actions?query=branch%3Amaster+workflow%3A%22Python+Code+Quality%22)
[](https://pypi.org/project/remote-exec-api)
[](https://github.com/remote-cli/remote)
[](https://github.com/remote-cli/remote/blob/master/LICENSE)
The `remote` CLI lets you execute long or computation-heavy tasks (e.g., compilation, integration tests, etc.)
on a powerful remote host while you work on the source code locally.
This process is known as remote execution and can enable remote build capabilities, among other things.
When you execute `remote <cmd>`, it will first sync your local workspace to the remote host you selected using `rsync`.
It will then execute the command `<cmd>` on this host using `ssh` and finally, bring all the created/modified files back to your local workspace.
`remote` supports a host of configuration options to allow for complete customization of patterns for files and folders to include during the synchronization process in both directions.
## System Requirements
The CLI supports **Linux** and **Mac OS X** operating systems
with **Python 3.6 or higher** installed. You can also use it on **Windows**
if you have [WSL](https://docs.microsoft.com/en-us/windows/wsl/about) configured.
The remote host must also be running on **Linux** or **Mac OS X**. The local and remote hosts can be running different operating systems. The only requirement is that the remote host must be accessible using `ssh` from the local host.
## Getting Started
### Installing on Mac OS X
If you use Mac OS X, you can install `remote` using [Homebrew](https://brew.sh/)
from our [custom tap](https://github.com/remote-cli/homebrew-remote):
```bash
brew install remote-cli/remote/remote
```
Then, you will always be able to update it to the latest version:
```bash
brew upgrade remote
```
### Installing on other systems
`remote` doesn't support any package managers other than `brew` yet. However, it can be manually downloaded
and installed. To do it, visit https://github.com/remote-cli/remote/releases and download the latest released `-shiv` archive, unpack it to some local directory (e.g., `~/.bin`) and add it to PATH:
```bash
mkdir -p ~/.bin
tar -C ~/.bin -xzf ~/Downloads/remote-1.4.5-shiv.tgz
echo 'export PATH=$PATH:/home/username/.bin/remote/bin' >> ~/.bash_profile
source ~/.bash_profile
```
Don't forget to replace the `/home/username` above with the actual path to your home directory.
### Configuring the remote host
`remote` CLI needs to be able to establish a passwordless SSH connection to the remote host.
Please run `ssh -o BatchMode=yes <your-host> echo OK` to confirm that everything is ready for you.
If this command fails, please go through [SSH guide](https://www.ssh.com/ssh/keygen/) to set up
SSH keys locally and remotely.
### First run
After you are done with the configuration, switch the working directory to the root of your workspace in
terminal and run `remote-init` to create a configuration file:
```bash
cd ~/path/to/workspace
remote-init remote-host.example.com
```
This will create a config file named `.remote.toml` in the workspace root
(`~/path/to/workspace/.remote.toml`). This file controls the remote connection and synchronization options.
You can read more about this file in the Configuration section of this doc.
After it, you can start using remote:
```bash
# This will sync workspace and run './gradlew build' remotely
remote ./gradlew build
# This will forcefully push all local files to the remote machine
remote-push
# This will bring in ./build directory from the remote machine to local even if
# the CLI is configured to ignore it
remote-pull build
```
## Distribution
`remote`'s distribution comes with a set of executables:
* `remote-init`: set up a local directory to point to a remote directory on a target host
* `remote-ignore`: set up directories/files to ignore while pushing
* `remote-push`: explicitly push local changes remote
* `remote-pull`: pull a directory from remote to local
* `remote`: execute a command remotely, after first syncing the local tree with the remote tree
* `remote-explain`: explain your remote setup, explain what command actually will get run
* `remote-quick`: execute a command remotely without syncing the trees
* `remote-add`: add another remote host to the mirror list
* `mremote`: execute a remote command on all the hosts, after first syncing the local tree with the remote trees
You can run each of these commands with `--help` flag to get a list of options and arguments they accept.
## Configuration
Three configuration files control the behavior of `remote`:
* `~/.config/remote/defaults.toml` is a global config file. It sets options that affect all the workspaces
unless they are overwritten by `.remote.toml` file.
* `.remote.toml` is a workspace config that is expected to be placed in the root of every workspace.
The `remote` CLI cannot execute any commands remotely until this file is present, or the global config
overwrites this with `allow_uninitiated_workspaces` option.
* `.remoteignore.toml` is a workspace config that controls only sync exclude and include patterns
and has the highest priority. While the same settings can be specified in the `.remote.toml` file,
you can use this file to check in project-specific ignore settings in the VCS because it doesn't contain
host-specific information in it.
Both configs use [TOML](https://github.com/toml-lang/toml) format.
**Workspace root** is a root directory of the project you're working on.
It is identified by the `.remote.toml` file. Each time you execute `remote` from workspace root or any of its
subdirectories, `remote` syncs everything under workspace root with the destination host before running the command.
### Global Configuration File
Global configuration file should be placed in `~/.config/remote/defaults.toml`. This config file is optional
and the `remote` CLI will work with the default values if it is absent. This is the example of how it looks like:
```toml
[general]
allow_uninitiated_workspaces = false
use_relative_remote_paths = false
remote_root = ".remotes"
[[hosts]]
host = "linux-host.example.com"
label = "linux"
[[hosts]]
host = "macos-host.example.com"
port = 2022
supports_gssapi_auth = false
default = true
label = "mac"
[push]
exclude = [".git"]
[pull]
exclude = ["src/generated"]
include = ["build/reports"]
[both]
include_vcs_ignore_patterns = true
```
1. `[general]` block controls system-wide behavior for the `remote` CLI.
Reference:
* `allow_uninitiated_workspaces` (optional, defaults to `false`) - If this flag is set to `true` and
the global config contains at least one remote host, `remote` will treat its current working directory
as a workspace root even if it doesn't have `.remote.toml` file in it.
**Warning:** If this option is on and you run `remote` in the subdirectory of an already configured workspace,
`remote` will ignore workspaces configuration and treat subdirectory as a separate workspace root.
* `remote_root` (optional, defaults to `".remotes"`) - The default directory on the remote machine that
will be used to store synced workspaces. The path is expected to be relative to the remote user's home
directory, so `.remotes` will resolve in `/home/username/.remotes`.
If the workspace-level configuration sets the `directory` for a host, this setting will be ignored.
* `use_relative_remote_paths` (optional, defaults to `false`)
* If set to `false`, all the workspaces will be stored in the `remote_root` of the target host in a flat
structure. Each directory will have a name like `<workspace_name>_<workspace_path_hash>`.
* If set to `false`, the remote path will be placed in the `remote_root` tree like it was placed in the user's
home directory tree locally. Some examples:
* If the local path is `/home/username/projects/work/project_name`, the remote path will be
`/home/username/.remotes/projects/work/project_name`
* If the local path is `/tmp/project_name`, the remote path will be
`/home/username/.remotes/tmp/project_name`
2. `[[hosts]]` block lists all the remote hosts available for the workspaces. Used when the workspace
configuration doesn't overwrite it.
You can provide multiple hosts in this block, but only one will be selected when you execute `remote`.
It would be either the host that is marked by `default = true` or the first one in the list if no
default was set explicitly.
You can run most of the commands with `--label label|number` or `-l label|number` option to run a
command on the non-default host. `label` here is the text label you put in the config file, `number` is
a number of required host in the hosts' list, starting from 1.
Reference:
* `host` - a hostname, IP address, or ssh alias of a remote machine that you want to use for remote execution.
* `port` (optional, defaults to `22`) - a port used by the ssh daemon on the host.
* `supports_gssapi_auth` (optional, defaults to `true`) - `true` if the remote host supports `gssapi-*` auth
methods. We recommend disabling it if the ssh connection to the host hangs for some time during establishing.
* `default` (optional, defaults to `false`) - `true` if this host should be used by default
* `label` (optional) - a text label that later can be used to identify the host when running the `remote` CLI.
3. `[push]`, `[pull]`, and `[both]` blocks control what files are synced from local to a remote machine and back
before and after the execution. These blocks are used when the workspace configuration doesn't overwrite them.
`push` block controls the files that are uploaded from local machine to the remote one. `pull` block controls files that are downloaded from remote machine to local one. `both` block extends previous two.
Each one of these blocks supports the following options:
* `exclude` (optional, defaults to empty list) - a list of rsync-style patterns. Every file in the workspace
that matches these patterns won't be synced unless it is explicitly specified in `include`.
* `include` (optional, defaults to empty list) - a list of rsync-style patterns. Every file in the workspace
that matches these patterns will be synced even if it matches the `exclude`.
* `include_vcs_ignore_patterns` (optional, defaults to `false`) - if `true` and `.gitignore` is present,
all its patterns will be included in the `exclude` list.
### Workspace Configuration File
This is the example of how standalone workspace-level `.remote.toml` configuration file looks like:
```toml
[[hosts]]
host = "linux-host.example.com"
directory = ".remotes/workspace"
label = "linux"
supports_gssapi_auth = true
[[hosts]]
host = "macos-host.example.com"
port = 2022
directory = ".remotes/other-workspace"
supports_gssapi_auth = false
default = true
label = "mac"
[push]
exclude = [".git"]
[pull]
exclude = ["src/generated"]
include = ["build/reports"]
[both]
include_vcs_ignore_patterns = true
```
All the used blocks here are similar to the ones in the global config file. However, you cannot put
`[general]` block in this file. Also, you can provide one more option in `[[hosts]]` block:
* `directory` (optional) - a path relative to remote user's home. It will be used to store the workspace's
file on the remote machine.
Also, if you set at least one value for any of the blocks in the workspace-level config,
all the values from this block in the global config will be ignored.
There is a way to change this behavior. You can use `[extends.*]` blocks to do it.
Here is an example. Imagine, you have a following global config:
```toml
[[hosts]]
host = "linux-host.example.com"
label = "linux"
default = true
[push]
exclude = [".git"]
[both]
include_vcs_ignore_patterns = true
```
If you want to be able to use the same Linux host in the workspace, but you want to add one more and modify some exclude patterns, you can create the following workspace config:
```toml
[[extends.hosts]]
host = "mac-host.example.com"
directory = ".remotes/mac-workspace"
label = "mac"
default = true
[extends.push]
exclude = ["workspace-specific-dir"]
include = [".git/hooks"]
[both]
include_vcs_ignore_patterns = false
```
As you can see, some block names start with `extends.`. This name tells remote to merge the
workspace and global settings.
There are a few things to note:
* If both workspace-level and global configs define a default host, the workspace-level config wins
* Hosts ordering is preserved, and globally configured hosts always go first.
* If an option value is a list (e.g. `exclude`), it is extended. Otherwise, the value is overwritten.
### Workspace Files Sync Configuration File
`.remoteignore.toml` files is similar to `.remote.toml`, but only supports `push`, `pull`, `both`,
`extends.push`, `extends.pull` and `extends.both` blocks. It also cannot be used to identify
the workspace root.
### .remoteenv file
Sometimes you will need to do some action each time before you execute some remote command.
A common example will be to execute `pytest` in the virtual environment: you need to activate it
first, but the activation state won't be preserved between the `remote` runs.
There are two ways of solving this problem:
1. Running both initiation logic and the command together:
```bash
remote 'source env/bin/activate && pytest'
```
2. Creating a file called `.remoteenv` in the workspace root. If this file is present, `remote` will
always run `source .remoteenv` on the destination host before running the actual command. For example,
here is how you can run `remote`'s tests on the other hosts:
```bash
git clone git@github.com:remote-cli/remote.git
cd remote
remote-init <remote-host-name>
remote python3 -m venv env
echo '. env/bin/activate' >> .remoteenv
# starting from this point, all python commands will be executed in virtualenv remotely
# This should print virtualenv's python path
remote which python
remote pip install -e .
remote pip install -r test_requirements.txt
remote pytest
```
The `.remoteenv` file is guaranteed to sync to remote machine even if it is excluded by the workspace's
`.gitignore` file or other rules.
## Development & Contribution
To bootstrap the development run:
```bash
git clone git@github.com:remote-cli/remote.git
cd remote
python3 -m venv env
source env/bin/activate
pip install -e .
pip install -r test_requirements.txt
```
After it, you can open the code in any editor or IDE you like. If you prefer VSCode, the project contains the configuration file for it.
Before submitting your pull request, please check it by running:
```bash
flake8 src test && mypy -p remote && black --check -l 120 src test && isort --check-only src test && pytest
```
If `black` or `isort` fails, you can fix it using the following command:
```bash
black -l 120 src test && isort src test
```
Don't forget to add changed files to your commit after you do it.
| /remote-exec-api-1.13.2.tar.gz/remote-exec-api-1.13.2/README.md | 0.570092 | 0.867092 | README.md | pypi |
import logging
import shlex
import subprocess
import sys
import tempfile
import time
from contextlib import contextmanager
from dataclasses import dataclass, field, fields, is_dataclass
from enum import IntEnum
from pathlib import Path
from typing import List, Optional, Sequence, TextIO, Union
from remote.exceptions import InvalidInputError
from .exceptions import RemoteConnectionError, RemoteExecutionError
logger = logging.getLogger(__name__)
DEFAULT_SSH_PORT = 22
def _temp_file(lines: List[str]) -> Path:
"""Create a temporary file with provided content and return its path
:param lines: list of lines to be written in the file
"""
_, path = tempfile.mkstemp(prefix="remote.", dir="/tmp", text=True)
tmpfile = Path(path)
tmpfile.write_text("\n".join(lines) + "\n")
return tmpfile
def _gen_rsync_patterns_file(patterns, opt, args, cleanup):
if patterns:
exclude_file = _temp_file(patterns)
cleanup.append(exclude_file)
args.extend((opt, str(exclude_file)))
logger.info(f"{opt} patterns:")
for p in patterns:
logger.info(" - %s", p)
@contextmanager
def _measure_duration(operation: str):
start = time.time()
yield None
runtime = time.time() - start
logger.info("%s done in %.2f seconds", operation, runtime)
@dataclass(frozen=True)
class ForwardingOption:
"""Port forwarding options for ssh"""
remote_port: int
local_port: int
remote_interface: str = "localhost"
local_interface: Optional[str] = None
@classmethod
def from_string(cls, port_args: str) -> "ForwardingOption":
"""Parse port values from the user input.
:param host: the input string from port tunnelling option.
:returns: A tuple of remote port, local port.
"""
ports: List = port_args.split(":")
if len(ports) > 2:
raise InvalidInputError("Please pass a valid value to enable local port forwarding")
try:
if len(ports) == 1:
return cls(int(ports[0]), int(ports[0]))
return cls(int(ports[0]), int(ports[1]))
except ValueError as e:
raise InvalidInputError("Please pass valid integer value for ports") from e
def to_ssh_string(self) -> str:
prefix = f"{self.local_interface}:" if self.local_interface else ""
return f"{prefix}{self.local_port}:{self.remote_interface}:{self.remote_port}"
class VerbosityLevel(IntEnum):
QUIET = 1
DEFAULT = 2
VERBOSE = 3
@dataclass(frozen=True)
class CommunicationOptions:
stdin: Optional[TextIO] = sys.stdin
stdout: TextIO = sys.stdout
stderr: TextIO = sys.stderr
@dataclass(frozen=True)
class Ssh:
"""Ssh configuration class, pregenrates and executes commands remotely"""
host: str
port: Optional[int] = None
force_tty: bool = True
verbosity_level: VerbosityLevel = VerbosityLevel.QUIET
use_gssapi_auth: bool = True
disable_password_auth: bool = True
local_port_forwarding: List[ForwardingOption] = field(default_factory=list)
communication: CommunicationOptions = CommunicationOptions()
def generate_command(self) -> List[str]:
"""Generate the base ssh command to execute (without host)"""
command = ["ssh"]
options = "t" if self.force_tty else ""
if self.use_gssapi_auth:
options += "K"
if self.verbosity_level <= VerbosityLevel.QUIET:
options += "q"
elif self.verbosity_level >= VerbosityLevel.VERBOSE:
options += "v"
if options:
command.append(f"-{options}")
if self.disable_password_auth:
command.extend(("-o", "BatchMode=yes"))
if self.port and self.port != DEFAULT_SSH_PORT:
command.extend(("-p", str(self.port)))
for port in self.local_port_forwarding:
command.extend(("-L", port.to_ssh_string()))
return command
def generate_command_str(self) -> str:
"""Generate the base ssh command to execute (without host)"""
return prepare_shell_command(self.generate_command())
def execute(
self,
command: str,
raise_on_error: bool = True,
extra_args: Optional[List[str]] = None,
) -> int:
"""Execute a command remotely using SSH and return it's exit code
:param command: a command to execute
:param raise_on_error: raise an exception is remote execution
:param extra_args: Extra arguments for SSH command
:returns: exit code of remote command or 255 if connection didn't go through
"""
subprocess_command = self.generate_command()
if extra_args:
subprocess_command.extend(extra_args)
logger.info("Executing:\n%s %s <<EOS\n%sEOS", " ".join(subprocess_command), self.host, command)
subprocess_command.extend((self.host, command))
with _measure_duration("Execution"):
result = subprocess.run(
subprocess_command,
stdout=self.communication.stdout,
stderr=self.communication.stderr,
stdin=self.communication.stdin,
)
if raise_on_error:
# ssh exits with the exit status of the remote command or with 255 if an error occurred
if result.returncode == 255:
raise RemoteConnectionError(f"Failed to connect to {self.host}")
elif result.returncode != 0:
raise RemoteExecutionError(f'Failed to execute "{command}" on host {self.host} ({result.returncode})')
return result.returncode
def rsync(
src: str,
dst: str,
ssh: Ssh,
info: bool = False,
verbose: bool = False,
dry_run: bool = False,
delete: bool = False,
mirror: bool = False,
excludes: List[str] = None,
includes: List[str] = None,
extra_args: List[str] = None,
communication=CommunicationOptions(),
):
"""Run rsync to sync files from src into dst
:param src: Source files to copy. If source is a directory and you need to copy its contents, append / to its path
:param dst: Destination file or directory
:param ssh: ssh configuration to use for rsync
:param info: True if need to add -i flag to rsync
:param verbose: True if need to add -v flag to rsync
:param dry_run: True if need to add -n flag to rsync
:param delete: True if all files inside destination directory need to be deleted if they were not found at source and
they are not excluded by exclude filters
:param mirror: True if all files inside destination directory need to be deleted if they were not found at source
:param excludes: List of file patterns to exclude from syncing
:param includes: List of file patterns to include even if they were excluded by exclude filters
:param extra_args: Extra arguments for rsync function
:param communication: file descriptors to use for process communication
"""
logger.info("Sync files from %s to %s", src, dst)
args = ["rsync", "-arlpmchz", "--copy-unsafe-links", "-e", ssh.generate_command_str(), "--force"]
if info:
args.append("-i")
if verbose:
args.append("-v")
if dry_run:
args.append("-n")
if delete or mirror:
args.append("--delete")
if mirror:
args.extend(("--delete-after", "--delete-excluded"))
if extra_args:
args.extend(extra_args)
cleanup: List[Path] = []
# It is important to add include patterns before exclude patters because rsync might ignore includes if you do otherwise.
_gen_rsync_patterns_file(includes, "--include-from", args, cleanup)
_gen_rsync_patterns_file(excludes, "--exclude-from", args, cleanup)
args.extend((src, dst))
logger.info("Starting sync with command %s", " ".join(args))
with _measure_duration("Sync"):
result = subprocess.run(args, stdout=communication.stdout, stderr=communication.stderr)
for file in cleanup:
file.unlink()
if result.returncode != 0:
raise RemoteConnectionError(f"Failed to sync files between {src} and {dst}. Is remote host reachable?")
def prepare_shell_command(command: Union[str, Sequence[str]]) -> str:
"""Format command parts into one shell command"""
if isinstance(command, str):
return command
# This means the whole command is already preformatted for us
if len(command) == 1 and " " in command[0]:
return command[0]
return " ".join([shlex.quote(c) for c in command])
def shell_quote(command_arg: Union[str, Path]) -> str:
return shlex.quote(str(command_arg))
def pformat_dataclass(obj, indent=" "):
"""Return a string with an object contents prettified"""
result = []
has_dataclass_fields = False
for field in fields(obj): # noqa: F402 'field' shadows the import
value = getattr(obj, field.name)
if is_dataclass(value):
str_value = "\n" + pformat_dataclass(value, indent + " ")
has_dataclass_fields = True
else:
str_value = str(value)
result.append((field.name, str_value))
if has_dataclass_fields:
return "\n".join(f"{indent}- {name}: {value}" for name, value in result)
else:
width = max(len(name) for name, _ in result)
return "\n".join(f"{indent}- {name: <{width}}: {value}" for name, value in result) | /remote-exec-api-1.13.2.tar.gz/remote-exec-api-1.13.2/src/remote/util.py | 0.598195 | 0.158956 | util.py | pypi |
import logging
import re
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from functools import wraps
from pathlib import Path
from typing import List, Optional, Union
import click
from .configuration import WorkspaceConfig
from .configuration.discovery import get_configuration_medium, load_cwd_workspace_config, save_config
from .configuration.shared import HOST_REGEX, PATH_REGEX
from .exceptions import InvalidInputError, RemoteError
from .explain import explain
from .util import CommunicationOptions, ForwardingOption, shell_quote
from .workspace import SyncedWorkspace
BASE_LOGGING_FORMAT = "%(message)s"
CONNECTION_STRING_FORMAT_REGEX = re.compile(f"^{HOST_REGEX}(:{PATH_REGEX})?$")
DEFAULT_CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
EXECUTION_CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], ignore_unknown_options=True, allow_interspersed_args=False
)
def log_exceptions(f):
"""A decorator that prints the custom exceptions and exit, but propagates internal ones"""
@wraps(f)
def wrapper(*args, **kwards):
try:
f(*args, **kwards)
except Exception as e:
if isinstance(e, RemoteError):
click.secho(str(e), fg="yellow")
sys.exit(1)
raise
return wrapper
def validate_connection_string(ctx, param, value):
matcher = CONNECTION_STRING_FORMAT_REGEX.match(value)
if matcher is None:
raise click.BadParameter(
"Please fix value to match the specified format for connection string", ctx=ctx, param=param
)
return value
def int_or_str_label(label: Optional[str]) -> Optional[Union[int, str]]:
"""Try to convert the label to int and return the result, if it's not successful, return the label"""
if label is None:
return None
try:
# Users enter indexes starting with 1 and internally we use indexes starting with 0
return int(label) - 1
except ValueError:
return label
def check_command(command: List[str]):
if command and command[0].startswith("-"):
# Our execution entry points use ignore_unknown_options=True and allow_interspersed_args=False
# to be able to stream the command to the remote machine. However, there is a downside.
# If user runs this command with an unknown option, this option will become a part of the command.
# That's why we need to manually check if the command starts with an unknown option and print an
# error message in this case.
ctx = click.get_current_context()
click.echo(ctx.get_usage())
click.echo(f"Try '{ctx.info_name} -h' for help\n\nError: no such option {command[0]}")
sys.exit(2)
def _add_remote_host(config: WorkspaceConfig, connection: str):
"""Add a new remote host to the workspace config, check the connection, and save it if connection is ok
:param config: the workspace config decription object
:param connection: connection string in format of 'host-name[:remote_dir]'
"""
parts = connection.split(":")
remote_host = parts[0]
config_medium = get_configuration_medium(config)
remote_dir = config_medium.generate_remote_directory(config) if len(parts) == 1 else Path(parts[1])
added, index = config.add_remote_host(remote_host, remote_dir)
if not added:
click.echo(f"{connection} already exists in config")
sys.exit(0)
# Check if we can connect to the remote host and create a directory there
workspace = SyncedWorkspace.from_config(config, config.root, index)
try:
workspace.create_remote()
except RemoteError:
click.secho(f"Failed to create {workspace.remote.directory} on remote host {remote_host}", fg="yellow")
click.secho("Please check if host is accessible via SSH", fg="yellow")
sys.exit(1)
click.echo(f"Created remote directory at {workspace.remote.host}:{shell_quote(workspace.remote.directory)}")
click.echo("Remote is configured and ready to use")
# No errors when executing the above code means we can save the config
config_medium.save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_add(connection: str):
"""Add one more host for remote connection to a config file"""
config = load_cwd_workspace_config()
_add_remote_host(config, connection)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_init(connection: str):
"""Initiate workspace for the remote execution in the current working directory"""
try:
workspace = load_cwd_workspace_config()
if workspace.root == Path.cwd():
click.secho("A configured workspace already exists in the current working directory.", fg="yellow")
else:
click.secho(
f"A configured workspace already initiated in the current working directory's parent {workspace.root}.",
fg="yellow",
)
click.secho("If you want to add a new host to it, please use remote-add.", fg="yellow")
sys.exit(1)
except RemoteError:
# we expect it to fail. It means we don't overwrite an existing workspace
pass
config = WorkspaceConfig.empty(Path.cwd())
_add_remote_host(config, connection)
# help out with .gitignore if we are in a git repository
if not (config.root / ".git").exists():
return
# make sure we don't keep adding to .gitignore
gitignore = config.root / ".gitignore"
if gitignore.exists():
for line in gitignore.read_text().splitlines():
if line.startswith(".remote"):
return
with gitignore.open("a") as f:
f.write("\n")
f.write(".remote*")
f.write("\n")
click.echo("Added '.remote*' to .gitignore")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option(
"-p", "--push", is_flag=True, help="add IGNORE pattern to push ignore list (mutually exclusive with '--pull')"
)
@click.option(
"-l", "--pull", is_flag=True, help="add IGNORE pattern to pull ignore list (mutually exclusive with '--push')"
)
@click.argument("ignore", nargs=-1, required=True)
@log_exceptions
def remote_ignore(ignore: List[str], push: bool, pull: bool):
"""Add new IGNORE patterns to the ignores list
IGNORE pattern should be a string in rsync-friendly format.
If no options provided these patterns will be ignored on both push and pull
"""
config = load_cwd_workspace_config()
if not push and not pull:
config.ignores.add(ignore)
elif pull and not push:
config.ignores.pull.add(ignore)
elif push and not pull:
config.ignores.push.add(ignore)
else:
raise InvalidInputError("You cannot use both '--pull' and '--push' flags")
config.ignores.trim()
save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def remote_host():
"""Print the default remote host in use and exit"""
workspace = SyncedWorkspace.from_cwd()
click.echo(workspace.remote.host)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("index", type=int)
@log_exceptions
def remote_set(index: int):
"""Set a new default remote host for the workspace
INDEX is an index of host in config file to use by default (strating from 1)
"""
config = load_cwd_workspace_config()
if len(config.configurations) < index:
click.secho(
f"Index is too big ({index}). Only have {len(config.configurations)} hosts to choose from.", fg="yellow"
)
sys.exit(1)
elif index < 1:
click.secho("Index should be 1 or higher", fg="yellow")
sys.exit(1)
# we use 0-base index internally
index = index - 1
config.default_configuration = index
save_config(config)
click.echo(f"Remote host is set to {config.configurations[index].host}")
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of the whole cycle")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-e", is_flag=True, help="(deprecated) kept for backward compatibility, noop")
@click.option(
"-t",
"--tunnel",
"port_args",
type=str,
multiple=True,
help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option(
"-s",
"--stream-changes",
default=False,
is_flag=True,
help="Resync local changes if any while the command is being run remotely",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("--multi", is_flag=True, help="sync and run the remote commands on each remote host from config")
@click.option(
"--log",
type=click.Path(file_okay=False, resolve_path=True),
help="Write sync and remote command output to the log file instead of stdout. "
"Log file will be located inside DIRECTORY/<timestamp>/<host>_output.log",
)
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote(
command: List[str],
dry_run: bool,
mirror: bool,
verbose: bool,
e: bool,
port_args: List[str],
label: Optional[str],
stream_changes: bool,
log: Optional[str],
multi: bool,
):
"""Sync local workspace files to remote machine, execute the COMMAND and sync files back regardless of the result"""
check_command(command)
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
ports = [ForwardingOption.from_string(port_arg) for port_arg in port_args]
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
with ThreadPoolExecutor(max_workers=len(workspaces)) as executor:
futures = {}
descriptors = []
start_timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
for workspace in workspaces:
host = workspace.remote.host
if multi or log:
# We save logs into the <log_dir>/<timestamp>/<hostname>_output.log
log_dir = Path(log) if log else (workspace.local_root / "logs")
log_dir = log_dir / start_timestamp
log_dir.mkdir(parents=True, exist_ok=True)
try:
# If the logs are enabled and they are inside the workspace root, we need to exclude them from
# syncing
relative_path = log_dir.relative_to(workspace.local_root)
log_path = f"{relative_path}/*_output.log"
workspace.pull_rules.excludes.append(log_path)
workspace.push_rules.excludes.append(log_path)
except ValueError:
# Value error means that logs are placed outside of the workspace root
pass
fd = (log_dir / f"{host}_output.log").open("w")
descriptors.append(fd)
workspace.communication = CommunicationOptions(stdin=None, stdout=fd, stderr=fd)
future = executor.submit(
workspace.execute_in_synced_env,
command,
dry_run=dry_run,
verbose=verbose,
mirror=mirror,
ports=ports,
stream_changes=stream_changes,
)
futures[future] = workspace
final_exit_code = 0
for future in as_completed(list(futures.keys())):
workspace = futures[future]
try:
exit_code = future.result(timeout=0)
if exit_code != 0:
click.secho(f"Remote command on {workspace.remote.host} exited with {exit_code}", fg="yellow")
final_exit_code = exit_code
except Exception as e: # noqa: F841
class_name = e.__class__.__name__
click.secho(f"{class_name}: {e}", fg="yellow")
final_exit_code = 255
for fd in descriptors:
fd.close()
sys.exit(final_exit_code)
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option(
"-t",
"--tunnel",
"port_args",
type=str,
multiple=True,
help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote_quick(
command: List[str],
port_args: List[str],
label: Optional[str],
):
"""Execute the COMMAND remotely, without syncing any files"""
check_command(command)
ports = [ForwardingOption.from_string(port_arg) for port_arg in port_args]
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
code = workspace.execute(command, ports=ports, raise_on_error=False)
sys.exit(code)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a pull")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("path", nargs=-1)
@log_exceptions
def remote_pull(dry_run: bool, verbose: bool, path: List[str], label: Optional[str]):
"""Bring in files from the default remote directory to local workspace.
Optionally bring in PATH instead of the whole workspace.
PATH is a path of file or directory to bring back relative to the remote workspace root.
All sync exclude rules will be omitted if PATH is provided.
"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
if not path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run)
return
for subpath in path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run, subpath=Path(subpath))
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a push")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("path", nargs=-1)
@click.option(
"--multi", is_flag=True, help="push files to all available remote workspaces instead of pushing to the default one"
)
@log_exceptions
def remote_push(dry_run: bool, mirror: bool, verbose: bool, path: List[str], multi: bool, label: Optional[str]):
"""Push local workspace files to the remote directory
Optionally push PATH instead of the whole workspace.
PATH is a path of file or directory to push relative to the remote workspace root.
All sync exclude rules will be omitted if PATH is provided.
"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
for workspace in workspaces:
if not path:
workspace.push(info=True, verbose=verbose, dry_run=dry_run, mirror=mirror)
continue
for subpath in path:
workspace.push(info=True, verbose=verbose, dry_run=dry_run, mirror=mirror, subpath=Path(subpath))
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@log_exceptions
def remote_delete(label: Optional[str]):
"""Delete the remote directory"""
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
workspace.clear_remote()
click.echo(f"Successfully deleted {workspace.remote.directory} on host {workspace.remote.host}")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("-d", "--deep", is_flag=True, help="check latency and download/upload speed if connection is ok")
@log_exceptions
def remote_explain(label: Optional[str], deep: bool):
"""Print out various debug information to debug the workspace"""
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
explain(workspace, deep)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote():
click.secho("mremote is deprecated. Please use 'remote --multi' instead.", fg="yellow")
sys.exit(1)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote_push():
click.secho("mremote-push is deprecated. Please use 'remote-push --multi' instead.", fg="yellow")
sys.exit(1) | /remote-exec-api-1.13.2.tar.gz/remote-exec-api-1.13.2/src/remote/entrypoints.py | 0.557243 | 0.167151 | entrypoints.py | pypi |
import subprocess
import sys
from typing import Optional
from uuid import uuid4
import click
from .util import pformat_dataclass
from .workspace import SyncedWorkspace
SPEED_TEST_FILE_SIZE_MB = 25
def explain(workspace: SyncedWorkspace, deep: bool, host_override: Optional[str] = None) -> None:
"""Print out various debug information to debug the workspace"""
# First, print out the configuration in use
click.secho("Configuration:", fg="yellow")
click.echo(pformat_dataclass(workspace))
click.echo()
# Then, check if host is pingable
# It might not be pingable if the user has ssh alias in configuration
click.secho("Checking connection.", fg="yellow")
remote_host = host_override or workspace.remote.host
ping_result = subprocess.run(["ping", "-c", "1", remote_host], capture_output=True, text=True)
if ping_result.returncode == 0:
click.secho("The remote host is reachable", fg="green")
else:
click.secho("The remote host is unreachable:", fg="red")
click.secho(f"{ping_result.stderr}", fg="red")
click.echo("We will try to do an ssh connection anyway, since the host in config may be an ssh alias")
# Then, try to execute a command remotely. It will show us if there are any ssh-related issues
quick_exec_code = workspace.execute("test", simple=True, raise_on_error=False, verbose=True)
if quick_exec_code == 255:
click.secho(
"The remote host is unreachable or doesn't support passwordless connection",
fg="red",
)
sys.exit(1)
click.secho("The remote host supports passwordless connection via SSH", fg="green")
click.echo()
# Then, do a sync dry-run. It will show us what wiles will be synced.
click.secho("Doing a dry-run of a full execution cycle.", fg="yellow")
execution_code = workspace.execute_in_synced_env(["Hello World"], verbose=True, dry_run=True)
if execution_code != 0:
click.secho(
"Execution cycle failed",
fg="red",
)
sys.exit(1)
if not deep:
return
# If deep check is required, we will also check for average ping, download and upload speed
click.echo()
if ping_result.returncode == 0:
# Only check for latency if the ping was successful before
click.secho("Checking latency.", fg="yellow")
ping_result = subprocess.run(["ping", "-c", "10", remote_host], capture_output=True, text=True)
for line in ping_result.stdout.splitlines():
if line.startswith("round-trip") or "transmitted" in line:
click.echo(line)
else:
click.secho("Not checking latency since the previous ping attemp failed", fg="yellow")
click.echo()
# Create a file remotely and try to download it
filename = f"speed_test_{uuid4()}"
click.secho(
f"Pulling {SPEED_TEST_FILE_SIZE_MB}MB file from the remote host to check the download speed.", fg="yellow"
)
workspace.execute(f"dd if=/dev/urandom of={filename} bs=1048576 count={SPEED_TEST_FILE_SIZE_MB} &>/dev/null")
workspace.pull(info=True, verbose=True, subpath=filename)
# Remove a file remotely to be able to upload it
workspace.execute(f"rm {filename}")
click.echo()
# Upload the same file to the remote machine
click.secho(f"Pushing {SPEED_TEST_FILE_SIZE_MB}MB file to the remote host to check the upload speed.", fg="yellow")
workspace.push(info=True, verbose=True, subpath=filename)
# Clean up the file locally and remotely
if (workspace.local_root / filename).exists():
(workspace.local_root / filename).unlink()
workspace.execute(f"rm {filename}")
click.echo() | /remote-exec-api-1.13.2.tar.gz/remote-exec-api-1.13.2/src/remote/explain.py | 0.547948 | 0.251306 | explain.py | pypi |
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
@dataclass
class RemoteConfig:
"""Single remote connection description"""
# remote machine's hostname
host: str
# relative path to the working directory on remote machine starting from user home dir
directory: Path
# a shell to use on remote machine
shell: str = "sh"
# shell options to use on remote machine
shell_options: str = ""
# whether remote machine supports gssapi-* auth or not
supports_gssapi: bool = True
# Add label to identify remote
label: Optional[str] = None
# A SSH port, if it differs from default
port: Optional[int] = None
@dataclass
class SyncRules:
"""Patterns used by rsync to forcefully exclude or include files while syncyng with remote location"""
# patterns used by rsync to forcefully exclude or include files while pulling from remote
pull: List[str]
# patterns used by rsync to forcefully exclude or include files while pushing from local
push: List[str]
# patterns used by rsync to forcefully exclude or include while transferring files in both directions
both: List[str]
def __post_init__(self):
self.trim()
def compile_push(self):
result = set()
result.update(self.push)
result.update(self.both)
return sorted(result)
def compile_pull(self):
result = set()
result.update(self.pull)
result.update(self.both)
return sorted(result)
def add(self, ignores: List[str]):
new_ignores = set()
new_ignores.update(ignores)
new_ignores.update(self.both)
self.both = sorted(new_ignores)
def trim(self):
self.pull = sorted(set(self.pull))
self.push = sorted(set(self.push))
self.both = sorted(set(self.both))
def is_empty(self):
return not (self.pull or self.push or self.both)
@classmethod
def new(cls) -> "SyncRules":
return cls([], [], [])
@dataclass
class WorkspaceConfig:
"""Complete remote workspace config"""
# absolute path to the workspace root
root: Path
# remote host connection options that can be used in this workspace
configurations: List[RemoteConfig]
# index of default remote host connection
default_configuration: int
# patterns to ignore while syncing the workspace
ignores: SyncRules
# patterns to include while syncing the workspace
includes: SyncRules
@classmethod
def empty(cls, root: Path) -> "WorkspaceConfig":
return cls(
root=root,
configurations=[],
default_configuration=0,
ignores=SyncRules.new(),
includes=SyncRules.new(),
)
def add_remote_host(
self,
host: str,
directory: Path,
shell: Optional[str] = None,
shell_options: Optional[str] = None,
label: Optional[str] = None,
port: Optional[int] = None,
) -> Tuple[bool, int]:
remote_config = RemoteConfig(
host=host,
directory=directory,
shell=shell or "sh",
shell_options=shell_options or "",
label=label,
port=port,
)
for num, cfg in enumerate(self.configurations):
if cfg.host == remote_config.host and cfg.directory == remote_config.directory:
return False, num
self.configurations.append(remote_config)
return True, len(self.configurations) - 1
class ConfigurationMedium(metaclass=ABCMeta):
"""A medium class that knows how to load, save, or process a certain type of configuration layout"""
@abstractmethod
def load_config(self, workspace_root: Path) -> WorkspaceConfig:
"""Load configuration for the workspace that is located in provided root directory.
If this method is called, we could assume that check in `is_workspace_root` passed
"""
@abstractmethod
def save_config(self, config: WorkspaceConfig) -> None:
"""Save configuration to its root"""
@abstractmethod
def is_workspace_root(self, path: Path) -> bool:
"""Return true is the path provided contains a configured workspace that can be loaded by this medium"""
@abstractmethod
def generate_remote_directory(self, config: WorkspaceConfig) -> Path:
"""Renerate a default remote directory path for the workspace with provided configuration""" | /remote-exec-api-1.13.2.tar.gz/remote-exec-api-1.13.2/src/remote/configuration/__init__.py | 0.91441 | 0.308099 | __init__.py | pypi |
# Remote
[](https://github.com/remote-cli/remote/actions?query=branch%3Amaster+workflow%3A%22Python+Code+Quality%22)
[](https://pypi.org/project/remote-exec)
[](https://github.com/remote-cli/remote)
[](https://github.com/remote-cli/remote/blob/master/LICENSE)
The `remote` CLI lets you execute long or computation-heavy tasks (e.g. compilation, integration tests etc.)
on a powerful remote host while you work on the source code locally.
This process is known as remote execution and can be used to enable remote build capabilities among other things.
When you execute `remote <cmd>`, it will first sync your local workspace to the remote host you selected using `rsync`.
It will then execute the command `<cmd>` on this host using `ssh` and finally, bring all the created/modified files back to your local workspace.
`remote` supports a host of configuration options to allow for complete customization of patterns for files and folders to include during the synchronization process in both directions.
## System Requirements
The CLI supports **Linux** and **Mac OS X** operating systems
with **Python 3.6 or higher** installed. You can also use it on **Windows**
if you have [WSL](https://docs.microsoft.com/en-us/windows/wsl/about) configured.
The remote host must also be running on **Linux** or **Mac OS X**. The local and remote hosts can be running different operating systems. The only requirement is that the remote host must be accessible using `ssh` from the local host.
## Getting Started
### Installing on Mac OS X
If you use Mac OS X, you can install `remote` using [Homebrew](https://brew.sh/)
from our [custom tap](https://github.com/remote-cli/homebrew-remote):
```bash
brew install remote-cli/remote/remote
```
Then, you will always be able to update it to the latest version:
```bash
brew upgrade remote
```
### Installing on other systems
`remote` doesn't support any package managers other than `brew` yet. However, it can be manually downloaded
and installed. To do it, visit https://github.com/remote-cli/remote/releases and download the latest released `-shiv` archive, unpack it to some local directory (e.g., `~/.bin`) and add it to PATH:
```bash
mkdir -p ~/.bin
tar -C ~/.bin -xzf ~/Downloads/remote-1.4.5-shiv.tgz
echo 'export PATH=$PATH:/home/username/.bin/remote/bin' >> ~/.bash_profile
source ~/.bash_profile
```
Don't forget to replace the `/home/username` above with the actual path to your home directory.
### Configuring the remote host
`remote` CLI needs to be able to establish a passwordless SSH connection to the remote host.
Please run `ssh -o BatchMode=yes <your-host> echo OK` to confirm that everything is ready for you.
If this command fails, please go through [SSH guide](https://www.ssh.com/ssh/keygen/) to set up
SSH keys locally and remotely.
### First run
After you are done with the configuration, switch the working directory to the root of your workspace in
terminal and run `remote-init` to create a configuration file:
```bash
cd ~/path/to/workspace
remote-init remote-host.example.com
```
This will create a config file named `.remote.toml` in the workspace root
(`~/path/to/workspace/.remote.toml`). This file controls the remote connection and synchronization options.
You can read more about this file in the Configuration section of this doc.
After it, you can start using remote:
```bash
# This will sync workspace and run './gradlew build' remotely
remote ./gradlew build
# This will forcefully push all local files to the remote machine
remote-push
# This will bring in ./build directory from the remote machine to local even if
# the CLI is configured to ignore it
remote-pull build
```
## Distribution
`remote`'s distribution comes with a set of executables:
* `remote-init`: set up a local directory to point to a remote directory on a target host
* `remote-ignore`: set up directories/files to ignore while pushing
* `remote-push`: explicitly push local changes remote
* `remote-pull`: pull a directory from remote to local
* `remote`: execute a command remotely, after first syncing the local tree with the remote tree
* `remote-explain`: explain your remote setup, explain what command actually will get run
* `remote-quick`: execute a command remotely without syncing the trees
* `remote-add`: add another remote host to the mirror list
* `mremote`: execute a remote command on all the hosts, after first syncing the local tree with the remote trees
You can run each of these commands with `--help` flag to get a list of options and arguments they accept.
## Configuration
Three configuration files control the behavior of `remote`:
* `~/.config/remote/defaults.toml` is a global config file. It sets options that affect all the workspaces
unless they are overwritten by `.remote.toml` file.
* `.remote.toml` is a workspace config that is expected to be placed in the root of every workspace.
The `remote` CLI cannot execute any commands remotely until this file is present, or the global config
overwrites this with `allow_uninitiated_workspaces` option.
* `.remoteignore.toml` is a workspace config that controls only sync exlude and include patterns
and has the highest priority. While the same settings can be specified in the `.remote.toml` file,
you can use this file to check in project-specific ignore settings in the VCS because it doesn't contain
host-specific information in it.
Both configs use [TOML](https://github.com/toml-lang/toml) format.
**Workspace root** is a root directory of the project you're working on.
It is identified by the `.remote.toml` file. Each time you execute `remote` from workspace root or any of its
subdirectories, `remote` syncs everything under workspace root with the destination host before running the command.
### Global Configuration File
Global configuration file should be placed in `~/.config/remote/defaults.toml`. This config file is optional
and the `remote` CLI will work with the default values if it is absent. This is the example of how it looks like:
```toml
[general]
allow_uninitiated_workspaces = false
use_relative_remote_paths = false
remote_root = ".remotes"
[[hosts]]
host = "linux-host.example.com"
label = "linux"
[[hosts]]
host = "macos-host.example.com"
port = 2022
supports_gssapi_auth = false
default = true
label = "mac"
[push]
exclude = [".git"]
[pull]
exclude = ["src/generated"]
include = ["build/reports"]
[both]
include_vcs_ignore_patterns = true
```
1. `[general]` block controls system-wide behavior for the `remote` CLI.
Reference:
* `allow_uninitiated_workspaces` (optional, defaults to `false`) - if this flag is set to `true` and
the global config contains at least one remote host, `remote` will treat its current working directory
as a workspace root even if it doesn't have `.remote.toml` file in it.
**Warning:** if this option is on and you run `remote` in the subdirectory of already configured workspace,
`remote` will ignore workspaces configuration and treat subdirectory as a separate workspace root.
* `remote_root` (optional, defaults to `".remotes"`) - a default directory on the remote machine that
will be used to store synced workspaces. The path is expected to be relative to the remote user's home
directory, so `.remotes` will resolve in `/home/username/.remotes`.
If the workspace-level configuration sets the `directory` for a host, this setting will be ignored.
* `use_relative_remote_paths` (optional, defaults to `false`)
* if set to `false` all the workspaces will be stored in the `remote_root` of the target host in a flat
structure. Each directory will have a name like `<workspace_name>_<workspace_path_hash>`.
* if set to `false`, the remote path will be placed in `remote_root` tree like it was placed in the users
home directory tree locally. Some examples:
* If local path is `/home/username/projects/work/project_name`, the remote path will be
`/home/username/.remotes/projects/work/project_name`
* If local path is `/tmp/project_name`, the remote path will be
`/home/username/.remotes/tmp/project_name`
2. `[[hosts]]` block lists all the remote hosts available for the workspaces. Used when the workspace
configuration doesn't overwrite it.
You can provide multiple hosts in this block, but only one will be selected when you execute `remote`.
It will be either the host that is marked by `default = true` or the first one in the list if no
default was set explicitly.
You can run most of the commands with `--label label|number` or `-l label|number` option to run a
command on non-default host. `label` here is the text label you put in the config file, `number` is
a number of required host in the hosts list, starting from 1.
Reference:
* `host` - a hostname, IP address, or ssh alias of a remote machine that you want to use for remote execution.
* `port` (optional, defaults to `22`) - a port used by the ssh daemon on the host.
* `supports_gssapi_auth` (optional, defaults to `true`) - `true` if the remote host supports `gssapi-*` auth
methods. We recommend disabling it if the ssh connection to the host hangs for some time during establishing.
* `default` (optional, defaults to `false`) - `true` if this host should be used by default
* `label` (optional) - a text label that later can be used to identify the host when running the `remote` CLI.
3. `[push]`, `[pull]`, and `[both]` blocks control what files are synced from local to a remote machine and back
before and after the execution. These blocks are used when the workspace configuration doesn't overwrite them.
`push` block controls the files that are uploaded from local machine to the remote one. `pull` block controls files that are downloaded from remote machine to local one. `both` block extends previous two.
Each one of these blocks supports the following options:
* `exclude` (optional, defaults to empty list) - a list of rsync-style patterns. Every file in the workspace
that matches these patterns won't be synced unless it is explicitly specified in `include`.
* `include` (optional, defaults to empty list) - a list of rsync-style patterns. Every file in the workspace
that matches these patterns will be synced even if it matches the `exclude`.
* `include_vcs_ignore_patterns` (optional, defaults to `false`) - if `true` and `.gitignore` is present,
all its patterns will be included in the `exclude` list.
### Workspace Configuration File
This is the example of how standalone workspace-level `.remote.toml` configuration file looks like:
```toml
[[hosts]]
host = "linux-host.example.com"
directory = ".remotes/workspace"
label = "linux"
supports_gssapi_auth = true
[[hosts]]
host = "macos-host.example.com"
port = 2022
directory = ".remotes/other-workspace"
supports_gssapi_auth = false
default = true
label = "mac"
[push]
exclude = [".git"]
[pull]
exclude = ["src/generated"]
include = ["build/reports"]
[both]
include_vcs_ignore_patterns = true
```
All the used blocks here are similar to the ones in the global config file. However, you cannot put
`[general]` block in this file. Also, you can provide one more option in `[[hosts]]` block:
* `directory` (optional) - a path relative to remote user's home. It will be used to store the workspace's
file on the remote machine.
Also, if you set at least one value for any of the blocks in the workspace-level config,
all the values from this block in the global config will be ignored.
There is a way to change this behavior. You can use `[extends.*]` blocks to do it.
Here is an example. Imagine, you have a following global config:
```toml
[[hosts]]
host = "linux-host.example.com"
label = "linux"
default = true
[push]
exclude = [".git"]
[both]
include_vcs_ignore_patterns = true
```
If you want to be able to use the same Linux host in the workspace but you want to add one more and modify some exclude patterns, you can create the following workspace config:
```toml
[[extends.hosts]]
host = "mac-host.example.com"
directory = ".remotes/mac-workspace"
label = "mac"
default = true
[extends.push]
exclude = ["workspace-specific-dir"]
include = [".git/hooks"]
[both]
include_vcs_ignore_patterns = false
```
As you can see, some block names start with `extends.`. This name tells remote to merge the
workspace and global settings.
There are a few things to note:
* If both workspace-level and global configs define a default host, the workspace-level config wins
* Hosts ordering is preserver, globally configured hosts always go first.
* If an option value is a list (e.g. `exclude`), it is extended. Otherwise, the value is overwritten.
### Workspace Files Sync Configuration File
`.remoteignore.toml` files is similar to `.remote.toml`, but only supports `push`, `pull`, `both`,
`extends.push`, `extends.pull` and `extends.both` blocks. It also cannot be used to identify
the workspace root.
### .remoteenv file
Sometimes you will need to do some action each time before you execute some remote command.
A common example will be to execute `pytest` in the virtual environment: you need to activate it
first, but the activation state won't be preserved between the `remote` runs.
There are two ways of solving this problem:
1. Running both initiation logic and the command together:
```bash
remote 'source env/bin/activate && pytest'
```
2. Creating a file called `.remoteenv` in the workspace root. If this file is present, `remote` will
always run `source .remoteenv` on the destination host before running the actual command. For example,
here is how you can run `remote`'s tests on the other hosts:
```bash
git clone git@github.com:remote-cli/remote.git
cd remote
remote-init <remote-host-name>
remote python3 -m venv env
echo '. env/bin/activate' >> .remoteenv
# starting from this point all python commands will be executed in virtualenv remotely
# This should print virtualenv's python path
remote which python
remote pip install -e .
remote pip install -r test_requirements.txt
remote pytest
```
The `.remoteenv` file is guaranteed to sync to remote machine even if it is excluded by the workspace's
`.gitignore` file or other rules.
## Development & Contribution
To bootstrap the development run:
```bash
git clone git@github.com:remote-cli/remote.git
cd remote
python3 -m venv env
source env/bin/activate
pip install -e .
pip install -r test_requirements.txt
```
After it, you can open the code in any editor or IDE you like. If you prefer VSCode, the project contains the configuration file for it.
Before submitting your pull request, please check it by running:
```bash
flake8 src test && mypy -p remote && black --check -l 120 src test && isort --check-only src test && pytest
```
If `black` or `isort` fails, you can fix it using the following command:
```bash
black -l 120 src test && isort src test
```
Don't forget to add changed files to your commit after you do it.
| /remote-exec-1.11.0.tar.gz/remote-exec-1.11.0/README.md | 0.572962 | 0.886764 | README.md | pypi |
import logging
import shlex
import subprocess
import sys
import tempfile
import time
from contextlib import contextmanager
from dataclasses import dataclass, field, fields, is_dataclass
from enum import IntEnum
from pathlib import Path
from typing import List, Optional, Sequence, TextIO, Union
from remote.exceptions import InvalidInputError
from .exceptions import RemoteConnectionError, RemoteExecutionError
logger = logging.getLogger(__name__)
DEFAULT_SSH_PORT = 22
def _temp_file(lines: List[str]) -> Path:
"""Create a temporary file with provided content and return its path
:param lines: list of lines to be written in the file
"""
_, path = tempfile.mkstemp(prefix="remote.", dir="/tmp", text=True)
tmpfile = Path(path)
tmpfile.write_text("\n".join(lines) + "\n")
return tmpfile
def _gen_rsync_patterns_file(patterns, opt, args, cleanup):
if patterns:
exclude_file = _temp_file(patterns)
cleanup.append(exclude_file)
args.extend((opt, str(exclude_file)))
logger.info(f"{opt} patterns:")
for p in patterns:
logger.info(" - %s", p)
@contextmanager
def _measure_duration(operation: str):
start = time.time()
yield None
runtime = time.time() - start
logger.info("%s done in %.2f seconds", operation, runtime)
@dataclass(frozen=True)
class ForwardingOption:
"""Port forwarding options for ssh"""
remote_port: int
local_port: int
remote_interface: str = "localhost"
local_interface: Optional[str] = None
@classmethod
def from_string(cls, port_args: str) -> "ForwardingOption":
"""Parse port values from the user input.
:param host: the input string from port tunnelling option.
:returns: A tuple of remote port, local port.
"""
ports: List = port_args.split(":")
if len(ports) > 2:
raise InvalidInputError("Please pass a valid value to enable local port forwarding")
try:
if len(ports) == 1:
return cls(int(ports[0]), int(ports[0]))
return cls(int(ports[0]), int(ports[1]))
except ValueError as e:
raise InvalidInputError("Please pass valid integer value for ports") from e
def to_ssh_string(self) -> str:
prefix = f"{self.local_interface}:" if self.local_interface else ""
return f"{prefix}{self.local_port}:{self.remote_interface}:{self.remote_port}"
class VerbosityLevel(IntEnum):
QUIET = 1
DEFAULT = 2
VERBOSE = 3
@dataclass(frozen=True)
class CommunicationOptions:
stdin: Optional[TextIO] = sys.stdin
stdout: TextIO = sys.stdout
stderr: TextIO = sys.stderr
@dataclass(frozen=True)
class Ssh:
"""Ssh configuration class, pregenrates and executes commands remotely"""
host: str
port: Optional[int] = None
force_tty: bool = True
verbosity_level: VerbosityLevel = VerbosityLevel.QUIET
use_gssapi_auth: bool = True
disable_password_auth: bool = True
local_port_forwarding: List[ForwardingOption] = field(default_factory=list)
communication: CommunicationOptions = CommunicationOptions()
def generate_command(self) -> List[str]:
"""Generate the base ssh command to execute (without host)"""
command = ["ssh"]
options = "t" if self.force_tty else ""
if self.use_gssapi_auth:
options += "K"
if self.verbosity_level <= VerbosityLevel.QUIET:
options += "q"
elif self.verbosity_level >= VerbosityLevel.VERBOSE:
options += "v"
if options:
command.append(f"-{options}")
if self.disable_password_auth:
command.extend(("-o", "BatchMode=yes"))
if self.port and self.port != DEFAULT_SSH_PORT:
command.extend(("-p", str(self.port)))
for port in self.local_port_forwarding:
command.extend(("-L", port.to_ssh_string()))
return command
def generate_command_str(self) -> str:
"""Generate the base ssh command to execute (without host)"""
return prepare_shell_command(self.generate_command())
def execute(self, command: str, raise_on_error: bool = True) -> int:
"""Execute a command remotely using SSH and return it's exit code
:param command: a command to execute
:param raise_on_error: raise an exception is remote execution
:returns: exit code of remote command or 255 if connection didn't go through
"""
subprocess_command = self.generate_command()
logger.info("Executing:\n%s %s <<EOS\n%sEOS", " ".join(subprocess_command), self.host, command)
subprocess_command.extend((self.host, command))
with _measure_duration("Execution"):
result = subprocess.run(
subprocess_command,
stdout=self.communication.stdout,
stderr=self.communication.stderr,
stdin=self.communication.stdin,
)
if raise_on_error:
# ssh exits with the exit status of the remote command or with 255 if an error occurred
if result.returncode == 255:
raise RemoteConnectionError(f"Failed to connect to {self.host}")
elif result.returncode != 0:
raise RemoteExecutionError(f'Failed to execute "{command}" on host {self.host} ({result.returncode})')
return result.returncode
def rsync(
src: str,
dst: str,
ssh: Ssh,
info: bool = False,
verbose: bool = False,
dry_run: bool = False,
delete: bool = False,
mirror: bool = False,
excludes: List[str] = None,
includes: List[str] = None,
extra_args: List[str] = None,
communication=CommunicationOptions(),
):
"""Run rsync to sync files from src into dst
:param src: Source files to copy. If source is a directory and you need to copy its contents, append / to its path
:param dst: Destination file or directory
:param ssh: ssh configuration to use for rsync
:param info: True if need to add -i flag to rsync
:param verbose: True if need to add -v flag to rsync
:param dry_run: True if need to add -n flag to rsync
:param delete: True if all files inside destination directory need to be deleted if they were not found at source and
they are not excluded by exclude filters
:param mirror: True if all files inside destination directory need to be deleted if they were not found at source
:param excludes: List of file patterns to exclude from syncing
:param includes: List of file patterns to include even if they were excluded by exclude filters
:param extra_args: Extra arguments for rsync function
:param communication: file descriptors to use for process communication
"""
logger.info("Sync files from %s to %s", src, dst)
args = ["rsync", "-arlpmchz", "--copy-unsafe-links", "-e", ssh.generate_command_str(), "--force"]
if info:
args.append("-i")
if verbose:
args.append("-v")
if dry_run:
args.append("-n")
if delete or mirror:
args.append("--delete")
if mirror:
args.extend(("--delete-after", "--delete-excluded"))
if extra_args:
args.extend(extra_args)
cleanup: List[Path] = []
# It is important to add include patterns before exclude patters because rsync might ignore includes if you do otherwise.
_gen_rsync_patterns_file(includes, "--include-from", args, cleanup)
_gen_rsync_patterns_file(excludes, "--exclude-from", args, cleanup)
args.extend((src, dst))
logger.info("Starting sync with command %s", " ".join(args))
with _measure_duration("Sync"):
result = subprocess.run(args, stdout=communication.stdout, stderr=communication.stderr)
for file in cleanup:
file.unlink()
if result.returncode != 0:
raise RemoteConnectionError(f"Failed to sync files between {src} and {dst}. Is remote host reachable?")
def prepare_shell_command(command: Union[str, Sequence[str]]) -> str:
"""Format command parts into one shell command"""
if isinstance(command, str):
return command
# This means the whole command is already preformatted for us
if len(command) == 1 and " " in command[0]:
return command[0]
return " ".join([shlex.quote(c) for c in command])
def pformat_dataclass(obj, indent=" "):
"""Return a string with an object contents prettified"""
result = []
has_dataclass_fields = False
for field in fields(obj): # noqa: F402 'field' shadows the import
value = getattr(obj, field.name)
if is_dataclass(value):
str_value = "\n" + pformat_dataclass(value, indent + " ")
has_dataclass_fields = True
else:
str_value = str(value)
result.append((field.name, str_value))
if has_dataclass_fields:
return "\n".join(f"{indent}- {name}: {value}" for name, value in result)
else:
width = max(len(name) for name, _ in result)
return "\n".join(f"{indent}- {name: <{width}}: {value}" for name, value in result) | /remote-exec-1.11.0.tar.gz/remote-exec-1.11.0/src/remote/util.py | 0.612773 | 0.164584 | util.py | pypi |
import logging
import re
import sys
from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from functools import wraps
from pathlib import Path
from typing import List, Optional, Union
import click
from .configuration import WorkspaceConfig
from .configuration.discovery import get_configuration_medium, load_cwd_workspace_config, save_config
from .configuration.shared import HOST_REGEX, PATH_REGEX
from .exceptions import InvalidInputError, RemoteError
from .explain import explain
from .util import CommunicationOptions, ForwardingOption
from .workspace import SyncedWorkspace
BASE_LOGGING_FORMAT = "%(message)s"
CONNECTION_STRING_FORMAT_REGEX = re.compile(f"^{HOST_REGEX}(:{PATH_REGEX})?$")
DEFAULT_CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
EXECUTION_CONTEXT_SETTINGS = dict(
help_option_names=["-h", "--help"], ignore_unknown_options=True, allow_interspersed_args=False
)
def log_exceptions(f):
"""A decorator that prints the custom exceptions and exit, but propagates internal ones"""
@wraps(f)
def wrapper(*args, **kwards):
try:
f(*args, **kwards)
except Exception as e:
if isinstance(e, RemoteError):
click.secho(str(e), fg="yellow")
sys.exit(1)
raise
return wrapper
def validate_connection_string(ctx, param, value):
matcher = CONNECTION_STRING_FORMAT_REGEX.match(value)
if matcher is None:
raise click.BadParameter(
"Please fix value to match the specified format for connection string", ctx=ctx, param=param
)
return value
def int_or_str_label(label: Optional[str]) -> Optional[Union[int, str]]:
"""Try to convert the label to int and return the result, if it's not successful, return the label"""
if label is None:
return None
try:
# Users enter indexes starting with 1 and internally we use indexes starting with 0
return int(label) - 1
except ValueError:
return label
def check_command(command: List[str]):
if command and command[0].startswith("-"):
# Our execution entry points use ignore_unknown_options=True and allow_interspersed_args=False
# to be able to stream the command to the remote machine. However, there is a downside.
# If user runs this command with an unknown option, this option will become a part of the command.
# That's why we need to manually check if the command starts with an unknown option and print an
# error message in this case.
ctx = click.get_current_context()
click.echo(ctx.get_usage())
click.echo(f"Try '{ctx.info_name} -h' for help\n\nError: no such option {command[0]}")
sys.exit(2)
def _add_remote_host(config: WorkspaceConfig, connection: str):
"""Add a new remote host to the workspace config, check the connection, and save it if connection is ok
:param config: the workspace config decription object
:param connection: connection string in format of 'host-name[:remote_dir]'
"""
parts = connection.split(":")
remote_host = parts[0]
config_medium = get_configuration_medium(config)
remote_dir = config_medium.generate_remote_directory(config) if len(parts) == 1 else Path(parts[1])
added, index = config.add_remote_host(remote_host, remote_dir)
if not added:
click.echo(f"{connection} already exists in config")
sys.exit(0)
# Check if we can connect to the remote host and create a directory there
workspace = SyncedWorkspace.from_config(config, config.root, index)
try:
workspace.create_remote()
except RemoteError:
click.secho(f"Failed to create {workspace.remote.directory} on remote host {remote_host}", fg="yellow")
click.secho("Please check if host is accessible via SSH", fg="yellow")
sys.exit(1)
click.echo(f"Created remote directory at {workspace.remote.host}:{workspace.remote.directory}")
click.echo("Remote is configured and ready to use")
# No errors when executing the above code means we can save the config
config_medium.save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_add(connection: str):
"""Add one more host for remote connection to a config file"""
config = load_cwd_workspace_config()
_add_remote_host(config, connection)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("connection", metavar="host-name[:remote_dir]", callback=validate_connection_string)
@log_exceptions
def remote_init(connection: str):
"""Initiate workspace for the remote execution in the current working directory"""
try:
workspace = load_cwd_workspace_config()
if workspace.root == Path.cwd():
click.secho("A configured workspace already exists in the current working directory.", fg="yellow")
else:
click.secho(
f"A configured workspace already initiated in the current working directory's parent {workspace.root}.",
fg="yellow",
)
click.secho("If you want to add a new host to it, please use remote-add.", fg="yellow")
sys.exit(1)
except RemoteError:
# we expect it to fail. It means we don't overwrite an existing workspace
pass
config = WorkspaceConfig.empty(Path.cwd())
_add_remote_host(config, connection)
# help out with .gitignore if we are in a git repository
if not (config.root / ".git").exists():
return
# make sure we don't keep adding to .gitignore
gitignore = config.root / ".gitignore"
if gitignore.exists():
for line in gitignore.read_text().splitlines():
if line.startswith(".remote"):
return
with gitignore.open("a") as f:
f.write("\n")
f.write(".remote*")
f.write("\n")
click.echo("Added '.remote*' to .gitignore")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option(
"-p", "--push", is_flag=True, help="add IGNORE pattern to push ignore list (mutually exclusive with '--pull')"
)
@click.option(
"-l", "--pull", is_flag=True, help="add IGNORE pattern to pull ignore list (mutually exclusive with '--push')"
)
@click.argument("ignore", nargs=-1, required=True)
@log_exceptions
def remote_ignore(ignore: List[str], push: bool, pull: bool):
"""Add new IGNORE patterns to the ignores list
IGNORE pattern should be a string in rsync-friendly format.
If no options provided these patterns will be ignored on both push and pull
"""
config = load_cwd_workspace_config()
if not push and not pull:
config.ignores.add(ignore)
elif pull and not push:
config.ignores.pull.add(ignore)
elif push and not pull:
config.ignores.push.add(ignore)
else:
raise InvalidInputError("You cannot use both '--pull' and '--push' flags")
config.ignores.trim()
save_config(config)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def remote_host():
"""Print the default remote host in use and exit"""
workspace = SyncedWorkspace.from_cwd()
click.echo(workspace.remote.host)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.argument("index", type=int)
@log_exceptions
def remote_set(index: int):
"""Set a new default remote host for the workspace
INDEX is an index of host in config file to use by default (strating from 1)
"""
config = load_cwd_workspace_config()
if len(config.configurations) < index:
click.secho(
f"Index is too big ({index}). Only have {len(config.configurations)} hosts to choose from.", fg="yellow"
)
sys.exit(1)
elif index < 1:
click.secho("Index should be 1 or higher", fg="yellow")
sys.exit(1)
# we use 0-base index internally
index = index - 1
config.default_configuration = index
save_config(config)
click.echo(f"Remote host is set to {config.configurations[index].host}")
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of the whole cycle")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-e", is_flag=True, help="(deprecated) kept for backward compatibility, noop")
@click.option(
"-t",
"--tunnel",
"port_args",
type=str,
multiple=True,
help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option(
"-s",
"--stream-changes",
default=False,
is_flag=True,
help="Resync local changes if any while the command is being run remotely",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("--multi", is_flag=True, help="sync and run the remote commands on each remote host from config")
@click.option(
"--log",
type=click.Path(file_okay=False, resolve_path=True),
help="Write sync and remote command output to the log file instead of stdout. "
"Log file will be located inside DIRECTORY/<timestamp>/<host>_output.log",
)
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote(
command: List[str],
dry_run: bool,
mirror: bool,
verbose: bool,
e: bool,
port_args: List[str],
label: Optional[str],
stream_changes: bool,
log: Optional[str],
multi: bool,
):
"""Sync local workspace files to remote machine, execute the COMMAND and sync files back regardless of the result"""
check_command(command)
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
ports = [ForwardingOption.from_string(port_arg) for port_arg in port_args]
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
with ThreadPoolExecutor(max_workers=len(workspaces)) as executor:
futures = {}
descriptors = []
start_timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
for workspace in workspaces:
host = workspace.remote.host
if multi or log:
# We save logs into the <log_dir>/<timestamp>/<hostname>_output.log
log_dir = Path(log) if log else (workspace.local_root / "logs")
log_dir = log_dir / start_timestamp
log_dir.mkdir(parents=True, exist_ok=True)
try:
# If the logs are enabled and they are inside the workspace root, we need to exclude them from
# syncing
relative_path = log_dir.relative_to(workspace.local_root)
log_path = f"{relative_path}/*_output.log"
workspace.pull_rules.excludes.append(log_path)
workspace.push_rules.excludes.append(log_path)
except ValueError:
# Value error means that logs are placed outside of the workspace root
pass
fd = (log_dir / f"{host}_output.log").open("w")
descriptors.append(fd)
workspace.communication = CommunicationOptions(stdin=None, stdout=fd, stderr=fd)
future = executor.submit(
workspace.execute_in_synced_env,
command,
dry_run=dry_run,
verbose=verbose,
mirror=mirror,
ports=ports,
stream_changes=stream_changes,
)
futures[future] = workspace
final_exit_code = 0
for future in as_completed(list(futures.keys())):
workspace = futures[future]
try:
exit_code = future.result(timeout=0)
if exit_code != 0:
click.secho(f"Remote command on {workspace.remote.host} exited with {exit_code}", fg="yellow")
final_exit_code = exit_code
except Exception as e: # noqa: F841
class_name = e.__class__.__name__
click.secho(f"{class_name}: {e}", fg="yellow")
final_exit_code = 255
for fd in descriptors:
fd.close()
sys.exit(final_exit_code)
@click.command(context_settings=EXECUTION_CONTEXT_SETTINGS)
@click.option(
"-t",
"--tunnel",
"port_args",
type=str,
multiple=True,
help="Enable local port forwarding. Pass value as <remote port>:<local port>. \
If local port is not passed, the local port value would be set to <remote port> value by default",
)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("command", nargs=-1, required=True)
@log_exceptions
def remote_quick(
command: List[str], port_args: List[str], label: Optional[str],
):
"""Execute the COMMAND remotely, without syncing any files"""
check_command(command)
ports = [ForwardingOption.from_string(port_arg) for port_arg in port_args]
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
code = workspace.execute(command, ports=ports, raise_on_error=False)
sys.exit(code)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a pull")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.argument("path", nargs=-1)
@log_exceptions
def remote_pull(dry_run: bool, verbose: bool, path: List[str], label: Optional[str]):
"""Bring in files from the default remote directory to local workspace.
Optionally bring in PATH instead of the whole workspace.
PATH is a path of file or directory to bring back relative to the remote workspace root.
All sync exclude rules will be omitted if PATH is provided.
"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
if not path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run)
return
for subpath in path:
workspace.pull(info=True, verbose=verbose, dry_run=dry_run, subpath=Path(subpath))
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-n", "--dry-run", is_flag=True, help="do a dry run of a push")
@click.option("-m", "--mirror", is_flag=True, help="mirror local files on the remote host")
@click.option("-v", "--verbose", is_flag=True, help="increase verbosity")
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option(
"--multi", is_flag=True, help="push files to all available remote workspaces instead of pushing to the default one"
)
@log_exceptions
def remote_push(dry_run: bool, mirror: bool, verbose: bool, multi: bool, label: Optional[str]):
"""Push local workspace files to the remote directory"""
if verbose:
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
if multi and label:
raise InvalidInputError("--multi and --label options cannot be used together")
workspaces = SyncedWorkspace.from_cwd_mass() if multi else [SyncedWorkspace.from_cwd(int_or_str_label(label))]
for workspace in workspaces:
workspace.push(info=True, verbose=verbose, dry_run=dry_run, mirror=mirror)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@log_exceptions
def remote_delete(label: Optional[str]):
"""Delete the remote directory"""
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
workspace.clear_remote()
click.echo(f"Successfully deleted {workspace.remote.directory} on host {workspace.remote.host}")
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@click.option("-l", "--label", help="use the host that has corresponding label for the remote execution")
@click.option("-d", "--deep", is_flag=True, help="check latency and download/upload speed if connection is ok")
@log_exceptions
def remote_explain(label: Optional[str], deep: bool):
"""Print out various debug information to debug the workspace"""
logging.basicConfig(level=logging.INFO, format=BASE_LOGGING_FORMAT)
workspace = SyncedWorkspace.from_cwd(int_or_str_label(label))
explain(workspace, deep)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote():
click.secho("mremote is deprecated. Please use 'remote --multi' instead.", fg="yellow")
sys.exit(1)
@click.command(context_settings=DEFAULT_CONTEXT_SETTINGS)
@log_exceptions
def mremote_push():
click.secho("mremote-push is deprecated. Please use 'remote-push --multi' instead.", fg="yellow")
sys.exit(1) | /remote-exec-1.11.0.tar.gz/remote-exec-1.11.0/src/remote/entrypoints.py | 0.530966 | 0.157687 | entrypoints.py | pypi |
import subprocess
import sys
from typing import Optional
from uuid import uuid4
import click
from .util import pformat_dataclass
from .workspace import SyncedWorkspace
SPEED_TEST_FILE_SIZE_MB = 25
def explain(workspace: SyncedWorkspace, deep: bool, host_override: Optional[str] = None) -> None:
"""Print out various debug information to debug the workspace"""
# First, print out the configuration in use
click.secho("Configuration:", fg="yellow")
click.echo(pformat_dataclass(workspace))
click.echo()
# Then, check if host is pingable
# It might not be pingable if the user has ssh alias in configuration
click.secho("Checking connection.", fg="yellow")
remote_host = host_override or workspace.remote.host
ping_result = subprocess.run(["ping", "-c", "1", remote_host], capture_output=True, text=True)
if ping_result.returncode == 0:
click.secho("The remote host is reachable", fg="green")
else:
click.secho("The remote host is unreachable:", fg="red")
click.secho(f"{ping_result.stderr}", fg="red")
click.echo("We will try to do an ssh connection anyway, since the host in config may be an ssh alias")
# Then, try to execute a command remotely. It will show us if there are any ssh-related issues
quick_exec_code = workspace.execute("test", simple=True, raise_on_error=False, verbose=True)
if quick_exec_code == 255:
click.secho(
"The remote host is unreachable or doesn't support passwordless connection", fg="red",
)
sys.exit(1)
click.secho("The remote host supports passwordless connection via SSH", fg="green")
click.echo()
# Then, do a sync dry-run. It will show us what wiles will be synced.
click.secho("Doing a dry-run of a full execution cycle.", fg="yellow")
execution_code = workspace.execute_in_synced_env(["Hello World"], verbose=True, dry_run=True)
if execution_code != 0:
click.secho(
"Execution cycle failed", fg="red",
)
sys.exit(1)
if not deep:
return
# If deep check is required, we will also check for average ping, download and upload speed
click.echo()
if ping_result.returncode == 0:
# Only check for latency if the ping was successful before
click.secho("Checking latency.", fg="yellow")
ping_result = subprocess.run(["ping", "-c", "10", remote_host], capture_output=True, text=True)
for line in ping_result.stdout.splitlines():
if line.startswith("round-trip") or "transmitted" in line:
click.echo(line)
else:
click.secho("Not checking latency since the previous ping attemp failed", fg="yellow")
click.echo()
# Create a file remotely and try to download it
filename = f"speed_test_{uuid4()}"
click.secho(
f"Pulling {SPEED_TEST_FILE_SIZE_MB}MB file from the remote host to check the download speed.", fg="yellow"
)
workspace.execute(f"dd if=/dev/urandom of={filename} bs=1048576 count={SPEED_TEST_FILE_SIZE_MB} &>/dev/null")
workspace.pull(info=True, verbose=True, subpath=filename)
# Remove a file remotely to be able to upload it
workspace.execute(f"rm {filename}")
click.echo()
# Upload the same file to the remote machine
click.secho(f"Pushing {SPEED_TEST_FILE_SIZE_MB}MB file to the remote host to check the upload speed.", fg="yellow")
workspace.push(info=True, verbose=True, subpath=filename)
# Clean up the file locally and remotely
if (workspace.local_root / filename).exists():
(workspace.local_root / filename).unlink()
workspace.execute(f"rm {filename}")
click.echo() | /remote-exec-1.11.0.tar.gz/remote-exec-1.11.0/src/remote/explain.py | 0.551091 | 0.254454 | explain.py | pypi |
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
@dataclass
class RemoteConfig:
"""Single remote connection description"""
# remote machine's hostname
host: str
# relative path to the working directory on remote machine starting from user home dir
directory: Path
# a shell to use on remote machine
shell: str = "sh"
# shell options to use on remote machine
shell_options: str = ""
# whether remote machine supports gssapi-* auth or not
supports_gssapi: bool = True
# Add label to identify remote
label: Optional[str] = None
# A SSH port, if it differs from default
port: Optional[int] = None
@dataclass
class SyncRules:
"""Patterns used by rsync to forcefully exclude or include files while syncyng with remote location"""
# patterns used by rsync to forcefully exclude or include files while pulling from remote
pull: List[str]
# patterns used by rsync to forcefully exclude or include files while pushing from local
push: List[str]
# patterns used by rsync to forcefully exclude or include while transferring files in both directions
both: List[str]
def __post_init__(self):
self.trim()
def compile_push(self):
result = set()
result.update(self.push)
result.update(self.both)
return sorted(result)
def compile_pull(self):
result = set()
result.update(self.pull)
result.update(self.both)
return sorted(result)
def add(self, ignores: List[str]):
new_ignores = set()
new_ignores.update(ignores)
new_ignores.update(self.both)
self.both = sorted(new_ignores)
def trim(self):
self.pull = sorted(set(self.pull))
self.push = sorted(set(self.push))
self.both = sorted(set(self.both))
def is_empty(self):
return not (self.pull or self.push or self.both)
@classmethod
def new(cls) -> "SyncRules":
return cls([], [], [])
@dataclass
class WorkspaceConfig:
"""Complete remote workspace config"""
# absolute path to the workspace root
root: Path
# remote host connection options that can be used in this workspace
configurations: List[RemoteConfig]
# index of default remote host connection
default_configuration: int
# patterns to ignore while syncing the workspace
ignores: SyncRules
# patterns to include while syncing the workspace
includes: SyncRules
@classmethod
def empty(cls, root: Path) -> "WorkspaceConfig":
return cls(
root=root, configurations=[], default_configuration=0, ignores=SyncRules.new(), includes=SyncRules.new(),
)
def add_remote_host(
self,
host: str,
directory: Path,
shell: Optional[str] = None,
shell_options: Optional[str] = None,
label: Optional[str] = None,
port: Optional[int] = None,
) -> Tuple[bool, int]:
remote_config = RemoteConfig(
host=host,
directory=directory,
shell=shell or "sh",
shell_options=shell_options or "",
label=label,
port=port,
)
for num, cfg in enumerate(self.configurations):
if cfg.host == remote_config.host and cfg.directory == remote_config.directory:
return False, num
self.configurations.append(remote_config)
return True, len(self.configurations) - 1
class ConfigurationMedium(metaclass=ABCMeta):
"""A medium class that knows how to load, save, or process a certain type of configuration layout"""
@abstractmethod
def load_config(self, workspace_root: Path) -> WorkspaceConfig:
"""Load configuration for the workspace that is located in provided root directory.
If this method is called, we could assume that check in `is_workspace_root` passed
"""
@abstractmethod
def save_config(self, config: WorkspaceConfig) -> None:
"""Save configuration to its root"""
@abstractmethod
def is_workspace_root(self, path: Path) -> bool:
"""Return true is the path provided contains a configured workspace that can be loaded by this medium"""
@abstractmethod
def generate_remote_directory(self, config: WorkspaceConfig) -> Path:
"""Renerate a default remote directory path for the workspace with provided configuration""" | /remote-exec-1.11.0.tar.gz/remote-exec-1.11.0/src/remote/configuration/__init__.py | 0.917515 | 0.310335 | __init__.py | pypi |
from fastapi import FastAPI, Response, status, Request, Depends, APIRouter
import warnings
from starlette.exceptions import HTTPException as StarletteHTTPException
from dataclasses import dataclass
from typing import Optional, Tuple, get_type_hints, List, Dict, Any
from pydantic import BaseModel
import uvicorn
import codecs
import traceback
import inspect
import base64
import pickle
registered_functions = []
"""
internal status codes:
0 = successful execution
1 = failed execution (example: missing arguments for the function)
2 = exception raised by the function being executed. This will be accompanied by the full traceback
"""
"""todo:
Make decorator be able to be used on a class and then create function paths for each function in the class
"""
app = FastAPI(docs_url=None, redoc_url=None)
router = APIRouter()
class HTTPException(StarletteHTTPException):
def __init__(
self,
status_code: int,
error_code: int,
detail: Any = None,
fields: List[Dict] = None,
) -> None:
"""
Generic HTTP Exception with support for custom status & error codes.
:param status_code: HTTP status code of the response
:param error_code: Custom error code, unique throughout the app
:param detail: detailed message of the error
:param fields: list of dicts with key as field and value as message
"""
super().__init__(status_code=status_code, detail=detail)
self.error_code = error_code
self.fields = fields or []
@app.get("/functions")
def get_functions():
return function_manager.list_functions()
@dataclass
class Settings:
authorization: str = None
class _AuthHolder:
def __init__(self, settings: Settings):
self.settings = settings
def _check_authorization(self, request: Request):
if self.settings is None or self.settings.authorization is None:
return True
if not request.headers.get("authorization") == self.settings.authorization:
error_msg = "Forbidden."
status_code = status.HTTP_403_FORBIDDEN
error_code = status.HTTP_403_FORBIDDEN
raise HTTPException(
status_code=status_code,
detail=error_msg,
error_code=error_code
)
else:
return True
@dataclass
class _Check:
invalid: bool
response: dict = None
@dataclass
class Function:
callback_function: Any
parent_class_name: str
def __post_init__(self):
self.parent_class_name = self.parent_class_name.lower()
self.function_path = f"{self.parent_class_name.lower()}/{self.callback_function.__name__.lower()}"
def __eq__(self, other):
return other == self.function_path
def __hash__(self):
return hash(self.function_path)
class Functions:
def __init__(self):
self.functions: List[Function] = []
def register_function(self, callback_function: object, enforce_types: bool = False, settings: Settings = None) -> Function:
"""
registers a function
:param settings:
:param enforce_types:
:param callback_function:
:return:
True if function has been registered
False if function already exists
"""
if len(str(callback_function.__qualname__).split(".")) == 1:
# function is not located inside an object
parent_class_name = "main"
else:
# function is part of an object
parent_class_name = str(callback_function.__qualname__).split(".")[0]
func = Function(callback_function, parent_class_name)
if func not in self.functions:
self.functions.append(func)
register_api_path(func, enforce_types=enforce_types, settings=settings)
return func
else:
warnings.warn(f"Function is already registered: {func.function_path}")
def register_multiple_functions(self, input_object: object, enforce_types: bool = False, settings: Settings = None) -> List[Function]:
results = []
for name, function_exec in inspect.getmembers(input_object,
lambda x: inspect.isfunction(x) or inspect.ismethod(x)):
if name != "__init__":
function_obj = Function(function_exec, input_object.__class__.__name__)
if function_obj not in self.functions:
self.functions.append(function_obj)
register_api_path(function_obj, enforce_types=enforce_types, settings=settings)
results.append(function_obj)
else:
warnings.warn(f"Function is already registered: {function_obj.function_path}")
return results
def find_function(self, function_path: str) -> Function:
filtered_functions = list((filter(lambda x: x == function_path, self.functions)))
if len(filtered_functions) != 0:
return filtered_functions[0]
def list_functions(self):
all_functions = list(map(lambda x: x.function_path, self.functions))
return all_functions
function_manager = Functions()
class _PostData(BaseModel):
args: Optional[dict] = None
async def _arguments_missing(data: _PostData, stored_function_callback) -> _Check:
"""
check if all required arguments are present
:param data: post data
:return _Check object:
_Check.invalid == True if arguments have incorrect types
"""
# I can just call registered_function.callback_function directly, but it may cause issues
# may want to use function_manager.find() with the path if there are issues
args = inspect.getfullargspec(stored_function_callback).args
missing_args = []
for arg in args:
if arg not in data.args.keys():
missing_args.append(f"'{arg}'")
if len(missing_args) == 0:
return _Check(invalid=False)
else:
joined = " ".join(missing_args)
joined = joined.replace(" ", ", ")
return _Check(
invalid=True,
response={
"status": 1,
"exception": f"TypeError: {stored_function_callback.__name__}() missing {len(missing_args)} required positional arguments: {joined}",
},
)
async def _arguments_correct_type(data: _PostData, stored_function_callback) -> _Check:
"""
checks if all arguments have the correct types
:param data: post data
:return _Check object:
_Check.invalid == True if arguments have incorrect types
"""
args = data.args
hints = get_type_hints(stored_function_callback)
incorrect_types = []
for arg in args.keys():
if type(data.args[arg]) != hints[arg]:
incorrect_types.append(f"'{arg}' requires {hints[arg]}")
if len(incorrect_types) == 0:
return _Check(invalid=False)
else:
joined = ", ".join(incorrect_types)
return _Check(
invalid=True,
response={
"status": 1,
"exception": f"TypeError: {stored_function_callback.__name__}() incorrect types: {joined}",
},
)
def register_api_path(function: Function, enforce_types: bool, settings: Settings = None):
print(f"registered_api_path: {function.function_path}")
func_path = function.function_path
holder = _AuthHolder(settings)
function_is_async = inspect.iscoroutinefunction(function.callback_function)
@app.post(f"/function/{func_path}", dependencies=[Depends(holder._check_authorization)])
async def wrap(data: _PostData, response: Response, request: Request):
function_path = "/".join(request.url.path.split("/")[2:])
stored_function = function_manager.find_function(function_path)
stored_function_callback = stored_function.callback_function
args = inspect.getfullargspec(stored_function_callback).args
if len(args) > 0:
# arguments are required
arg_check = await _arguments_missing(data, stored_function_callback)
if arg_check.invalid:
# there are arguments missing
response.status_code = status.HTTP_400_BAD_REQUEST
return arg_check.response
else:
# no arguments missing
pass
if enforce_types:
type_check = await _arguments_correct_type(data, stored_function_callback)
if type_check.invalid:
# arguments have wrong types
response.status_code = status.HTTP_400_BAD_REQUEST
return type_check.response
else:
# all arguments have correct types
pass
try:
if data.args is None:
func_args = {}
else:
func_args = data.args
if function_is_async:
result = await stored_function_callback(**func_args)
else:
result = stored_function_callback(**func_args)
except:
error_info = traceback.format_exc()
encoded_error = base64.b64encode(
error_info.encode("ascii")
).decode()
response.status_code = status.HTTP_500_INTERNAL_SERVER_ERROR
return {
"status": 2,
"exception": encoded_error,
}
pickled_result = codecs.encode(pickle.dumps(result), "base64").decode()
response.status_code = status.HTTP_200_OK
return {"status": 0, "result": pickled_result}
return wrap
def remote(enforce_types: bool = False, settings: Settings = None):
def remote_inside(func):
# automatically registers the api path
registered_function = function_manager.register_function(func, enforce_types=enforce_types, settings=settings)
return remote_inside
def start(host: str = "127.0.0.1", port: int = 8000, reload: bool = False, __dev: bool = False, **kwargs):
if not __dev:
uvicorn.run("remote_functions.tools:app", host=host, port=port, reload=reload, **kwargs)
else:
uvicorn.run("src.remote_functions.tools:app", host=host, port=port, reload=reload, **kwargs) | /remote-functions-1.0.1.tar.gz/remote-functions-1.0.1/src/remote_functions/tools.py | 0.821546 | 0.152631 | tools.py | pypi |
import socket
import struct
from .commands import Commands
class RemoteI2CClient:
def __init__(self, remote_host, remote_port=5446):
self._remote_host = remote_host
self._remote_port = remote_port
self._server = None
def connect(self):
self._server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._server.connect((self._remote_host, self._remote_port))
def disconnect(self):
self._server.close()
def read_byte(self, i2c_addr: int, force:bool=None) -> int:
"""
Read a single byte from a device.
:param i2c_addr: i2c address
:param force: Unused - here for compatibility with other libraries
:return: Read byte value
"""
self._server.sendall(bytes([Commands.ReadByte, i2c_addr]))
value, = self._server.recv(1)
return value
def write_byte(self, i2c_addr: int, value: int, force:bool=None) -> None:
"""
Write a single byte to a device.
:param i2c_addr: i2c address
:param value: Byte value to transmit
:param force: Unused - here for compatibility with other libraries
"""
self._server.sendall(bytes([Commands.WriteByte, i2c_addr, value]))
def read_byte_data(self, i2c_addr: int, register: int, force:bool=None) -> int:
"""
Read a single byte from a designated register.
:param i2c_addr: i2c address
:param register: Register to read
:param force: Unused - here for compatibility with other libraries
:return: Read byte value
"""
self._server.sendall(bytes([Commands.ReadByteData, i2c_addr, register]))
value, = self._server.recv(1)
return value
def write_byte_data(self, i2c_addr: int, register: int, value: int, force:bool=None) -> None:
"""
Write a byte to a given register.
:param i2c_addr: i2c address
:param register: Register to read
:param value: Byte value to transmit
:param force: Unused - here for compatibility with other libraries
"""
self._server.sendall(bytes([Commands.WriteByteData, i2c_addr, register, value]))
def read_word_data(self, i2c_addr: int, register: int, force:bool=None) -> int:
"""
Read a single word (2 bytes) from a given register.
:param i2c_addr: i2c address
:param register: Register to read
:param force: Unused - here for compatibility with other libraries
:return: Read byte value
"""
self._server.sendall(bytes([Commands.ReadWordData, i2c_addr, register]))
value = self._server.recv(2)
return struct.unpack('>H', value)[0]
def write_word_data(self, i2c_addr: int, register: int, value: int, force:bool=None) -> None:
"""
Write a single word (2 bytes) to a given register.
:param i2c_addr: i2c address
:param register: Register to read
:param value: Word value to transmit
:param force: Unused - here for compatibility with other libraries
"""
self._server.sendall(bytes([Commands.WriteWordData, i2c_addr, register]))
self._server.sendall(struct.pack('>H', value))
def read_i2c_block_data(self, i2c_addr: int, register: int, length: int, force:bool=None) -> int:
"""
Read a block of byte data from a given register.
:param i2c_addr: i2c address
:param register: Start register
:param length: Desired block length
:param force: Unused - here for compatibility with other libraries
:return: List of bytes
"""
self._server.sendall(bytes([Commands.ReadI2CBlockData, i2c_addr, register, length]))
value = self._server.recv(length)
return value
def write_i2c_block_data(self, i2c_addr: int, register: int, data: list, force:bool=None) -> None:
"""
Write a block of byte data to a given register.
:param i2c_addr: i2c address
:param register: Start register
:param data: List of bytes
:param force: Unused - here for compatibility with other libraries
"""
self._server.sendall(bytes([Commands.WriteI2CBlockData, i2c_addr, register, len(data)]))
self._server.sendall(bytes(data)) | /remote-i2c-0.0.9.tar.gz/remote-i2c-0.0.9/remote_i2c/client.py | 0.669421 | 0.170957 | client.py | pypi |
from __future__ import print_function
import argparse
import getpass
import json
import os
import re
import sys
from os import path
from subprocess import list2cmdline
# How we identify kernels that rik will manage
from remote_ikernel import RIK_PREFIX
# These go through a compatibility layer to work with IPython and Jupyter
from remote_ikernel.compat import kernelspec as ks
from remote_ikernel.compat import tempdir
def delete_kernel(kernel_name):
"""
Delete the kernel by removing the kernel.json and directory.
Parameters
----------
kernel_name : str
The name of the kernel to delete
Raises
------
KeyError
If the kernel is not found.
"""
spec = ks.get_kernel_spec(kernel_name)
os.remove(path.join(spec.resource_dir, 'kernel.json'))
try:
os.rmdir(spec.resource_dir)
except OSError:
# Non empty directory, just leave it
pass
def show_kernel(kernel_name):
"""
Print the contents of the kernel.json to the terminal, plus some extra
information.
Parameters
----------
kernel_name : str
The name of the kernel to show the information for.
"""
# Load the raw json, since we store some unexpected data in there too
spec = ks.get_kernel_spec(kernel_name)
with open(path.join(spec.resource_dir, 'kernel.json')) as kernel_file:
kernel_json = json.load(kernel_file)
# Manually format the json to put each key: value on a single line
print(" * Kernel found in: {0}".format(spec.resource_dir))
print(" * Name: {0}".format(spec.display_name))
print(" * Kernel command: {0}".format(list2cmdline(spec.argv)))
print(" * remote_ikernel command: {0}".format(list2cmdline(
kernel_json['remote_ikernel_argv'])))
print(" * Raw json: {0}".format(json.dumps(kernel_json, indent=2)))
def add_kernel(interface, name, kernel_cmd, cpus=1, pe=None, language=None,
system=False, workdir=None, host=None, precmd=None,
launch_args=None, tunnel_hosts=None, verbose=False):
"""
Add a kernel. Generates a kernel.json and installs it for the system or
user.
"""
kernel_name = []
display_name = []
argv = [sys.executable, '-m', 'remote_ikernel']
# How to connect to kernel
if interface == 'local':
argv.extend(['--interface', 'local'])
kernel_name.append('local')
display_name.append("Local")
elif interface == 'pbs':
argv.extend(['--interface', 'pbs'])
display_name.append('PBS')
elif interface == 'sge':
argv.extend(['--interface', 'sge'])
kernel_name.append('sge')
display_name.append("GridEngine")
elif interface == 'ssh':
if host is None:
raise KeyError('A host is required for ssh.')
argv.extend(['--interface', 'ssh'])
argv.extend(['--host', host])
kernel_name.append('ssh')
kernel_name.append(host)
display_name.append("SSH")
display_name.append(host)
elif interface == 'mosh':
if host is None:
raise KeyError('A host is required for mosh.')
argv.extend(['--interface', 'mosh'])
argv.extend(['--host', host])
kernel_name.append('mosh')
kernel_name.append(host)
display_name.append("MOSH")
display_name.append(host)
elif interface == 'slurm':
argv.extend(['--interface', 'slurm'])
kernel_name.append('slurm')
display_name.append("SLURM")
else:
raise ValueError("Unknown interface {0}".format(interface))
display_name.append(name)
kernel_name.append(re.sub(r'\W', '', name).lower())
if pe is not None:
argv.extend(['--pe', pe])
kernel_name.append(pe)
display_name.append(pe)
if cpus and cpus > 1:
argv.extend(['--cpus', '{0}'.format(cpus)])
kernel_name.append('{0}'.format(cpus))
display_name.append('{0} CPUs'.format(cpus))
if workdir is not None:
argv.extend(['--workdir', workdir])
if precmd is not None:
argv.extend(['--precmd', precmd])
if launch_args is not None:
argv.extend(['--launch-args', launch_args])
if tunnel_hosts:
# This will be a list of hosts
kernel_name.append('via_{0}'.format("_".join(tunnel_hosts)))
display_name.append("(via {0})".format(" ".join(tunnel_hosts)))
argv.extend(['--tunnel-hosts'] + tunnel_hosts)
if verbose:
argv.extend(['--verbose'])
# protect the {connection_file} part of the kernel command
kernel_cmd = kernel_cmd.replace('{connection_file}',
'{host_connection_file}')
argv.extend(['--kernel_cmd', kernel_cmd])
# remote_ikernel needs the connection file too
argv.append('{connection_file}')
# Prefix all kernels with 'rik_' for management.
kernel_name = RIK_PREFIX + '_'.join(kernel_name)
# Having an @ in the string messes up the javascript;
# so get rid of evrything just in case.
kernel_name = re.sub(r'\W', '_', kernel_name)
kernel_json = {
'display_name': " ".join(display_name),
'argv': argv,
}
if language is not None:
kernel_json['language'] = language
# Put the commandline in so that '--show' will show how to recreate
# the kernel
kernel_json['remote_ikernel_argv'] = sys.argv
# False attempts a system install, otherwise install as the current user
if system:
username = False
else:
username = getpass.getuser()
# kernel.json file installation
with tempdir.TemporaryDirectory() as temp_dir:
os.chmod(temp_dir, 0o755) # Starts off as 700, not user readable
with open(path.join(temp_dir, 'kernel.json'), 'w') as kernel_file:
json.dump(kernel_json, kernel_file, sort_keys=True, indent=2)
ks.install_kernel_spec(temp_dir, kernel_name,
user=username, replace=True)
return kernel_name
def manage():
"""
Manage the available remote_ikernels.
All the options are pulled from arguments so we take no
arguments here.
"""
description = ["Remote IKernel management utility", "",
"Currently installed kernels:"]
existing_kernels = {}
# Sort so they are always in the same order
for kernel_name in sorted(ks.find_kernel_specs()):
if kernel_name.startswith(RIK_PREFIX):
spec = ks.get_kernel_spec(kernel_name)
display = " ['{kernel_name}']: {desc}".format(
kernel_name=kernel_name, desc=spec.display_name)
existing_kernels[kernel_name] = spec
description.append(display)
# The raw formatter stops lines wrapping
parser = argparse.ArgumentParser(
prog='%prog manage', description="\n".join(description),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--show', '-s', help="Print the contents of the "
"kernel.")
parser.add_argument('--add', '-a', action="store_true", help="Add a new "
"kernel according to other commandline options.")
parser.add_argument('--delete', '-d', help="Remove the kernel and delete "
"the associated kernel.json.")
parser.add_argument('--kernel_cmd', '-k', help="Kernel command "
"to install.")
parser.add_argument('--name', '-n', help="Name to identify the kernel,"
"e.g. 'Python 2.7'.")
parser.add_argument('--language', '-l', help="Explicitly specify the "
"language of the kernel.")
parser.add_argument('--cpus', '-c', type=int, help="Launch the kernel "
"as a multi-core job with this many cores if > 1.")
parser.add_argument('--pe', help="Parallel environment to use on when"
"running on gridengine.")
parser.add_argument('--host', '-x', help="The hostname or ip address "
"running through an SSH connection. For non standard "
"ports use host:port.")
parser.add_argument('--interface', '-i',
choices=['local', 'ssh', 'mosh', 'pbs', 'sge', 'slurm'],
help="Specify how the remote kernel is launched.")
parser.add_argument('--system', help="Install the kernel into the system "
"directory so that it is available for all users. "
"Might need admin privileges.", action='store_true')
parser.add_argument('--workdir', help="Directory in which to start the "
"kernel. If not specified it will use the current "
"directory. This is important if the local and remote "
"filesystems differ.")
parser.add_argument('--remote-precmd', help="Command to execute on the "
"remote host before launching the kernel, but after "
"changing to the working directory.")
parser.add_argument('--remote-launch-args', help="Arguments to add to the "
"command that launches the remote session, i.e. the "
"ssh or qlogin command, such as '-l h_rt=24:00:00' to "
"limit job time on GridEngine jobs.")
parser.add_argument('--tunnel-hosts', '-t', nargs='+', help="Tunnel the "
"connection through the given ssh hosts before "
"starting the endpoint interface. Works with any "
"interface. For non standard ports use host:port.")
parser.add_argument('--verbose', '-v', action='store_true', help="Running "
"kernel will produce verbose debugging on the console.")
# Temporarily remove 'manage' from the arguments
raw_args = sys.argv[:]
sys.argv.remove('manage')
args = parser.parse_args()
sys.argv = raw_args
if args.add:
kernel_name = add_kernel(args.interface, args.name, args.kernel_cmd,
args.cpus, args.pe, args.language, args.system,
args.workdir, args.host, args.remote_precmd,
args.remote_launch_args, args.tunnel_hosts,
args.verbose)
print("Installed kernel {0}.".format(kernel_name))
elif args.delete:
if args.delete in existing_kernels:
delete_kernel(args.delete)
else:
print("Can't delete {0}".format(args.delete))
print("\n".join(description[2:]))
elif args.show:
if args.show in existing_kernels:
show_kernel(args.show)
else:
print("Kernel {0} doesn't exist".format(args.show))
print("\n".join(description[2:]))
else:
parser.print_help() | /remote_ikernel_mosh-0.4.4.tar.gz/remote_ikernel_mosh-0.4.4/remote_ikernel/manage.py | 0.450118 | 0.179081 | manage.py | pypi |
from datetime import datetime
from os.path import join
from ...config import Config, Oarsub
from ...user_interaction import to_bold, print_info
def print_cluster_request_prop(
property_name: str,
value: str
) -> None:
num_dots: int = 25 - len(property_name)
to_print: str = '\t' + to_bold(property_name + ':') + '.' * num_dots + str(value)
print_info(to_print)
def _print_cluster_request(
oarsub_config: Oarsub,
host_id: str,
job_name: str
) -> None:
print_info(to_bold('Cluster request:'))
print_cluster_request_prop(
property_name='Job name',
value=job_name
)
print_cluster_request_prop(
property_name='Number of hosts',
value=oarsub_config.num_hosts
)
if oarsub_config.num_cpu_cores > 0:
print_cluster_request_prop(
property_name='Number of CPU cores',
value=oarsub_config.num_cpu_cores
)
print_cluster_request_prop(
property_name='Number of GPUs',
value=oarsub_config.num_gpus
)
print_cluster_request_prop(
property_name='Walltime',
value=oarsub_config.walltime
)
print_cluster_request_prop(
property_name='Cluster name',
value=oarsub_config.cluster_name
)
if host_id != '':
print_cluster_request_prop(
property_name='Host ID',
value=host_id
)
if oarsub_config.custom_property_query:
print_cluster_request_prop(
property_name='Custom property query',
value=oarsub_config.custom_property_query
)
print_cluster_request_prop(
property_name='Besteffort',
value=oarsub_config.besteffort
)
print_cluster_request_prop(
property_name='Idempotent',
value=oarsub_config.idempotent
)
def get_oarsub_cmd_prefix(
host_id: str,
job_name: str
) -> list[str]:
# Get the oarsub config
oarsub_config: Oarsub = Config().oarsub
# Oarsub command
oarsub_cmd: list[str] = ['oarsub']
# Name the job
oarsub_cmd += ['--name', job_name]
# Resource list
resource_list: str = f'/host={oarsub_config.num_hosts}'
if oarsub_config.num_cpu_cores > 0:
resource_list += f'/core={oarsub_config.num_cpu_cores}'
if oarsub_config.num_gpus > 0:
resource_list += f'/gpudevice={oarsub_config.num_gpus}'
if oarsub_config.walltime != '':
resource_list += f',walltime={oarsub_config.walltime}'
oarsub_cmd += ['-l', resource_list]
# Check that the cluster name is valid
assert oarsub_config.cluster_name in [
'beagle',
'cp',
'kinovis',
'mistis',
'nanod',
'perception',
'thoth'
], f"Unknown cluster name: {oarsub_config.cluster_name}"
# Property list
property_list: str
# Using directly the provided property query, ignoring other options.
if oarsub_config.custom_property_query:
property_list = '\"' + oarsub_config.custom_property_query + '\"'
# Using the other config settings.
else:
property_list = f"\"cluster='{oarsub_config.cluster_name}'"
if host_id:
assert 4 <= len(host_id) <= 5
assert host_id.startswith('gpu') \
or host_id.startswith('node')
cluster_host_name: str = f'{host_id}-{oarsub_config.cluster_name}.inrialpes.fr'
property_list += f" AND host='{cluster_host_name}'"
property_list += '\"'
oarsub_cmd += ['-p', property_list]
if oarsub_config.besteffort:
oarsub_cmd += ['-t', 'besteffort']
if oarsub_config.idempotent:
oarsub_cmd += ['-t', 'idempotent']
_print_cluster_request(
oarsub_config=oarsub_config,
job_name=job_name,
host_id=host_id
)
return oarsub_cmd
def add_standard_output_params(
oarsub_log_path: str,
base_name: str = ''
) -> list[str]:
time_stamp: str = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
if not base_name:
base_name = f"OAR.{time_stamp}.%jobid%.%jobname%"
return [
'--stdout', join(oarsub_log_path, f'{base_name}.stdout'),
'--stderr', join(oarsub_log_path, f'{base_name}.stderr')
] | /remote-inria-1.4.0.tar.gz/remote-inria-1.4.0/remi/remote_compute/clusters/oarsub.py | 0.487551 | 0.208461 | oarsub.py | pypi |
"""Code related to managing kernels running in containers."""
import os
import signal
import abc
import urllib3 # docker ends up using this and it causes lots of noise, so turn off warnings
from jupyter_kernel_mgmt import localinterfaces
from .launcher import launch_kernel
from .lifecycle_manager import RemoteKernelLifecycleManager
urllib3.disable_warnings()
local_ip = localinterfaces.public_ips()[0]
default_kernel_uid = '1000' # jovyan user is the default
default_kernel_gid = '100' # users group is the default
# These could be enforced via a PodSecurityPolicy, but those affect
# all pods so the cluster admin would need to configure those for
# all applications.
uid_blacklist = os.getenv("EG_UID_BLACKLIST", "0").split(',')
gid_blacklist = os.getenv("EG_GID_BLACKLIST", "0").split(',')
mirror_working_dirs = bool((os.getenv('EG_MIRROR_WORKING_DIRS', 'false').lower() == 'true'))
class ContainerKernelLifecycleManager(RemoteKernelLifecycleManager):
"""Kernel lifecycle management for container-based kernels."""
def __init__(self, kernel_manager, lifecycle_config):
super(ContainerKernelLifecycleManager, self).__init__(kernel_manager, lifecycle_config)
self.container_name = ''
self.assigned_node_ip = None
self._determine_kernel_images(lifecycle_config)
def _determine_kernel_images(self, lifecycle_config):
"""Determine which kernel images to use.
Initialize to any defined in the lifecycle manager override that then let those provided
by client via env override.
"""
if lifecycle_config.get('image_name'):
self.kernel_image = lifecycle_config.get('image_name')
self.kernel_image = os.environ.get('KERNEL_IMAGE', self.kernel_image)
self.kernel_executor_image = self.kernel_image # Default the executor image to current image
if lifecycle_config.get('executor_image_name'):
self.kernel_executor_image = lifecycle_config.get('executor_image_name')
self.kernel_executor_image = os.environ.get('KERNEL_EXECUTOR_IMAGE', self.kernel_executor_image)
async def launch_process(self, kernel_cmd, **kwargs):
"""Launches the specified process within the container environment."""
# Set env before superclass call so we see these in the debug output
kwargs['env']['KERNEL_IMAGE'] = self.kernel_image
kwargs['env']['KERNEL_EXECUTOR_IMAGE'] = self.kernel_executor_image
if not mirror_working_dirs: # If mirroring is not enabled, remove working directory if present
if 'KERNEL_WORKING_DIR' in kwargs['env']:
del kwargs['env']['KERNEL_WORKING_DIR']
self._enforce_uid_gid_blacklists(**kwargs)
await super(ContainerKernelLifecycleManager, self).launch_process(kernel_cmd, **kwargs)
self.local_proc = launch_kernel(kernel_cmd, **kwargs)
self.pid = self.local_proc.pid
self.ip = local_ip
self.log.info("{}: kernel launched. Kernel image: {}, KernelID: {}, cmd: '{}'"
.format(self.__class__.__name__, self.kernel_image, self.kernel_id, kernel_cmd))
await self.confirm_remote_startup()
return self
def _enforce_uid_gid_blacklists(self, **kwargs):
"""Determine UID and GID with which to launch container and ensure they do not appear in blacklist."""
kernel_uid = kwargs['env'].get('KERNEL_UID', default_kernel_uid)
kernel_gid = kwargs['env'].get('KERNEL_GID', default_kernel_gid)
if kernel_uid in uid_blacklist:
http_status_code = 403
error_message = "Kernel's UID value of '{}' has been denied via EG_UID_BLACKLIST!".format(kernel_uid)
self.log_and_raise(http_status_code=http_status_code, reason=error_message)
elif kernel_gid in gid_blacklist:
http_status_code = 403
error_message = "Kernel's GID value of '{}' has been denied via EG_GID_BLACKLIST!".format(kernel_gid)
self.log_and_raise(http_status_code=http_status_code, reason=error_message)
# Ensure the kernel's env has what it needs in case they came from defaults
kwargs['env']['KERNEL_UID'] = kernel_uid
kwargs['env']['KERNEL_GID'] = kernel_gid
def poll(self):
"""Determines if container is still active.
Submitting a new kernel to the container manager will take a while to be Running.
Thus kernel ID will probably not be available immediately for poll.
So will regard the container as active when no status is available or one of the initial
phases.
Returns
-------
None if the container cannot be found or its in an initial state. Otherwise False.
"""
result = False
container_status = self.get_container_status(None)
if container_status is None or container_status in self.get_initial_states():
result = None
return result
def send_signal(self, signum):
"""Send signal `signum` to container.
Parameters
----------
signum : int
The signal number to send. Zero is used to determine heartbeat.
"""
if signum == 0:
return self.poll()
elif signum == signal.SIGKILL:
return self.kill()
else:
# This is very likely an interrupt signal, so defer to the super class
# which should use the communication port.
return super(ContainerKernelLifecycleManager, self).send_signal(signum)
async def kill(self):
"""Kills a containerized kernel.
Returns
-------
None if the container is gracefully terminated, False otherwise.
"""
result = None
if self.container_name: # We only have something to terminate if we have a name
result = self.terminate_container_resources()
return result
async def cleanup(self):
# Since container objects don't necessarily go away on their own, we need to perform the same
# cleanup we'd normally perform on forced kill situations.
await self.kill()
await super(ContainerKernelLifecycleManager, self).cleanup()
async def confirm_remote_startup(self):
"""Confirms the container has started and returned necessary connection information."""
self.start_time = RemoteKernelLifecycleManager.get_current_time()
i = 0
ready_to_connect = False # we're ready to connect when we have a connection file to use
while not ready_to_connect:
i += 1
await self.handle_timeout()
container_status = self.get_container_status(str(i))
if container_status:
if self.assigned_host != '':
ready_to_connect = await self.receive_connection_info()
self.pid = 0 # We won't send process signals for kubernetes lifecycle management
self.pgid = 0
else:
self.detect_launch_failure()
def get_lifecycle_info(self):
"""Captures the base information necessary for kernel persistence relative to containers."""
lifecycle_info = super(ContainerKernelLifecycleManager, self).get_lifecycle_info()
lifecycle_info.update({'assigned_node_ip': self.assigned_node_ip, })
return lifecycle_info
def load_lifecycle_info(self, lifecycle_info):
"""Loads the base information necessary for kernel persistence relative to containers."""
super(ContainerKernelLifecycleManager, self).load_lifecycle_info(lifecycle_info)
self.assigned_node_ip = lifecycle_info['assigned_node_ip']
@abc.abstractmethod
def get_initial_states(self):
"""Return list of states indicating container is starting (includes running)."""
raise NotImplementedError
@abc.abstractmethod
def get_container_status(self, iteration_string):
"""Return current container state."""
raise NotImplementedError
@abc.abstractmethod
def terminate_container_resources(self):
"""Terminate any artifacts created on behalf of the container's lifetime."""
raise NotImplementedError | /remote_kernel_provider-0.3.0-py3-none-any.whl/remote_kernel_provider/container.py | 0.733356 | 0.19252 | container.py | pypi |
from dataclasses import dataclass
from typing import (
Any,
List,
Dict,
Optional,
)
from .llm_rpc.api import (
GenerateReply,
GenerateReplyGeneration,
GenerateReplyGenerationList,
GenerationalGutsReply,
GenerationalGutsReplyTokenStack,
GenerationalGutsReplyGeneration,
)
import numpy
try:
from langchain.schema import Generation, LLMResult
__all__ = ['Generation', 'LLMResult']
except ImportError:
@dataclass
class Generation:
text: str
generation_info: Optional[Dict[str, Any]] = None
@dataclass
class LLMResult:
generations: List[List[Generation]]
@dataclass
class GenerationalGuts:
positional_embeddings: numpy.ndarray
token_embeddings: numpy.ndarray
hidden_states: numpy.ndarray
sentence_tokens: List[str]
sentence_ids: List[int]
top_k_generated_token: List[str]
top_k_generated_token_id: List[int]
top_k_generated_token_logits: List[float]
def pack(self) -> GenerationalGutsReply:
tokens = []
for pe, te, hs, st, si in zip(
self.positional_embeddings,
self.token_embeddings,
self.hidden_states,
self.sentence_tokens,
self.sentence_ids,
):
tokens.append(GenerationalGutsReplyTokenStack(
token=st,
token_id=si,
positional_embedding=list(pe),
token_embedding=list(te),
hidden_state=list(hs),
))
generations = []
for t, i, l in zip(
self.top_k_generated_token,
self.top_k_generated_token_id,
self.top_k_generated_token_logits,
):
generations.append(GenerationalGutsReplyGeneration(
token=t,
id=i,
logit=l,
))
return GenerationalGutsReply(
tokens=tokens,
generations=generations,
)
def unpack_generational_guts(guts: GenerationalGutsReply) -> GenerationalGuts:
positional_embeddings = numpy.array([t.positional_embedding for t in guts.tokens])
token_embeddings = numpy.array([t.token_embedding for t in guts.tokens])
hidden_states = numpy.array([t.hidden_state for t in guts.tokens])
sentence_tokens = [t.token for t in guts.tokens]
sentence_ids = [t.token_id for t in guts.tokens]
top_k_generated_token = [g.token for g in guts.generations]
top_k_generated_token_id = [g.id for g in guts.generations]
top_k_generated_token_logits = [g.logit for g in guts.generations]
return GenerationalGuts(
positional_embeddings=positional_embeddings,
token_embeddings=token_embeddings,
hidden_states=hidden_states,
sentence_tokens=sentence_tokens,
sentence_ids=sentence_ids,
top_k_generated_token=top_k_generated_token,
top_k_generated_token_id=top_k_generated_token_id,
top_k_generated_token_logits=top_k_generated_token_logits,
)
def unpack_result(result: GenerateReply) -> LLMResult:
return LLMResult(generations=[[Generation(text=gg.text, generation_info=gg.generation_info) for gg in g.generations] for g in result.generations])
def pack_result(result: LLMResult) -> GenerateReply:
return GenerateReply(generations=[GenerateReplyGenerationList(generations=[GenerateReplyGeneration(text=gg.text, generation_info=gg.generation_info) for gg in g]) for g in result.generations]) | /remote_llm-0.0.1-py3-none-any.whl/remote_llm/schema.py | 0.709019 | 0.215536 | schema.py | pypi |
import abc
from typing import List, Optional
import torch
from remote_llm.schema import GenerationalGuts, LLMResult
class AbstractLLM(object):
@abc.abstractmethod
def llm_name(self) -> str:
pass
@abc.abstractmethod
def generate(self, prompts: List[str], stop: Optional[List[str]] = None) -> LLMResult:
pass
class EducationalLLM(AbstractLLM):
@abc.abstractmethod
def tokens(self, text: str) -> List[str]:
pass
@abc.abstractmethod
def tokens(self, text: str) -> List[int]:
pass
@abc.abstractmethod
def token_embeddings(self, text: str) -> torch.tensor:
pass
@abc.abstractmethod
def position_embeddings(self, text: str) -> torch.tensor:
pass
@abc.abstractmethod
def forward(self, text: str) -> torch.tensor:
pass
@abc.abstractmethod
def logits(self, text: str) -> torch.tensor:
pass
@abc.abstractmethod
def displayable_tokens(self, tokens: List[int]) -> List[str]:
pass
def get_generational_guts(llm: EducationalLLM, text: str, top_k_logits: int = 5, fft: bool = True, embedding_trunkation: Optional[int] = 100) -> GenerationalGuts:
sentence_ids = llm.tokens(text)
sentence_tokens = llm.displayable_tokens(sentence_ids)
token_embeddings = llm.token_embeddings(text).to(torch.float32).detach().cpu()
position_embeddings = llm.position_embeddings(text).to(torch.float32).detach().cpu()
hidden_states = llm.forward(text).to(torch.float32).detach().cpu()
logits = llm.logits(text)[-1,:].to(torch.float32).detach().cpu()
logits = torch.topk(logits, k=top_k_logits, dim=-1)
if fft:
token_embeddings = torch.fft.fft(token_embeddings, dim=1).real.numpy()
position_embeddings = torch.fft.fft(position_embeddings, dim=1).real.numpy()
hidden_states = torch.fft.fft(hidden_states, dim=1).real.numpy()
if embedding_trunkation is not None:
token_embeddings = token_embeddings[:, :embedding_trunkation]
position_embeddings = position_embeddings[:, :embedding_trunkation]
hidden_states = hidden_states[:, :embedding_trunkation]
top_k_generated_token_id = logits.indices
top_k_generated_token = llm.displayable_tokens(top_k_generated_token_id)
top_k_generated_token_logits = logits.values
return GenerationalGuts(
positional_embeddings=position_embeddings,
token_embeddings=token_embeddings,
hidden_states=hidden_states,
sentence_ids=sentence_ids,
sentence_tokens=sentence_tokens,
top_k_generated_token=top_k_generated_token,
top_k_generated_token_id=top_k_generated_token_id,
top_k_generated_token_logits=top_k_generated_token_logits,
) | /remote_llm-0.0.1-py3-none-any.whl/remote_llm/base_llm.py | 0.92779 | 0.361221 | base_llm.py | pypi |
import torch
import logging
from typing import (
List,
Optional,
)
from transformers import TextGenerationPipeline, AutoTokenizer, AutoModelForCausalLM
from .base_llm import EducationalLLM, get_generational_guts
from .schema import Generation, GenerationalGuts, LLMResult
logger = logging.getLogger(__name__)
class GPTNeoWrap(EducationalLLM):
model: AutoModelForCausalLM
tokenizer: AutoTokenizer
generator: TextGenerationPipeline
max_new_tokens: int
num_sequences: int
def __init__(self, *, model: AutoModelForCausalLM, tokenizer: AutoTokenizer, max_new_tokens = 20, num_sequences = 1):
self.model = model
self.tokenizer = tokenizer
self.generator = TextGenerationPipeline(model=model, tokenizer=tokenizer, device=0)
self.max_new_tokens = max_new_tokens
self.num_sequences = num_sequences
def generate(self, prompts: List[str], stop: Optional[List[str]] = None) -> LLMResult:
generations = []
for prompt in prompts:
generated = self.generator(
prompt,
max_new_tokens=self.max_new_tokens,
num_return_sequences=self.num_sequences,
do_sample=True,
top_k=50,
top_p=0.95,
repetition_penalty=1.0,
)
generated = [Generation(text=gen['generated_text'][len(prompt):]) for gen in generated]
generations.append(generated)
return LLMResult(generations=generations)
def llm_name(self) -> str:
return self.model.config._name_or_path
def tokens(self, text: str) -> List[int]:
return self.tokenizer(text)["input_ids"]
def token_embeddings(self, text: str) -> torch.tensor:
tokens = self.tokenizer(text, return_tensors="pt")
tokens.to(self.model.device)
return self.model._modules["transformer"].wte(tokens["input_ids"])[0]
def position_embeddings(self, text: str) -> torch.tensor:
tokens = self.tokenizer(text, return_tensors="pt")["input_ids"]
input_shape = tokens.shape
position_ids = torch.arange(0, input_shape[-1], dtype=torch.long, device=self.model.device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
return self.model._modules["transformer"].wpe(position_ids)[0]
def forward(self, text: str) -> torch.tensor:
tokens = self.tokenizer(text, return_tensors="pt")
tokens.to(self.model.device)
return self.model.transformer(**tokens, output_hidden_states=True).last_hidden_state[0]
def logits(self, text: str) -> torch.tensor:
tokens = self.tokenizer(text, return_tensors="pt")
tokens.to(self.model.device)
return torch.sigmoid(self.model.lm_head(self.forward(text)))
def displayable_tokens(self, tokens: List[int]) -> List[str]:
def raw(string: str, replace: bool = False) -> str:
"""Returns the raw representation of a string. If replace is true, replace a single backslash's repr \\ with \."""
r = repr(string)[1:-1] # Strip the quotes from representation
if replace:
r = r.replace('\\\\', '\\')
return r
return [raw(self.tokenizer.convert_tokens_to_string([token])) for token in self.tokenizer.convert_ids_to_tokens(tokens)]
def get_generational_guts(self, text: str, *, top_k_logits: int = 5, fft: bool = True, embedding_trunkation: Optional[int] = 25) -> GenerationalGuts:
return get_generational_guts(self, text, top_k_logits=top_k_logits, fft=fft, embedding_trunkation=embedding_trunkation) | /remote_llm-0.0.1-py3-none-any.whl/remote_llm/gpt_neo_wrap.py | 0.915427 | 0.214342 | gpt_neo_wrap.py | pypi |
from dataclasses import dataclass
from typing import List
import betterproto
import grpclib
@dataclass
class GenerateRequest(betterproto.Message):
api_key: str = betterproto.string_field(1)
prompts: List[str] = betterproto.string_field(2)
stop: List[str] = betterproto.string_field(3)
@dataclass
class GenerateReply(betterproto.Message):
generations: List["GenerateReplyGenerationList"] = betterproto.message_field(1)
@dataclass
class GenerateReplyGeneration(betterproto.Message):
text: str = betterproto.string_field(1)
# JSON object with additional information about the generation.
generation_info: str = betterproto.string_field(2)
@dataclass
class GenerateReplyGenerationList(betterproto.Message):
generations: List["GenerateReplyGeneration"] = betterproto.message_field(1)
@dataclass
class LLMTypeRequest(betterproto.Message):
api_key: str = betterproto.string_field(1)
@dataclass
class LLMTypeReply(betterproto.Message):
llm_type: str = betterproto.string_field(1)
@dataclass
class GenerationalGutsRequest(betterproto.Message):
api_key: str = betterproto.string_field(1)
prompt: str = betterproto.string_field(2)
# / This performs a FFT of the token embeddings. It picks out the high
# frequency components of the embeddings.
fft_embeddings: bool = betterproto.bool_field(3)
# / This truncates the token embeddings so you can present the parts that
# have the most "information".
embedding_trunkation: int = betterproto.int32_field(4)
# / This controls how many possible next tokens are returned.
top_k_logits: int = betterproto.int32_field(5)
@dataclass
class GenerationalGutsReply(betterproto.Message):
tokens: List["GenerationalGutsReplyTokenStack"] = betterproto.message_field(1)
generations: List["GenerationalGutsReplyGeneration"] = betterproto.message_field(2)
@dataclass
class GenerationalGutsReplyTokenStack(betterproto.Message):
"""
/ The token stack is a trace of the LLM's internal state as it processes
the prompt./ The string is tokenized, which takes chunks of the string and
converts them into a lookup index./ The lookup index is then used to look
up the token embedding for the token./ The token embedding is then combined
with the positional embedding to create the token embedding./ The token
embedding is then passed through the LLM to produce the hidden state./ The
hidden state is then used to generate the next token. This stack does not
include the
"""
# / The token string. This is the original token string, not the token id./
# Example: "the"
token: str = betterproto.string_field(1)
# / The token id. This is the lookup index for the token in the embedding./
# Example: 464 is the lookup index for "the" in the GPT-2 embedding.
token_id: int = betterproto.int32_field(2)
# / The positional embedding for the token. This encodes which position the
# token is in the sequence.
positional_embedding: List[float] = betterproto.float_field(3)
# / The token embedding for the token. This is the embedding for the token
# itself that has semantic meaning./ It's position in the high dimensional
# embedding space encodes the "meaning" of the token for the LLM.
token_embedding: List[float] = betterproto.float_field(4)
# / The hidden state of the token. This is the output of the LLM after
# processing the token./ This is also called the "context embedding".
hidden_state: List[float] = betterproto.float_field(5)
@dataclass
class GenerationalGutsReplyGeneration(betterproto.Message):
"""
/ Each generation is a possible next token and its probability. They are
determined by the a map from the final hidden state/ to the "token space".
The token space is the space of all possible tokens that the LLM can
generate.// We return the top_k tokens (by probability) and their
probabilities.
"""
token: str = betterproto.string_field(1)
id: int = betterproto.int32_field(2)
logit: float = betterproto.float_field(3)
class RemoteLLMStub(betterproto.ServiceStub):
async def generate(
self, *, api_key: str = "", prompts: List[str] = [], stop: List[str] = []
) -> GenerateReply:
request = GenerateRequest()
request.api_key = api_key
request.prompts = prompts
request.stop = stop
return await self._unary_unary(
"/llm_rpc.api.RemoteLLM/Generate",
request,
GenerateReply,
)
async def get_llm_type(self, *, api_key: str = "") -> LLMTypeReply:
request = LLMTypeRequest()
request.api_key = api_key
return await self._unary_unary(
"/llm_rpc.api.RemoteLLM/GetLlmType",
request,
LLMTypeReply,
)
async def generational_guts(
self,
*,
api_key: str = "",
prompt: str = "",
fft_embeddings: bool = False,
embedding_trunkation: int = 0,
top_k_logits: int = 0,
) -> GenerationalGutsReply:
request = GenerationalGutsRequest()
request.api_key = api_key
request.prompt = prompt
request.fft_embeddings = fft_embeddings
request.embedding_trunkation = embedding_trunkation
request.top_k_logits = top_k_logits
return await self._unary_unary(
"/llm_rpc.api.RemoteLLM/GenerationalGuts",
request,
GenerationalGutsReply,
) | /remote_llm-0.0.1-py3-none-any.whl/remote_llm/llm_rpc/api.py | 0.876397 | 0.42471 | api.py | pypi |
import io
from .remote_executor_interface import RemoteExecutorInterface
from .remote_path_info import RemotePathInfo, RemotePathType
import typing
import functools
from dataclasses import dataclass
import io
@dataclass(frozen=True)
class FileChunk:
'''
RemoteFileBase caches chunks of a file using this type.
'''
offset: int
size: int
data: typing.Union[memoryview, str] # memoryview for binary data. str for text mode
class RemoteFileBase:
'''
Base remote file functionality. The underlying file should be an append-only file such as a log file or
a static file which is not changing for the duration of interaction with RemoteFileBase and sub-classes.
'''
def __init__(self, path: str, executor: RemoteExecutorInterface,
block_size: int = 1048576,
start_offset: int = 0,
end_offset: typing.Optional[int] = None,
text_mode: bool = True,
max_cached_blocks: int = 64):
'''
:param path: Remote file path to read
:param executor: Low level I/O interface to remote host
:param block_size: Chunk size to read in
:param start_offset: Start offset in the file. Seeks to file offsets less than this are updated to
seeks to this value.
:param end_offset: Offset beyond which no data will be returned. Seeks beyond this location are
set to this offset.
:param max_cached_blocks: Parameter to functools.lrucache
start_offset is rounded down to a multiple of block_size and end_offset is
rounded up to a multiple of block_size if specified.
'''
self._path = path
self._executor = executor
self._block_size = block_size
self._start_offset = (start_offset // self._block_size) * self._block_size
self._end_offset = end_offset
self._offset = self._start_offset
self._text_mode = text_mode
self._max_cached_blocks = max_cached_blocks
self._stat_file()
self._size = self._stat.size
self._reader = None
self.reset()
self._validate()
def reset(self):
self._stat_file()
self._size = self._stat.size
self._offset = self._start_offset
self._reader = functools.lru_cache(self._max_cached_blocks)(self._read_impl)
def _validate(self):
if self._block_size < 4096 or self._block_size > 16 * (1024 ** 2):
raise ValueError('block_size must be between 4096 and 16M')
if (self._block_size - 1) & self._block_size:
raise ValueError('block_size must be a power of 2')
if self._start_offset > self._size:
raise ValueError('start_offset cannot be beyond the end of the file')
if self._end_offset:
# round up to next highest multiple of block_size unless already a multiple of block_size
self._end_offset = ((self._end_offset - 1) // self._block_size + 1) * self._block_size
def _stat_file(self):
self._stat = self._executor.file_stat(self._path)
if self._stat.type != RemotePathType.FILE:
raise FileNotFoundError('Path must refer to a regular file')
@property
def path(self) -> str:
return self._path
@property
def executor(self) -> RemoteExecutorInterface:
return self._executor
@property
def block_size(self) -> int:
return self._block_size
@property
def start_offset(self) -> int:
return self._start_offset
@property
def end_offset(self) -> int:
return self._end_offset or self._size
@property
def stat(self) -> RemotePathInfo:
return self._stat
@property
def text_mode(self):
return self._text_mode
def seek(self, offset: int) -> int:
'''
Seek to the specified offset or nearby location subject to rounding to the block size.
:param offset: Offset to seek to
:return: Actual offset positioned to subject to startoffset/endoffset and file size checks.
Positioning beyond the end of the file is allowed but will raise EOFError when attempting to read from there
'''
offset = (offset // self._block_size) * self._block_size
if offset < self._start_offset:
offset = self._start_offset
elif self._end_offset and offset > self._end_offset:
offset = self._end_offset
self._offset = offset
return self._offset
def tell(self) -> int:
''' Return current offset '''
return self._offset
def rewind(self) -> int:
''' Convenience method to seek to start offset '''
self._offset = self._start_offset
return self._offset
def read(self) -> FileChunk:
# read upto a block_size of data and return it
# raises EOFError when past end of range.
if ((self._end_offset and self._offset >= self._end_offset) or
(self._offset >= self._size)):
raise EOFError
result = self._reader(self._offset)
self._offset += result.size
return result
def __iter__(self):
end_value = self._end_offset if self._end_offset else self._size
while self._offset < end_value:
try:
yield self.read()
except EOFError:
return
def _read_impl(self, offset):
if self._end_offset and offset >= self._end_offset:
raise EOFError
success = False
result = None
bytes_to_read = 0
for i in range(2):
try:
bytes_to_read = self._block_size
if self._end_offset and offset + self._block_size > self._end_offset:
bytes_to_read = self._end_offset - offset
elif self._size < offset + self._block_size:
bytes_to_read = self._size - offset
result = self._executor.read_file_range(self._path, offset=offset, bytes_to_read=bytes_to_read,
text_mode=self._text_mode)
success = True
break
except EOFError:
raise
except Exception:
self._executor.reset()
if not success:
raise EOFError
chunk = FileChunk(offset=offset, size=bytes_to_read,
data=(result.getbuffer().toreadonly() if not self._text_mode
else result.getvalue()))
return chunk | /remote_log_analysis-0.1.2.tar.gz/remote_log_analysis-0.1.2/remote_log_analysis/remote_file_base.py | 0.801587 | 0.340732 | remote_file_base.py | pypi |
from .remote_text_log import RemoteTextLog
import typing
from datetime import datetime
from dateutil import parser as dateparser
import re
class RemoteLogSearch:
def __init__(self,
log: RemoteTextLog,
message_regex: typing.Optional[str] = None,
start_time: typing.Union[datetime, str, None] = None,
end_time: typing.Union[datetime, str, None] = None,
log_level_regex: typing.Optional[str] = None):
'''
:param log: RemoteTextLog object to search for specified criteria within the specified time range.
The timestamps in the log must be parseable with dateutil.parser.parse. The entire log file will be
considered to fit within the time range if there is no timestamp in the log format.
:param message_regex: Optional regex to match against the message portion of the log line.
:param start_time: Optional start time for the search. If not specified, the beginning of the log will be used.
:param end_time: Optional end time for the search. If not specified, the end of the log will be used.
:param log_level_regex: Lines with a loglevel which don't match this regex are discarded if specified.
Note: The results will extend slightly beyond the specified time range for efficiency.
'''
self._log = log
self._message_regex = re.compile(message_regex) if message_regex is not None else None
self._log_level_regex = re.compile(log_level_regex) if log_level_regex is not None else None
if start_time:
self._start_time = start_time if isinstance(start_time, datetime) else dateparser.parse(start_time)
else:
self._start_time = None
if end_time:
self._end_time = end_time if isinstance(end_time, datetime) else dateparser.parse(end_time)
else:
self._end_time = None
def _locate_start(self):
if self._start_time is None:
return
block_size = self._log.block_size
start_offset = self._log.tell()
end_offset = self._log.end_offset
mid = self._log.seek((end_offset + start_offset) // 2)
while start_offset + block_size < end_offset:
line = self._log.read_line()
if line is None:
raise EOFError
if not line.timestamp:
return # Means ignore timestamps
line_time = dateparser.parse(line.timestamp)
if line_time > self._start_time:
end_offset = mid
mid = self._log.seek((end_offset + start_offset) // 2)
elif line_time < self._start_time:
start_offset = mid
mid = self._log.seek((end_offset + start_offset) // 2)
else:
break
self._log.seek(mid)
def rewind(self):
self._log.rewind()
def seek(self, offset:int):
return self._log.seek(offset)
def __iter__(self):
try:
self._locate_start()
except EOFError:
return
count = 0
for line in self._log.read_line_iter():
count += 1
if self._end_time and line.timestamp and count % 20 == 0:
ts = dateparser.parse(line.timestamp)
if self._end_time < ts:
return # end iteration as end time has been reached
if self._log_level_regex and not self._log_level_regex.match(line.log_level):
continue
if self._message_regex and not self._message_regex.search(line.message, re.MULTILINE):
continue
yield line | /remote_log_analysis-0.1.2.tar.gz/remote_log_analysis-0.1.2/remote_log_analysis/remote_log_search.py | 0.825519 | 0.257497 | remote_log_search.py | pypi |
from .remote_file_base import RemoteFileBase, FileChunk
from .remote_log_utils import LogLineData, LogLineSplitterInterface, UnixLogLineSplitter, LogLineFormatInterface
from .remote_linux_executor import RemoteLinuxExecutor
import typing
class RemoteTextLog:
'''
Provides a line oriented and chunk oriented traversal of a RemoteFileBase object.
The RemoteFileBase object should be a text_mode object.
The provided LogLineSplitterInterface object is used to split the file into lines.
The details of splitting a data chunk into lines is delegated to the LogLineSplitterInterface object.
The chunk oriented traversal reads from the current file offset. Any re-positioning of the underlying
RemoteFileBase object will discard any cached data.
It is best to use either the line oriented traversal or the chunk oriented traversal. Mixing the two
requires care to avoid skipping data.
'''
def __init__(self, file: RemoteFileBase, line_splitter: LogLineSplitterInterface):
self._file = file
self._line_splitter = line_splitter
self._offset = self._file.tell()
@property
def offset(self):
return self._offset
@property
def block_size(self):
return self._file.block_size
@property
def stat(self):
return self._file.stat
@property
def start_offset(self) -> int:
return self._file.start_offset
@property
def end_offset(self) -> int:
return self._file.end_offset
def tell(self) -> int:
return self._file.tell()
def seek(self, offset) -> int:
offset = self._file.seek(offset)
self._offset = offset
self._line_splitter.clear()
return offset
def rewind(self):
self._file.rewind()
self._offset = self._file.tell()
self._line_splitter.clear()
return self._offset
def read_line(self) -> typing.Optional[LogLineData]:
if self._offset != self._file.tell():
self._line_splitter.clear()
self._offset = self._file.tell()
result = self._line_splitter.read()
self._offset = self._file.tell()
return result
def read_line_iter(self):
while True:
line = self.read_line()
if line is None:
break
yield line
def read_chunk(self) -> typing.Optional[FileChunk]:
'''
Read a chunk of data from the file. The chunk size is determined by the underlying RemoteFileBase object.
:return: FileChunk
raises EOFError when past end of range.
'''
self._line_splitter.clear()
try:
return self._file.read()
except EOFError:
return None
def read_chunk_iter(self):
self._line_splitter.clear()
for chunk in self._file:
yield chunk
class RemoteLinuxLog(RemoteTextLog):
'''
Provides a line oriented and chunk oriented traversal of a typical log file on a Linux host.
'''
def __init__(self, path: str, executor: RemoteLinuxExecutor,
log_format: LogLineFormatInterface, start_offset: int = 0, end_offset: typing.Optional[int] = None,
block_size: int = 4096):
'''
:param path: Path on remote host
:param executor: RemoteLinuxExecutor for connecting to remote host
:param log_format: An object that implements the LogLineFormatInterface. Typically, this will be an instance
of CommonRegexLineFormat
'''
file = RemoteFileBase(path, executor, start_offset=start_offset, end_offset=end_offset, block_size=block_size,
text_mode=True)
line_splitter = UnixLogLineSplitter(file, log_format)
super().__init__(file, line_splitter) | /remote_log_analysis-0.1.2.tar.gz/remote_log_analysis-0.1.2/remote_log_analysis/remote_text_log.py | 0.737064 | 0.268231 | remote_text_log.py | pypi |
import re
from dataclasses import dataclass
import typing
import copy
import abc
from .remote_file_base import RemoteFileBase
def timestamp_format_to_regex(strftime_notation):
'''
:param strftime_notation: A string using strtime notation describing a timestamp format
:return: A regex which can match the timestamp format
'''
special_chars = {
'Y': r'\d{4}',
'm': r'\d{2}',
'd': r'\d{2}',
'H': r'\d{2}',
'I': r'\d{2}',
'M': r'\d{2}',
'S': r'\d{2}',
'f': r'\d+',
'z': r'[+-]\d{4}',
'Z': r'\w+',
'a': r'\w{3}',
'A': r'\w+',
'b': r'\w{3}',
'B': r'\w+',
'c': r'.+',
'x': r'\d{2}/\d{2}/\d{2}',
'X': r'\d{2}:\d{2}:\d{2}',
'p': r'(?:AM|PM)',
's': r'\d+',
'w': r'\d',
'j': r'\d{3}',
'U': r'\d{2}',
'W': r'\d{2}',
'g': r'\d{2}',
'G': r'\d{4}',
'V': r'\d{2}',
'%': r'%'
}
chars_to_escape = {'{', '}', '[', ']', '(', ')', '|', '*', '+', '?', '.', '\\', '^', '$'}
escaped_chars = [re.escape(t) if t in chars_to_escape else t for t in strftime_notation]
strftime_notation = ''.join(escaped_chars)
pattern = re.compile(r"(%)(.)")
def replace_special(match):
if match.group(1) == '%':
char = match.group(2)
if char in special_chars:
return special_chars[char]
else:
return (f'%{char}')
else:
raise RuntimeError('Unexpected match while processing timestamp format')
return pattern.sub(replace_special, strftime_notation)
@dataclass(frozen=True)
class LogLineData:
timestamp: str
log_level: str
source: str
message: str
class LogLineFormatInterface(metaclass=abc.ABCMeta):
'''
Interface for a log line format. The only required method is match which returns
details about the matched line in a LogLineData object if a match occurs. Text log traversal implementations can
use this interface to extract details about log lines in a variety of formats.
'''
@abc.abstractmethod
def match(self, line: str) -> typing.Optional[LogLineData]:
'''
:param line: A line to match
:return: A LogLineData object if the line matches the format, None otherwise
'''
raise NotImplementedError
class CommonRegexLineFormat(LogLineFormatInterface):
'''
Describes the format of a log line. A logline is described with the following tokens:
%t - Timestamp
%l - Log level
%s - Source
%m - Log message
Other characters are treated as literals.
The timestamp is further specified in strftime format in timestamp_format.
The log levels are specified in log_levels in order of increasing severity.
This class will work well for most common log line formats where the format of the line can be described with
a regular expression and the timestamp can be described with strftime notation.
This should cover the majority of text log formats. A custom formatter can be defined as necessary in the event
this formatter is insufficient. For convenience, a CommonNonRegexLineFormat is provided for simple cases where
the log line format is a literal string apart from %t %l %s %m as described above.
'''
def __init__(self, logline_format: str, timestamp_format: str, log_levels: typing.Iterable[str]):
'''
:param logline_format: Specifies the log line format with the tokens described above:
%t - Timestamp
%l - Log level
%s - Source
%m - Log message
Other characters are passed to re.compile directly. Be careful to escape these special character sequences:
{, }, [, ], (, ), |, *, +, ?, ., \\, ^, $
Use CommonNonRegexLineFormat if you want to match a literal string.
:param timestamp_format: The timestamp format in strftime notation. See
https://docs.python.org/3/library/datetime.html#strftime-and-strptime-format-codes. The formatter will attempt
to match a pattern but will not validate the timestamp.
:param log_levels: A list of log levels in order of increasing severity. These are the acceptable values for %l
'''
self._logline_format = logline_format
self._timestamp_format = timestamp_format
self._log_levels = copy.copy(log_levels)
self._regex = self._generate_regex(logline_format, timestamp_format, log_levels)
@property
def regex(self):
return self._regex
@property
def timestamp_format(self):
return self._timestamp_format
@property
def log_levels(self):
return self._log_levels
def match(self, line: str) -> typing.Optional[LogLineData]:
'''
:param line: A line from a log file
:return: A LogLineData object if the line matches the format, otherwise None
'''
match_result = self._regex.search(line)
if not match_result:
return None
# source info is not always present.
match_groups = match_result.groupdict()
return LogLineData(timestamp=match_groups.get('t', ''),
log_level=match_groups.get('l', ''),
source=match_groups.get('s', ''),
message=match_groups.get('m', ''))
def _generate_regex(self, logline_format, timestamp_format, log_levels):
format_chars = {
't': timestamp_format_to_regex(timestamp_format),
'l': '(?:' + '|'.join(log_levels) + ')',
's': r'[^\s]+',
'm': r'.*',
}
regex = re.compile(re.sub(r'%([tlsm])',
lambda x: f'(?P<{x.group(1)}>{format_chars[x.group(1)]})',
logline_format))
return regex
class CommonNonRegexLineFormat(CommonRegexLineFormat):
'''
A convenience class for specifying a literal log line format apart from the special %t %l %s %m tokens.
'''
def __init__(self, logline_format: str, timestamp_format: str, log_levels: typing.Iterable[str]):
'''
:param logline_format: Specifies the log line format with the tokens described above:
%t - Timestamp
%l - Log level
%s - Source
%m - Log message
:param timestamp_format: The timestamp format in strftime notation
:param log_levels: A list of log levels in order of increasing severity. These are the acceptable values for %l
'''
# escape any special sequences in logline_format before proceeding
chars_to_escape = {'{', '}', '[', ']', '(', ')', '|', '*', '+', '?', '.', '\\', '^', '$'}
escaped_chars = [re.escape(t) if t in chars_to_escape else t for t in logline_format]
logline_format = ''.join(escaped_chars)
super().__init__(logline_format, timestamp_format, log_levels)
class LogLineSplitterInterface(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __iter__(self):
raise NotImplementedError
@abc.abstractmethod
def read(self) -> typing.Optional[LogLineData]:
raise NotImplementedError
@abc.abstractmethod
def clear(self):
'''
Clear any state associated with the splitter. The next read will read from the file
:return:
'''
raise NotImplementedError
class UnixLogLineSplitter(LogLineSplitterInterface):
'''
Split a block of text into log lines. Log lines are newline separated.
A line which does not match the format of the specified log line format is considered a continuation of the
previous line. This base splitter handles files with UNIX style line endings. Sub-class and set
self._multiline_join to '\r\n' to handle files with Windows style line endings.
'''
def __init__(self, reader: RemoteFileBase, log_line_format: LogLineFormatInterface):
if not reader.text_mode:
raise RuntimeError('LogLineSplitter requires a text mode reader')
self._reader = reader
self._log_line_format = log_line_format
self._lines = []
self._current_context = []
self._match = None
self._eof = False
self._multiline_join = '\n'
def clear(self):
self._lines = []
self._current_context = []
self._match = None
self._eof = False
def _handle_return_match(self):
match = self._match
self._match = None
if not match:
return None
if self._current_context:
message = match.message + self._multiline_join + self._multiline_join.join(self._current_context)
self._current_context = []
return LogLineData(timestamp=match.timestamp,
log_level=match.log_level,
source=match.source,
message=message)
else:
return match
def __iter__(self):
while True:
result = self.read()
if not result:
return
yield result
def read(self) -> typing.Optional[LogLineData]:
'''
Read the next LogLineData from the reader. If the reader is at EOF, return None.
:return: A LogLineData object if a line was read, None otherwise
'''
if self._eof and not (self._lines or self._match):
return None
while True:
if len(self._lines) <= 1:
if not self._eof:
try:
self._from_reader()
except EOFError:
self._eof = True
if not self._lines:
return self._handle_return_match()
line = self._lines[0]
if not line[0]:
line[0] = self._log_line_format.match(line[1])
match = line[0]
if match and self._match:
return self._handle_return_match()
elif match and not self._match:
self._match = match
self._lines.pop(0)
elif not match and self._match:
self._current_context.append(line[1])
self._lines.pop(0)
else:
# non-matching line and no match in progress
self._lines.pop(0)
def _from_reader(self):
text = self._reader.read().data.splitlines(keepends=True)
# If the last line does not end with a newline, add one
if self._lines and self._lines[-1][1][-1] != '\n':
self._lines[-1][1] += text[0]
text.pop(0)
self._lines.extend([None, l] for l in text)
class DosLineSplitter(UnixLogLineSplitter):
def __init__(self, reader: RemoteFileBase, log_line_format: LogLineFormatInterface):
super().__init__(reader, log_line_format)
self._multiline_join = '\r\n'
class LineSplitter(UnixLogLineSplitter):
def __init__(self, reader: RemoteFileBase, log_line_format: LogLineFormatInterface):
super().__init__(reader, log_line_format)
self._line_ending_determined = False
def read(self) -> typing.Optional[LogLineData]:
if self._line_ending_determined:
return super().read()
else:
super()._from_reader()
if self._lines[0][1].find('\r\n') != -1:
self._multiline_join = '\r\n'
self._line_ending_determined = True
return super().read() | /remote_log_analysis-0.1.2.tar.gz/remote_log_analysis-0.1.2/remote_log_analysis/remote_log_utils.py | 0.817975 | 0.351728 | remote_log_utils.py | pypi |
from pyngrok import conf, ngrok
_type_description = {
'ssh': {
'port': 22,
'proto': 'tcp',
},
'custom': {
'port': 1209,
'proto': 'tcp',
},
'jupyter': {
'port': 8888,
'proto': 'http',
},
'grafana': {
'port': 3000,
'proto': 'http',
},
}
HOST = 'localhost'
def _get_addr_from_url(url):
return url.split("//")[-1].split(":")[0]
def _get_port_from_url(url):
return url.split("//")[-1].split(":")[1]
class TunnelAlredyOpenError(Exception):
pass
class Tunnel:
@staticmethod
def _is_already_open(tunnel_type):
PORT = str(_type_description[tunnel_type]['port'])
if _type_description[tunnel_type]['proto'] == 'tcp':
this_tunnel_config = f'{HOST}:{PORT}'
elif _type_description[tunnel_type]['proto'] == 'http':
this_tunnel_config = f'http://{HOST}:{PORT}'
else:
raise ValueError('Unexpected tunnel type')
tunnels = ngrok.get_tunnels()
if tunnels is None:
return False
for tunnel in tunnels:
if tunnel.config['addr'] == this_tunnel_config:
return True
return False
def __init__(self, tunnel_type):
ngrok.get_tunnels()
if Tunnel._is_already_open(tunnel_type):
raise TunnelAlredyOpenError(
"Such tunnel is already open"
)
self.ngrok_tunnel = ngrok.connect(
_type_description[tunnel_type]['port'],
_type_description[tunnel_type]['proto'],
)
self.address = _get_addr_from_url(self.ngrok_tunnel.public_url)
if tunnel_type == 'ssh':
self.port = _get_port_from_url(self.ngrok_tunnel.public_url)
def is_still_open(self):
tunnels = ngrok.get_tunnels()
for tunnel in tunnels:
if tunnel.public_url == self.ngrok_tunnel.public_url:
return True
return False
def close(self):
ngrok.disconnect(self.ngrok_tunnel.public_url)
if self.ngrok_tunnel.proto == 'http':
url = _get_addr_from_url(self.ngrok_tunnel.public_url)
https_pub_url = 'https://' + url
ngrok.disconnect(https_pub_url)
def authenticate(token):
ngrok.set_auth_token(token)
def set_region(region):
conf.get_default().region = region | /remote_mole-1.3.0.tar.gz/remote_mole-1.3.0/src/remote_mole/_internal/services/ngrok.py | 0.495361 | 0.187932 | ngrok.py | pypi |
import time
from collections import OrderedDict
from copy import copy
from loggers import Loggers
from pathos.multiprocessing import ProcessingPool as Pool
from ssh_paramiko import ssh_paramiko
class RemoteMultiCommand(Loggers):
'''Execute commands in parallel in remote servers
Provides a layer of abstraction for executing multiple commands in multiple servers
with multiple processes in parallel
Arguments:
keySSH(:obj:`str`): path of the ssh private key to connect (must be None if using user
and pasword to connect)
logFolder(:obj:`str`, **optional** , *default* =None): folder where the log files of
this class will be generated
username(:obj:`str`, *optional* , *default* =root): username using the connection
password(:obj:`str`,optional, *default* =None): password for connection if using user
and password instead of key
sshPort(:obj:`str`, optional, *default* =22): ssh tcp port
serverHasDns(:obj:`bool`, optional, *default* =True): if the server is not registered
in a DNS domain and/or has not its DNS name equals to its hostname, this flag must
de set to False, otherwise this condition will be checked to certify we are trully
connected to the right server.
'''
def __init__(self, ssh_key, **kwargs):
self.cmd = None
self.ssh_key_path = ssh_key
self.servers_cmd_dict = {}
self.ssh_log_level = 'ERROR'
self.ssh_opt_args = kwargs
self.ssh = ssh_paramiko.RemoteServer(self.ssh_key_path, **self.ssh_opt_args)
self.ssh.set_log_level(self.ssh_log_level)
if 'logFolder' in kwargs:
super(RemoteMultiCommand, self).__init__('RemoteMultiCommand',
logFolder=kwargs['logFolder'])
else:
super(RemoteMultiCommand, self).__init__('RemoteMultiCommand')
def execute_command(self, server):
''' Execute a command in a remote server
Issues a command in the server and updates the dictionary self.servers_cmd_dict,
which maintains the state of all commands executed in this object
Arguments:
server (:obj:`str`): server where the command will be executed
Returns:
dictionary containing the server, the command executed, the result of the
connection attempt and the result of the command issued
'''
ret, output_msg = self.ssh.connect_server(server, False)
if ret:
cmd_ret, std_out, std_error = self.ssh.execute_cmd(self.cmd)
self.ssh.close_connection()
if cmd_ret:
std = std_out
else:
self.log.error('Error executing command: "'+self.cmd+'" in server '+server
+' :'+std_error)
std = std_error
else:
cmd_ret = ret
std = output_msg
if not output_msg == 'Host is not registered in DNS domain':
# Need to reinstantiate the class in this cases
self.ssh = ssh_paramiko.RemoteServer(self.ssh_key_path, **self.ssh_opt_args)
self.ssh.set_log_level(self.ssh_log_level)
self.log.error('Cannot connect to server '+server+' :'+output_msg)
cmd_dict = OrderedDict()
cmd_dict['command'] = self.cmd
cmd_dict['access'] = ret
cmd_dict['result'] = cmd_ret
cmd_dict['output'] = std
return {server:cmd_dict}
def launch_multicommand(self, cmd, num_of_process, servers_list, ssh_log_level='CRITICAL'):
'''Launches several processes that execute the command in a list of servers
Arguments:
cmd (:obj:`str`): command to be executed in each server of the list
num_of_process (:obj:`int`): number of separated process launched in each iteration
servers_list (:obj:`list`): servers list
ssh_log_level (:obj:`str`, *default* = 'CRITICAL'): log level of the ssh connection.
Could be 'DEBUG', 'INFO', 'ERROR' or 'CRITICAL'
Returns:
servers_cmd_dict (:obj:`dict`): dictionary containing the servers and the result of
the command
'''
self.ssh_log_level = ssh_log_level
cmd_servers_dict = {}
num_of_servers = len(servers_list)
if num_of_process > num_of_servers:
num_of_process = num_of_servers
numb_of_iterations, servers_left = divmod(num_of_servers, num_of_process)
start = time.time()
counter = 0
self.cmd = cmd
self.log.info('Processing in the '+str(num_of_servers)+' servers will be done in '
+str(numb_of_iterations+(0 if servers_left == 0 else 1))+' iterations.')
for iter_num in range(0, num_of_servers, num_of_process):
self.log.debug('Processing '+str(len(servers_list[iter_num:iter_num+(num_of_process)]))
+' servers in this iteration.')
self.log.debug('Servers: '+str(servers_list[iter_num:iter_num+(num_of_process)]))
pool = Pool(num_of_process)
result = pool.map(self.execute_command,
servers_list[iter_num:iter_num+(num_of_process)])
for server_results in result:
for server, cmd_results in server_results.iteritems():
cmd_servers_dict[server] = cmd_results
if not server in self.servers_cmd_dict:
self.servers_cmd_dict[server] = []
self.servers_cmd_dict[server].append(cmd_results)
counter = counter+num_of_process
servers_to_process = num_of_servers - counter
if not servers_to_process <= 0:
self.log.debug('Still has '+str(num_of_servers-counter)+' servers to process...')
self.log.info("It took "+str(round(time.time()-start, 3))+" seconds to execute command '"
+cmd+"' in all "+str(num_of_servers)+" servers.")
return cmd_servers_dict
def launch_list_of_commands(self, script_cmds, num_of_process, servers_list,
ssh_log_level='CRITICAL'):
''' Launch a list of parallel commands
Launches several processes that execute a sequence of commands in a list of servers
For each server, the next commands will only be executed if the preceding command was
successfull.
Arguments:
script_cmds (:obj:`str` or :obj:`list`): list or string containing the commands
(interprets ";", new line character and comments)
num_of_process: (:obj:`int`) number of separated process launched in each iteration
servers_list (:obj:`list`): servers list
ssh_log_level (:obj:`str`, *default* = 'CRITICAL'): log level of the ssh connection.
Could be 'DEBUG', 'INFO', 'ERROR' or 'CRITICAL'
Returns:
servers_cmd_dict (:obj:`dict`): dictionary containing the servers and the result of
the command
'''
start = time.time()
self.servers_cmd_dict = {}
num_of_servers = len(servers_list)
servers_list_temp = copy(servers_list)
if type(script_cmds).__name__ != 'list':
# Turn script_cmds into a list
script_cmds = script_cmds.replace(';', '\n')
cmds_list = script_cmds.split('\n')
else:
cmds_list = script_cmds
# Filter null elements and commented lines
cmds_list = filter(lambda x: x, cmds_list)
cmds_list = filter(lambda x: x[0] != '#', cmds_list)
self.log.info('Executing '+str(len(cmds_list))+' commands in the list of servers:')
for cmd in cmds_list:
if servers_list_temp:
result_dict = self.launch_multicommand(cmd, num_of_process,
servers_list_temp, ssh_log_level)
for server, results in result_dict.iteritems():
if not results['result']:
# If this command fails, we remove the server from list
servers_list_temp.remove(server)
self.log.error('Command "'+cmd+'" returned error. Removing server '
+server+' from execution list')
for server, result_dict in self.servers_cmd_dict.iteritems():
log_message = 'Server '+server+':'
log_message = log_message+'\n - All '+str(len(cmds_list))+' commands were issued: '\
+('Yes' if len(result_dict) == len(cmds_list) else 'No')
log_message = log_message+'\n - Number of commands issued: '+str(len(result_dict))
log_message = log_message+'\n - Number of commands bypassed: '\
+str(len(cmds_list) - len(result_dict))
self.log.info(log_message)
self.log.info("It took "+str(round(time.time()-start, 3))+" seconds to execute the list \
of commands in all "+str(num_of_servers)+" servers.")
return self.servers_cmd_dict | /remote_multicommand-0.1.3.tar.gz/remote_multicommand-0.1.3/remote_multicommand/remote_multicommand.py | 0.635788 | 0.152064 | remote_multicommand.py | pypi |
import socket
import pickle
from .errors import CallMethodError as _CallMethodError
def mixin_error(error,msg=""):
# Create new error which inherits from both the base
# class and _CallMethodError, so the new error can
# be caught by both execpt types
class CallMethodError(error.__class__,_CallMethodError):
pass
return CallMethodError(msg)
# The choice of BUFFER_SIZE and TIMEOUT are somewhat arbitrary,
# it would be good in the future to test some ideal values,
# or at least let the user choose these when instantiating
# a client class.
BUFFER_SIZE = 1024
TIMEOUT = 3 # seconds
class Client:
"""A remote_object Client.
This class allows a use to check attributes and make method calls
on a python object hosted by the server.Server class. This is
achieved by making a method call on the client.Client instance,
which is then forwarded over the TCP socket to the server, which
actually makes the requested method call and passes any return
values or Exceptions back to the client to be returned or raised
respectively.
:param __ip: A string holding the address of the server.Server
instance which the Client will call from.
:param __port: An integer holding the port number of the server.Server
instance which the Client will call from
:method __call_method: The private method which actually parses the
client-requested call, passes this to the
server and then returns/raises the result.
This is not intended to be directly used.
:Note: The Client class treats attribute lookups like method calls,
but without arguments. So if the server-hosted python object
has an attribute "a" it can be check by the Client class as
client.Client(...).a()
Example Usage::
_client = client.Client('some ip',9999)
# Attempts to call ".a()" on server,
# may return a value or error, depending
# on the hosted object
print(_client.a())
# Same as above
print(_client.__getattr__("a")())
# Raises AttributeError, since the client.Client
# class does not have a natuarl method "a"
print(_client.__getattribute__("a")())
"""
def __init__(self,ip,port):
self.__ip,self.__port = ip,port
def __repr__(self):
try:
_repr = self.__call_method("__repr__")
except:
_repr = "pointed at {0}:{1}".format(self.__ip,self.__port)
return "<Remote Wrapper {}>".format(_repr)
def __call_method(self,fname,*args,**kwargs):
"""Handler for method calls and returns
This method takes a
"""
socket = MessageSocket(self.__ip,self.__port)
rmsg = socket.send_message(pickle.dumps((fname,args,kwargs)))
socket.close()
return_value = pickle.loads(rmsg)
if isinstance(return_value, Exception):
# If the server raised an error, reraise it here.
# In that case, return_values is an error.TraceableError
# type, hence it is called during reraising so as to print
# the server traceback
error = return_value()
# Using mixin_error allows any try ... except .... statments to
# catch either from `error.__class__` OR from `ro.errors.CallMethodError`
raise mixin_error(error,"Remote Method Call Failed") from error
return return_value
def __getattr__(self,key):
def f(*args,**kwargs):
return self.__call_method(key,*args,**kwargs)
return f
class MessageSocket(socket.socket):
"""A TCP socket wrapper for use by a remote_object.client.Client instance
This class inherits from the base socket.socket class, see that
documentation for details on the TCP socket connection. The
main extension of this class is implement a single method to
both send and receive a message, since when functioning properly,
the server should always send a single response for the single message
recieved from a socket. After this exchange, the socket is no longer
used, and can be deleted.
"""
def __init__(self,ip,port):
socket.socket.__init__(self,socket.AF_INET,
socket.SOCK_STREAM)
self.settimeout(TIMEOUT)
self.connect((ip,port))
def send_message(self,msg):
"""Sends a message, waits and returns the response
:param msg: a bytes type containing the messasge to
be send (note, do NOT include a term char)
:return rmsg: a bytes type containing the response
to the message (note, does NOT include
a term char)
"""
self.sendall(msg + b'\n')
self.buffer = b''
while True:
rmsg = self.recv(BUFFER_SIZE)
if rmsg == b'':
break
self.buffer = self.buffer + rmsg
return self.buffer.strip() | /remote-object-0.2.4.tar.gz/remote-object-0.2.4/remote_object/client.py | 0.682891 | 0.209429 | client.py | pypi |
from collections.abc import Callable
from io import BytesIO
from multiprocessing import Process
from typing import Any
import numpy as np
from PIL import Image
import requests
import matplotlib.pyplot as plt
from .server import SharedDataServer, ImageHandler, run_server, get_best_family
class PlotClient:
def __init__(self, port=8000):
self._port = None
self.server = None
self.server_class = SharedDataServer
self.handler_class = ImageHandler
self.httpd = None
self.port = port
self._figure = None
self._axes = None
self.auto_show = True
@property
def port(self):
return self._port
@port.setter
def port(self, port):
self._port = port
self.server_class.address_family, self.addr = get_best_family(None, port)
"""
Start the server in a background process.
"""
def start_server(self):
self.httpd = self.server_class(self.addr, self.handler_class)
# start the server in a new process
self.server = Process(target=run_server, args=(self.httpd,))
self.server.start()
"""
Stop the server background process.
"""
def stop_server(self):
if self.httpd is not None:
self.httpd.shutdown()
if self.server is not None:
self.server.close()
"""
Starts the server only if it is not already running.
"""
def maybe_start_server(self):
if self.httpd is None:
self.start_server()
"""
Wraps a matplotlib plot function to display the plot in a browser.
Arguments:
plot_func: The matplotlib plot function to wrap.
is_3d: Whether the plot is a 3D plot.
clear_figure: Whether to clear the figure before plotting.
call_on_figure: Whether to call the plot function on the figure instead of the axes.
"""
def _matplotlib_figure(self, plot_func: Callable, is_3d: bool=False, clear_figure: bool=True, call_on_figure: bool=False):
self.maybe_start_server()
# initialize the figure and axes
if self._figure is None or clear_figure:
self.figure()
# call the plot function either on the figure or the axes
result = None
if call_on_figure:
self._axes = plot_func(self._figure)
else:
if self._axes is None:
self._axes = self._figure.add_subplot(111, projection='3d' if is_3d else None)
result = plot_func(self._axes)
if self.auto_show:
self.show()
return result
"""
Show the figure on the remote server
"""
def show(self):
data = BytesIO()
self._figure.savefig(data, format="png")
data.seek(0)
r = requests.post(f"http://localhost:{self.port}", data=data)
"""
Instantiate a new figure.
"""
def figure(self, *args, **kwargs):
if self._figure:
plt.close(self._figure)
self._figure = plt.figure(*args, **kwargs)
"""
Emulate matplotlib plt.subplots
"""
def subplots(self):
return self, self
"""
Show an image without going through matplotlib
"""
def imshow_native(self, img):
self.maybe_start_server()
# send the image to the server
data = BytesIO()
if isinstance(img, np.ndarray):
img = Image.fromarray(img)
img.save(data, 'png')
data.seek(0)
r = requests.post(f"http://localhost:{self.port}", data=data)
"""
Get an attribute of the plt module by mapping each name to a function that calls the
corresponding function on the axes object or the figure object.
"""
def __getattr__(self, name: str) -> Any:
matplotlib_attributes = [
'imshow', 'plot', 'scatter', 'bar', 'stem', 'step', 'fill_between',
'stackplot', 'hist', 'boxplot', 'errorbar', 'violinplot', 'eventplot',
'hist2d', 'hexbin', 'pie', 'tricontour', 'tricontourf', 'tripcolor',
'triplot', 'pcolormesh', 'contour', 'contourf', 'barbs', 'quiver',
'streamplot'
]
matplotlib_attributes_3d = [
'plot_surface', 'plot_wireframe', 'plot_trisurf', 'scatter3D', 'bar3D',
'contour3D', 'quiver3D', 'streamplot3D'
]
matplotlib_axes_attributes_map = {
'title': 'set_title',
'text': 'text',
'ylim': 'set_ylim',
'xlim': 'set_xlim',
'xlabel': 'set_xlabel',
'ylabel': 'set_ylabel',
'yscale': 'set_yscale',
'xscale': 'set_xscale',
'xticks': 'set_xticks',
'yticks': 'set_yticks',
'legend': 'legend',
'clf': 'clear',
'grid': 'grid',
'axis': 'axis',
'annotate': 'annotate'
}
matplotlib_figure_attributes_map = {
'subplots_adjust': 'subplots_adjust',
'subplot': 'add_subplot',
'suptitle': 'suptitle',
}
# get the matplotlib function name and set the meta flags
call_on_figure = False
is_3d = name in matplotlib_attributes_3d
matplotlib_func_name = None
if name in matplotlib_attributes:
matplotlib_func_name = name
elif name in matplotlib_attributes_3d:
matplotlib_func_name = name.replace('3D', '')
is_3d = True
elif name in matplotlib_axes_attributes_map:
matplotlib_func_name = matplotlib_axes_attributes_map[name]
elif name in matplotlib_figure_attributes_map:
matplotlib_func_name = matplotlib_figure_attributes_map[name]
call_on_figure = True
# get a callable function for matplotlib
if matplotlib_func_name is not None:
return lambda *args, **kwargs: \
self._matplotlib_figure(
lambda ax: getattr(ax, matplotlib_func_name)(*args, **kwargs),
is_3d=is_3d,
call_on_figure=call_on_figure,
clear_figure=kwargs.pop("clear_figure", False)
) | /remote_plot-1.2.1.tar.gz/remote_plot-1.2.1/remote_plot/client.py | 0.87046 | 0.29605 | client.py | pypi |
import abc # noqa
import typing
from aio_pika.abc import (
Arguments, # noqa
TimeoutType,
)
class RPCRouterProtocol(abc.ABC):
@abc.abstractmethod
def __init__(self, *, prefix: str = '') -> None: # noqa
raise NotImplementedError
@abc.abstractclassmethod
def validate_prefix(cls, prefix: str):
raise NotImplementedError
@abc.abstractmethod
def add_rpc_route(
self,
path: str,
endpoint: typing.Callable[..., typing.Any],
**kwargs
):
raise NotImplementedError
@abc.abstractmethod
def rpc_route(self, path: str, **kwargs):
raise NotImplementedError
@abc.abstractmethod
def include_route(self, router: 'RPCRouter', *, prefix: str = ''):
raise NotImplementedError
@abc.abstractmethod
def procedure(
self,
path: str,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
):
raise NotImplementedError
class RPCRouter(RPCRouterProtocol):
def __init__(self, *, prefix: str = '') -> None:
if prefix:
self.validate_prefix(prefix)
self.routes: list[dict[str, typing.Any]] = []
self.prefix: str = prefix
@classmethod
def validate_prefix(cls, prefix: str):
assert prefix.startswith('/'), 'A path prefix must start with "/"'
assert not prefix.endswith('/'), 'A path prefix must not end with "/"'
def add_rpc_route(
self,
path: str,
endpoint: typing.Callable[..., typing.Any],
**kwargs
):
self.routes.append(
dict(path=path, endpoint=endpoint, kwargs=kwargs)
)
def rpc_route(self, path: str, **kwargs):
def wrapper(endpoint: typing.Callable[..., typing.Any]):
self.add_rpc_route(path, endpoint, **kwargs)
return endpoint
return wrapper
def include_route(self, router: 'RPCRouter', *, prefix: str = ''):
if prefix:
self.validate_prefix(prefix)
for route in router.routes:
path = self.prefix + prefix + route['path']
self.add_rpc_route(
path.replace('/', '_'), route['endpoint'], **route['kwargs'],
)
def procedure(
self,
path: str,
*,
durable: bool = False,
exclusive: bool = False,
passive: bool = False,
auto_delete: bool = False,
arguments: Arguments = None,
timeout: TimeoutType = None,
):
return self.rpc_route(
path=path,
durable=durable,
exclusive=exclusive,
passive=passive,
auto_delete=auto_delete,
arguments=arguments or {},
timeout=timeout,
) | /remote_procedure-1.3.2.tar.gz/remote_procedure-1.3.2/remote_procedure/router.py | 0.708818 | 0.205197 | router.py | pypi |
from __future__ import print_function
__version__ = '0.6'
def run_local(command, conf, arg):
from . import utils
_command = list(map(lambda x: x.format(**utils.injecting_vals(conf, arg)), command))
print('exec: "{}"'.format(' '.join(_command)))
code = utils.run_command_attach_output(_command)
return code
def run_remote(command, conf, arg):
from . import utils
serialized_command = ' '.join(command).format(**utils.injecting_vals(conf, arg))
print('remote exec: "{}"'.format(serialized_command))
_command = [
'ssh', '-T', conf['host'], '"cd {remote_path} && {command}"'.format(
remote_path=conf['remote_path'],
command=serialized_command
)
]
code = utils.run_command_attach_output(' '.join(_command), shell=True)
return code
def run(arg):
from . import utils
conf = utils.load_config()
if conf is None:
raise Exception('no config found, .remoterunrc might not be created')
assert 'host' in conf, '`host` is not defined'
assert 'remote_path' in conf, '`remote_path` is not defined'
assert 'steps' in conf, '`steps` is not defined'
assert isinstance(conf['steps'], list), '`steps` must be a list'
for step in conf['steps']:
assert isinstance(step, dict), 'each `step` must be a dict'
assert 'name' in step, '`step.name` is not defined'
if 'command' in step:
assert isinstance(step['command'], list)
if 'remote' in step:
assert isinstance(step['remote'], list)
if 'command' not in step and 'remote' not in step:
raise AssertionError('`step` should have either `command` or `remote`')
current_step = utils.get_current_step()
if current_step is None:
current_step = conf['steps'][0]['name']
utils.save_lockfile(current_step)
all_steps = list(map(lambda x: x['name'], conf['steps']))
if current_step not in all_steps:
raise Exception('step {} not found in the .remoterunrc'.format(current_step))
print('RemoteRun start running at step {}'.format(current_step))
start = False
for i, step in enumerate(conf['steps'], 1):
if step['name'] == current_step:
start = True
if not start:
continue
utils.save_lockfile(step['name'])
if 'command' in step:
print('({}/{}) running "{}"'.format(i, len(conf['steps']), step['name']))
run_local(step['command'], conf, arg)
elif 'remote' in step:
print('({}/{}) remote running "{}"'.format(i, len(conf['steps']), step['name']))
run_remote(step['remote'], conf, arg)
utils.delete_lockfile()
print('RemoteRun finished!')
def init():
from shutil import copy
from . import utils
print('creating initial config file .remoterunrc')
copy(utils.path_default_config(), utils.path_config())
print('creating initial ignore file .remoterunignore')
copy(utils.path_default_ignorefile(), utils.path_ignorefile())
def main():
import argparse
parser = argparse.ArgumentParser('RemoteRun (v. {})'.format(__version__))
parser.add_argument('arg', nargs='*', help='arguments passed to the .remoterunrc file, using by {arg[x]}')
parser.add_argument('--init', default=False, action='store_true',
help='init the required files in the current directory')
args = parser.parse_args()
if args.init:
init()
else:
run(args.arg) | /remote-run-0.6.tar.gz/remote-run-0.6/remoterun/remoterun.py | 0.509032 | 0.191819 | remoterun.py | pypi |
from glob import glob
import shutil
import os
import sys
from remote_sensing_processor.common.torch_test import cuda_test
from remote_sensing_processor.unzip.unzip import unzip_sentinel, unzip_landsat
from remote_sensing_processor.sentinel2.sen2cor.sen2cor import sen2correct
from remote_sensing_processor.sentinel2.superres.superres import Superresolution
from remote_sensing_processor.sentinel2.sentinel_postprocessing.sentinel_postprocessing import s2postprocess_superres, s2postprocess_no_superres, get_first_proj
from remote_sensing_processor.landsat.landsat import landsat_proc
from remote_sensing_processor.mosaic.mosaic import mosaic_main, order, ismultiband
from remote_sensing_processor.indices.normalized_difference import nd
from remote_sensing_processor.imagery_types.types import get_type, get_index
from remote_sensing_processor.common.normalize import normalize_file
from remote_sensing_processor import segmentation
__version__ = '0.2.1'
cuda_test()
def sentinel2(archives, sen2cor = True, superres = True, projection = None, cloud_mask = True, clipper = None):
"""
Preprocess Sentinel-2 imagery.
Parameters
----------
archives : string or list of strings
Path to archive or list of pathes to archives.
sen2cor : bool (default = True)
Is atmospheric correction using Sen2Cor needed. Set to False if you have troubles with Sen2Cor.
superres : bool (default = True)
Is upscaling 20- and 60-m bands to 10 m resolution needed. Set to False if you do not have GPU that supports CUDA.
projection : string (optional)
CRS in which output data should be.
cloud_mask : bool (default = True)
Is cloud masking needed.
clipper : string (optional)
Path to vector file to be used to crop the image.
Returns
----------
list of strings
List of paths where preprocessed Sentinel-2 products are saved.
Examples
--------
>>> import remote_sensing_processor as rsp
>>> from glob import glob
>>> sentinel2_imgs = glob('/home/rsp_test/sentinels/*.zip')
>>> print(sentinel2_imgs)
['/home/rsp_test/sentinels/L1C_T42VWR_A032192_20210821T064626.zip',
'/home/rsp_test/sentinels/L1C_T42WXS_A032192_20210821T064626.zip',
'/home/rsp_test/sentinels/L1C_T43VCL_A032192_20210821T064626.zip',
'/home/rsp_test/sentinels/L1C_T43VDK_A031391_20210626T063027.zip',
'/home/rsp_test/sentinels/L1C_T43VDL_A023312_20210823T063624.zip',
'/home/rsp_test/sentinels/L1C_T43VDL_A031577_20210709T064041.zip']
>>> output_sentinels = rsp.sentinel2(sentinel2_imgs)
Preprocessing of /home/rsp_test/sentinels/L1C_T42VWR_A032192_20210821T064626.zip completed
Preprocessing of /home/rsp_test/sentinels/L1C_T42WXS_A032192_20210821T064626.zip completed
Preprocessing of /home/rsp_test/sentinels/L1C_T43VCL_A032192_20210821T064626.zip completed
Preprocessing of /home/rsp_test/sentinels/L1C_T43VDK_A031391_20210626T063027.zip completed
Preprocessing of /home/rsp_test/sentinels/L1C_T43VDL_A023312_20210823T063624.zip completed
Preprocessing of /home/rsp_test/sentinels/L1C_T43VDL_A031577_20210709T064041.zip completed
>>> print(output_sentinels)
['/home/rsp_test/sentinels/L1C_T42VWR_A032192_20210821T064626/',
'/home/rsp_test/sentinels/L1C_T42WXS_A032192_20210821T064626/',
'/home/rsp_test/sentinels/L1C_T43VCL_A032192_20210821T064626/',
'/home/rsp_test/sentinels/L1C_T43VDK_A031391_20210626T063027/',
'/home/rsp_test/sentinels/L1C_T43VDL_A023312_20210823T063624/',
'/home/rsp_test/sentinels/L1C_T43VDL_A031577_20210709T064041/']
"""
if isinstance(archives, str):
archives = [archives]
paths = []
for archive in archives:
path = unzip_sentinel(archive)
path1 = glob(path+'*')[0]
if sen2cor == True:
sen2correct(path1)
path1 = glob(path+'*')[0]
if superres == True:
Superresolution(input_dir = path1, output_dir = path1, copy_original_bands = True, clip_to_aoi = False, geometry = None, bounds = None).start()
img = glob(path+'**/*_superresolution.tif')[0]
if projection == 'same':
projection = get_first_proj(img)
s2postprocess_superres(img = img, projection = projection, cloud_mask = cloud_mask, clipper = clipper, path = path, path1 = path1)
else:
if projection == 'same':
img = glob(path1 + '/**/*.jp2')[0]
projection = get_first_proj(img)
s2postprocess_no_superres(projection = projection, cloud_mask = cloud_mask, clipper = clipper, path = path, path1 = path1)
shutil.rmtree(path1)
paths.append(path)
print('Preprocessing of ' + archive + ' completed')
return paths
def landsat(archives, projection = None, cloud_mask = True, pansharpen = True, keep_pan_band = False, resample = 'bilinear', t = 'k', clipper = None):
"""
Preprocess Landsat imagery.
Parameters
----------
archives : string or list of strings
Path to archive or list of pathes to archives.
projection : string (optional)
CRS in which output data should be.
cloud_mask : bool (default = True)
Is cloud masking needed.
pansharpen : bool (default = True)
Is pansharpening needed. RSP uses Brovey transform for pansarpening Landsat 7, 8 and 9.
keep_pan_band : bool (default = False)
Keep pansharpening band or delete it. Pansharpening band have the same wavelengths as optical bands, so it does not contain any additional information to other bands. Affects only Landsat 7, 8 and 9.
resample : resampling method from rasterio as a string (default = 'bilinear')
Resampling method that will be used to upscale bands that cannot be upscaled in pansharpening operation. You can read more about resampling methods `here <https://rasterio.readthedocs.io/en/latest/topics/resampling.html>`_. Affects only Landsat 7, 8 and 9.
t : string ('k' or 'c', default = 'k')
Convert thermal band to kelvins or celsius (no farenheit lol).
clipper : string (optional)
Path to vector file to be used to crop the image.
Returns
----------
list of strings
List of paths where preprocessed Landsat products are saved.
Examples
--------
>>> import remote_sensing_processor as rsp
>>> from glob import glob
>>> landsat_imgs = glob('/home/rsp_test/landsat/*.tar')
>>> print(landsat_imgs)
['/home/rsp_test/landsat/LC08_L1TP_160023_20210825_20210901_02_T1.tar',
'/home/rsp_test/landsat/LT05_L1TP_160023_20110814_20200820_02_T1.tar',
'/home/rsp_test/landsat/LE07_L1TP_159023_20210826_20210921_02_T1.tar',
'/home/rsp_test/landsat/LT05_L1TP_162023_20110812_20200820_02_T1.tar',
'/home/rsp_test/landsat/LM05_L1TP_161023_19930803_20211018_02_T2.tar']
>>> output_landsats = rsp.landsat(landsat_imgs)
Preprocessing of /home/rsp_test/landsat/LC08_L1TP_160023_20210825_20210901_02_T1.tar completed
Preprocessing of /home/rsp_test/landsat/LT05_L1TP_160023_20110814_20200820_02_T1.tar completed
Preprocessing of /home/rsp_test/landsat/LE07_L1TP_159023_20210826_20210921_02_T1.tar completed
Preprocessing of /home/rsp_test/landsat/LT05_L1TP_162023_20110812_20200820_02_T1.tar completed
Preprocessing of /home/rsp_test/landsat/LM05_L1TP_161023_19930803_20211018_02_T2.tar completed
>>> print(output_landsats)
['/home/rsp_test/landsat/LC08_L1TP_160023_20210825_20210901_02_T1/',
'/home/rsp_test/landsat/LT05_L1TP_160023_20110814_20200820_02_T1/',
'/home/rsp_test/landsat/LE07_L1TP_159023_20210826_20210921_02_T1/',
'/home/rsp_test/landsat/LT05_L1TP_162023_20110812_20200820_02_T1/',
'/home/rsp_test/landsat/LM05_L1TP_161023_19930803_20211018_02_T2/']
"""
if isinstance(archives, str):
archives = [archives]
paths = []
for archive in archives:
path = unzip_landsat(archive)
landsat_proc(path = path, projection = projection, cloud_mask = cloud_mask, pansharpen = pansharpen, keep_pan_band = keep_pan_band, resample = resample, t = t, clipper = clipper)
paths.append(path)
print('Preprocessing of ' + archive + ' completed')
return paths
def mosaic(inputs, output_dir, fill_nodata = False, fill_distance = 250, clipper = None, crs = None, nodata = None, reference_raster = None, resample = 'average', nodata_order = False, keep_all_channels = True):
"""
Creates mosaic from several rasters.
Parameters
----------
inputs : list of strings
List of pathes to rasters to be merged or to folders where multiband imagery data is stored.
output_dir: path to output directory as a string
Path where mosaic raster or rasters will be saved.
fill_nodata : bool (default = False)
Is filling the gaps in the raster needed.
fill_distance : int (default = 250)
Fill distance for `fill_nodata` function.
clipper : string (optional)
Path to vector file to be used to crop the image.
crs : string (optional)
CRS in which output data should be.
nodata : int or float (default = None)
Nodata value. If not set then is read from file or set to 0.
reference_raster : path to reference raster as a string (optional)
Reference raster is needed to bring output mosaic raster to same resolution and projection as other data source. Is useful when you need to use data from different sources together.
resample : resampling method from rasterio as a string (default = 'average
Resampling method that will be used to reshape to a reference raster shape. You can read more about resampling methods `here <https://rasterio.readthedocs.io/en/latest/topics/resampling.html>`_. Use 'nearest' if you want to keep only class values.
nodata_order : bool (default = False)
Is needed to merge images in order from images with most nodata values on bottom (they usually are most distorted and cloudy) to images with less nodata on top (they are usually clear).
keep_all_channels : bool (default = True)
Is needed only when you are merging Landsat images from different generations. If True, all bands are processed, if False, only bands that are present in all input images are processed and others are omited.
Returns
----------
list of strings
List of paths to mosaic rasters.
Examples
--------
>>> import remote_sensing_processor as rsp
>>> input_sentinels = ['/home/rsp_test/sentinels/L1C_T42VWR_A032192_20210821T064626/',
... '/home/rsp_test/sentinels/L1C_T42WXS_A032192_20210821T064626/',
... '/home/rsp_test/sentinels/L1C_T43VCL_A032192_20210821T064626/',
... '/home/rsp_test/sentinels/L1C_T43VDK_A031391_20210626T063027/',
... '/home/rsp_test/sentinels/L1C_T43VDL_A023312_20210823T063624/',
... '/home/rsp_test/sentinels/L1C_T43VDL_A031577_20210709T064041/']
>>> border = '/home/rsp_test/border.gpkg'
>>> mosaic_sentinel = rsp.mosaic(input_sentinels, '/home/rsp_test/mosaics/sentinel/', clipper = border, projection = 'EPSG:4326', nodata_order = True)
Processing completed
>>> print(mosaic_sentinel)
['/home/rsp_test/mosaics/sentinel/B1.tif',
'/home/rsp_test/mosaics/sentinel/B2.tif',
'/home/rsp_test/mosaics/sentinel/B3.tif',
'/home/rsp_test/mosaics/sentinel/B4.tif',
'/home/rsp_test/mosaics/sentinel/B5.tif',
'/home/rsp_test/mosaics/sentinel/B6.tif',
'/home/rsp_test/mosaics/sentinel/B7.tif',
'/home/rsp_test/mosaics/sentinel/B8.tif',
'/home/rsp_test/mosaics/sentinel/B8A.tif',
'/home/rsp_test/mosaics/sentinel/B9.tif',
'/home/rsp_test/mosaics/sentinel/B11.tif',
'/home/rsp_test/mosaics/sentinel/B12.tif']
>>> lcs = glob('/home/rsp_test/landcover/*.tif')
>>> print(lcs)
['/home/rsp_test/landcover/ESA_WorldCover_10m_2020_v100_N60E075_Map.tif',
'/home/rsp_test/landcover/ESA_WorldCover_10m_2020_v100_N63E072_Map.tif',
'/home/rsp_test/landcover/ESA_WorldCover_10m_2020_v100_N63E075_Map.tif']
>>> mosaic_landcover = rsp.mosaic(lcs, '/home/rsp_test/mosaics/landcover/', clipper = border, reference_raster = '/home/rsp_test/mosaics/sentinel/B1.tif', nodata = -1)
Processing completed
>>> print(mosaic_landcover)
['/home/rsp_test/mosaics/landcover/ESA_WorldCover_10m_2020_v100_N60E075_Map_mosaic.tif']
"""
mb = ismultiband(inputs[0])
if mb == True:
for i in range(len(inputs)):
if not inputs[i].endswith(r'/'):
inputs[i] = inputs[i] + r'/'
if not output_dir.endswith(r'/'):
output_dir = output_dir + r'/'
if nodata_order == True:
inputs = order(inputs)
paths = mosaic_main(inputs = inputs, output_dir = output_dir, fill_nodata = fill_nodata, fill_distance = fill_distance, clipper = clipper, crs = crs, nodata = nodata, reference_raster = reference_raster, resample = resample, mb = mb, keep_all_channels = keep_all_channels)
return paths
def calculate_index(name, folder = None, b1 = None, b2 = None):
"""
Calculates vegetation indexes.
Parameters
----------
name : string
Name of index.
folder: path to input product as a string (optional)
If you define path to a supported imagery product and a name of supported index, you do not need to define `b1` and `b2`. Bands needed for index calculation are picked automatically.
b1, b2 : path as string (optional)
Path to band to calculate normalized difference index. If you define bands, you do not need to define `folder`, but still need to define `name` - it will be an output file name.
Returns
----------
string
Path where index raster is saved.
Examples
--------
>>> ndvi = rsp.calculate_index('NDVI', '/home/rsp_test/mosaics/sentinel/')
>>> print(ndvi)
'/home/rsp_test/mosaics/sentinel/NDVI.tif'
>>> ndvi = rsp.calculate_index('NDVI', b1 = '/home/rsp_test/mosaics/sentinel/B8.tif', b2 = '/home/rsp_test/mosaics/sentinel/B4.tif')
>>> print(ndvi)
'/home/rsp_test/mosaics/sentinel/NDVI.tif'
"""
if (folder != None) and ((b1 == None) or (b2 == None)):
if not folder.endswith(r'/'):
folder = folder + r'/'
t = get_type(folder)
if t == 'Undefined':
raise ValueError('Cannot define imagery type')
b1, b2 = get_index(t, name, folder)
if (b1 != None) and (b2 != None):
path = nd(name = name, b1 = b1, b2 = b2, folder = folder)
else:
raise ValueError('Bands 1 and 2 must be defined')
return path
def normalize(input_file, output_file, minimum = None, maximum = None):
"""
Applies min-max normalization to input file.
Parameters
----------
input_file : string
Path to input file.
output_file : string
Path to output file.
min: int or float (optional)
Min value for normalization. If not defined then min and max of data type of `input_file` will be used.
max: int or float (optional)
Max value for normalization. If not defined then min and max of data type of `input_file` will be used.
Examples
--------
>>> rsp.normalize('/home/rsp_test/mosaics/sentinel/B1.tif', '/home/rsp_test/mosaics/sentinel/B1_norm.tif', 0, 10000)
"""
normalize_file(input_file, output_file, minimum, maximum) | /remote-sensing-processor-0.2.1.tar.gz/remote-sensing-processor-0.2.1/src/remote_sensing_processor/__init__.py | 0.542379 | 0.359589 | __init__.py | pypi |
import torch
import torch.nn as nn
import torch.nn.functional as F
class DSen2Net(nn.Module):
def __init__(self, input_shape, num_layers = 32, feature_size = 256):
super(DSen2Net, self).__init__()
self.input_shape = input_shape
self.num_layers = num_layers
self.feature_size = feature_size
self.conv1 = nn.Conv2d(sum(x[0] for x in input_shape), feature_size, kernel_size=3, padding='same')
torch.nn.init.kaiming_uniform_(self.conv1.weight)
self.layers = nn.ModuleList()
for i in range(num_layers):
self.layers.append(nn.Conv2d(feature_size, feature_size, kernel_size=3, padding='same'))
torch.nn.init.kaiming_uniform_(self.layers[i*2].weight)
self.layers.append(nn.Conv2d(feature_size, feature_size, kernel_size=3, padding='same'))
torch.nn.init.kaiming_uniform_(self.layers[i*2+1].weight)
self.conv2 = nn.Conv2d(feature_size, input_shape[-1][0], kernel_size=3, padding='same')
torch.nn.init.kaiming_uniform_(self.conv2.weight)
def forward(self, inputs):
if len(self.input_shape) == 3:
combined = torch.cat((inputs[0], inputs[1], inputs[2]), dim=1)
else:
combined = torch.cat((inputs[0], inputs[1]), dim=1)
x = self.conv1(combined)
x = F.relu(x)
for i in range(self.num_layers):
tmp = self.layers[i*2](x)
tmp = F.relu(tmp)
tmp = self.layers[i*2+1](tmp)
tmp = torch.mul(tmp, 0.1)
x = torch.add(x, tmp)
x = self.conv2(x)
if len(self.input_shape) == 3:
x = torch.add(x, inputs[2])
else:
x = torch.add(x, inputs[1])
return x
"""from __future__ import division
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Conv2D, Concatenate, Activation, Lambda, Add, Input
import tensorflow.keras.backend as K
K.set_image_data_format('channels_first')
def resBlock(x, channels, kernel_size=[3, 3], scale=0.1):
tmp = Conv2D(channels, kernel_size, kernel_initializer='he_uniform', padding='same')(x)
tmp = Activation('relu')(tmp)
tmp = Conv2D(channels, kernel_size, kernel_initializer='he_uniform', padding='same')(tmp)
tmp = Lambda(lambda x: x * scale)(tmp)
return Add()([x, tmp])
def s2model(input_shape, num_layers=32, feature_size=256):
input10 = Input(shape=input_shape[0])
input20 = Input(shape=input_shape[1])
if len(input_shape) == 3:
input60 = Input(shape=input_shape[2])
x = Concatenate(axis=1)([input10, input20, input60])
else:
x = Concatenate(axis=1)([input10, input20])
# Treat the concatenation
x = Conv2D(feature_size, (3, 3), kernel_initializer='he_uniform', activation='relu', padding='same')(x)
for i in range(num_layers):
x = resBlock(x, feature_size)
# One more convolution, and then we add the output of our first conv layer
x = Conv2D(input_shape[-1][0], (3, 3), kernel_initializer='he_uniform', padding='same')(x)
# x = Dropout(0.3)(x)
if len(input_shape) == 3:
x = Add()([x, input60])
model = Model(inputs=[input10, input20, input60], outputs=x)
else:
x = Add()([x, input20])
model = Model(inputs=[input10, input20], outputs=x)
return model""" | /remote-sensing-processor-0.2.1.tar.gz/remote-sensing-processor-0.2.1/src/remote_sensing_processor/sentinel2/superres/DSen2Net.py | 0.83508 | 0.60679 | DSen2Net.py | pypi |
from dataclasses import dataclass, asdict
from typing import Any
from remote_sensors import datatypes
from remote_sensors.exc import (
InvalidResponse,
ResponseMarshalerError,
)
from remote_sensors.log import logger
# same as ResponseType
STATUSES = {
'ACK': 0x05,
'NOT_FOUND': 0x06,
'SEND': 0x07,
'RESOURCE_DELETED': 0x08,
'ERROR_FOUND': 0x09,
}
CONTENT_TYPE = {
'UNSIGNED': 0x00,
'SIGNED': 0x01,
'CHAR': 0x02,
'FLOAT': 0x03,
}
@dataclass(order=True)
class Response:
"""Response instance.
Frame order:
|STATUS|TRANS|WORD_SIZE|CNT_TYPE|CNT_LENGTH|DATA...|
:WORD_SIZE: unit is 1 byte
"""
status: str
transaction: int
content_type: str = 'UNSIGNED'
content_length: int = 0
data: Any = None
raw_data: Any = None
word_size: int = 1 # byte
@classmethod # noqa: C901
def from_bytearray(cls, data: bytearray):
"""Returns a Response instance from the given bytearray."""
if not data:
raise InvalidResponse('The frame is not a valid response')
status = data[0] # first byte is the responseType/Status
if status not in STATUSES.values():
raise ResponseMarshalerError(f'The response bytearray does not contain'
f'a valid Status/Type ({status})')
# get the canonical name
for key, value in STATUSES.items():
if value == status:
status = key
break
transaction = data[1] # second byte is the transaction
word_size = data[2] # third byte is the word size
content_type = data[3] # fourth byte is the content type
if content_type not in CONTENT_TYPE.values():
raise ResponseMarshalerError(f'The response bytearray does not contain'
f'a valid Content Type ({content_type})')
# get the cannonical name
for key, value in CONTENT_TYPE.items():
if value == content_type:
content_type = key
break
data_length = data[4] # fifth byte is the length of the data
raw_data = None # sometimes the response is empty
logger.debug(f'Data length: {data_length}')
if data_length > 0:
raw_data = data[5:5 + data_length] # data is from the 5th element until the end
response = cls(content_type=content_type, status=status,
transaction=transaction, word_size=word_size,
content_length=data_length, raw_data=raw_data)
try:
_cnt = datatypes.get(content_type, raw_data, word_size)
except NotImplemented:
response.data = None
else:
response.data = _cnt
return response
def as_bytearray(self):
"""Dump the response as bytearray."""
headers = list()
status = STATUSES[self.status]
headers.append(status)
headers.append(self.transaction)
headers.append(self.word_size)
content_type = CONTENT_TYPE[self.content_type]
headers.append(content_type)
content_length = self.content_length
if self.raw_data is not None:
content_length = len(self.raw_data)
headers.append(content_length)
header = bytearray(headers)
data = self.raw_data if self.raw_data is not None else bytearray()
return header + data
def as_dict(self, with_bytearrays: bool = True, encoding: str = 'utf8'):
dictionary = asdict(self)
# we have our own data types
if self.data and isinstance(self.data, datatypes.RemoteSensorsType):
dictionary['data'] = self.data.value
del dictionary['raw_data'] # we dont need it :D
return dictionary
# custom datatypes: convert the bytearrays to utf-8 (ascii) supported
if not with_bytearrays and isinstance(self.data, bytearray):
dictionary['data'] = self.data.decode(encoding)
if not with_bytearrays and isinstance(self.raw_data, bytearray):
dictionary['raw_data'] = self.data.decode(encoding)
return dictionary
def is_response(message) -> bool:
if isinstance(message, Response):
return True
if not hasattr(message, 'data'):
return False
try:
data = message.data
if not isinstance(data, bytearray):
logger.debug(f'Checked to see if its bytearray failed, {data}')
return False
except Exception as e:
logger.error(f'Failed to check if it is a response, {e}, returning False')
return False
if len(data) == 0:
logger.debug(f'Getting data length 0!')
return False
first_byte = data[0]
logger.debug(f'Evaluating {first_byte}, {first_byte in STATUSES.values()}')
return first_byte in STATUSES.values() | /remote_sensors-0.1.1.tar.gz/remote_sensors-0.1.1/remote_sensors/models/response.py | 0.834137 | 0.317347 | response.py | pypi |
import urllib.parse
from dataclasses import (
dataclass,
asdict,
field,
)
from remote_sensors.exc import (
InvalidRequest,
RequestMarshalerError,
)
from remote_sensors.log import logger
REQUEST_HEADER_SIZE = 0x04
METHODS = {
'GET': 0x01,
'POST': 0x02,
'DELETE': 0x03,
'REGISTRATION': 0x04,
'END_REGISTRATION': 0x00,
}
CONNECTIONS = {
'ONE_TIME': 0x01,
'STREAM': 0x02,
'CLOSE': 0x03,
}
@dataclass
class URI:
base: str = '/'
params: dict = field(default_factory=dict)
def __post_init__(self):
if not self.base.startswith('/'):
self.base = f'/{self.base}'
if '?' in self.base:
self.base, self.params = self.base.split('?')
self.params = urllib.parse.parse_qs(self.params)
def __truediv__(self, path: str):
"""Append the path to the base url."""
if path.startswith('/'):
path = path[1:]
self.base = f'{self.base}/{path}'
return self
def __str__(self):
"""Returns the string representation of the URI"""
uri = self.base
if self.params:
params_dump = urllib.parse.urlencode(self.params)
uri = f'{uri}?{params_dump}'
return uri
def __eq__(self, other) -> bool:
if isinstance(other, URI):
return self.path == other.path or self.path == other.path[:-1]
trimming_slash = other
if other.endswith('/'):
trimming_slash = other[:-1]
return self.path == other or str(self) == other or \
self.path == trimming_slash or str(self) == trimming_slash
@property
def path(self) -> str:
return self.base
def add_params(self, params: dict):
"""Adds the parameters to the current parameters."""
_params = {**self.params, **params}
self.params = _params
@dataclass
class Request:
"""Request instance.
Frame order:
|METHOD|TRANS|CONNECTION|URI_LENGTH|URI...|
"""
method: str
transaction: int
connection: str
uri: URI
def __post_init__(self):
"""Validate the input."""
if self.method not in METHODS:
raise InvalidRequest('Request Method is not valid.')
if self.connection not in CONNECTIONS:
raise InvalidRequest('Request Connection Type is not valid.')
@classmethod
def from_bytearray(cls, data: bytearray):
"""Returns a parsed request instance."""
if not data or len(data) < REQUEST_HEADER_SIZE:
raise RequestMarshalerError('Frame is invalid')
method = data[0] # first byte is the request method
if method not in METHODS.values():
raise RequestMarshalerError(f'Method {method} is not valid')
# set the canonical name
for key, value in METHODS.items():
if value == method:
method = key
break
transaction = data[1] # first byte is the transaction
connection = data[2] # third byte is the connection type
if connection not in CONNECTIONS.values():
raise RequestMarshalerError(f'Connection {connection} is not valid')
# set the canonical name
for key, value in CONNECTIONS.items():
if value == connection:
connection = key
uri_length = data[3] # fourth byte is the URI length
raw_uri = data[4:4 + uri_length]
raw_uri = raw_uri.decode('ascii')
uri = URI(raw_uri)
req = cls(method=method, connection=connection,
transaction=transaction, uri=uri)
return req
def as_bytearray(self):
"""Dump the Request to a bytearray."""
data = list()
method = METHODS[self.method]
data.append(method)
trans = self.transaction
data.append(trans)
connection = CONNECTIONS[self.connection]
data.append(connection)
uri = str(self.uri)
uri = uri.encode('ascii')
uri_length = len(uri)
data.append(uri_length) # add uri length to header
headers = bytearray(data)
return headers + uri
def as_dict(self):
return asdict(self)
def is_request(message) -> bool:
if isinstance(message, Request):
return True
if not hasattr(message, 'data'):
return False
data = message.data
if not isinstance(data, bytearray):
return False
if len(data) == 0:
logger.debug(f'Data from message was empty')
return False
first_byte = data[0]
return first_byte in METHODS.values() | /remote_sensors-0.1.1.tar.gz/remote_sensors-0.1.1/remote_sensors/models/request.py | 0.766731 | 0.183502 | request.py | pypi |
from tempfile import SpooledTemporaryFile
from os.path import basename, join, isabs
from io import BytesIO
import glob
import re
import tarfile
from base64 import b64decode, b64encode
UTF8 = r"utf-8"
def extract_job(data, destination):
"""
Receives a readable stream containing a tgz archive and extracts it to the given destination.
Args:
iobase (:class:`io.IOBase`): a readable IOBase object to extract. Note that this
is the base class of the :class:`~urllib3.response.HTTPResponse` class,
which is given by the ``raw`` attribute of the :class:`~requests.models.Response`
returned by :func:`~requests.get`.
destination (str): filesystem location in which to extract the archive.
Path must exist and be writable by this process
Returns:
None
"""
with tarfile.open(mode="r|gz", fileobj=BytesIO(b64decode(data))) as archive:
archive.extractall(destination)
def extract_unencoded(iobase, destination):
"""
Receives a readable stream containing a tgz archive and extracts it to the given destination.
Args:
iobase (:class:`io.IOBase`): a readable IOBase object to extract. Note that this
is the base class of the :class:`~urllib3.response.HTTPResponse` class,
which is given by the ``raw`` attribute of the :class:`~requests.models.Response`
returned by :func:`~requests.get`.
destination (str): filesystem location in which to extract the archive.
Path must exist and be writable by this process
Returns:
None
"""
with tarfile.open(mode="r|gz", fileobj=iobase) as archive:
archive.extractall(destination)
def package_job(*individual_files, working_dir=None, mask="*", filespec=None):
"""
Receives a specification of files and returns a string containing a Base64-encoded, gzipped
tarfile, containing the specified files.
If the inputs for the remote command (or even the command itself) are not present in the
remote system, it is necessary to package the required data for submission to the remote.
Files specified via *individual_files* parameter will be placed in the root path inside
the archive. Files inside of *working_dir* will retain their same path relative to
*working_dir*.
Args:
individual_files: a list of :class:`str` representing complete paths to specific
files to add to the archive. These files will have their entire path stripped and
will be added to the root path of the
archive. *please note that no care is taken to ensure against name collisions.*
working_dir (:class:`str`): string path to the directory containing files to be archived.
mask (:class:`str`): selector. If working_dir is given, files in that directory will be
included if they match the pattern given here.
Returns: a string containing a Base64-encoded gzipped tarfile.
"""
specs = list(individual_files)
if working_dir:
specs.append(mask)
if filespec:
specs += filespec.split(":")
print(specs)
file_object = SpooledTemporaryFile()
relative_path_pattern = re.compile(r'%s(.*)$' % working_dir)
with tarfile.open(mode='x:gz', fileobj=file_object) as archive:
for pathname in specs:
if isabs(pathname):
for file in glob.glob(pathname, recursive=True):
archive.add(file, arcname=basename(file))
else:
for file in glob.glob(join(working_dir, pathname), recursive=True):
path_match = relative_path_pattern.match(file)
archive.add(file, arcname=path_match[1])
file_object.seek(0)
return b64encode(file_object.read()).decode(UTF8) | /remote_sge-0.2.8-py3-none-any.whl/sge_client/io/jobarchive.py | 0.771155 | 0.370055 | jobarchive.py | pypi |
from os.path import expanduser
from json import loads, dumps
from datetime import datetime
from filelock import FileLock
DBFILE = expanduser('~/.sge_client.db.json')
LOCKFILE = expanduser('~/.sge_client.lock')
R_ID = 'remote_id'
L_ID = 'local_id'
def initialize():
"Initializes a new empty data file. Overwrites the current file, if one exists."
with open(DBFILE, 'w') as file:
file.write(dumps([]))
class Db(object):
"""
Basic file storage for a JSON object.
"""
def where_not_in(self, column, *values):
"""
Selects results which do not match the given column/values expression.
Args:
column (str): The named field to test against.
values (str): Vales to search for. A record will not be returned if the field named
in *column* is contained inside of the list of values given.
"""
return [x for x in self.data if x[column] not in values]
def all_jobs(self):
"Retrieve all records."
return self.data
def values(self):
"Synonym for #all_jobs."
return self.data
def find_by_remote_job_id(self, job_id):
"""
Finds a record by the id number for that job on the remote cluster.
Args:
job_id(str): the job id for this job as scheduled or running on the remote.
Returns:
A :class:`dict` object containing various attributes of the job on the local and the remote::
{
local_id: 123123,
remote_id: 1234324,
local_wd: '/var/whatever',
last_checked: '2017-12-27 16:35:30.898984'
}
"""
for value in self.values():
print("It's %s, looking for %s" % (value['remote_id'], job_id))
if int(value['remote_id']) == int(job_id):
return value
def find_by_local_job_id(self, job_id):
return next((x for x in self.values() if x[L_ID] == job_id), None)
def insert(self, local_id, remote_id, local_wd):
self.data.append(dict(local_id=local_id,
remote_id=remote_id,
local_wd=local_wd,
last_checked=str(datetime.now())))
def update(self, job):
job['last_checked'] = str(datetime.now())
def delete(self, item):
self.data.remove(item)
def __init__(self):
self.lock = FileLock(LOCKFILE)
self.data = None
def save(self):
with open(DBFILE, 'w') as file:
file.write(dumps(self.data))
def open(self):
self.lock.acquire(timeout=2)
with open(DBFILE) as file:
self.data = loads(file.read())
def close(self):
self.save()
self.data = None
self.lock.release()
def __enter__(self):
self.open()
return self
def __exit__(self, *_):
"Exit method for resource"
self.close() | /remote_sge-0.2.8-py3-none-any.whl/sge_client/io/database.py | 0.654784 | 0.210817 | database.py | pypi |
"For obtaining the status of a job"
from os import path
from xml.etree import ElementTree
import re
import sge.shell
from sge.util.xpath_attr_desciptor import XPathAttr
from sge.util.serializers import (IntConverter, XmlEnvironmentDeserializer,
XmlJobArgumentsDeserializer, XmlIntDeserializer)
QSTAT = path.expandvars("${SGE_ROOT}/bin/${SGE_ARCH}/qstat")
STATE_PATTERN = re.compile(r"^.{40}(\w+)")
class SgeJobStateCode(object):
"Job state codes as returned by qstat."
SUSPENDED = r's'
RUNNING = r'r'
QUEUED_ACTIVE = r'qw'
QUEUED_HELD = r'hqw'
UNKNOWN = r'unknown'
def get_job_status(job_id):
"""
Calls qstat_ and parses the output for the state of the job.
Returns:
a state code from :class:`~sge.status.SgeJobStateCode`
.. _qstat: http://gridscheduler.sourceforge.net/htmlman/htmlman1/qstat.html
"""
print("OJB ID: %s" % job_id)
output = sge.shell.run_in_shell(r"%s | grep -e '^\s*%s'" % (QSTAT, job_id))
match = STATE_PATTERN.match(output)
if match:
return match[1]
else:
return SgeJobStateCode.UNKNOWN
class JobDetail(object):
"""
Receives an XML ElementTree and provides attributes which read from it.
"""
command_path = XPathAttr('JB_script_file')
owner = XPathAttr('JB_owner')
working_dir = XPathAttr('JB_cwd')
job_id = XPathAttr('JB_job_number', XmlIntDeserializer)
uid = XPathAttr('JB_uid', XmlIntDeserializer)
gid = XPathAttr('JB_gid', XmlIntDeserializer)
name = XPathAttr('JB_job_name')
environment = XPathAttr('JB_env_list', XmlEnvironmentDeserializer)
arguments = XPathAttr('JB_job_args', XmlJobArgumentsDeserializer)
def __init__(self, xml_node):
self.xml_node = xml_node
def to_dict(self):
"Makes it into a dictionary"
return dict(command_path=self.command_path, owner=self.owner, job_id=self.job_id,
uid=self.uid, gid=self.gid, name=self.name, working_directory=self.working_dir,
environment=self.environment, arguments=self.arguments)
def get_job_detail(job_id):
"""
Retrieves the XML output from qstat_ and parses it.
.. _qstat: http://gridscheduler.sourceforge.net/htmlman/htmlman1/qstat.html
"""
xml = sge.shell.run(QSTAT, '-j', str(job_id), '-xml')
if re.findall('unknown_jobs', xml):
return None
else:
root = ElementTree.fromstring(xml)
return JobDetail(root.find('djob_info/element')) | /remote_sge-0.2.8-py3-none-any.whl/sge/status.py | 0.602997 | 0.222404 | status.py | pypi |
from sge.util.serializers import StringSerializer
class CmdOptionAttr(object):
"""
Descriptor for properties on the JobTemplate class, to translate DRMAA options
to properly formatted qsub options.
A qsub option is internally identified by the option name as it is seen
on the command line, e.g. "-V".
Setting the attribute on a JobTemplate will result in the value being encoded
as needed (e.g. boolean values become yes/no, etc), and then stored in the
:func:`drmaa.JobTemplate.options` dictionary, using *option_name* as its key.
"""
def __init__(self, option_name, type_converter=StringSerializer, doc=None, del_value=None):
"""
Args:
option_name (str): the name of the qsub_ option, as described
`on the qsub man page`__.
type_converter
.. _qsub: http://gridscheduler.sourceforge.net/htmlman/htmlman1/qsub.html
__ qsub_
"""
self.option_name = option_name
self.converter = type_converter
self.del_value = del_value
try:
if doc:
self.__doc__ = (doc +
"\n\n" + type_converter.doctext() +
"\n\nSetting this attribute to %s will cause it to " % str(del_value) +
"be removed from the command options.")
except Exception as identifier:
print(option_name)
print(doc)
print(type_converter)
raise identifier
def __get__(self, instance, _):
if not self.option_name in instance.qsub_options:
return self.del_value
raw_value = instance.qsub_options[self.option_name]
if self.converter:
return self.converter.deserialize(raw_value)
else:
return raw_value
def __set__(self, instance, value):
print(instance)
if self.converter:
value = self.converter.serialize(value)
if value == self.del_value and self.option_name in instance.qsub_options:
del instance.qsub_options[self.option_name]
else:
instance.qsub_options[self.option_name] = value
def __delete__(self, instance):
print("deleted in descriptor object")
del instance.qsub_options[self.option_name] | /remote_sge-0.2.8-py3-none-any.whl/sge/util/cmd_opt_descriptor.py | 0.757794 | 0.168686 | cmd_opt_descriptor.py | pypi |
from PySide2 import QtCore
qt_resource_data = b"\
\x00\x00\x17p\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg3272\
\x22 width=\x2224\x22 hei\
ght=\x2224\x22 version\
=\x221.0\x22 xmlns=\x22ht\
tp://www.w3.org/\
2000/svg\x22 xmlns:\
xlink=\x22http://ww\
w.w3.org/1999/xl\
ink\x22>\x0a <defs id=\
\x22defs3274\x22>\x0a <l\
inearGradient id\
=\x22linearGradient\
3404\x22 x1=\x2218.031\
\x22 x2=\x2220.055\x22 y1\
=\x2216.408\x22 y2=\x2224\
.628\x22 gradientUn\
its=\x22userSpaceOn\
Use\x22>\x0a <stop i\
d=\x22stop2687\x22 sty\
le=\x22stop-color:#\
fff;stop-opacity\
:.27451\x22 offset=\
\x220\x22/>\x0a <stop i\
d=\x22stop2689\x22 sty\
le=\x22stop-color:#\
fff;stop-opacity\
:.078431\x22 offset\
=\x221\x22/>\x0a </linea\
rGradient>\x0a <ra\
dialGradient id=\
\x22radialGradient3\
402\x22 cx=\x224.02\x22 c\
y=\x225.5927\x22 r=\x2210\
.273\x22 gradientTr\
ansform=\x22matrix(\
-.016802 1.3943 \
-1.7966 -.021651\
-5.3658 -19.339\
)\x22 gradientUnits\
=\x22userSpaceOnUse\
\x22>\x0a <stop id=\x22\
stop3754\x22 style=\
\x22stop-color:#fff\
\x22 offset=\x220\x22/>\x0a \
<stop id=\x22stop\
3760\x22 style=\x22sto\
p-color:#fff\x22 of\
fset=\x22.84754\x22/>\x0a\
<stop id=\x22sto\
p3756\x22 style=\x22st\
op-color:#fff;st\
op-opacity:0\x22 of\
fset=\x221\x22/>\x0a </r\
adialGradient>\x0a \
<linearGradient\
id=\x22linearGradi\
ent3400\x22 x1=\x229.7\
046\x22 x2=\x229.7046\x22\
y1=\x2220.882\x22 y2=\
\x224.303\x22 gradient\
Transform=\x22matri\
x(.99458 0 0 .99\
458 -19.857 -19.\
778)\x22 gradientUn\
its=\x22userSpaceOn\
Use\x22>\x0a <stop i\
d=\x22stop3624\x22 sty\
le=\x22stop-color:#\
bb2b12\x22 offset=\x22\
0\x22/>\x0a <stop id\
=\x22stop3626\x22 styl\
e=\x22stop-color:#c\
d7233\x22 offset=\x221\
\x22/>\x0a </linearGr\
adient>\x0a <radia\
lGradient id=\x22ra\
dialGradient3398\
\x22 cx=\x227.8186\x22 cy\
=\x228.5609\x22 r=\x2211.\
268\x22 gradientTra\
nsform=\x22matrix(1\
.69 0 0 1.0436 -\
24.967 -20.534)\x22\
gradientUnits=\x22\
userSpaceOnUse\x22>\
\x0a <stop id=\x22st\
op3618\x22 style=\x22s\
top-color:#edb76\
3\x22 offset=\x220\x22/>\x0a\
<stop id=\x22sto\
p3270\x22 style=\x22st\
op-color:#de7f32\
\x22 offset=\x22.5\x22/>\x0a\
<stop id=\x22sto\
p3620\x22 style=\x22st\
op-color:#d24413\
\x22 offset=\x221\x22/>\x0a \
</radialGradien\
t>\x0a <linearGrad\
ient id=\x22linearG\
radient3396\x22 x1=\
\x229.8764\x22 x2=\x229.8\
764\x22 y1=\x222.6015\x22\
y2=\x2223.062\x22 gra\
dientTransform=\x22\
translate(-19.51\
8 -21.496)\x22 grad\
ientUnits=\x22userS\
paceOnUse\x22>\x0a <\
stop id=\x22stop515\
9\x22 style=\x22stop-c\
olor:#c1c1c1\x22 of\
fset=\x220\x22/>\x0a <s\
top id=\x22stop5161\
\x22 style=\x22stop-co\
lor:#909090\x22 off\
set=\x221\x22/>\x0a </li\
nearGradient>\x0a \
<linearGradient \
id=\x22linearGradie\
nt3394\x22 x1=\x22-28.\
531\x22 x2=\x22-28.531\
\x22 y1=\x2217.956\x22 y2\
=\x2237.503\x22 gradie\
ntTransform=\x22tra\
nslate(14.86 -35\
.997)\x22 gradientU\
nits=\x22userSpaceO\
nUse\x22>\x0a <stop \
id=\x22stop11113\x22 s\
tyle=\x22stop-color\
:#fff\x22 offset=\x220\
\x22/>\x0a <stop id=\
\x22stop11115\x22 styl\
e=\x22stop-color:#c\
dcdcd\x22 offset=\x22.\
91014\x22/>\x0a <sto\
p id=\x22stop11117\x22\
style=\x22stop-col\
or:#a1a1a1\x22 offs\
et=\x221\x22/>\x0a </lin\
earGradient>\x0a <\
radialGradient i\
d=\x22radialGradien\
t3392\x22 cx=\x22605.7\
1\x22 cy=\x22486.65\x22 r\
=\x22117.14\x22 gradie\
ntTransform=\x22mat\
rix(-2.7744 0 0 \
1.9697 112.76 -8\
72.89)\x22 gradient\
Units=\x22userSpace\
OnUse\x22 xlink:hre\
f=\x22#linearGradie\
nt5060\x22/>\x0a <lin\
earGradient id=\x22\
linearGradient50\
60\x22>\x0a <stop id\
=\x22stop5062\x22 offs\
et=\x220\x22/>\x0a <sto\
p id=\x22stop5064\x22 \
style=\x22stop-opac\
ity:0\x22 offset=\x221\
\x22/>\x0a </linearGr\
adient>\x0a <radia\
lGradient id=\x22ra\
dialGradient3390\
\x22 cx=\x22605.71\x22 cy\
=\x22486.65\x22 r=\x22117\
.14\x22 gradientTra\
nsform=\x22matrix(2\
.7744 0 0 1.9697\
-1891.6 -872.89\
)\x22 gradientUnits\
=\x22userSpaceOnUse\
\x22 xlink:href=\x22#l\
inearGradient506\
0\x22/>\x0a <linearGr\
adient id=\x22linea\
rGradient3388\x22 x\
1=\x22302.86\x22 x2=\x223\
02.86\x22 y1=\x22366.6\
5\x22 y2=\x22609.51\x22 g\
radientTransform\
=\x22matrix(2.7744 \
0 0 1.9697 -1892\
.2 -872.89)\x22 gra\
dientUnits=\x22user\
SpaceOnUse\x22>\x0a \
<stop id=\x22stop50\
50\x22 style=\x22stop-\
opacity:0\x22 offse\
t=\x220\x22/>\x0a <stop\
id=\x22stop5056\x22 o\
ffset=\x22.5\x22/>\x0a \
<stop id=\x22stop50\
52\x22 style=\x22stop-\
opacity:0\x22 offse\
t=\x221\x22/>\x0a </line\
arGradient>\x0a </d\
efs>\x0a <g id=\x22g27\
01\x22>\x0a <g id=\x22g2\
902\x22>\x0a <g id=\x22\
g2589\x22 transform\
=\x22translate(.036\
304 -1.2166e-7)\x22\
>\x0a <g id=\x22g33\
77\x22 transform=\x22t\
ranslate(19.518 \
21.496)\x22>\x0a <\
g id=\x22g3490\x22 tra\
nsform=\x22matrix(.\
54593 0 0 .51685\
-20.52 -22.074)\
\x22 style=\x22stroke-\
width:1.8826\x22>\x0a \
<g id=\x22g502\
2\x22 transform=\x22ma\
trix(.021652 0 0\
.014857 43.008 \
42.685)\x22 style=\x22\
stroke-width:104\
.96\x22>\x0a <re\
ct id=\x22rect2527\x22\
x=\x22-1559.3\x22 y=\x22\
-150.7\x22 width=\x221\
339.6\x22 height=\x224\
78.36\x22 style=\x22fi\
ll:url(#linearGr\
adient3388);opac\
ity:.40206\x22/>\x0a \
<path id=\x22p\
ath2529\x22 d=\x22m-21\
9.62-150.68v478.\
33c142.87 0.9004\
5 345.4-107.17 3\
45.4-239.2 0-132\
.03-159.44-239.1\
3-345.4-239.13z\x22\
style=\x22fill:url\
(#radialGradient\
3390);opacity:.4\
0206\x22/>\x0a <\
path id=\x22path253\
1\x22 d=\x22m-1559.3-1\
50.68v478.33c-14\
2.87 0.90045-345\
.4-107.17-345.4-\
239.2 0-132.03 1\
59.44-239.13 345\
.4-239.13z\x22 styl\
e=\x22fill:url(#rad\
ialGradient3392)\
;opacity:.40206\x22\
/>\x0a </g>\x0a \
</g>\x0a <pa\
th id=\x22path3496\x22\
d=\x22m-18.841-18.\
927c-0.08554 0-0\
.15887 0.09275-0\
.15887 0.18854 0\
5.8692-0.04308 \
12.244-0.04914 1\
8.225 0.02908 0.\
89504 0.53724 1.\
5051 0.88966 1.5\
083 1.0127 0.009\
195 0.53927-0.00\
4029 1.0485 0 6.\
4703-0.016099 13\
.579-0.078173 20\
.049-0.094271 0.\
054214 0.006918-\
1.4767-0.10772-1\
.4616-1.4455 1e-\
6 -4.6729-0.5020\
5-11.187-0.50205\
-15.86 0-0.18688\
-0.01544-0.29089\
-0.031774-0.3770\
8-0.012614-0.066\
56-0.028696-0.09\
887-0.063547-0.1\
257-0.027862-0.0\
2445-0.054549-0.\
05704-0.095321-0\
.06285h-8.8201c-\
0.81532 0-1.0021\
-1.992-2.2137-1.\
992l-8.5913 0.03\
569-3e-6 3e-6z\x22 \
style=\x22fill:url(\
#linearGradient3\
394);stroke-line\
cap:round;stroke\
-linejoin:round;\
stroke-width:1.0\
111;stroke:url(#\
linearGradient33\
96)\x22/>\x0a <pat\
h id=\x22path3498\x22 \
d=\x22m-16.019-14.9\
49c10.57 0 13.03\
4.6e-4 19.994-0\
.02754 0 1.5704 \
0.25816 16.04-0.\
48411 16.04-0.71\
405 0-14.046-0.0\
94351-21.009-0.0\
66352 1.4717 0 1\
.4993-0.62275 1.\
4993-15.946v1e-6\
z\x22 style=\x22fill:u\
rl(#radialGradie\
nt3398);stroke-l\
inecap:round;str\
oke-linejoin:rou\
nd;stroke:url(#l\
inearGradient340\
0)\x22/>\x0a <path\
id=\x22path3211\x22 d\
=\x22m3.4207-13.887\
s-16.832 0.09361\
-18.397-0.09237c\
-0.08294 13.829-\
0.50095 14.439-0\
.50095 14.439\x22 s\
tyle=\x22fill:none;\
opacity:.4;strok\
e-width:.9812;st\
roke:url(#radial\
Gradient3402)\x22/>\
\x0a <path id=\x22\
path2608\x22 transf\
orm=\x22matrix(.445\
03 0 0 .40237 -1\
6.699 -17.646)\x22 \
d=\x22m3.2188 5.343\
8c-1.5389 0-2.81\
25 1.2736-2.8125\
2.8125v16c0.005\
894 0.28623 0.14\
842 0.61258 0.37\
5 0.8125 0.22658\
0.19992 0.51619\
0.28427 0.84375\
0.25 0.010415 2\
.66e-4 0.020835 \
2.66e-4 0.03125 \
0l45-7.125c0.521\
77-0.08229 0.930\
21-0.53433 0.937\
5-1.0625v-8.875c\
0-1.5389-1.2736-\
2.8125-2.8125-2.\
8125h-41.562z\x22 s\
tyle=\x22fill-rule:\
evenodd;fill:url\
(#linearGradient\
3404)\x22/>\x0a </g\
>\x0a </g>\x0a <re\
ct id=\x22rect2545\x22\
x=\x222\x22 y=\x224\x22 wid\
th=\x227\x22 height=\x221\
\x22 rx=\x22.53846\x22 ry\
=\x22.5\x22 style=\x22dis\
play:block;fill:\
#eda374\x22/>\x0a </g\
>\x0a </g>\x0a</svg>\x0a\
\x00\x00\x0bW\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg2\x22 w\
idth=\x2222\x22 height\
=\x2222\x22 version=\x221\
.0\x22 xmlns=\x22http:\
//www.w3.org/200\
0/svg\x22 xmlns:xli\
nk=\x22http://www.w\
3.org/1999/xlink\
\x22>\x0a <defs id=\x22de\
fs4\x22>\x0a <linearG\
radient id=\x22line\
arGradient2406\x22 \
x1=\x2263.397\x22 x2=\x22\
63.397\x22 y1=\x22-12.\
489\x22 y2=\x225.4676\x22\
gradientTransfo\
rm=\x22matrix(1.086\
3 0 0 1.0862 -56\
.567 14.814)\x22 gr\
adientUnits=\x22use\
rSpaceOnUse\x22>\x0a \
<stop id=\x22stop4\
875\x22 style=\x22stop\
-color:#fff\x22 off\
set=\x220\x22/>\x0a <st\
op id=\x22stop4877\x22\
style=\x22stop-col\
or:#fff;stop-opa\
city:0\x22 offset=\x22\
1\x22/>\x0a </linearG\
radient>\x0a <line\
arGradient id=\x22l\
inearGradient241\
1\x22 x1=\x2218.379\x22 x\
2=\x2218.379\x22 y1=\x224\
4.98\x22 y2=\x223.0816\
\x22 gradientTransf\
orm=\x22matrix(.516\
04 0 0 .51604 -1\
.385 -1.385)\x22 gr\
adientUnits=\x22use\
rSpaceOnUse\x22>\x0a \
<stop id=\x22stop2\
492\x22 style=\x22stop\
-color:#791235\x22 \
offset=\x220\x22/>\x0a \
<stop id=\x22stop24\
94\x22 style=\x22stop-\
color:#dd3b27\x22 o\
ffset=\x221\x22/>\x0a </\
linearGradient>\x0a\
<radialGradien\
t id=\x22radialGrad\
ient2409\x22 cx=\x2223\
.896\x22 cy=\x223.99\x22 \
r=\x2220.397\x22 gradi\
entTransform=\x22ma\
trix(0 1.2316 -1\
.6257 0 17.487 -\
29.721)\x22 gradien\
tUnits=\x22userSpac\
eOnUse\x22>\x0a <sto\
p id=\x22stop3244\x22 \
style=\x22stop-colo\
r:#f8b17e\x22 offse\
t=\x220\x22/>\x0a <stop\
id=\x22stop3246\x22 s\
tyle=\x22stop-color\
:#e35d4f\x22 offset\
=\x22.26238\x22/>\x0a <\
stop id=\x22stop324\
8\x22 style=\x22stop-c\
olor:#c6262e\x22 of\
fset=\x22.66094\x22/>\x0a\
<stop id=\x22sto\
p3250\x22 style=\x22st\
op-color:#690b54\
\x22 offset=\x221\x22/>\x0a \
</radialGradien\
t>\x0a </defs>\x0a <g \
id=\x22layer1\x22>\x0a <\
g id=\x22g2502\x22>\x0a \
<path id=\x22path2\
555\x22 d=\x22m11 0.50\
178c-5.7926 0-10\
.498 4.7057-10.4\
98 10.498 0 5.79\
26 4.7057 10.498\
10.498 10.498 5\
.7926 0 10.498-4\
.7057 10.498-10.\
498 0-5.7926-4.7\
057-10.498-10.49\
8-10.498z\x22 style\
=\x22fill:url(#radi\
alGradient2409);\
stroke-linecap:r\
ound;stroke-line\
join:round;strok\
e-width:1.0037;s\
troke:url(#linea\
rGradient2411)\x22/\
>\x0a <path id=\x22p\
ath2463\x22 d=\x22m20.\
5 11c0 5.2469-4.\
2536 9.5003-9.49\
99 9.5003-5.2468\
0-9.5001-4.2535\
-9.5001-9.5003 0\
-5.2466 4.2534-9\
.4997 9.5001-9.4\
997 5.2463 0 9.4\
999 4.253 9.4999\
9.4997z\x22 style=\
\x22fill:none;opaci\
ty:.4;stroke:url\
(#linearGradient\
2406)\x22/>\x0a </g>\x0a\
<g id=\x22g2478\x22 \
transform=\x22trans\
late(-25.73 .027\
876)\x22>\x0a <path \
id=\x22path3243\x22 d=\
\x22m33.308 4.9721-\
1.5781 1.5781 3.\
2558 3.2392c0.10\
002 0.10128 0.10\
002 0.26416 0 0.\
36545l-3.2558 3.\
2392 1.5781 1.57\
81 3.2392-3.2392\
c0.10128-0.10002\
0.26416-0.10002\
0.36545 0l3.239\
2 3.2392 1.5781-\
1.5781-3.2392-3.\
2392c-0.10002-0.\
10128-0.10002-0.\
26416 0-0.36545l\
3.2392-3.2392-1.\
5781-1.5781-3.23\
92 3.2392c-0.101\
28 0.10002-0.264\
16 0.10002-0.365\
45 0l-3.2392-3.2\
392z\x22 style=\x22fil\
l-rule:evenodd;o\
pacity:.2\x22/>\x0a \
<path id=\x22path32\
56\x22 d=\x22m33.308 5\
.9721-1.5781 1.5\
781 3.2558 3.239\
2c0.10002 0.1012\
8 0.10002 0.2641\
6 0 0.36545l-3.2\
558 3.2392 1.578\
1 1.5781 3.2392-\
3.2392c0.10128-0\
.10002 0.26416-0\
.10002 0.36545 0\
l3.2392 3.2392 1\
.5781-1.5781-3.2\
392-3.2392c-0.10\
002-0.10128-0.10\
002-0.26416 0-0.\
36545l3.2392-3.2\
392-1.5781-1.578\
1-3.2392 3.2392c\
-0.10128 0.10002\
-0.26416 0.10002\
-0.36545 0l-3.23\
92-3.2392z\x22 styl\
e=\x22fill-rule:eve\
nodd;fill:#fff\x22/\
>\x0a </g>\x0a </g>\x0a<\
/svg>\x0a\
\x00\x00<5\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg3425\
\x22 width=\x2224\x22 hei\
ght=\x2224\x22 version\
=\x221.0\x22 xmlns=\x22ht\
tp://www.w3.org/\
2000/svg\x22 xmlns:\
xlink=\x22http://ww\
w.w3.org/1999/xl\
ink\x22>\x0a <defs id=\
\x22defs3427\x22>\x0a <l\
inearGradient id\
=\x22XMLID_5_\x22 x1=\x22\
64\x22 x2=\x2264\x22 y1=\x22\
21.941\x22 y2=\x22104.\
06\x22 gradientUnit\
s=\x22userSpaceOnUs\
e\x22>\x0a <stop id=\
\x22stop24\x22 style=\x22\
stop-color:#dada\
da\x22 offset=\x220\x22/>\
\x0a <stop id=\x22st\
op26\x22 style=\x22sto\
p-color:#ccc\x22 of\
fset=\x221\x22/>\x0a </l\
inearGradient>\x0a \
<linearGradient\
id=\x22XMLID_6_\x22 x\
1=\x2264\x22 x2=\x2264\x22 y\
1=\x2221.523\x22 y2=\x221\
03.07\x22 gradientU\
nits=\x22userSpaceO\
nUse\x22 xlink:href\
=\x22#XMLID_5_\x22>\x0a \
<stop id=\x22stop3\
1\x22 style=\x22stop-c\
olor:#d9d9d9\x22 of\
fset=\x220\x22/>\x0a <s\
top id=\x22stop33\x22 \
style=\x22stop-colo\
r:#ccc\x22 offset=\x22\
1\x22/>\x0a </linearG\
radient>\x0a <line\
arGradient id=\x22l\
inearGradient290\
4\x22 x1=\x2264\x22 x2=\x226\
4\x22 y1=\x2221.523\x22 y\
2=\x22103.07\x22 gradi\
entTransform=\x22ma\
trix(.18137 0 0 \
.18107 .39218 .5\
9228)\x22 gradientU\
nits=\x22userSpaceO\
nUse\x22 xlink:href\
=\x22#XMLID_6_\x22/>\x0a \
<linearGradient\
id=\x22linearGradi\
ent2906\x22 x1=\x2249.\
273\x22 x2=\x2249.374\x22\
y1=\x2222.275\x22 y2=\
\x22102.05\x22 gradien\
tTransform=\x22matr\
ix(.18137 0 0 .1\
8107 .39218 .592\
28)\x22 gradientUni\
ts=\x22userSpaceOnU\
se\x22 xlink:href=\x22\
#XMLID_6_\x22/>\x0a <\
linearGradient i\
d=\x22linearGradien\
t2909\x22 x1=\x2264\x22 x\
2=\x2264\x22 y1=\x2221.94\
1\x22 y2=\x22104.06\x22 g\
radientTransform\
=\x22matrix(.15715 \
0 0 .15689 1.942\
2 2.2529)\x22 gradi\
entUnits=\x22userSp\
aceOnUse\x22 xlink:\
href=\x22#XMLID_5_\x22\
/>\x0a <linearGrad\
ient id=\x22linearG\
radient2911\x22 x1=\
\x2286.133\x22 x2=\x2284.\
639\x22 y1=\x22105.1\x22 \
y2=\x2220.895\x22 grad\
ientTransform=\x22m\
atrix(.15715 0 0\
.15689 1.9422 2\
.2529)\x22 gradient\
Units=\x22userSpace\
OnUse\x22>\x0a <stop\
id=\x22stop5130\x22 s\
tyle=\x22stop-color\
:#fff\x22 offset=\x220\
\x22/>\x0a <stop id=\
\x22stop5132\x22 style\
=\x22stop-color:#95\
9595\x22 offset=\x221\x22\
/>\x0a </linearGra\
dient>\x0a <linear\
Gradient id=\x22lin\
earGradient2914\x22\
x1=\x2264\x22 x2=\x2264\x22\
y1=\x223.1001\x22 y2=\
\x22122.9\x22 gradient\
Transform=\x22matri\
x(.17214 0 0 .17\
186 .9828 1.19)\x22\
gradientUnits=\x22\
userSpaceOnUse\x22>\
\x0a <stop id=\x22st\
op11\x22 style=\x22sto\
p-color:#f2f2f2\x22\
offset=\x220\x22/>\x0a \
<stop id=\x22stop1\
3\x22 style=\x22stop-c\
olor:#d8d8d8\x22 of\
fset=\x221\x22/>\x0a </l\
inearGradient>\x0a \
<radialGradient\
id=\x22radialGradi\
ent2919\x22 cx=\x226.7\
027\x22 cy=\x2273.616\x22\
r=\x227.2284\x22 grad\
ientTransform=\x22m\
atrix(1.579 0 0 \
.48488 1.4162 -1\
5.199)\x22 gradient\
Units=\x22userSpace\
OnUse\x22>\x0a <stop\
id=\x22stop10693\x22 \
offset=\x220\x22/>\x0a \
<stop id=\x22stop10\
695\x22 style=\x22stop\
-opacity:0\x22 offs\
et=\x221\x22/>\x0a </rad\
ialGradient>\x0a <\
linearGradient i\
d=\x22linearGradien\
t3272\x22 x1=\x22302.2\
9\x22 x2=\x22308.65\x22 y\
1=\x2258.443\x22 y2=\x226\
0.768\x22 gradientT\
ransform=\x22matrix\
(.65225 0 0 .645\
94 -194.17 -32.6\
29)\x22 gradientUni\
ts=\x22userSpaceOnU\
se\x22>\x0a <stop id\
=\x22stop7383\x22 styl\
e=\x22stop-color:#b\
abdb6\x22 offset=\x220\
\x22/>\x0a <stop id=\
\x22stop7385\x22 style\
=\x22stop-color:#ff\
f\x22 offset=\x221\x22/>\x0a\
</linearGradie\
nt>\x0a <linearGra\
dient id=\x22linear\
Gradient3274\x22 x1\
=\x22326.6\x22 x2=\x22317\
.16\x22 y1=\x2275.575\x22\
y2=\x2288.006\x22 gra\
dientTransform=\x22\
matrix(.68166 0 \
0 .67984 -203.21\
-34.645)\x22 gradi\
entUnits=\x22userSp\
aceOnUse\x22>\x0a <s\
top id=\x22stop7423\
-4\x22 style=\x22stop-\
color:#fff\x22 offs\
et=\x220\x22/>\x0a <sto\
p id=\x22stop7425-0\
\x22 style=\x22stop-co\
lor:#fff;stop-op\
acity:0\x22 offset=\
\x221\x22/>\x0a </linear\
Gradient>\x0a <lin\
earGradient id=\x22\
linearGradient32\
76\x22 x1=\x2297.442\x22 \
x2=\x2290.221\x22 y1=\x22\
35.152\x22 y2=\x2235.0\
79\x22 gradientTran\
sform=\x22matrix(.4\
1146 -.41048 .35\
988 .35903 -37.3\
14 39.996)\x22 grad\
ientUnits=\x22userS\
paceOnUse\x22>\x0a <\
stop id=\x22stop495\
8\x22 style=\x22stop-c\
olor:#f8b17e\x22 of\
fset=\x220\x22/>\x0a <s\
top id=\x22stop4960\
\x22 style=\x22stop-co\
lor:#e35d4f\x22 off\
set=\x22.26238\x22/>\x0a \
<stop id=\x22stop\
4962\x22 style=\x22sto\
p-color:#c6262e\x22\
offset=\x22.66347\x22\
/>\x0a <stop id=\x22\
stop4964\x22 style=\
\x22stop-color:#690\
b54\x22 offset=\x221\x22/\
>\x0a </linearGrad\
ient>\x0a <linearG\
radient id=\x22line\
arGradient3278\x22 \
x1=\x2229.278\x22 x2=\x22\
36.001\x22 y1=\x2234.0\
23\x22 y2=\x2227.273\x22 \
gradientTransfor\
m=\x22matrix(.50011\
0 0 .49926 -1.2\
551 -.71661)\x22 gr\
adientUnits=\x22use\
rSpaceOnUse\x22>\x0a \
<stop id=\x22stop2\
492\x22 style=\x22stop\
-color:#791235\x22 \
offset=\x220\x22/>\x0a \
<stop id=\x22stop24\
94\x22 style=\x22stop-\
color:#dd3b27\x22 o\
ffset=\x221\x22/>\x0a </\
linearGradient>\x0a\
<linearGradien\
t id=\x22linearGrad\
ient3280\x22 x1=\x22-6\
.3078\x22 x2=\x22-9.77\
47\x22 y1=\x2244.229\x22 \
y2=\x2244.14\x22 gradi\
entTransform=\x22ma\
trix(.25022 -.24\
951 .36299 .3616\
1 1.7511 -2.1529\
)\x22 gradientUnits\
=\x22userSpaceOnUse\
\x22>\x0a <stop id=\x22\
stop6223\x22 offset\
=\x220\x22/>\x0a <stop \
id=\x22stop6225\x22 st\
yle=\x22stop-opacit\
y:0\x22 offset=\x221\x22/\
>\x0a </linearGrad\
ient>\x0a </defs>\x0a \
<path id=\x22path10\
689\x22 d=\x22m23.414 \
20.495c0 1.9357-\
5.1102 3.5049-11\
.414 3.5049-6.30\
37 0-11.414-1.56\
92-11.414-3.5049\
0-1.9357 5.1102\
-3.5049 11.414-3\
.5049 6.3037 0 1\
1.414 1.5692 11.\
414 3.5049z\x22 sty\
le=\x22fill:url(#ra\
dialGradient2919\
);opacity:.3\x22/>\x0a\
<path id=\x22path6\
\x22 d=\x22m21.785 9.6\
573h-0.82852c-0.\
22361 0-0.48217-\
0.18578-0.55326-\
0.39734-4e-3 -0.\
010999-0.51419-1\
.2441-0.51419-1.\
2441-0.105-0.210\
87-0.05423-0.524\
17 0.1038-0.6821\
1l0.5858-0.58484\
c0.13462-0.13457\
0.20898-0.31399\
0.20898-0.50526\
0-0.19128-0.074\
37-0.3707-0.2089\
8-0.50509l-2.306\
5-2.3027c-0.1346\
2-0.13439-0.3143\
3-0.20846-0.5059\
3-0.20864-0.1915\
9 0-0.37131 0.07\
4071-0.50593 0.2\
0864l-0.5858 0.5\
8484c-0.15803 0.\
15759-0.47201 0.\
20846-0.67187 0.\
10879-0.01119-0.\
0051563-1.2463-0\
.51437-1.2463-0.\
51437-0.2231-0.0\
75102-0.40918-0.\
33323-0.40918-0.\
55648v-0.82733c-\
5.17e-4 -0.39407\
-0.32156-0.71459\
-0.71611-0.71459\
h-3.2618c-0.3947\
2 0-0.71577 0.32\
052-0.71577 0.71\
459v0.82733c0 0.\
22324-0.18609 0.\
48138-0.39799 0.\
55235-0.011018 0\
.004125-1.2461 0\
.51334-1.2461 0.\
51334-0.21122 0.\
10483-0.52521 0.\
053964-0.68323-0\
.1038l-0.5858-0.\
58466c-0.13479-0\
.13456-0.3145-0.\
20864-0.5061-0.2\
0864-0.19159 0-0\
.37131 0.074243-\
0.50593 0.20864l\
-2.3067 2.3027c-\
0.27904 0.27858-\
0.27904 0.73177 \
0 1.0104l0.5858 \
0.58484c0.15785 \
0.15777 0.20881 \
0.47124 0.10897 \
0.67076-0.005163\
7 0.011171-0.515\
39 1.2443-0.5153\
9 1.2443-0.07522\
6 0.22273-0.3336\
1 0.40834-0.5572\
2 0.40834h-0.613\
86c-0.39455 1.71\
6e-4 -0.93043 0.\
32069-0.93043 0.\
71476v3.2562c0 0\
.39407 0.32105 0\
.71459 0.71577 0\
.71459h0.82869c0\
.22361 0 0.48217\
0.18561 0.55309\
0.39734 0.00413\
0.011 0.51436 1\
.2441 0.51436 1.\
2441 0.10501 0.2\
1087 0.054053 0.\
52417-0.1038 0.6\
8211l-0.5858 0.5\
8484c-0.27904 0.\
27858-0.27904 0.\
73177 0 1.0104l2\
.3067 2.3027c0.1\
3462 0.13439 0.3\
1433 0.20864 0.5\
0593 0.20864 0.1\
9177 0 0.37148-0\
.07425 0.5061-0.\
20864l0.5858-0.5\
8484c0.15803-0.1\
5759 0.47201-0.2\
0846 0.67187-0.1\
0879 0.01119 0.0\
052 1.2463 0.514\
55 1.2463 0.5145\
5 0.22292 0.0751\
0.40901 0.33306\
0.40901 0.5563v\
0.82716c0 0.3940\
7 0.32105 0.7145\
9 0.71577 0.7145\
9h3.2616c0.39455\
0 0.7156-0.3205\
2 0.7156-0.71459\
v-0.82716c0-0.22\
324 0.18609-0.48\
137 0.39816-0.55\
235 0.01102-4e-3\
1.2461-0.51334 \
1.2461-0.51334 0\
.21122-0.10483 0\
.52538-0.05413 0\
.68323 0.10363l0\
.5858 0.58484c0.\
13462 0.13439 0.\
31433 0.20864 0.\
50593 0.20864s0.\
37131-0.07425 0.\
50593-0.20864l2.\
3065-2.3027c0.13\
462-0.13439 0.20\
898-0.31381 0.20\
898-0.50509 0-0.\
19128-0.07436-0.\
3707-0.20898-0.5\
0509l-0.5858-0.5\
8484c-0.15803-0.\
15777-0.20881-0.\
47124-0.10896-0.\
67076 0.0052-0.0\
1117 0.51539-1.2\
443 0.51539-1.24\
43 0.07522-0.222\
56 0.33361-0.408\
34 0.55722-0.408\
34h0.82852c0.394\
73 0 0.71577-0.3\
2052 0.71577-0.7\
1459v-3.2566c5.6\
2e-4 -0.39407-0.\
32053-0.71459-0.\
71525-0.71459zm-\
9.7849 5.3831c-1\
.6792 0-3.0455-1\
.3639-3.0455-3.0\
404 0-1.6765 1.3\
663-3.0405 3.045\
5-3.0405 1.6794 \
0 3.0454 1.364 3\
.0454 3.0405 0 1\
.6767-1.366 3.04\
04-3.0454 3.0404\
z\x22 style=\x22fill:#\
030303;opacity:.\
1\x22/>\x0a <path id=\x22\
path8\x22 d=\x22m21.78\
5 9.6533h-0.8285\
2c-0.22361 0-0.4\
8217-0.18612-0.5\
5326-0.39802-4e-\
3 -0.010999-0.51\
419-1.246-0.5141\
9-1.246-0.105-0.\
21121-0.054226-0\
.52503 0.1038-0.\
68314l0.5858-0.5\
8587c0.13462-0.1\
3474 0.20898-0.3\
145 0.20898-0.50\
612 0-0.19145-0.\
07437-0.37122-0.\
20898-0.50595l-2\
.3065-2.3065c-0.\
13462-0.13474-0.\
31433-0.20898-0.\
50593-0.20898-0.\
19159 0-0.37131 \
0.074243-0.50593\
0.20898l-0.5858\
0.58569c-0.1580\
3 0.15794-0.4720\
1 0.20881-0.6718\
7 0.10896-0.0111\
9-0.0051562-1.24\
63-0.51523-1.246\
3-0.51523-0.2231\
-0.075274-0.4091\
8-0.33375-0.4091\
8-0.55734v-0.828\
7c-5.17e-4 -0.39\
476-0.32156-0.71\
579-0.71611-0.71\
579h-3.2618c-0.3\
9472 0-0.71577 0\
.32103-0.71577 0\
.71579v0.8287c0 \
0.22359-0.18609 \
0.48224-0.39799 \
0.55321-0.011018\
0.004125-1.2461\
0.5142-1.2461 0\
.5142-0.21122 0.\
10501-0.52521 0.\
054136-0.68323-0\
.1038l-0.5858-0.\
58569c-0.13479-0\
.13474-0.3145-0.\
20898-0.5061-0.2\
0898-0.19159 0-0\
.37131 0.074415-\
0.50593 0.20898l\
-2.3067 2.3065c-\
0.27904 0.2791-0\
.27904 0.73315 0\
1.0121l0.5858 0\
.58587c0.15785 0\
.15794 0.20881 0\
.47192 0.10897 0\
.67197-0.0051637\
0.011171-0.5153\
9 1.2463-0.51539\
1.2463-0.075226\
0.22307-0.33361\
0.40902-0.55722\
0.40902h-0.6138\
6c-0.39455-1.717\
e-4 -0.93043 0.3\
2086-0.93043 0.7\
1562v3.2617c0 0.\
39459 0.32105 0.\
71562 0.71577 0.\
71562h0.82869c0.\
22361 0 0.48217 \
0.18595 0.55309 \
0.39802 0.00413 \
0.011 0.51436 1.\
246 0.51436 1.24\
6 0.10501 0.2112\
2 0.054053 0.525\
2-0.1038 0.68331\
l-0.5858 0.58587\
c-0.27904 0.2789\
3-0.27904 0.7331\
5 0 1.0121l2.306\
7 2.3065c0.13462\
0.13474 0.31433\
0.20898 0.50593\
0.20898 0.19177\
0 0.37148-0.074\
24 0.5061-0.2089\
8l0.5858-0.5857c\
0.15803-0.15794 \
0.47201-0.20898 \
0.67187-0.10896 \
0.01119 5e-3 1.2\
463 0.51523 1.24\
63 0.51523 0.222\
92 0.07528 0.409\
01 0.33358 0.409\
01 0.55734v0.828\
53c0 0.39476 0.3\
2105 0.71579 0.7\
1577 0.71579h3.2\
616c0.39455 0 0.\
7156-0.32103 0.7\
156-0.71579v-0.8\
2853c0-0.22376 0\
.18609-0.48224 0\
.39816-0.55339 0\
.01102-4e-3 1.24\
61-0.5142 1.2461\
-0.5142 0.21122-\
0.10501 0.52538-\
0.05396 0.68323 \
0.1038l0.5858 0.\
5857c0.13462 0.1\
3474 0.31433 0.2\
0898 0.50593 0.2\
0898s0.37131-0.0\
7424 0.50593-0.2\
0898l2.3065-2.30\
65c0.13462-0.134\
57 0.20898-0.314\
33 0.20898-0.505\
95 0-0.19162-0.0\
7436-0.37139-0.2\
0898-0.50595l-0.\
5858-0.58587c-0.\
15803-0.15794-0.\
20881-0.4721-0.1\
0896-0.6718 0.00\
52-0.01134 0.515\
39-1.2463 0.5153\
9-1.2463 0.07522\
-0.22324 0.33361\
-0.4092 0.55722-\
0.4092h0.82852c0\
.39473 0 0.71577\
-0.32103 0.71577\
-0.71562v-3.2617\
c5.62e-4 -0.3947\
6-0.32053-0.7157\
9-0.71525-0.7157\
9v0zm-9.7849 5.6\
275c-1.8218 0-3.\
3038-1.4795-3.30\
38-3.2981 0-1.81\
88 1.482-3.2983 \
3.3038-3.2983 1.\
8216 0 3.3036 1.\
4795 3.3036 3.29\
83 0 1.8186-1.48\
2 3.2981-3.3036 \
3.2981z\x22 style=\x22\
fill:#626262;str\
oke-linecap:roun\
d;stroke-linejoi\
n:round;stroke-w\
idth:.999;stroke\
:#818181\x22/>\x0a <pa\
th id=\x22path15\x22 d\
=\x22m21.769 9.8498\
h-0.82748c-0.305\
9 0-0.63417-0.23\
579-0.73161-0.52\
554l-0.50868-1.2\
298c-0.13651-0.2\
7222-0.07196-0.6\
7025 0.1446-0.88\
645l0.5846-0.583\
8c0.0988-0.09864\
7 0.15338-0.2302\
9 0.15338-0.3710\
4 0-0.14058-0.05\
44-0.27222-0.153\
38-0.37087l-2.30\
26-2.2988c-0.098\
46-0.098647-0.23\
067-0.15295-0.37\
131-0.15295-0.14\
081 0-0.27284 0.\
054307-0.37166 0\
.15295l-0.5846 0\
.5838c-0.21656 0\
.21603-0.61524 0\
.28065-0.88912 0\
.14385l-1.2317-0\
.50801c-0.2892-0\
.096757-0.52555-\
0.42432-0.52555-\
0.72988v-0.82595\
c0-0.28924-0.235\
66-0.52451-0.525\
38-0.52451h-3.25\
64c-0.28989 0-0.\
52555 0.23527-0.\
52555 0.52451v0.\
82595c0 0.30556-\
0.23618 0.63313-\
0.52641 0.7304 0\
0-1.2319 0.5080\
1-1.2319 0.50801\
-0.27267 0.13611\
-0.67153 0.07166\
5-0.88791-0.1443\
6l-0.58477-0.583\
8c-0.09881-0.098\
647-0.23067-0.15\
295-0.37166-0.15\
295-0.14081 0-0.\
27267 0.054307-0\
.37148 0.15295l-\
2.3027 2.299c-0.\
20485 0.20451-0.\
20485 0.53723 0 \
0.74174l0.58477 \
0.5838c0.21638 0\
.21603 0.28111 0\
.61422 0.14408 0\
.88765l-0.50885 \
1.2298c-0.096916\
0.28872-0.42485\
0.52434-0.73109\
0.52434h-0.6123\
1c-0.28558 0-0.7\
4021 0.24009-0.7\
4021 0.52468v3.2\
509c0 0.28941 0.\
23566 0.52468 0.\
52538 0.52468h0.\
82732c0.30607 0 \
0.63417 0.23579 \
0.73143 0.52572l\
0.50902 1.2298c0\
.13634 0.27222 0\
.071784 0.67025-\
0.1446 0.88645l-\
0.58477 0.58363c\
-0.20485 0.20451\
-0.20485 0.5374 \
0 0.74174l2.3027\
2.299c0.098638 \
0.09864 0.23067 \
0.15312 0.37148 \
0.15312 0.14081 \
0 0.27285-0.0543\
1 0.37166-0.1531\
2l0.58477-0.5836\
3c0.21638-0.2162\
0.61524-0.28064\
0.88912-0.14402\
0 0 1.2319 0.50\
784 1.2319 0.507\
84 0.2892 0.0967\
5 0.52521 0.4244\
9 0.52521 0.7300\
6v0.82578c0 0.28\
941 0.23566 0.52\
468 0.52555 0.52\
468h3.2562c0.289\
72 0 0.52538-0.2\
3528 0.52538-0.5\
2468v-0.82578c0-\
0.30556 0.23635-\
0.63347 0.52658-\
0.7304 0-1.71e-4\
1.2319-0.50802 \
1.2319-0.50802 0\
.27267-0.13611 0\
.67136-0.07166 0\
.88791 0.14454l0\
.5846 0.58363c0.\
09881 0.09864 0.\
23084 0.15312 0.\
37166 0.15312 0.\
14064 0 0.27285-\
0.05431 0.37131-\
0.15312l2.3028-2\
.299c0.09881-0.0\
9848 0.15338-0.2\
3029 0.15338-0.3\
7087 0-0.14058-0\
.0544-0.27222-0.\
15338-0.37087l-0\
.5846-0.58363c-0\
.21656-0.2162-0.\
28111-0.6144-0.1\
4426-0.88782l0.5\
0885-1.2296c0.09\
692-0.28872 0.42\
519-0.52451 0.73\
109-0.52451h0.82\
749c0.28971 0 0.\
52538-0.23528 0.\
52538-0.52468v-3\
.2509c-1.71e-4 -\
0.28924-0.23584-\
0.52468-0.52556-\
0.52468zm-9.7687\
7.4777c-2.933 0\
-5.3192-2.3823-5\
.3192-5.3104 0-2\
.9281 2.3862-5.3\
104 5.3192-5.310\
4 2.933 0 5.3192\
2.3823 5.3192 5\
.3104 0 2.9281-2\
.3862 5.3104-5.3\
192 5.3104z\x22 sty\
le=\x22fill:url(#li\
nearGradient2914\
)\x22/>\x0a <path id=\x22\
path28\x22 d=\x22m12 5\
.6097c-3.6052 0-\
6.5383 2.9283-6.\
5383 6.5276 0 3.\
5993 2.9331 6.52\
76 6.5383 6.5276\
3.6052 0 6.5383\
-2.9283 6.5383-6\
.5276 0-3.5993-2\
.9331-6.5276-6.5\
383-6.5276zm0 11\
.329c-2.656 0-4.\
8089-2.1491-4.80\
89-4.8009 0-2.65\
18 2.1528-4.8009\
4.8089-4.8009 2\
.656 0 4.8089 2.\
1493 4.8089 4.80\
09 0 2.6517-2.15\
28 4.8009-4.8089\
4.8009z\x22 style=\
\x22fill:url(#linea\
rGradient2909);s\
troke:url(#linea\
rGradient2911)\x22/\
>\x0a <path id=\x22pat\
h35\x22 d=\x22m12 6.38\
66c-3.1053 0-5.6\
225 2.5131-5.622\
5 5.6133 0 3.100\
2 2.5173 5.6133 \
5.6225 5.6133 3.\
1053 0 5.6225-2.\
5131 5.6225-5.61\
33 0-3.1002-2.51\
73-5.6133-5.6225\
-5.6133zm0 9.325\
3c-2.0537 0-3.71\
81-1.6619-3.7181\
-3.712 0-2.0503 \
1.6645-3.712 3.7\
181-3.712 2.0535\
0 3.7181 1.6617\
3.7181 3.712 0 \
2.0501-1.6646 3.\
712-3.7181 3.712\
z\x22 style=\x22fill:u\
rl(#linearGradie\
nt2904);stroke-w\
idth:.18122;stro\
ke:url(#linearGr\
adient2906)\x22/>\x0a \
<g id=\x22g3786\x22 tr\
ansform=\x22matrix(\
-1 0 0 1 23 1)\x22>\
\x0a <path id=\x22pat\
h6912\x22 d=\x22m4.824\
4 0.50034c-0.511\
11-0.13594-0.240\
26 0.47542-0.076\
815 0.68662 0.46\
91 0.84968 0.961\
87 1.6876 1.4158\
2.5448 0.12566 \
0.61453-0.41577 \
1.1076-0.86038 1\
.4398-0.65341 0.\
43227-1.505 0.59\
772-2.303 0.3559\
1-0.81412-0.8560\
8-1.2952-1.7904-\
1.8859-2.8307-0.\
4784-0.69669-0.5\
4102 0.42017-0.5\
6962 0.73737-0.1\
6385 1.4212 0.10\
903 3.0405 1.232\
6 4.0258 0.8637 \
0.7669 2.0914 1.\
0617 3.2192 0.85\
7 1.013 0.31818 \
3.1513 1.5719 3.\
9046 2.3299 0.69\
182-0.73541 1.02\
43-0.98074 1.716\
1-1.7161-0.67212\
-0.66109-1.8397-\
2.6827-2.186-3.5\
625 0.33904-1.24\
07-0.12247-2.606\
2-1.06-3.4661-0.\
66019-0.63244-1.\
6664-1.1469-2.54\
66-1.4016z\x22 styl\
e=\x22enable-backgr\
ound:new;fill-ru\
le:evenodd;fill:\
#c8c8c8;stroke-d\
ashoffset:.3612;\
stroke-linecap:r\
ound;stroke-line\
join:round;strok\
e-width:1.1;stro\
ke:#606060\x22/>\x0a \
<path id=\x22path73\
79\x22 d=\x22m4.808 0.\
78125c0.67241 1.\
5774 2.4588 3.10\
03 1.0207 4.2187\
-0.66246 0.63035\
-1.5745 1.1578-2\
.4987 1.0709-0.3\
5402-0.028886-0.\
74872-0.24705-0.\
88708-0.56811-0.\
46019-0.82286-1.\
0923-1.7864-1.55\
25-2.6092-0.0561\
37 1.5783-0.2479\
4 3.1645 0.93794\
4.2928 1.1351 1\
.0866 3.1394 1.3\
501 4.523 0.203 \
0.95456-0.63747 \
1.7013-1.7161 1.\
6477-2.9014-0.00\
51065-0.94782-0.\
53224-1.8138-1.2\
442-2.4084-0.547\
22-0.47281-1.263\
7-1.0611-1.9468-\
1.2984z\x22 style=\x22\
enable-backgroun\
d:new;fill-rule:\
evenodd;fill:url\
(#linearGradient\
3272)\x22/>\x0a <path\
id=\x22path4756\x22 d\
=\x22m7.585 5.3454c\
0.30248-1.4355-0\
.28632-2.3811-1.\
3199-3.3454 0.41\
379 1.3115 0.558\
19 2.1436-0.3739\
5 3.4362-1.7428 \
1.2519-3.6494 1.\
3403-4.4835-0.76\
865 0.076844 2.3\
133 1.2802 3.101\
1 3.554 3.0796\x22 \
style=\x22enable-ba\
ckground:new;fil\
l:none;opacity:.\
7;stroke-dashoff\
set:.3612;stroke\
-linecap:round;s\
troke-linejoin:r\
ound;stroke:url(\
#linearGradient3\
274)\x22/>\x0a <path \
id=\x22path6899\x22 d=\
\x22m8.443 10.409 9\
.7053 10.622c0.6\
9781 0.69616 1.8\
925 0.60502 2.69\
03-0.19091 0.797\
82-0.79593 0.889\
18-1.9878 0.1913\
6-2.6839l-10.652\
-9.6813-1.935 1.\
9341zm10.334 8.3\
749c0.39662-0.39\
568 1.0444-0.395\
68 1.441-3e-6 0.\
39662 0.39568 0.\
39662 1.0413 0 1\
.437-0.39662 0.3\
9567-1.1214 0.47\
467-1.518 0.079-\
0.39662-0.39568-\
0.31962-1.1203 0\
.077-1.516z\x22 sty\
le=\x22enable-backg\
round:new;fill:u\
rl(#linearGradie\
nt3276);stroke-d\
ashoffset:.7;str\
oke-linecap:roun\
d;stroke-linejoi\
n:round;stroke-w\
idth:.99963;stro\
ke:url(#linearGr\
adient3278)\x22/>\x0a \
<path id=\x22rect6\
533\x22 d=\x22m10.684 \
10.736c0.27724-0\
.27646 0.74349-0\
.2569 1.0454 0.0\
4386l6.3286 5.63\
84c0.30191 0.300\
76-0.01223 1.098\
9-0.28948 1.3753\
-0.27724 0.27646\
-1.0777 0.59048-\
1.3796 0.28972l-\
5.6604-6.3054c-0\
.30191-0.30076-0\
.32177-0.76546-0\
.04452-1.0419z\x22 \
style=\x22fill:url(\
#linearGradient3\
280);opacity:.23\
106\x22/>\x0a </g>\x0a</s\
vg>\x0a\
\x00\x00-\xa2\
\x00\
\x01\xd3\x12x\x9c\xed}\xdbr#9\x92\xe5{\x7f\x05\
W\xf9\xd2e+\x85p\xbf\xa8*k\xac\xb7\xdbzl\
\xd6\xb2g\xd7\xa6\xbam\x1f\xd7(\x92\xcaT\x97R\xd4\
\x90T\xde\xbe~\x8f;\x82$\x82\x01R\x94\x82Lq\
'\x83UeEz\x00\x01\xc0q\xdc\xfd8\x10\x0a\xfc\
\xf2/_>\xde\x0d>Mf\xf3\xdb\xe9\xfd\xdb3Y\
\x89\xb3\xc1\xe4~4\x1d\xdf\xde\xbf\x7f{\xf6\x8f\xbf\xff\
\xf5\x22\x9c\x0d\xe6\x8b\xe1\xfdxx7\xbd\x9f\xbc=\xbb\
\x9f\x9e\xfd\xcb\xaf\x7f\xf8\xe5\xbf]\x5c\x0c\xfe<\x9b\x0c\
\x17\x93\xf1\xe0\xf3\xed\xe2\xc3\xe0\xdf\xee\x7f\x9f\x8f\x86\x0f\
\x93\xc1\x1f?,\x16\x0fW\x97\x97\x9f?\x7f\xaenk\
a5\x9d\xbd\xbf\xfcipq\x81\x9a\xf3O\xef\xff0\
\x18\x0c\xd0\xec\xfd\xfcj<z{V\x97\x7fx\x9c\xdd\
q\xb9\xf1\xe8rr7\xf98\xb9_\xcc/e%/\
\xcf\xd6\xc5G\xeb\xe2\x9f'\xd7\xd5l2\x9f>\xceF\
\xe9\xf6\xa3Q^r6\xbeY\x17EO>k.$\
c\x8c\x97B]*u\x81\x12\x17\xf3\xaf\xf7\x8b\xe1\x97\
\x8b\xfb\xf9\x9b\xac*:X\xaa\xaa\x84\x10\x97\xb8\xb6.\
\xb9_\xa9\xab/wP\xc3\xd6\xce\xf0\xd5\xbcu\xa8\xfe\
\x01\xff\xad*,\x05U\x1a\xeb\x0djN\xaa\xfb\xc9\xe2\
\xf2/\x7f\xff\xcb\xea\xe2\x85\xa8\xc6\x8bqv\x9b\xa5\xe6\
\x1b\xed6\xa6\xe3~\xf8q2\x7f\x18\x8e&\xf3\xcb\xa5\
\x9c\xeb\x7f\xbe\x1d/>\xbc=3\xa1\x12\xfcy\xf8\xc2\
\xe2\x0f\x93\xdb\xf7\x1f\x16m\xf9\xed\xf8\xed\x19\xc6\xab\xb4\
\xf2\xfc{\xd9\xa3\xab\x15\xa6D\xa5U*Z7\x93_\
2\xb6Yk<\x1d]\x0f\xe7\xe8\xf6\xe5\x87\xe9\xc7\xc9\
\xe5xz=\xf9z\xf9\xbfg\xd3\x7fNF\x80\xc3\xfb\
{\x08/nG\xd3\xfb\x8b\xc5\x07@\xe4\x12\xf7\xbb\x1b\
^\xdf\xe1\xcbb\xb8x\x9c\xb7\xeeE\x83\x04f'\x8b\
\xcf\xd3\xd9\xef\x17\xb7\xe3\xbbI\xb5\x9c\x9cUo\xa6\x8f\
\x8b\x87\xc7\xc5\xff\x9d|YL\xeeS\xb7\xa0\x9eLW\
|\x99\xaa\xaddg\xbf\xe2\x06\xbf\x8c'7s\xbaQ\
\xd2\x01\xfd\xd2|\x01\x970\xa7\x93\xe1\xec_g\xc3\xf1\
-`\x9c\x0a\xa5b\xcd+\xdeyQ\xd7A\xad\xf9b\
\xfa\xb0,\x8ba,\xbe\xde\xa1\xef$\xbc\x18M\xef\xa6\
\xb3\xab7\xda8;4?\xb3h\x8a\xa9\xbb]|\xbd\
\x92g\xeb*\xd3\x9b\x9b\xf9\x04s$2\x19\xcf\x0f*\
\xa0)u6\xb8\xdc\xbf1%\xcc0\xf8\xa7\x1b\x93\xe5\
\xc6\xcc\xaa\xb1_.\x9b\x83\xde\xad\xa3\xe5\xac\xa0\x17w\
\x98\xf3\xb7g\xc3\xbb\xcf\xc3\xaf\xf3\xb3\xedJTZ\xf8\
g(\xd1\x0e\xfd\xb0\xa5\xc4\x9f\xf7\xd4\x22\xda\x8a\xcf\xd2\
b\xa95Qj\xad\xa4F\xa5\xa5|\xa1\x1a[Z\x92\
\xd2\x88\xadX[6H\x85\xd4\x13\xaa(\x0c29\x84\
\x96J\xb7\xea)o\xce<\xa1\x8b=\x9b\x13?\x1fJ\
QN\x99\xf5p\xb7\xc2\xf1\x89\x91\xe1&/\xd0\xe3\x0d\
\x7f\x9e\xafG\xb4\xf6\x025\x96Z;\x9c\x1a\xad\xd4[\
\xadr\xd9o\x94\x89\xcf\xd7\xd2\x84>\xa3\xe7k\xc9J\
\xf3\x94\xff*\xb5\xe6\xf0\x8f\xde\xdaZW-)e;\
\x83\x0d7\xf1\xcfV\xe3\xf5\xddp\xf4\xfb\x0b\x94\xa8T\
i\xcav+\xb1\xd0\xd6\xe1\x80\xa6C\x8c\x9dU\xa8\xa3\
x*\x90nE\xe2\xd3J\xc3\xdd\xf5\xf3\x917\xd6c\
?\xba9\x98\x9a\xe2\xf6(\x99\xf5\xf3\x05\xf6\xf8R\xaf\
\xa5\xa3|\x81=\x1e\xd7k\x19\x19\xddSZB\x99\xf0\
\xbd\xcc\xcd \x9fxUs{>\x1d\xb3\xc2=\x83\xd3\
\x96\xf5\xb2\x1f\x19CK\xcf\xa2\xb4e\xbd\xecG\xc5\xd0\
\xd6\x93\x8c\x96~\x0d\xef6U8\xc3\x0dc\xb5\xf6\xf2\
7_\x91D\xf9J\x18\xab\xe5\xda\xdan\xbe\xbc=S\
\xa2\x8aR[\xb7F\xd7\xa8XvT,\xfb\xben\xf7\
\xef\xb3\xe1\xfd\x1cY\xe2\xc7\xb7g\x1f\x87\x8b\xd9\xed\x97\
?\xcaJ\x07\xe5\x84>\xd7\x95\x0d\x08\xb9ara\xcf\
\xc5\xb9\xa8\xa4\x97\x16bS)/\xb4t\xe7\x0a\xe9\x7f\
\xf4\xea\xa7\xd6=\xffq\x7f\xbb@\xba\xfb8\x9f\xcc~\
\xa3\x94\xf1\x7f\xdd\xffc>i`\xa09v\xad3^\
\xc7\x19\xee\xd5\x87\xd9\x04\x19\xf9\x9b\x82Sz\x12k+\
\xb5\xf7\x0a\xce\x14\xacz\x05\x1fW\xc1\xa2W\xf01\x15\
\xac\xb2(\xda+\xf8(\x0av\xbd\x82\x8f\xab\xe0>\xc8\
\x1dY\xc1}\x90[+\xf8BT\x1e6\x1d\x0e\xa9`\
\xdf\xbb\x88#+\xb8w\x11GVp\xef\x22\x8e\xac\xe0\
\x9e\x07\x1fW\xc1\xae\xe7\xc1GVp\x1f\xe4\x8e\xac\xe0\
>\xc8e\x0a\x0eI\xc1\xe6\x80\x1a\x96\xd6\xf6\x1a>\xb2\
\x86{/|d\x0d;\xd9k\xf8\xc8\x1a\xee\xfd\xf0\xb1\
5\xec{\x0d\x1fW\xc3}\xbaqt\x0d\xeb^\xc3G\
\xd6p\x9fp\x1cY\xc3\xa1_\xf59\xb6\x86\xfb\x8c\xe3\
\xd8\x1a\xee3\x8e#k8\xf6\x19\xc7\xb15\xdcg\x1c\
\xc7\xd6p\x9fq\x1cW\xc3\xaa\x7f\xd0\xe7\xe8\x1a\xee3\
\x8eck\xb8\xcf8\x8e\xad\xe1\xd8k\xf8\xb8\x1a\x96}\
Nwl\x0d\xf79\xdd\xb15\xdc\xe7tG\xd6\xb0\xea\
s\xback\xb8\xcf\xe9\x8e\xaca\xddg\x1c\xc7\xd6p\
\x9fq\x1c[\xc3}\xc6ql\x0d\xf7\x19\xc7\x915l\
\xfa\x8c\xe3\xd8\x1a\xee3\x8eck\xf8\x18\x19G\xf9\x8f\
\xc1\xb7\x8f\x5cT\xce\xf8\x18y\xb4\x18\xaf\x17\xdeYK\
_\xb4Q&\x9e_\xc8\xca\x18o\xbd|\xee\x80\xbf\xaa\
\xb7g\xdaWQ\xfb\x0cG\x90\xa9X\xc9\xec\xc5\x19_\
%\xa6\xcfUBE\x93M\xdf\x17\xc9\x05mC}-\
\xb5\xec\x07\xd0\xbd\xde1\xb1\x07@\xa5\xaf\xa4Q\xc15\
Q\x1a\x1c\xe9/4Q\xea\x84\xad<\x0a\xc7&J[\
eG\xc5\xb2\xdb\xe7JU\xde\x1b\x0c\x88\xe7JV\xd1\
E/\x1c\xa6(DY9M(\xf5\xaa\x0a\xc1\x9a\xee\
\xe8\xe4\xb7%\xed\xa1^~e\xc0\x7f\x11\xf5^\x94\xf5\
+%\xe4N\x1dX\xbdr\xafd\xf8\x99\xea}\x81\xf1\
;c\xa5\xaa\x8d\xdf\xe1\xab\xd5\xe70<\x17\xd1\xf2\xe4\
B\x91\xfd+)\x84\xd2/\xb1\x7f\x05\xc3\xd6\xb1\xb1v\
\xa5\xd8/Kxe\xa7\x1aN\xc0\xc2\xadFGo\x1e\
j8\x81V\xd9\xe2[\x9f\xf6\x22\xb4\xfb\xbd\x1d\xea\x09\
\xa8\xee7\xf8]\x1a\x0f\xdai\xb9t\xb7\x16\xd0\x940\
a\x03\x0fh\xbc\x92\xe7\x17\xbaR\x96\xf4\x00\xe5\xafU\
\x9e\x02\xa4\x10B6\xedC\xab*\x02\xa3*4\xecC\
\xe1n\xc2\xc9\xec=\x0e\xa3b\xd9Q\xb1l\x1b\xacv\
O>\xcb\xaf\xc4;n\xa4\x0a\x08\x9aK\xb0\x1aC`\
\x15\x95\xd1\x1a\x1e\x90\xff:\xc2\x00CY\xa4\xe2\x18\x14\
\xaa\xe0\x9d\xc8\xde\x1fE\x18\xd4\xb1\x8a\x02\xa6\x1e\x1a\x18\
t\x95\xe6\xa2\xa6\x81Ax&\xcc\x8d\xd3\xf6\x05V\xbf\
\xf9b-\xb3\xd7\x96\xb0\xcd\xcd\xe0\xa5\x8a\xa4\xc1\x1b]\
\x05e}l\x8e]V\x013\xde\x8c\xc1\x08\xd5\xc1(\
\x15uc\xe8(\x0a\x87\xa8\xb3e\xa5\x1d\x14\xaa\x9e\x97\
\x18\x95\x0f\xf4\x83\xdc\xa9\x0cB\xdb\x97\xb8\xcb\xe6\x98\x10\
\x8d\xf6\xe2J\x87S\x9c\x80\xdb\x8b.o\x95<\x17\xd8\
!\x11\x15\xd9T\x9d\xa9\xacW6gs\x92\xcbJk\
b\xf6\xae\x8f]\xecS\x00u\xc1\xa7\xb8CH\x96\xa0\
\xdf\x86\x9c\x81@`t\x07\xd1\xa0\xdck\xe5*\xbdk\
\xf0\xa8Fla\xb7v\x15q\x8c\xf3\x8aFl@\xbb\
\x03\x99\xb3\x8b\xc2i\xff\x22\xba\xa9\xc1\x82\xbc\x0f\xbe\x09\
w\x98o\xc4L4M\x1dnA9\xef]\x93s\xa2\
\xac\x09&\xe4 j\xbfgJ\xed\xf7\xfe\x16~\x1f\xd5\
\x01\x98\x11\x80\xe4m\xb4\x1b\xc4HU.d\xcc\x9a\xdd\
\xben\xb2\xedQ\xa1\xdc\xa8P\xeei\x93f\xf7\x87o\
\x08!R\xbap\x10\xfa\xb3\xd7*\xdf~\xef\xd8{\x02\
\x8cO\xbe\x08kG\x07\xf8\x05\xa5;\x1c:\xf8\xe6.\
=\xd2;n'0o\x87l\xca9\x8a\xf7\xd1[\x97\
\x05v\x02\xddE$7k\xb3'-\x08\xa0\x92\xde\xd6\
\xebe\x16\xf0\x09\xcc\x17\x12e\x95\xb6y\xe0R\xa5\xc2\
\xbbg\xe6\x09\xd4u\xd1Xs\xca\xda\x5c\xc2)\xf1L\
\xf8t\xc0g\x09\xee%\xb3(\x99O\xc9\xccrs<\
\x22\xea\x9a\xae\xa3\x8d:\xa7\xdaQ\xe5\xe5\x84\xf4\xb9\x0e\
\xb9\xec(\xb78\xd5\xb2\x03n8\xeb#*\xb2\xc9\x07\
J\x8al3\x9b\xae`\xdc\xc5\x7f\xca\x9cj\x0b\xff*\
R\xb5\x9c\xd4\x1dQq\x1b4\xa0\xa4\xb9}}\xcdA\
\xe9O\x99Ymaae\xc6\xd6`w\xaf\x8a\xbdv\
\xe8\xe8`\xc4\xcfL\x8d\xca\x89\xcd\x96$\xa8\x9c05\
\x92\xab#F\x94fZY\x88(\xfap\x11\xe5e\xe9\
y9\x91.'\xdd\xe5\x04\xbd\x9c\xcc\xe7i\xff\x11\x91\
\xda\x5c\x18) U\x1f4\xdc<\x7f\xc5\xa9\xbc\x12\xb4\
e\xd5\xa8\xbc\xc2\xd4X\x8dzE\xc2\xe8\xf4\x01\x22\xce\
\xf7\xa7\x95\xc7$\x8c\x8d%\xce\x92y\x1f.\xd2\xbc`\
\x01\xbd\xbc\x88\x5c^p./N\x97\x17\xb2\x1bk\xde\
\xaf\xaa\xde\xc3\x85\xa1\x17,\xa0\x7f_\xf5\x1e\xde\xde\x9b\
\xdbK\x05{7\x87\x0cN\xcf\xdd\xaak\xed\xa3m\xd9\
n+\xec\xcbe\xdbwGDgss\xb3\x80Ns\
\xb8\xd0\xd3a\x8f\xb7\xbc\x9d\x5c\xdez.oS\x97\xb7\
\xb4\x97\x1b\xdf\xaf\xaa\xe1\x03\xa6@\xbd\x86\x8b\x1a>d\
\xaa\xd4k\xb8\xa4\xe1\xc3\xc5\xb0^\xc3E\x0d\xdb\x03\xae\
\xda\xf5\x1a.j\xb8\x8ft\xc7\xd6p\x1f\xe9\x8e\xad\xe1\
>\xd2\x1d[\xc3}\xa4;\xb2\x86]\x1f\xe9\x8e\xad\xe1\
>\xd2\x1d[\xc3}\xa4;\xb6\x86\xfbHwl\x0d\xf7\
\x91\xee\xc8\x1a\xf6}\xa4;\xb6\x86\xfbHwl\x0d\xf7\
\x91\xee\xd8\x1a\xee#\xdd\xb15\xdcG\xba#k8\xf4\
\x91\xee\xd8\x1a\xee#\xdd\xb15\xdcG\xback\xb8\x8f\
t\xc7\xd6p\x1f\xe9\x8e\xac\xe1\xd8G\xback\xb8\x8f\
t\xc7\xd6p\x1f\xe9\x8e\xad\xe1>\xd2\x1d[\xc3}\xa4\
;\xae\x86\xbd\xe8#\xdd\xb15\xfc\x1d#\xdd\xd6\xa3\x0d\
\xffK+\xf8;\x06\xba\x1fS\xc1\xdf1\xce\xfd\x98\x0a\
\xfe\x8ea\xee\x87T\xb0\xfc\x8eQ\xee\xc7Tp\x1f\xe4\
\x8e\xac\xe0>\xc8\x1dY\xc1\xdf/\xc8\x99Jy\xa1\xa5\
\xfb\xc1\x14\xfc\xfd\x82\xdc\x8f\xa9\xe0C\xbe\x12\xa4Wp\
I\xc1\xdf/\xc8\xfd\xa0\x0a\xfe~A\xee\x07Up\x1f\
\xe4\x8e\xac\xe0\x1f'\xc8\xfdr9\x9e\xdc\xcc\xf9\xdb|\
:\xbe}\xc0\x7fW\xf7\xc3\x8f\x93\xf1\xa7\xdb\xc9\xe7?\
\xac4t=\x5c\x8d\xf6a\xf8~\x02\xa5Oq\x9f7\
7\xfc\xa9/\x5cOg\xe3\xc9lyi\xfdTp\x92\
O\xa1\xb5\xdb\x05z\xb4|\xfd\xccj\x06\xe9\x86\xab\xab\
\xa2\x12\xa5\xeb\xf3\x0f\xc3\xf1\xf43\x06\xb9y\xf1\xdbt\
\xfa\xb1pOR\x1e\x86n\x83w\xab\xd7\x08\xad/\xa2\
\x1dz\x9f\x8eq\xd9\xfb\x84\xd6W\x1fg3L\xe5\xc5\
\xdd\xf0\xeb\x04\x03\xe1\xff-\xef?\xff0\xfd\xfc~F\
\x0a\xb9\x19\xde\xad4\xb2\xaaJ\x97.\xae\xaf\xa7h|\
1{l]\x1eOG\x8f\x1f\xe9\xd6\x8f\x09L\x0f_\
6K|\xbe\xbd\xc70/>\xdf\x8e\x17\x1f0*a\
[\x03\xabK|\x98\xdc\xbe\xff\x00\xd0\x87\xd0\xea\x7f]\
\x02}hi\xb2\xbe\x84\xe1\xaf\xde\x0e\xb0\xbaFC\xcb\
5\xbd}\x80\x0f\xd3\xdb{\xea~^\x82j\xa7y^\
\xcakx}\x9c,\x86\xe3\xe1b\xb8\x86\xd2Rb\xce\
\x96\x06>\xbe\xb9\xfa\x8f\xbf\xfc\xf5\xd7\x1a\xa0\xbf\x8cF\
W\xffg:\xfb}\x89W \x16\x05\x86\xd7\xd3G\x8c\
\xf7\xec\xd7\x95\xf8\x97\xf1\xe8\x8aLm\xb8\xf8\xf5\xf6#\
:~9\xff\xf4\xfe\xbf\x7f\xf9x\x07D\xaf.4\x0a\
/\xbe>L\xd67M\xb7\x9dM\xe6\xd3\xc7\xd9h\xf2\
\xf6\xec\xc3b\xf1puy\xf9\xf08\xbb\xab\xa6\xb3\xf7\
\xb8\x09\xfe\xfdxK\x95.\x7f[\xdc\xde\xdd\xfd\x1b5\
\xb2rK\xab\x9b\xde.\xee&\xbf\xfe\xfbd\xf1\x19]\
\xe6\xa6\x93\xa4Q\x08\xc3\x9d\xfc\xaa\x84\xb0\x17B_\x88\
\xc0\xc5X\xd6(5\x9aM\x86\x8b\xe9\xec\xd7\xac\x8f\xa4\
\x8b?\xbd\x07dra\xde\xf0\xbb\xe1\xc3t\xf0\xe7\xe1\
\xdd\xf0\xe3\xf0~<\x9b\xdc\x96z@F\xde\xbe\x0f\x97\
l5Iw\x9e?^\xff\x13\x1e\xb5q\x03\xd2\xd5\xff\
\x18\xbeo\x8c\xfe\xb2T\x96z|w;\x9a\xdc\xcf\x9f\
\xd65\xb7~\xfb\x09\xde\xe4\xe3\xc7\xe9\xfd\x9c\xd5^\xd7\
\x9d_^\x7f\xbd\x98\x0f/U%.[:O\xb7\xd9\
\x94\x8e\xa6\xf7\xf0\xb8\xd7\x8f\xcfU\xe1\xff\x1c\xfe\xfex\
=\xf8m1A\x8c\x98\x9d\x0f\xde=\x8e\x86\x83\xbfN\
\xe0\x03\x16\x8bg\xab\xb3\xdd\x05.Kp\xce\xe1\xfdn\
SA\x19\xc2\x9f\xa9\x9b\xa6\xe6\x1f&3@v\xfe\xa4\
\xe6?O\xae\xab\xa5\x90o=\x1a]\xfe\xc7\xe4a6\
\x1d?\x8e\x16\xb7\xd3\xfb\xa6\xca;\xde\xf8/\xb7\xf3\xa4\
\x95\xd2\x8dg\x93\xff|\xbcE\x8d\x17\xdd\xf9\xdf\xa7\x0b\
\xa8\xe4\xb0\xf7\xfc\xd3b{g;ia2\xbb\xfd\xc4\
SJh\x98\x1f\xb6\xd3\xbf}\x18\xce&\x7f\xba\xbb\xfd\
=W\x06C\xaf\x86Z\xedj/3_\xfb\xcb\xe5\xd2\
\x13\xf3\xaf\xf7k\x0f\xdd\x08y+\xdf\x7f7\xbc\x9e\xdc\
\xbd={G\x17\x07\xad\xab\xefg\xd3\xc7\x87\x8f\xd3\xf1\
\xa4\xae\xbet\xee\xefs\x9e\xf5\xde\x9a\xec\xe9\xd3\xc5\x9a\
,\xf1\xd7;x\xc4?JS\xc5`\xb5;\x97?\xad\
\xb0\xbd\xbe\x09\x02\xcd\xe2\xeb\x1d\xda\x18\xdf\xce\x1f\xd0\xce\
\xd5\xed=Q\xbb\xb3\xf5\xf5\xdd\xdd\xcd;\xa2rUu\
ha1\x9c/n'\xb3a\xa1\x09\x93\x9b\xe7\xfb|\
f\x8bc\xbf\xb0\x86\x88a\x10!{mg\xa9\xc9\xdf\
8F7\x8b\xd4M\xba&\xb0\xde\x97\xca\x84\xe7\xde|\
ko\xa5\xad\x94q\xf4.\xbd'[\xcd\x1e\xc6*\xcf\
\xd3\xa7\xe9l\xda(\xf2\xd4D<\xb3_\xbf\x5c\xbe?\
\xd8t\xb7\xe7:\xaa|\xae\x1f\x86\x8b\x0f[:\xba~\
\x7f\x91\xd6\xc6\xc7\xe5\xcb\xf5\x9c\xb5\x02\xfd\xae\xa4\x8c1\
\xd2\xeb\x0a\x95\x90Z\x99&\x0e\xd0\xd4\xdf\x06\xa0\xb5\xfc\
*\xa9A\xfdB\xd6\xc1\x9f\x06\xcb\xf7\xb0\x0el\x15\x82\
\xb6>\xe8\x81\x18H\xfc3\xa0<\xc3G#\xb4?\xdf\
\xa7x\xeb\xde\xdf\x9a3\xb2\xcc\x10f_\xe9usu\
\xe5-E\xbe\x14^\xd7\xdc(Qz\xf9l\xb3@\xe1\
M\xb4K\x95\x93\x86\xa1uSBL\x9dL\x5c\x89J\
\xff|\x03\xe6v\x05N\xf7\xc77\x9bO\xa5(\xf1\x13\
_\xbdX\x16\x97\xe9\xe7\xec\xf1nr5\xf94\xb9\x9f\
\x8e\xc7?#rM\x7f\x9f\x5c\xddO\xef'\xf5\xf7D\
\xce\xaf\xe4\xc3\x97\xa5\x80\x00\x03\x84\x5c!h,r\xd9\
?\xc1\x93\xaf\x10/&\xb3\xa5t\xdd\xd2\x0eP\xaf\x86\
O\xd4\x13i\xedl\xd44\xaeMl\xb5\x06\xad\x84\x0f\
\xceJ\x97\xc6\xce]\xdfw\x9c\xac\xa8\xf6\x0bv\x7fj\
\x8e\x9d\xde\xebJ\x9f\xa0\x9e\xaf\x02\xfeqw\x8b\xff]\
\x99\xa5l<D\xd21\x9bA\x1d\xb9\x9e\xf7R\x16[\
\x84B\x8e\x1bTD\xd0P\x01Y]\x80\xe5\x0c\xfe<\
@B\xed`c\x8eSk$\xc6\xd1\x84\x81\x96U4\
\x1e\xf1\xe5\x5c\xb9\x0a\xf0\xd1q\xa0E\x15\xac\xf5^\x9d\
+SI\xab\x83\x0fT9V\x9e\xc4\x8a*\x07\xe1\xb4\
3\x03\xe9+#\x82\xd0\x91J\x02\xd6\xca\xc0fB\xe5\
E\xb4\x16\xe5\xe8m\x8e>?_y\x03\xaa[\xac\x00\
\xea\x9f\xd0L#\xa1\x1a}\xfb\xf6\xc4To\xa9\xb4\xa5\
\xc5PP\x95\xc3\xb8\x8cWP\x00L\xdd\xcb\x10BR\
\x15\x06\xeet\xa0Q\x18\xa9\x9c\x96\xa4*\xab\x84u\xac\
*\x13\xbc\xd1\x8at\xa5\xbd\x91V\x9e\xc3,\xe9U\xa7\
\xd6']\x09\xaf\xea\xdaZ\x0b\x1f=\xe9E\xc6\xe0\x8c\
#])$\xe1A\x91\x0c\x9aT\xd2s+\xc2\x05i\
K\xd6\xfb|\xd4\xbe\xf1*\xde\x8cn\xca \x8d\xe6U\
@\xba=\x1e\x96b\x81\xd5\xc2\xad\xde\x03\x8a\x1f\xd1\x9e\
\xebJBG\xd2\xe1\x8b\x81\xa6\xa5\xfc\xa9\x10\xef\xad\x10\
{\x84\xccF\xee\xb2\x89\xa9-\xa8\x9a\x8f\xe8\xdf\xb3f\
\xc9\x15\xb6l\xf68T\x86.\x907\xf8j\x8ajZ\
U\xd6\x06D\x1e\xe0\xa3$E\x80\xb6Qi\x8bq\xc2\
\xaa`~\x80\x0c\xbdb6J\x96X\x04,\x87\xaa0\
g \x14\xd8c\xa1B\xbb\x1e2\x1b\xc8\xd2\xf4\xb9\x01\
DE\xf4A\x16e\xef\x06:\xf2K\x7fa\xb0\x90\xc2\
\xec\x9d\xa6\xee\x94\xa4\x90\x05\x0b4{j\xc7I\x5c\xd4\
\x03\xea\x98\x14Q)n\xdb9a\xc9\xab`\x08Fi\
Sw\xdc\xa3\xebp!d\x0c\xf0\x0b\x9a\x06\x08CG\
\xef\x8b\xb2wEU|\x1bl\xa8r\xc3\xa1\xcb\x9fy\
I\xed\xea\xfan8\xfa=\x8bg-7m\xf6\x8fg\
o\xa0\xd5a\xb0\x1b!\x0d\x01\xdd\x08iL|\x86\xc5\
|\x1c\xce~\x9f\xcc\x92M\xa4\xef\x17\xf3\xc5p\xb6h\
H>\xde\x8e\x1b\xbf'\xf7\xe3\x86\x15\xedoo$\x9d\
\xde\xdc\xcc'\x8b+\xd1\xb6\xc1O\xb7\xf3\xdb\xeb\xdb;\
\xfa\xc1_\xef&\x1bf\xf9\xf3\xf4\xd3dvs7\xfd\
\xbc\xbc\xde\xb0\xd3\xfd\xad\x83>\xdbm\xc3\x14m\x03D\
F9I\x96\xad+\xa1\x80\x1c\x99lCH\xa1\x04\xcc\
\x1c\x94\x95\x0a9#\xe9M\x96\xf8\x19u\x08d*\xaa\
R\xc1z-\xcf5\x9c\xb7P\x0e\xae\x14\xd8F\x0dr\
\xdd('\x8c\x17\x22\xd0\xd5\x00X{\xc5\xd8s\x22\x80\
\xf3\xd1UG\x9f\x81FC\xb8_\xd0\xdc<\xbfI\x9c\
\xf0X\xe8\xd4Sx\x84\x99\xeeB\xa1{=V\xb5\xc7\
d\xbe8\xc4\x841\xfe\xb9)\x1a\x8c\x15\xa1\xddm\xa4\
\xc3\xf7\xe3\xfd\xfaM\xaf&\x8fB\x19\x1d\x8a\xb8\xa1\xcb\
\xabpC\x01\xd3*\x9d`\xdcxz\x15\xb6q6\
I\xc1\xc3\x07\xec\xa5\x10\x86}V\xb0 \xdb\x0e[\xb7\
9\xefE\xe4\xbf\xd4b\xb6\xb7[\x1e6q6\xa3$\
<e$4\xcb\xe0\xd3\xb0\xa5q\xde\xba$U\x12I\
\x0c\xe1\x1eT\x02\xb4b]\xb2 +\x83\xfa\xf9H\xf8\
\xfc\x01sX\xc6\x81V\x9dp\xd0\xc4o\x96\xae\xee\xcb\
\x04\x97\x9f2\x1f\xb4R\xb4\xf9 E)\xa5TD\xe6\
i\x98-9x\x05\xb0hM\x91\xd8dR\xa6y\xe0\
\x85Q\xabL\x0aBh\x03\x92\x0d\x90D\x04\xcb\x08\x9e\
g2\x99\xa6\x17\xc8K\xf0P\xf2W\xf4bn\xeb\xa9\
\xa4 \xba\x18\x14{\xaf\xb5\x14\xf3\xedT4\x96d\x0a\
\xaaL2n\xc8P\xdb \x91>\xaaL\x86~V^\
)h>I\x03\x12]K\x18\xa9\xa2\x0d:\x08\xcfR\
\x17\x95q\x90\x19zO\xb2L\xcdD\x84u\x12\x09T\
e2*\x83\x03\xd9'\x7f\x88r \xc2\xa0\xf3\xc8\x5c\
\x81\x9fh\x22\xdfq)Ut\xae\x02\xb2\xe9\x01\x0dW\
\xfa\xa0\x5c\xae\x8c\xb6.7|i\x0e\xba\xb2\xf7\x0c\x9d\
c\xb8\xe0\x8fv\x87a\xbd\xfb\xf1\xdb\xd9d\xb4\xc8\xc7\
I/\xf0\x07\x0d\xa3O\x93\xe2\x7f\xa1\x0b\xac\x1c\xdft\
D\xcb\xfd,\xf0BIGT4}B\xbd\x1f\xa6p\
\x11#\x8b\xed\xd5\x00\xea\x00\x00\xaevk{s\x05@\
?c\x05\xe0\x8d\x12f\x18|9\xc7\xb0\x85\x00\xd0a\
5\xe0\xf99\xe0.\xab7m\xab\x07\x17\x88\xb0\x1b\xa3\
\xce\xe90\x07a@\x1b\x00\xfe\xb5\x14\xb3\xe0\x91FG\
\x8e4\xa0\x10\x0a\xc9\xb3<\x07W\x01\xc9\x96\xc8\x03e\
\xac\xa4\xa4[\x9fG\xb2Zm\xa2\x82\xd38\x0fD\x93\
\xeb\x9b\xe1\xa7i\xdcs\xddR\xd9(\xf6\xa3\x17Z\x9d\
\xce\xa2\xcdK\xa6)\xa5U\xdb'\xcb\x15'\xcb:\x11\
\x0d\xb9XF\x1b\xcd\x8a\xae\x94\x8e`\x87\xb9\x94\xf3\x0c\
P;r\xbc*Z\x90F\x97\xc9p\x1bO\xf9\x0c;\
\xf8$\x05i\x94\xb4\xce\x11='F\xe9N\xc4E\xc9\
%:\x01g\x0f\x19\x12\xab\xa8\x05\xbbm\x05G\x0d\xf2\
\xa9h\xe1\xc0\x12#\xa1|\xdf\xd0\x8csm\x84e)\
\x85Lk\x08\x1eI\x0edQ\x9b \x81\x13\x92ym\
\x8d\xe4\x9e{aSm\x80,\x88:\xe4\xf8\xa0\x01\x14\
\x92\xb2\xe7P)\xe4H\xf4\xce\x86L\x0a\x99\x96\x81N\
r\xa1\xfa\x1a\x19\x94c\x99\x0bFsm\x1d\xe8\xf8\x97\
T\xdb\x90g\x97i]Bz\x92\x80\x9d\x13\x96i\x84\
p\xe9>d2\xe8\xc2\x81\x01[\x0e-\xb5\x94p\x8b\
\xb6\x14\x02|\xbac-\x05[\xf6QR\xc0Qt\xec\
\x0b\xc6\x1b\xf2\x99\xf0d)\x08)\x99\xec]i\x22w\
\xc4\x87\xae\xa4d\xed\x9b\xbeg$(\xac\xfc\xdb\x0dg\
\xde^\xff@\xcan\xb4J'\x9e\xa4\xc3\x12@\xec\x80\
\x10\xfaM*G\xd2\xa3yiO\x827\xfc\xf4\xdc\x05\
\x8f\xcd\xe0\xc4\xe1\xe9\x02\xac\xbdr\xd1\x05\xbb\xc1\x0b\xbf\
\xf05\x0b\xbea\x95\xde\xb8\xb6\x0cR\xc6\x83 5\x1e\
\x85M\x9f\xe5c\x1b\x9aH\xa7\xd6\x9b\xb7^\x07*%\
\x9eJ\xb1\x0c\xe5\x84\x12\xd1}\xbf\xdc_\xef\xca\xfd\x81\
\xa4o\x93\xd9t\x87[|\x0ei\xfda\x13\xfe\xad\x09\
\x8c*\xae\x85](\xf2'H<\xe8L\x15\x02[\x10\
\x9c\xb0\x97\xe5\x99T\xd3\xca,\xdc\x8c,\x0bq\x07\xef\
\x10\x9e\x0db2\xc4\x88\xb9\xc8\xa0\xc0pA\x8e|\x90\
\xb4\xe0\x9a\xfc\xaa\xb7K\x99Fd\x0e\x95\xb1Fi\xe6\
\xc7+\xe9\x85\xd1\xc4g5\xbc\xea\x05\xad\x19\xc0\xf5\xe9\
\xacWnK_\x9f^ x\x02\xbd-\x1e\xb6k\xcd\
\xa0G\xef\xf3\xd0\xdbu.ve \xfd\x5c\xe4sQ\
2\xfaU\xe8\xd80\xfa\xa2<\x93fF_\x12\xd2\x1d\
\x10\x05+\xa9h\xc5ge\xf4\x172\x0a\x83\x22\xcaf\
V\x9f\x09s\xb3\xcf\xc4\xb9\xdd\x83\xa8Y\x0a\xb7\xbea\
\xf7\xc5\xee\xb6\xec>s\x81\x9bK\x9e[\x9d\xe8q\x97\
8T\x81?\xd3\x92\xaa\xf6N\xf2\x96\x9f0\xb4\xdb\xcd\
|3\xc2\xb9\xd1\x9e\xd5J\xca\xfcN\xd1j\x99\xc9\xa4\
\xbc\x13\x18\x9c\xb1\xbc\x0f\x16\x95s\xb4Dq\xae<-\
j\x80\xb9\x9ckA|\x5c\xd2B\x1b\xfd\xa0\x9d-\xe1\
\x0cIL\xa0\xe5\xda\xf3\xfa\x1a-\xc7\x89\xec\x07\xd1p\
\xcb\xbf\x89\xd8r\xaa\x95]E\x22\x8f\xb9\x22ZI\xeb\
!A\xd2-s\x19\xb5N\x9d\xe0T@\xd2s\xbf2\
\xef \xf8n\xd4\x8a\xb6*W\x03)\xe8\xa1L?\xf7\
\xa3\x1cfW\xf2\xbc\xe1(\x8aK\x15K\x82\xea[K\
\x15\xf3\xff|\x1c\xce&-\xaf\x91|\xc9Q\xbd\xc6\xd1\
|\xf5a\xd0]\xd8\xd0\xa5\x04\x1b\xc6*\x22m\xe8J\
)\x91\x9c$x#\xc3\xf3I\x08\xf7\xcf\xc9\x0b\xa5X\
\xc1R\xf2\xb3\x14R\xee\xa2\xe0HB&\x8a\xc8\xd35\
\xf2&\x12\x19\x83\xf4\xa1^\xbd3\x08#\x96\xd1\x08\x8f\
\xe0mL\xabwk)\xea\x08\xc5\xdb\xe9\x086>(\
6\x05\xf4Fp\xda\x85T\x15W3\xd9;N#\x03\
5ER\xb2\x01\x99\xd2H\xa4e\xc8\x19C&\x85\xdf\
Cf,h\xd5\x97\x13\xce\xa0US&\xe9\xb4\x05\xde\
\x15\x81\xf9\x84\xe8\x94\xe2\xde[\xab\xb9\x9b\x95\xa7\xb5<\
\xdaw^\xa9HW\xe4\xd2\xd0T&+\xa9r\xf7Z\
Eaknw\xda\xb6a\x17;\xd26\xd7\xda\xb6\xfe\
a\xadb\x9e>\xa3\xe5\x97\xfa\x17\xff^\x99L\xf3Z\
\xa3\xd8v\x83\xd2\x85\x15qSE\xa1#\xd8u\xf2\x93\
\xc2\xf2\x13\x12$\x95\x91\x9f\x9b \xa9\xc2u\x96Y\x22\
\xe2,\x93\xfc4I\xb3\xb6\x84\x97t\x5c\x9b\x96jR\
T\x91!P\xb1\xe0\xa5\x11:\x89Tp\xd1\x93\xccI\
\xe7\x89\xc1C\xa6\x95\xe2E\x0cHm0*$\xb7\x8d\
\xa4R:\x92\x19:.:5\xa2E\x90\x96e\xc2\xd6\
\x0e?\xab\xac\xbd\x96\xce\xd6\x95\xbd\xe1\xa6\x95\xb2\xb1\xbe\
\xa1q:5-\xe3\xaa;\xa0\x11L9H\xaaE\x94\
\xa9\xa4%##\x99@\x92P\x97\xc4m\x9c\x84\xcc\xc7\
\xa0\x9cO\xb1\x14\x0c\x92\x9d\x03I\xad\x8a\x22\xd5v6\
zj\xc7\x07A\xe7\xed\xb1\xccCw$\x82uj\xb2\
X\x12\xc1i\xf9T\x99\x1e8\xa9\x83\x15|\x94\xd6$\
\xb3\x08\xa82MA\xd4B\xf2\x0d\xd1\x09TJ2k\
x\x07\x00R\xa9\xe9Hc\x92:zv\x82\xdaqH\
\xf7\xa3b\x05y%\xc0\x8aH&0\x1c\xcf\xea\x09\xd2\
E\xc3O\x0dT\x986x\x16\x7f\xcet@\x82\x11\xb0\
\x13iHQ]'\x99\xb14p\xc8\xb4a\xff\xd7(\
\xa7=\xb4\xb5\xac\x1d\x22m@@j\x84p*\xc9h\
3\x22\xc9\x22\xe6t\xa36\x01\xa3\xae\x0c\xd8\xe8T\x90\
\x9e\x1a\xf0,\xa3\xed\xe1\xc82Go\x88\xdf\xa8\xec\x02\
\xfc\x7f]\xdbi^v\xa2M\x14\xf2\x9e$\x0b\x1a^\
<\x95\x8cF[\xee8\xfc\xb6!\xf5\xc6J\x0b\xa1e\
\xddq\x80\x13`MR0R\xc32`\x80\x9bF)\
\xb9YW\xf2\x1a1\xd7\x8d\x88%\x92\x17q\xb42`\
QDV0\x83\xbc\x22\x18\x89\xa3(\x17Y\xe6\xe8\x0f\
uXf\xbcw\xae\xa65\xcasl\x82\x14\xf4\xc6\xf1\
\xda!\xb8\x8eT\xa9\x1dL\xb2gQ\x10\x92\x180\x89\
(\xcc\xd4\xcb\xa3\x82\x83X\xa4\xbf\xbf\x12<Y\xca8\
B;\xcb\x22\xd1<\x96EiB\x1a\x1d\xe0*\x93\xc6\
\x94\x95tvd\x92zO|\x0a\xf8\xb6\xdeq+F\
\x04\xcb\xf3\xa7\xe8\x80b\xcd7\x04\xc8\x12\xd3\x87\x14\xda\
\x12<f:J\xde0\x19CO%\x0f\xcf\x18g\x94\
g~\x06\x0d\x8at?\x18xj\x17\x16J\xcb\xe5,\
t\x0cQ\xc8\xac\xd4\xb5f\x8d\x87\xdfq,30\xb8\
\xd4\x06\xecD\xa4\x86\xb5\xa3\x05\xe4\xd4\x1d\x0f\xb3\xe5\x92\
\x81\xb6EX\x0f\x06S\xe4X\x16\xd1\x8aOw\x84}\
\xd6\xf0\x86\x19\x08Z\xa7\xa7\x057\x89 J\xf3\x02h\
\x0b\x9f\xda\xb6\xda\xf3\xa3.\xfc\xd0\x98\x132\x93\xbd\xa3\
'\xbb\x90\xa7\x08z\x00\xaf)\xa5\xa7r\xd4\xca\x93F\
\x0a\xd1\x96\xb6\x0e`\xbc\xa1\xe1_m%\xd0)ar\
\xafK\x1b\x0fJ\x9a\xe4\x10M$\xeb\xa4\x87\xc3\x83\xa1\
\xa7\x186\x9cs\x00y\xd1K\xa9c\xdb\x867\xf0~\
\xd58\x0d\x082C\xcf\x9f\xc9\xcd\xda@\xb3H^\x80\
j\x0b\xae\xcd\x0fC\xf8Z\x86)b\x19\xb2.i6\
k+\xad\xecR\x0a\xa8\x99\x14\x18\x22=U\xc1\x1d\x07\
\x1eB1\xac|\x1b\xfc\x8d\xb6R\x02\xa0\xa0\x1b:\x22\
!&I3;\xc1\xfc!U!\xc2\x84\x09\xb1\xda\xd0\
m\x9bR\x1d\x8c\x91\xad\x1bl\xdc\x95\x1aS)\x01\xac\
C\xcc\xb2\xb0\xb2\xb4\xbfi\x12JV\xb3Gt1\xa2\
\x15\x93I\x99D\x82\xcb\xc7d\xd3\xb5\x94r)\x8c\xd6\
\xc7\x0c;D#\xe17\x82k\xa0\x8c\xa4P`\xb2\xac\
\x1a\x8f\xb4\xc6/hS?\xc3-\xc9$'\xa59\xc2\
Y\x8a\xc4Cf\xb6\xc0'\xb5\x12&2\x9b!\x99\xd6\
*\xea\xdc\xb8H\x88\xb8X[p\xb2B\x92!t\x85\
\xdcXI\x16]\xd3\xa6Y\x06\xcf\xe23\xeb'\xc7)\
\xadR~\xed$X\x84\xb4\xca6\xbc\x09\x87\x0b\xa0\xc2\
d~\x87C\x03\x9c\xb6\xcd\xfc\x13\xcb0\xeb\xb2\xe1\xc8\
84\xd8\xa5\xffe\x8f\xc7A\xc0\xa0\x93\x99cd\x87\
/d\xea\xe1\xca\x85\xd6\xa1\xc1\xab\xcc\xd9\xd6;\xe96\
w\xca$\x0b\x98i\xd5p\xdf,E\x086k?\xcf\
\x95\x89\x9cf\xd1\xa0Yy\x197X\x8a\x8e\xb8,\xc2\
\xa4\xeb\xce\xe7\x91\x88e\x08\x93\xa1\x11\xb3\xd2\xddE\x9a\
\xc1:\xba\xb1,y\xf4:\x066\xeb\xd6\xc12\x09C\
\xeau\x1dU\x93L\x04\x9dE\xdff\xe5e\x9cNR\
\xab|\x16\xd1\x9b%S\xe4\xdf\x901G`\x93A\xd6\
\x93\xcf\xa9\xf9\x04\x9b\x0c\xd1\xc4\xe4\xb3\x98yp\xe2\
E\x8f\xf7K\x9b1\x14\x185f\xbf\xf6\x8bK*C\
B\x17j\x8aR\x93\x1e\x92\xd1\xee^\xc6\x8dX\x047\
e\x1a,\x8a\xa406\xe3\xd7t\x8bDZ\xb2]\xae\
X\x19\xc9T\x88\xa2\xc9\xdfXJ3\x9b1=\x92I\
2\x80\x8c\x11\x92\x0cL\xc2\xeb\x06wd)=\x1c\x99\
\xb1L\xda\x15\x0b\xb1n:\x91Q\x169W/\xc3,\
i+K5\x87\xea\x15\xbf\xe5-\xb5\xe0\xeb\xb1$\x1e\
\xcc2\xcb[\x89\x1b\xb5\xe9\x81n\x9dqk\x92\x01\xad\
\xda\xaf)8\x89\xe0\xb9\xb5l0u\x92\x12\xed\xa8\x0b\
\x1a\xabH\x04\x85\xaaX;MzT\x8be\x01\xd9b\
3I \xa9\x87\x8f\xd1u\xc0\x12\xdc0=#\xba\x5c\
gB4\x0b,C\xe6\x19B\xab\xb2\xc0\xed\xe5:\xda\
\xf1\xf2\x9fS\x8dx\xa5\xf8\xf9\xd4\xb0\x8aV)?/\
9vv\xf8\x02\xf1\x1a\xc1\xc8\xd0\xf6\x19\x0a\xc0 \xa8\
\xb4\xa8\xc0\xc2\xbd\xb4M\x87\x8f\x9c8\x82+\x9aM\xa9\
@\xd0l\xdd\xa0u\xdb=\xd7\x92\xde\x8c\xf5\xd8\x8fn\
\xf6_:\xba\x1e^\x8f\xaf]9IV\xfaGH\x92\
770w.\xd3m\xae\xe7\x9b]\x0fL<\x7f=\
\xff\xf5\xf4\xfb\x9d\x17\xf4\xd7\x9b\xb5Z\x95\x9eE\xda\xf2\
\xf8\xd2\xe6\xd3N\xcd=\xe0\xafo\xcf6V\xa8\xe9\xaf\
\xa4\x04\x858IO\x887/}m\x5c: $\x0e\
\xbbY\xfc#B\xc2t\x80\x84?EH\x1cv\x07\xf6\
G\x84\x84\xeb\x00\x89x\x8a\x908\xecF\xf0\x8f\x08\x89\
\xe2C\xac{BB\xca\x13\xc4\x84}\xc6\x9e_\x8f\x89\
\x12&\x8c\xe8\x82\x09}\x8a\x98\xe8\x09fWLt!\
\x98\xf2\x14\x19\xa6\xed\x19fWLta\x98\xf2\x14)\
\xa6\xed)fWLt\xa1\x98\xa6\x05\x09\xff\xfa\x90\xe8\
)fWHt\xa1\x98\xee\x04!\xe1z\x86\xd9\x11\x12\
\xb6\x0b\xc3\x0c\xa7\x08\x89\x9e`v\x85D'\x82)N\
\x11\x13=\xc1\xec\x8a\x89N\x04S\x9d\x22&z\x82\xd9\
\x15\x13]\x08\xa6<E\x86\xe9z\x86\xd9\x15\x13\x9d\x16\
1O\x91b\xfa\x9ebv\xc4\x84\xeb\xb4\x88y\x8a\x1c\
\xd3\xf7\x1c\xb3+&\x0e\xbbK\x1e^\x1f\x12=\xc5\xec\
\x0a\x89\xc3\xee\x92\x9f\x00$z\x86\xd9\x15\x12\x87\xdd%\
?\x01H\xf4\x04\xb3+$\x0e\xbcK\xfe\xfa\x98\x08=\
\xc1\xec\x88\x09\x7f\xe0]\xf2\x13\xc0DO0\xbbb\xe2\
\xc0\xbb\xe4'\x80\x89\x9eav\xc5\xc4\x81w\xc9O\x00\
\x13=\xc5\xec\x8a\x89\xc3\xee\x92\xc7\xd7\x87DO1\xbb\
B\xe2\xb0\xbb\xe4\xaf\x0f\x89\xd83\xcc\x8e\x90\x08\x87\xdd\
%?\x01H\xf4\x04\xb3+$\x0e\xbcK~\x02\x98\xe8\
\x09fWL\x1cx\x97\xfc\x040\xd1\x13\xcc\xae\x988\
\xf0.\xf9\x09`\xa2g\x98]1q\xe0]\xf2W\xc7\
\x84\x17=\xc5\xec\x88\x89x\xe0]\xf2\x13\xc0D\xcf1\
\xbbb\xa2\x0b\xc7T'\xf8'\x81^\xf4\x1c\xb3+&\
\xbapLu\x82O\xe7z\xd1s\xcc\xae\x98\xe8\xc21\
\xd5\x09>\x9d\xebE\xcf1\xbbb\xa2\x0b\xc7,\xc4\x8e\
W\xdf\xec\xf0\xb2\xe7\x98\xdd0\xe1D\x17\x8e\xa9Np\
\xa3\xdc\xcb\x9ecv\xc5D'\x8ey\x82\xeb\x98^\xf6\
\x1c\xb3+&:q\xcc\x13\x5c\xc7\xf4\xb2\xe7\x98]1\
\xd1\x89c\xb6\xd7\xac^?\x17\x95=\xc7\xec\x8a\x89N\
\x1c\xb3\xfd\x90\xd5\xeb\xe7\x1d\xaa\xe7\x98\x1d1!;q\
\xcc\xf6CV'\x80\x89\x9ecv\xc5D'\x8e\xd9\x8e\
\x1d\xaf\x9fw\xa8\x9ecv\xc5D'\x8e\xd9\xde\xef8\
\x01L\xf4\x1c\xb3+&:q\xcc6\x9fx\xfd\xbcc\
\xe7\x09\xed=&\xf6\xc1D'\x8e\xd9\xe6\x13\xdd1\xb1\
:\x87p\xfdeu\x8eq:\xc3Xg\xefm\xcc\xce\
/\xe6\xafw\xc3\xc5\xe4\x8f\xa2\x8a\xc1jk\xcf\xa5\xfa\
iu\xfcp~\x1ar\xf9\xa0\xe2\xf5\xf5\xdb\xfb\xdf\xe7\
\x00\xc4\xe4\xeanx=\xb9{{\xf6n\xf8u2\x1b\
\xe4\xe3_\xf6D\xe5=\xef\xd0\xc2b8_\xdcNf\
\xc3B\x13&;A\xb9y\xa4sq\xf0\x17\xd6\x9c\xd3\
\xb1o\x22l\x9c\xce\xbc\xd9\xe4o\x1f\x86\xe3\xe9\xe7\x16\
.\xa8I\xf7\xe49\xd2z\xe3eW{\xdc|ko\
\xa5\xa5\xd3\x80\xa2\xfc\xe9\xe9V7\xde\xa7\xd4\x9e\xa7O\
\xd3\xd9\xf4\xac\xe0O\xb6N\xc43\xfb\x95\x9d\x92\xd9y\
\xba7\xe6\xfa\x8e@\xa6\xf2\xc9\xde<\x90\xad}X\xb7\
\xa0\xa3\xad\x8d\x8f|R\xb7\xa03$\xac@\xc7\xe9\xe4\
\x8b\x185$t\xc0\x13\xd8Tc\xc0\xe9d5_\xb9\
\xe8\x85\x1b\x18U\xb9\xe0\xed\xe0Ot\xba\x80\xb7\xd1\xba\
\x81\xa5\x13\x1f,\x9dI)\x06\x12\xff\x0c\x80'O\x87\
\xa0h\x7f\xbeO\xf1\xd6\xbd\xbf5\xa7dy\xae\x1c\xf9\
\x80U\xe5-Exi<5T.1\xc2M\xeav\
\xb6\x14H\xb7\x88z\xa3\xc0\xea\xe49mu\x092\xeb\
S{\xf5\xd6\x10\xa4\xb4\xd4;B\xd0\xc6\xc1\xf0\x85\x10\
\xf4\xf0e3\x08=} \xfc\xd6\xc3\xdf\x8b\xc3\xa7\x83\
\xfb\xde\x9e\x0dg\x1b\xa7\xbd\xb6\x0e\xfb\xdb\x1c\xb4\x12>\
\x90wNc\xe7\xae\xef;\xce\xc2\x11\xa5F\xd1\xf3*\
\x8d\xb1/\xcf\x8d\x08\xea\xf9*\xd8?\x8e\xee\xa5,\xb6\
\x08\xe5+\x11T\xa4s\x9eB%L\x10\xe9\xec\x1cY\
9\xd8\x98\x83\x14f\xa5m4\x81N\x93\x8d\xc6#\xc4\
\xa4\xf3T\xe8\xa8A-\xd29'\xea\x5c\x19:\xdb(\
\xf8\xfa\xac\x22ObE\x95\x83p\x9a\xce\x8d\xf5tj\
\x91\xa0\xc3\x01\x0d\x1d-\xa3\x8c\xa4\xc3\xb9<\x1dw\x89\
r\xb0h\xe7\x95\x88\xdb\xa0\xba\x05\xe4\xf9\x11\x8d\xdf\xbe\
\xbd\xe0\x5c\xc7o\xdf\xb6\xb5\xe8\x0b\xaar\x18\x97\xe1\x03\
\x18=\x9fA\x96\x0eM\x91\x15\x06\xce\xe7\xc7\x88\xcaH\
\xe5\xb4\xe4\xa3x\x95\xb0\xf5),\x81\xce\x11$]i\
o\xa4\x95\xe70K'\xf0\xcd\xd7\xc7Q\x81\xe4\xa7\xda\
Z\x0b\x1f=\xe9E\xc6\xe0\xe8$\x15C'%\x01+\
$\x83&\x15\x9d\x06\xc6\xc7\xfb\x86\x8d?'\xac\x81\xfc\
|\xd4\xbe\xf1*\xde\x8cn\xca \x8d\xad\x13@\xbf\x0b\
H\xb7\x07\xc4R,\xb0Z8\xa9\x96\xb1@\x03Q\xe7\
\xba\x92\xd0\x91t\xf8b\x04\x1dm\xf4S)\xe0\xdb\xb8\
G\xcc|\xea\xac\xf3\xe2i\xa1#\xfa\xf7\xacYr\x8d\
-'7.1\xba\xa4\x01>,E5\xad*:]\
*\xd2IB%)\x22\xb4\x8d\x8a\x98\x9e\xa6\x13f\xbd\
\x07d0r\x19%K@,=\x9d#\x04s\x06B\
\x81=\x16*\xc4H?H\xc7\xe1\x11\x84\x0d\x1d\xfa\x1c\
}\x90E\xd9\xbb\x81\x8e\x95\xd1\xf0\xf3\x91\xa40{\xa7\
\xa9;%)d\xc1\x02\xcd\x9e\xdaq\x12\x17\xf5\x80:\
&ET\x8a\xdbvNX>\xc9QUFiSw\
\x9c\x0e%\x82\x0b!c\x80_\xa0\xa3\xc6\xe9\xc0`\xf4\
\xbe({WTE\xeb<\xf0\xa7O\xc7-\xb8i\xa5\
w>\x1a\xb2i1\xa5\x93\xa4\xe1\xe8\x82\xa1\xb3\x95\xe2\
3,\xe6\xff\xf7\xe4*\xb7\xd3\xfd\xad\xa3u\x18n\xc3\
6t\xd16@d\x94\xa3\xd3\x0c\x01\x0d\xc1\xc7\x22&\
\xdb\x10R(\x013\x07g\xa5Bt\x98)\xa1\xcb\x82\
\xf7\x84@\xa6\xa2*\x15\xac\xd7\xf2\x5c\xc3y\x0b\xe5\x02\
\x1f\xd1\x8c\x1a|^\xa5E\xc4\xf3B\x04\xba\x1a\xe8\x84\
L\xc5\xd8s\x82\x8e\xd4\xa4\xab\x8e>\x03\x8d\x86p\xbf\
\xa0\xb9y)\xbd\xb3\x84\xc7B\xa7\x9e\xc2#\xcct\x07\
\x0aw>\x8crdV\xb5\xc7d\xbe8\xc4\x841\xfe\
\xb9)\x1a\x8c\x15\xa1\xdd\xed\xb4\xf4\xb0W\xbf\x11\x0c\xe9\
44\xb3q\xd0\xc5\x127t\xd9\x81\x84a\xbe1\xad\
\xd2\xa53\xeeAy\x840\xce&)b\xfd\x80\xbd\x14\
\x9d\x8b\x98\x15,\xc8\xb6\xc3\xd6n\xce{\x11\xf9/\xb5\
\x98\xed\xed\xfa\xe2\xb0\x89\xb3\x19%\xe1)#\xa1Y\x06\
\x9f\x86Mg\x5c\xd2q\xe5$U\x92\xceq\x05\xeeA\
%@+\xd6%\x0b\xb22\xa8\x9f\x8f\x84\xc2a\xe3K\
\x1ch\xd5\x09\x07M\xfcf\xf9\xeaa\xce\xbd\xd7.\xb6\
\xf9 E)\xa5TD\xe6i\xd2\x81\xe9\xe9\x00FM\
\x91\xd8dR\xa6y\xe0\x85Q\xabLJ\x87\xcb\x07$\
\x1b \x89\x08\x96\x11<\xcfd2\x0d\x87\xe4\xa4\x89t\
\xb4\x1eI\xa3\xf5TR\x10]\x0c\x8a\xbd\xd7Z\x8a\xf9\
v*\xd29\xbf\xc8`\xa0\xca$\xe3\x86\xcc\xf2\xb8\xcd\
\xa82\x19\xfaYy\xa5\xa0\xf9$\x0dHt\xe9\xacG\
P|\x1bt\xa0C<!uQ\x19\x07\x99\x91F\xd1\
\x19\xd1\xd4LDX'\x91@U&\xa328\x90}\
\xf2\x87t\x84+\xfa\x0ec\xab\xe8\xb0\xfb\xc8\x87\x9e\xae\
\xa5\x08\xda^!\x9b\x1e\xd0p\xa5\x0f\xca\xe5\xcah\xeb\
\xb2|\x9a\xdf6\xefi\xa5\xf1\x9dc\xb8\xe0\x8fv\x87\
a\xbd\xfb\xf1\xdb\xcd\xa5`$\xf6\x9a\xceO\xc4\xa7\xe9\
T\xbe\xd0\x05V\x8e\xdf\xb2Ri\x917@\xb5\xc5\xc5\
M\x85\x8b\x18Yl\xaf\x06\xf0\x92\xa8\xde\xb8gK\xdb\
\x1b\x8b\xd0J\xc7gh[\x093\x0c\xbe\x9cc\xd8B\
\x00\xe8\xb0\x1a\xf0\xfc\x1cp\x87\xd5{\xdd\xb6zp\x81\
\x08\xbb1\xea\x1c\xdf00:l\xf6]&\xc5,x\
\xa4\xd1\x91#\x0d\x9d\x1f\x8d\xe4Y\x9e\x83\xab\x80d\xd3\
\xa9\xb6\x92\x8f\x10\xa6\xd3\x9a#Y\xad6Q\xc1i\x9c\
\x07\xa2\xc9\xf5\xcd\xf0\xd34\xee\xb9n\xa9l\x14\xfb\xd0\
\x0b\xa7v\x9e\xb0\xf6\x9d\x17m^2M)\xad\xda>\
Y\xb68Y\xd6\xf1\xc1\xf3\xb5[\x91\xf5\xc9\xec\x11\xec\
0\x97r\x9e\x01jG\x8eW\xf1\xd1\xb6.\x93\xe16\
\x9e\xf2\x19v\xf0I\x0a\xd2(i\x9d#zN\x8c\xd2\
\x9d\x88\x8b\x92Kt\x82\x8f*\xa6\xc4*j\xc1n[\
\xc1Q\x83|*Z8\xb0\xc4H(\xdf74\xe3\x5c\
\x1baYJ:\xd2\x9c\xd6\x10\xbc\xa1#\xad\xc1aM\
\xe0c\xea!\xf3\xda\x1a>|^\xd3\x09\xad\x5c\x1b \
\x0b\xa2\x0e9>h\x00\x85\xa4\xec9\xea\xb3\x93%z\
gC&\xe5S\x94\x03\xc4\x5c\x9fNx\xe6\x93\x97\xb5\
\x0bFsm\x1dLH'\xd8\xd2\xd9\xd3\xf0\xec2\xad\
KH>\xea\xd8\x0aCX\xa6\x11\xc2\xa5\xfb\x90\xc9\xa0\
\x0b\x07\x06l9\xb4\xd4R\xc2-\xdaR\x08\xf0\xe9\x8e\
\xb5\x14l\xd9GI\x01\x87\x8e\xf2\x86b\xe9\x0c\xe3\xf5\
L\xd0\x89\xba\x14R2\xd9\xbb\xd2D\xee\x88\x0f]I\
\xc9\xda7}\xcfHPZ\xfa\xf7M\xee\xd5^\xff@\
\xcan4p7\xb9HK \x92\xce\x04\x07B\xe87\
\xa9\x1cI\x8f\xe6\xa5=I\xfb\x86\xcf]\xf0\xd8\x0cN\
\x1c\x9e.\xc0\xda+\x171G\x1b\xbc\xf0\x0b_\xb3|\
\x12\xf6f\x1e\xb7\x0cR\xc6\x83 \xd1sr\x1b\xd7\x97\
{p\x9aH\xa7\xd6\x9b\xb7^\x06*\xe77\x16\x09\x0b\
>\xd0PN(\x11\xdd\xf7\xc9\xfd\xad9\xf0\xfb\x13w\
\x91\xd6\x1f6\xe1\xdf\x9a\xc0l\xbc\x04\xa5v\xdb\x17\x8a\
\xfc\x09\x12\x0fw\x9e\xc0\x16\x04'\xecey&\xd5|\
^5\x9d\xff]\x14\xe2\x0e\xde!<\x1b\xc4d\x88\x11\
s\x91\x7f\x82\xe1\x82\x1c\xf9 i\xc15\xf9Uo\x97\
2\x8d\xc8\x1c*c\x8d\xd2\xcc\x8fW\xd2\x0b\xa3\x89\xcf\
jx\xd5\x0bZ3\x80\xeb\xd3Y\xaf\xdc\x96\xbe>\xbd\
@\xf0\x04z7wb\xc4.\x1e\xd6\xa3\xf7y\xe8\xed\
8\x17R\xf6s\xf1\x82\xe72\x96F\xbf\x0a\x1d\x1bF\
_\x94g\xd2\xcc\xe8KB\xba\x03\xa2`%\x15\xad\xf8\
\xac\x8c\xfeBF\x91\x8e\xac\xcf\xac>\x13\xe6f\x9f\x89\
s\xbb\x07Q\xb3\x14n}\xc3\xee\x8b\xddm\xd9}\xe6\
\x02\xd5^kG\x1b\xabG\x87_\xe2\xd8x\xd1\xccr\
\x89\x034\xd1I\xde\xf2\x13\x86v\xbb\x99oF87\
\xda\xb3ZI\x99\xdf)Z-3\x99\x94w\x02\x833\
\x96\xf7\xc1\xa2r\x8e\x96(\xce\x95\xa7E\x0d0\x97s\
-\x88\x8fKZh\xa3\x1f\xb4\xb3%\x9c!\x89\x09\xb4\
\x5c{^_\xa3\xe58\x91\xfd \x1an\xf97\x11[\
N\xb5\xb2\xabH\xe41WD+i=$H\xbae\
.\xa3\xd6\xa9\x13\x9c\x0a\x80\xdf\xdb4\xc0e\x07\xc1w\
\xa3V\xb4U\xb9\x1aHA\x0fe\xfa\xb9\x0f\xe5\xd0Q\
\xd8\xfd\x1dEq\xa9bIP}k\xa9\xe2\xf5\x9e\xe6\
:\x9a\xaf>\x0c\xba]9\x95\xa7\x97dD\xda\xd0\x95\
R\x229I\xf0F\x86\xe7\x93\x10\xee\x9f\x93\x17J\xb1\
\x82\xa5\xe4g)\xa4\xdcE\xc1\x91\x84L\x14\x91\xa7k\
\xe4M$2\x06\xe9C\xbdzg\x10F,\xa3\x11\x1e\
\xc1\xdb\x98V\xef\xd6R\xd4\x11\x8a\xb7\xd3\x11l|P\
l\x0a\xe8\x8d\xe0\xb4\x0b\xa9*\xaef\xb2w\x9cF\x06\
j\x8a\xa4d\x032\xa5\x91\x18&r\xc6\x90I\xe1\xf7\
,\xf2QZ\xf5\xe5\x843h\xd5\x94I:\x17\x93w\
E`>!:\xa5\xb8\xf7\xd6j\xeef\xe5i-\x8f\
\xf6\x9dW*\xd2\x15\xb944\x95\xc9J\xaa\xdc\xbdV\
Q\xd8\x9a\xdb\x9d\xb6m\xd8\xc5\x8e\xb4\xcd\xb5\xb6\xad\x7f\
X\xab\x98\xa7\xcfh\xf9\xa5\xfe\xc5\xbfW&\xd3\xbc\xd6\
(\xb6\xc3\xa0B\xc9\xa0\xa2\xd0\x11\xec:\xf9Ia\xf9\
\x09\x09\x92\xca\xc8\xcfM\x90T\xe1:\xcb,\x11q\x96\
I~\x9a\xa4Y[:\x04\x03\xaeMK5)\xaa\xc8\
\x10\xa8X\xf0\xd2\x08\x9dD*\xb8\xe8I\xe6\xa4\xf3\xc4\
\xe0!\xd3J\xf1\x22\x06\xa46\x18\x15\x92\xdbFR)\
\x1d\xc9\xe0\xc9}H\x8dh\x11\xa4e\x99\xb0\xb5\xc3\xcf\
*k\xaf\xa5\xb3ueo\xb8i\xa5l\xacoh\x9c\
NM\xcb\xb8\xea\x0eh\x04S\x0e\x92\x82\x9b\xcbT\xd2\
\x92\x91\x91L I\xa8K\xe26NB\xe6cP\xce\
\xa7X\xeat`\xe7@R\xab\xa2H\xb5\x9d\x8d\x9e\xda\
\xf1A\xc0\x0f\xa4\xda\x1e\xba#\x11\xacS\x93\xc5\x92\x08\
N\xcb\xa7\xca\xf4\xc0I\x1d\xac\xe0\xa3\xb4&\x99E@\
\x95i\x0a\xa2\x16\x92o\x88N\xa0R\x92Y\xc3;\x00\
\x90J\x1d\x9d\xe0\x0e9zv\x82\xdaqH\xf7\xa3b\
\x05y%\xc0\x8aH&0\x1c\xcf\xea\x09\xd2E\xc3O\
\x0dT\x986x\x16\x7f\xcet@\x82\x11\xb0\x13iH\
Q]'\x99\xb14p\xc8\xb4a\xff\xd7(\xa7=\xb4\
\xb5\xac\x1d\x22m@@j\x84p*\xc9h3\x22\xc9\
\x22\xe6t\xa36\x01\xa3\xae\x0c\xd8\xe8T\x90\x9e\x1a\xf0\
,\xa3\xed\xe1\xc82GGtmTv\x01\xfe\xbf\xae\
\xed4/;\xd1&\x0ayO\x92\x05\x0d/\x9eJF\
\xa3-w\x1c~\xdb\x90zc\xa5\x85\xd0\xb2\xee8\xc0\
\x09\xb0&)\x18\xa9a\x190\xc0M\xa3\x94\xdc\xac+\
y\x8d\x98\xebF\xc4\x12\xc9\x8b8Z\x19\xb0(\x22+\
\x98A^\x11\x8c\xc4Q\x94\x8b,s\xc81\xd3\x0d\x8d\
\xf7\xce\xd5\xb4Fy\x8eM\x90\xd23\xf0\xbcv\x08\xae\
#Uj\x07\x93\xecY\x14\x84$\x06L\x22\x0a3\xf5\
\xf2\xa8\xe0 \x06a\x82\x8f\xa6GB\x09\xed,\x8bD\
\xf3X\x16\xa5\x09it\x80\xabL\x1aSVz[\x8f\
\x19\xb8%>\x05|[\xef\xb8\x15#\x82\xe5\xf9S\xa0\
w\xb4\xcb\xce\x0b\x8d21}H\xa1-\xc1c6\x98\
,\xc3d\x0c=\x95<<c\x9cQ\x9e\xf9\x194(\
\xd2\xfd`\xe0\xa9]X(-\x97\xb3\xd01D!\xb3\
R\xd7\x9a5\x1e~\xc7\xb1\xcc\xc0\xe0R\x1b\xb0\x13\x91\
\x1a\xd6H\xe0\xa5K\xdd\xf10[.\x19 S\xac\x07\
\x83)r,\x8bh\xc5\xa7;\xc2>kx\xc3\x0c\x04\
\xad\xd3\xd3\x82\x9bD\x10\xa5y\x01\xb4\x85Om\xd3\xda\
4=\xea\xc2\x0f\x8d9!3\xd9;z\xb2\x0by\x8a\
\xa0\x07\xf0\x9aRz*G\xad<i\xa4\x10mi\xeb\
\x00\xc6\x1b\x1a\xfe\xd5V\x02\x9d\x12&\xf7\xba\xb4\xf1\xa0\
\xa4I\x0e\xd1D\xb2N\xb8\xd7\x10\x0c=\xc5\xb0\xe1\x9c\
\x03\xc8\x8b^J\x1d\xdb6\xbc\x81\xf7\xab\xc6i@\x90\
\x19z\xfeLn\xd6\x06\x9aE\xf2\x02T[pm~\
\x18\xc2\xd72L\x11\xcb\x90uI\xb3Y[ie\x97\
R@\xcd\xa4\xc0\x10\xe9\xa9\x0a\xee8\xf0\x10\x8aa\xe5\
\xdb\xe0o\xb4\x95\x12\x00\x05\xdd\xd0\x11\x091I\x9a\xd9\
\x09\xe6\x0f\xa9\x0a\x11&L\x88\xd5\x86n\xdb\x94\x22\xed\
1\xb2u\x83\x8d\xbbRc*%\x80u\x88Y\x16V\
\x96\xf67MB\xc9j\xf6\x88.F\xb4b2)\x93\
Hp\xf9\x98l\xba\x96R.\x85\xd1\xfa\x98a\x87h\
$\xfcFp\x0d\x94\x91\x14\x0aL\x96U\xe3\x91\xd6\xf8\
\x05m\xeag\xb8%\x99\xe4\xa44G8K\xad32\
\xb3\x05\x92)\xc2Df3$\xd3ZE\x9d\x1b\x17\x09\
\x11\x17k\x0bNVH2\x84\xae\x90\x1b+\xc9\xa2k\
\xda4\xcb\xe0Y|f\xfd\xe48\xa5U\xca\xaf\x9d\x04\
\x8b\x90V\xd9\x867\xe1p\x01T\x98\xcc\xefph\x80\
\xd3\xb6\x99\x7fb\x19f]6\x1c\x19\x87\x06\xbb\xf4\xbf\
\xec\xf18\x08\x18t2s\x8c\xec\xf0\x85L=\x5c\xb9\
\xd0:4x\x959\xdbz'\xdd\xe6N\x99d\x013\
\xad\x1a\xee\x9b\xa5\x08\xc1f\xed\xe7\xb92\x91\xd3,\x1a\
4+/\xe3\x06K\xd1\x11\x97E\x98t\xdd\xf9<\x12\
\xb1\x0ca24bV\xba\xbbH3XG7\x96%\
\x8f^\xc7\xc0f\xdd:X&aH\xbd\xae\xa3j\x92\
\x89\xa0\xb3\xe8\xdb\xac\xbc\x8c\xd3Ij\x95\xcf\x22z\xb3\
d\x8a\xfc\x1b2\xe6\x08l2\xc8\x1ad\xf295\x9f\
`\x93!\x9a\x98|\x163\x0fN\xbc\xe8\xf1~i3\
\x86\x02\xa3\xc6\xec\xd7~qIeH\xe8BMQj\
\xd2C2\xda\xdd\xcb\xb8\x11\x8b\xe0\xa6L\x83E\x91\x14\
\xc6f\xfc\x9an\x91HK\xb6\xcb\x15+#\x99\x0aQ\
4\xf9\x1bKif3\xa6G2I\x06\x901B\x92\
\x81Ix\xdd\xe0\x8e,\xa5\x87#3\x96I\xbbb!\
\xd6M'2\xca\x22\xe7\xeae\x98%me\xa9\xe6P\
\xbd\xe2\xb7\xbc\xa5\x16|=\x96\xc4\x83Yfy+q\
\xa36=\xd0\xad3nM2\xa0U\xfb5\x05'\x11\
<\xb7\x96\x0d\xa6NR\xa2\x1duAc\x15\x89\xa0P\
\x15k\xa7I\x8fj\xb1, [l&\x09$\xf5\xf0\
1\xba\x0eX\x82\x1b\xa6gD\x97\xebL\x88f\x81e\
\xc8<ChU\x16\xb8\xbd\x5cG;^\xfes\xaa\x11\
\xaf\x14?\x9f\x1aV\xd1*\xe5\xe7%\xc7\xce\x0e_ \
^#\x18\x19\xda>C\x01\x18\x04\x95\x16\x15X\xb8\x97\
\xb6\xe9\xf0\x91\x13GpE\xb3)\x15\x08\x9a\xad\x1b\xb4\
n\xbb\xe7Z\xd2\x9b\xb1\x1e\xfb\xd1\xcd\xfeKG\xd7\xc3\
\xeb\xf1\xb5+'\xc9J\xff\x08Ir\x87?\xb4$\x07\
\xdf\xff\xa1e\xa7?\xb4\xd4\x9d^@\xdb\xfe\xdb\xdb\xd7\
~\xbf\x0b \xf1\x8c\x95\xdb\x1e\x12EHtyoC\
\xfbOoO\x00\x12\xfd\xab<\xbaB\xa2\xcbk\x1b\xda\
Gr\xbe>$v>\x1c\xd9Cb\x1fHt:\xe1\
\xe0\xf4\xdeR\x0dL\xf4o\xf2\xe8\x8a\x89N'\x1c\xb4\
\xdf4z\x02\x98\xd8\xf5g\xd4=&\x9e\xc6\x84\xe9\xf4\
\xf6\xd9\xc2\x91\x9c'\x80\x89\xfemq]1\xd1\xe9\x14\
\xadS\xa4\x98\xfd\x9b\xc1:c\xa2\x0b\xc5l\x1f\x98\xf4\
\xda/\x10\x04$\x0e\xfb\xac\xe1\x8f\x08\x89.\x14\xb3\xfd\
\xfe\xc0\xd7\x87D\xff\xee\xd9\xce\x90\xe8\xc20\xdb\xaf\x0f\
<\x01H\xf4\xcbU\x1d!\xd1\xe9\xd5\xb3\x85#9O\
\x00\x13\xfdzUWLt\x22\x98\xa7w4\x8eV\x07\
\xfe+\x92\x1f\x11\x13\x9d\x8ei=E\x86\xd9\x1f\xa1\xd5\
\x19\x13\x9d\x161O\x91b\x8a~\xc1\xaa+&:-\
b\x9e\x22\xc7\xec\x8fi\xed\x8a\x09u\xd8]\xf2\xd7~\
k\xb9\x96q\xd7\xabXzH\xec\x03\x89\xc3\xee\x92\x9f\
\x00$\xfa%\xcc\xae\x908\xec.\xf9\x09@\xa2_\xc2\
\xec\x0a\x89\x03\xef\x92\xbf>&B\xbf\x86\xd9\x15\x13\x07\
\xde%?\x01L\xf4\x8b\x98\x1d1\xa1\x0f\xbcK~\x02\
\x98\xe8\x171\xbbb\xe2\xc0\xbb\xe4\xaf\x8f\x09\xdf/X\
u\xc5\xc4aw\xc9_\xfb\xa8$@\xa2_\xaf\xea\x0a\
\x89\xc3\xee\x92\x9f\x00$\xfa\xe5\xaa\xae\x908\xec.\xf9\
\xebC\xc2\xf5\xcbU\x1d!a\x0e\xbcK~\x02\x98\xe8\
\xd7\xab\xbab\xe2\xc0\xbb\xe4'\x80\x89~\xc1\xaa+&\
\x0e\xbcK\xfe\xfa\x98\xb0\xfd\x82UWL\x1cx\x97\xfc\
\x040\xd1/Xu\xc5\xc4\x81w\xc9_\x1d\x13\xaa\xe7\
\x13]1a\xbbpLu\x8a\x7f\x12\xe8\xfa\x05\xab\xae\
\x98\xe8\xc21\xd5)>\x9d\xebz>\xd1\x15\x13]8\
\xa6:\xc5\xa7s\xfb%\xab\xce\x98\xe8\xc21\x0b\xb1\xe3\
\xd57;\x94\xef7\xc0\xbab\xa2\x0b\xc7T'\xb8Q\
\xae|\xcf1;b\xc2u\xe2\x98'\xb8\x8e\xa9\xfaM\
\xd1\xce\x98\xe8\xc41Op\x1dS\xc5>vt\xc5D\
'\x8e\xd9^\xb3z\xfd\x5c\xb4\x7f>\xb73&:q\
\xcc\xf6CV\xaf\x9fw\xc4>vt\xc5D'\x8e\xd9\
~\xc8\xea\x040\xd1\xafOt\xc4\x84\xef\xc41\xdb\xb1\
\xe3\xd5\xf3\x0e\xdd\xff\x05XgLt\xe2\x98\xed\xfd\x8e\
\x13\xc0D\xcf1\xbbb\xa2\x13\xc7l\xf3\x89W\xcf;\
\xb4\xe89fWLt\xe2\x98m>\xd1\x1d\x13\xabs\
\x08\xeb/\xfc\xbf_.\xe7\x9f\xf0\xbf\xff\x07\x91v\xe2\
\x1e\
\x00\x00\x06\x10\
\x00\
\x00\x16\x9ax\x9c\xe5X\xc9r\x1b7\x10\xbd\xeb+&\
\xa3\x8bT\x95\x01\xd1\xd8A\x8b\xf2!UI\xe5\x94C\
\xec\x0f\xa0\xc9\x91\xc8\x98\x22U\xc3\xb1\x16\x7f}\x1a@\
\x03CZ\xb4D\x95\x95\x5c\x5c<\xe8\x09\x8dn\xf4\xfe\
(]\xbc\x7f\xb8YUwm\xb7]n\xd6\x93\x1a\x18\
\xaf\xabv=\xdb\xcc\x97\xeb\xebI\xfd\xf1\xc3\xef\x8d\xab\
\xdf_\x9e\x5c\xfc\xd24\xd5o];\xed\xdbyu\xbf\
\xec\x17\xd5\x9f\xeb\xcf\xdb\xd9\xf4\xb6\xad\xce\x16}\x7f;\
\x1e\x8d\xee\xef\xef\xd9\x92\x0e\xd9\xa6\xbb\x1e\x9dWM\x83\
\x9a\xdb\xbb\xebj9\x9f\xd4\xf8S\xd4\xa8;\xef\x17\x93\
Z\xb9\xbaZ\xb4\xcb\xebE\x9f\xf0\xbe\x03\xe8\xd2z;\
\xa9w\x0c\xdf\xcbhRp\xceGh\x88\xae\x8c\x1fV\
\xf8\xe2\xa1\x8b\xe0\xbd\x1fEi}yR]\xcc\xdb\xab\
mt\x22\x00\x15\x8e\xaa\x0b\x14\xb6\xd3\xee\x8fn:_\
\xb6\xeb>J\xf7\x8f\x94V\x1a\x1f\x82I-4\x03\x89\
\xce?\x88\x01?\xe2\xb9aV8\x8b\x18\xcf\x95e\x1c\
d]]\x93\xf6\x87n\xba\xde^m\xba\x9bI}3\
\xed\xbb\xe5\xc3\x19\xd3J8_q\xfc0\xe5\x9c\x87\xaa\
a\x5cx\xf0\xa6j\x040!\xe5\xf9\xa0\xfeq\xbd\xec\
1\x05_\xb6m\xf7\xf7\xedt\xd6\xfe\xb5\xfe\xb8m\xd1\
\x83\x10\xd2x\xd1\xb5W\x93\xfat\xdf]i8\xafG\
\xc7\x85\x16\xef\x86\xab\xd5\xc5\xb6\xdf\xdc\xa6\xfa \xc0s\
\x0cm\xdb?\xae\xdat\xd0\xcc6\xabM7>\xbdR\
\xe1SW\x9b\xab\xabm\x8b5\xa3\x97\x9e\xaa\xab\x83\xea\
\xf3O\xe13\xa8\x039:\xdaw\xeb\xe8\xba\xd8T\x97\
F\x03\xb3\xce\xa4\xc24\x8e9g}\xaa\x8c\xe6I\x10\
*\x83w8\xb8g+\xa3\xa4\x93\x8e*\xa3\xadV\x95\
\x14L\x08\x15\xebb\xad\xfb\xb1\xbax\x09G\xd7%\xdc\
=\x94X/\xe5\xc1\xc4Ze\x8d\x85\x17\xeb\xe2\xa5>\
\xa8>\xfb\x14>oT\x17\x09^\xbc\xf9\xbc\xb8\xddy\
\x01\xc7\xbc\xc5\xb1\xb1aX\xfe\xc7i\x01\xaf\xa8\xe1\x8c\
b\x22\xec\xab\xd8pB0mU\x0a\x0d\x14\xf3\xc6\xa7\
\xd0\x1a\xf0\xcc\xfa\x97;\xce\xeev\x1c\xb6r\xec8\x1b\
\x1aN\xbcM\xc7]\x8c\xc2\xbe\x0b \xad\xe0\xd5\xf4\xb1\
\xed\xb0[\xfa\xc1\x9f\x08W\xb8\xd6\xcf\xf8\xafB\x9d\xa7\
\xd5x;\xc5\x05\x1f\x14\xbav\xd6\x0b\x1d\xa2\xc7_n\
$S\xde\x9bFp\xa6+\x8c\x9ds\xa8\x14\x93m\xa3\
\xee\x84]4\xe9\xe4\xae\x11\xf6k\xe9\xb5\xab\xe5j5\
\xfe\xd2\xad\xceN\x9f\xae\xd6\xf3w\xdb\xbe\xdb|n\x9b\
\xfag\xb3\x5c\x8f\xbb\xcd\x97\xf5<\x9fF\xa2\x183\
\xdc\xe3\xa0\xe9\xec;\x96\xec9\x15\xb2\xb8\x1d\x80\xe4\x86\
\xdc\x0eejL\xf4y\xdf\xdf\xe3|\x0dm}\xbc\xaf\
\xea\x19_C\x1f\x1d\xf6\x15\xc2\xae\x8a)\x16\xd8\x0e\xbe\
\x02\xb5h$\xe3\xde\xde\xc1\x22\xfdl\xe0\xeb\x0dr\xa3\
\x05Y\xf1E\xc3\x99\xd3\xc0%J3\x0ar\xec\x1d\xa5\
\x1dv\xd5\x22\xeb6\x83rS\xb4\x07\xe5fW\x1b\x80\
ia\x1b\xb5\xc0\x8b\xd2DmBQ]0\x10<\xd8\
\x06\xe6\x95\x11I\x1cQ\x14s\x1cQ+t4/A\
X\x9d\xcc'H\xef\x1b\xabE4\xa09'\xfb\x11\x91\
X\x00$1>j\xca\xf3\xe6\x90\xf8\x89wh\xdc\x87\
\x0d\x1e\x8c\x83\x97d< \xf2\xceI,N\xf4\xce\x18\
\xdc+\xc9\xbb\x04)<\xdcL!<\x8c\x13\xf7U\x90\
\x13\x0ab\xc3\xa4\xd5\xbe\x91h>\x8cC2\x1fQ\x90\
*\x86(\xaa\xba\xa4\xe7\xb2S\x9e\x0b\x13\x9d\xb2B\x92\
VD9$-|\x0a\xc9JG!\x05D\x1e\xe1B\
Ib\xa9\x05%<\xa2,\xb6^\x1c|\xd5\x19\xad\xa3\
\x9eq\x86R\x11Q\xac2N\xa9\xc3m\x83\x89\xc0v\
\xd5\xd4\x05\x09R\xaa\xb4\x06M\x854N\xd1\x8d\x04\xe9\
e\xc3M\x0a\xcb\x80\x00z \xa0\xd4\xa4\xd6\xc72c\
z\x85\x13\x9e2\x1daT\xc7\x11\x94\xc2E\xc7%\xae\
\x81\xe4{D\xe4>\xb8\x98\xb4\xef9(B0\x10\xf5\
u\xea\xe2\x04H\x88EOB)\x0d\xd9\x96f'\xf4\
\x06\xe0g\x8c\x1d[\x87\xabF\xa0e\xe1\x15\xb8d9\
AzZ)\xea\x9a\x03\x1d\x8e\x96\xb8N\xe3\xe5,\xf8\
$\x8d(O\x1f\x88\xa4\xac\x84!\xe5\x88\xca\xe6Q\xb4\
y\x9c\x03C\x9b'\xc2\xe3.`\x12\x95\x95*\xdeP\
\xf8\x96J7\x12\xcc\xa9\xc3/B)u\x8e\x96_\x86\
y\xc0\x85N\xd9\xe1\xd2\xa7\xec\x04@\x9d\x01\xd6Ta\
\xf5\x09K\x83\x18@\xde\x1d\x98\x07\x9f\xaa\xaa\x8c\xa3\xcd\
\x96`6\xed\x85\xfa\xde\xee\x88\x83\xccM\x14[\x07*\
\x89#:f\x0d\xe0K\xday\x0a\x1d\xbfUP\xe4\x01\
eu\x0b)\xf7\xdc\x0b\x1a\xf7\x88r]\xd3\xd6\x14\x8a\
\xe7\xc8\x02\xcaBOb\x09\x9a\xba9\xa2 \xf6\xd8\xf8\
J6&\x84\xe5\x94\xa5\xb0\x02\x8a\xcaH-\x86\xa7\x8d\
\xe9\xac\xa6^\x8e(\xfb\x85m\x1bm\x03p\xb2\x1dQ\
\x0e\xcb\x81H\xb3\xa20?\xb4\x91\x13$\x03\x12T\x0a\
\x0cD^\xe9\x11e\xdfM\xeaH\x1c\xaf\x1cY@$\
\xd5JYZ\x9e^\xe7\xe5\xe9\xb3sA\x0e\x95$\x82\
\x84L\xb3y\x8c\xf7yV\x17\x9e\xd5;<\xeb3\xcf\
B!Z8\xc8\xb4z`Z\xfdfLk\x0a\xd3\x9a\
\x81i\xcb8\xbd\x82i\x0fP\xe9\x8f3\xad,L+\
\x07\xa6-k\x0a\x99\xd6\xe5i\x91eZdaZ\x03\
\xcf2m\x1ab\xc7SS\x11:@\xb8\xba\x10\xae>\
j\xd2^A\xb8\x07\xdf~\x05\xed\xeaa\xb3\xeb]\xea\
1\x85z\xd4@=e\x05\x1fO=9\xeb\x01\x0d\xc4\
\xe33\xf1dr\x08h \x1e[\x88\xe7\xa9{\x91x\
D&\x1e(\xcc\x03\xaf\xa7\xdd\x9f*\xf6}\xda\xf5\x03\
\xed\x16\xe6|\x99v\x81xWd\xda-\x0b\x123`\
2\xeb\xea\xc2\xba\xa5\xdb\xf7H\xd5\x0d\xa4\xea\x8e\xbc\x90\
XW\x17\xd6\xd5\x03\xeb\x0e_XvY\xd7\x0d\xac[\
f\xea\x05\xd6\xb5\xcf\xb0\xae\xe1PX\xd7\x0c\xack\x0e\
\xb1\xee\xb7{\xe4\x1b\xd6\xd5\x85u\x8f\xdb\x05\x89uM\
a]\x18h\xb7\xcc\xfb.\xef\xaa\xc2\xbb\x85\xd5\x07\xde\
\xf5\x85w\x87\xaa\xbf\xc4\xbb\xfa?\xe5]YxW\x0e\
\xbc[\x88\xf5Gy\xd7e\xde5\x85wc\xcd\xf6\xff\
\xd4>5\xc6\xbc\xdb\xdcNg\xcb\xfeq\xcc,\xfd\xc3\
\xe2\xfa\xf2\xe4\x22\xfc\x87\xf7\xf2\xe4_\x0c\xce\xeb\xb7\
\x00\x00\x08\xc7\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg3479\
\x22 width=\x2222\x22 hei\
ght=\x2222\x22 version\
=\x221.0\x22 xmlns=\x22ht\
tp://www.w3.org/\
2000/svg\x22 xmlns:\
xlink=\x22http://ww\
w.w3.org/1999/xl\
ink\x22>\x0a <defs id=\
\x22defs3481\x22>\x0a <l\
inearGradient id\
=\x22linearGradient\
2698\x22 x1=\x2224\x22 x2\
=\x2224\x22 y1=\x2244\x22 y2\
=\x223.899\x22 gradien\
tTransform=\x22matr\
ix(.53846 0 0 .5\
3847 -.92308 -.9\
2331)\x22 gradientU\
nits=\x22userSpaceO\
nUse\x22>\x0a <stop \
id=\x22stop2492\x22 st\
yle=\x22stop-color:\
#791235\x22 offset=\
\x220\x22/>\x0a <stop i\
d=\x22stop2494\x22 sty\
le=\x22stop-color:#\
dd3b27\x22 offset=\x22\
1\x22/>\x0a </linearG\
radient>\x0a <radi\
alGradient id=\x22r\
adialGradient269\
6\x22 cx=\x2212.011\x22 c\
y=\x2210.993\x22 r=\x2220\
\x22 gradientTransf\
orm=\x22matrix(3.03\
38e-8 .90468 -1.\
305 0 23.027 -4.\
6356)\x22 gradientU\
nits=\x22userSpaceO\
nUse\x22>\x0a <stop \
id=\x22stop3244\x22 st\
yle=\x22stop-color:\
#f89b7e\x22 offset=\
\x220\x22/>\x0a <stop i\
d=\x22stop3246\x22 sty\
le=\x22stop-color:#\
e35d4f\x22 offset=\x22\
.26238\x22/>\x0a <st\
op id=\x22stop3248\x22\
style=\x22stop-col\
or:#c6262e\x22 offs\
et=\x22.66094\x22/>\x0a \
<stop id=\x22stop3\
250\x22 style=\x22stop\
-color:#690b2c\x22 \
offset=\x221\x22/>\x0a <\
/radialGradient>\
\x0a <linearGradie\
nt id=\x22linearGra\
dient2688\x22 x1=\x222\
4\x22 x2=\x2224\x22 y1=\x225\
\x22 y2=\x2243\x22 gradie\
ntTransform=\x22mat\
rix(.51351 0 0 .\
51351 -.32432 -.\
32433)\x22 gradient\
Units=\x22userSpace\
OnUse\x22>\x0a <stop\
id=\x22stop2919-1\x22\
style=\x22stop-col\
or:#fff\x22 offset=\
\x220\x22/>\x0a <stop i\
d=\x22stop2921-0\x22 s\
tyle=\x22stop-color\
:#fff;stop-opaci\
ty:0\x22 offset=\x221\x22\
/>\x0a </linearGra\
dient>\x0a </defs>\x0a\
<g id=\x22layer1\x22 \
transform=\x22trans\
late(-1 -1)\x22>\x0a \
<rect id=\x22rect55\
05-3\x22 x=\x221.5\x22 y=\
\x221.4998\x22 width=\x22\
21\x22 height=\x2221\x22 \
rx=\x222.2322\x22 ry=\x22\
2.2322\x22 style=\x22f\
ill:url(#radialG\
radient2696);str\
oke-linecap:roun\
d;stroke-linejoi\
n:round;stroke:u\
rl(#linearGradie\
nt2698)\x22/>\x0a <re\
ct id=\x22rect6741\x22\
x=\x222.5\x22 y=\x222.5\x22\
width=\x2219\x22 heig\
ht=\x2219\x22 rx=\x221.36\
52\x22 ry=\x221.3652\x22 \
style=\x22fill:none\
;opacity:.4;stro\
ke-linecap:round\
;stroke-linejoin\
:round;stroke:ur\
l(#linearGradien\
t2688)\x22/>\x0a <g i\
d=\x22g3475\x22 transf\
orm=\x22matrix(1.11\
63 0 0 1.0912 -1\
.9531 -1.2451)\x22>\
\x0a <path id=\x22pa\
th3339\x22 d=\x22m9.69\
5 8.2559c-4.65 2\
.4267-2.5473 9.3\
807 2.7688 9.380\
7 5.2607 0 7.618\
4-6.5927 2.7688-\
9.3807\x22 style=\x22f\
ill:none;stroke-\
linecap:round;st\
roke-miterlimit:\
0;stroke-width:1\
.8122;stroke:#ff\
f\x22/>\x0a <path id\
=\x22path3341\x22 d=\x22m\
12.5 11.358v-4.7\
181\x22 style=\x22fill\
:none;stroke-lin\
ecap:round;strok\
e-miterlimit:0;s\
troke-width:1.81\
22;stroke:#fff\x22/\
>\x0a </g>\x0a </g>\x0a<\
/svg>\x0a\
\x00\x00\x08\x97\
\x00\
\x00+zx\x9c\xedZ\xd9r\xdbF\x16}\xd7W`\
\x98\x17\xab\x8a\x00{_ \xc9\xa9\x8c\xab&\x95\x87\xa9\
yH\xfc\x01 \x01\x92\x88@@\x05@\xa24_\x9f\
\xd3\x0d\xee\xa4I\xda\xa2\x9c\xccDP\xb9H\x5c\xf4r\
\x97s\xcf\xbd\x0d\xfa\xf6\xc7\xe7Y\x11<eu\x93W\
\xe5]\x8fF\xa4\x17d\xe5\xa8J\xf3rr\xd7\xfb\xfc\
\xdb\xbfB\xd3\x0b\x9a6)\xd3\xa4\xa8\xca\xec\xaeWV\
\xbd\x1f?^\xdd\xfe#\x0c\x83Ou\x96\xb4Y\x1a\xcc\
\xf3v\x1a\xfcR\xde7\xa3\xe4!\x0b>L\xdb\xf6!\
\x1e\x0c\xe6\xf3y\x94/\x84QUO\x06\xd7A\x18~\
\xbc\xba\xbam\x9e&WA\x10`\xdf\xb2\x89qs\xd7\
\xdb\x981\xe7~,#\x84\x0c\xf0\xac\xb7\x1ay\xde\xa8\
\xf8\xb9\xc0\x9e\x87\xc6Rk\xed\xc0?\xf5\xa3\xb7,v\
\x82y\x9e\xb6\xd3\xbb\x1ec\xfen\x9a\xe5\x93i\xbb\xba\
\xcd\xd3\xbb\x1e\xf6aB\xd8\xdeG\x08n\xd3l\xdc\xb8\
\x07\xdd#w\xc7\x84\xa4\xfe\x19\x9eb\x97,\xa9\x7f\xae\
\x934\xcf\xca\xb6\x1b\xd7\x8d\xdc~\xc2\x0d\xd3\x8b9\x98\
\xd5\xb4\xd5\xc3r\xecbKH\xb8\xe1\xb2\xb7\x167\xed\
K\x91uO\xc2QUTu\xfc\x83\xe2V\x8d\xd2\x1b\
/\xaa\x1e\x92Q\xde\xbe\xc4tcJ5\x1e7\x19l\
Ad\x07\xa77\xa3\xc77\xd3\xc3D\xa7r{3r\
`3\xba\xda\xecv\xb0m\xf4W\xfb\x88\xf0\xd3>\x22\
'|$\xb9\xc9F\xeaB>\x22\xfa\xeb7{[\x1f\
I~\xdaG\xf2\x14\x8e,\x1f\xda\xf4B8\x92\xe2B\
\xa0\xfdF\x1f=SL%&\xb2\xda\xe8\xb5&/N\
jdD\x09\x15\xeb\xcd\x9e\x19\xa4\x94FTQ\xaa\xd6\
c\x9d\xd4\xf2\xc8H\xcc\xe8}\xd9\xf7R\x1a\xb1^\xca\
\xf1K<\xad\xb3\xf1]\xef\x87\xddq\xda\xac\xc6M\x16\
\xc2\xcfe\xde\x82\xd9\x1e\x9b\xac\xfe\x15N\xc8\xfeS~\
n\xb2\xbdQ\xbf\xd5I\xd9\x8c\xabzv\xd7\x9b%m\
\x9d?\x7f\x00i\xe1b\x94\xf7\x89\xfb\x8b\xac\xbf\x84\xec\
\x87\x94p<\xe3Z\xe1\xab\x96\xf8\xca\xac\xbc^\xfb\xf0\
\x5cDymO\x04\x19\x96\x93\xe3A\x1e\xfb\xeb\x22\x88\
\xc2f\xec\xeb7\xbb`\xd6yDQ\x1e)\xa57\xb0\
\xd3!\xca\x02g\xdc\xee\x22\x8aE\x80\x0e\xdfA\x94\xd1\
G\xb1$\xcf\xc4\x92\xf8\x9f\xc2\x928\x03K\xf2{b\
I\xfe\x15\xb0\xa4\x22a\x99\xde\x86\x92\x05e)\xbd\x89\
\x02\x0f%\x12)\x89HlCIkP\x16\x13\xc7\xf0\
\xc4\xf5Yx\x12\x84\xb1P\x852Tg\xa2\xea\xab1\
\xb0\xb1\xc3\x89\xe0`\xa4\x08)F\xf2\xe3!B\x10\xb5\
\xe4\x17\xc1\x03\xb6T!\x0byx\xa2\xf7\x19&\xc3t\
xF\x13\xf1*T\x88\xc8(\xc9\xd4\x0e,\xc0%\x9c\
\xee1\x8c\xf5R\xb6\xcb0~\x85c5\x0b-\x8c=\
\x07\x17\xbe\xf7\xba\x08\x22\xbazl=\xd5\xec\xb0\xa7\xd6\
;V\xf1\xddQ\xbe\x12\xab\xa3\xf6P}\x96=\xaeO\
\xba\x9c=\xd0\xd4n\xfb\xff\xcb\xb1b$\x12\xdbqe\
]\xe5\xd0[+\x1c\xb2mC\xe7\xa3\xb1\xda\xe0\x92\xef\
\x16+\x16Q\xbaU\x0f\xbdU&\xd2\x96k{\xd4*\
s\x9eU\x97\x8d\xd8\x1b\x22p\xa3I\xbc\xa4=\xb7\x03\
w\xb8\xf4\xdf&\xdd\xbc\xf6\xd5\x15\xfcje\xc4Dr\
\xb5\xac\x10+\xc2\xab\xab\xfb,~\xac\x8b\x0f\xbb]\x07\
\xaa\xc9\xf5M\x9a7\x0fE\xf2\x12\xe7\xa5{z\x93\x95\
\xc9\xb0\xc8\xc2a2\xba\x9f\xd4\xd5c\x99\xc6e6_\
\x9e\x83\x1f\x92v\xba4\x16\xbb\xcd\x02\x07\x18!\xb4\xa2\
}\x97&h\xce\xa9\x0e\x92@\xf7u@\x02\x8a?\x19\
\xa1_gP\x96F\x8amT\xc1\x836k*\x98Q\
\xde\xe6\xf5]\xa8#\xa2\xac%\xd6\x99\xcc\x22f\x04j\
\xea\xf5V\xe4\x9cV\x9cJ\x1b\xae{\xa8\x85\xed\x0b\x9e\
'\xfe\xba\x19\xe7E\x11\x97\x15\x8c\x5c\xf8d}\xb0s\
\xb7\xa1\x7fc\x10\x0bt\x84\xee\x92t)w~\x19%\
\x0f\xb1w\xc7\xa6\xf0\xf7*/\xb7\xa5\xb3\xbc\xcd\xea\x22\
\xc7G,\x96\xb2UeY\x0a\xd2\xa4\x99&u\x0d\x9f\
o(\xe3\xa5]\xc1\x89\xc9\xcd,\xa9\xef\xb3\xba{\xfe\
\x947\xf90/\xdc\x12\xfek\x91\xed\xc6\xacz\xca\xea\
qQ\xcdW\xcf\xf7\x83\x98\x8cF\x8f\xb3\xc7\x22i7\
\xf2j?\x9a\x0cq\xd46@\x04\xf0O\x06\xa1\xec\x93\
}O[\xa9\xf7=}\xa6o\xbf\x93K\xcf\xc2\xf4\x11\
?\xa0\x10\x03eB\xf7\x1d\xa13\xd4^\xbeB\xb5\xc3\
uh\xd1\xe9Y\xc2\xfa\x80<#\x92\xbe!\xac\xcd\x06\
k\x9d\x07\xea\x03\x89\xee\xda\x83\xeb\xbf%\xcao\x07\x93\
7\xa4Z*\xb7\xa9\xd6\xc5b\xcf\x11\x87\x03B\xf5\xf5\
\xbe\xe7^M\xc5P\x14\xa8\xf4T,)c[T\xac\
\x22\x22\xad\xb6\x9e\x8b%\xc0-\xbf\x1d\xb5\x94\x88\x93d\
|\x09\xd4\x1a\xbe\x83Z\x86>\xc4_\xf2\xff\x1a\xb5\xaf\
\xe3f\xbdq\xd2\xfa\x12;\x1fv\xb7\xdcu\xf7y^\
\xf6\x8e\xfd\xd3\xe9Z3\xb3\xa0k\x017m\xd15\x8e\
\xdb\x9apJ\xc1\xd7\x14\xe7V\xf6\x86\xc87l\xcf\xf9\
\xdf\x82|\xfe7E\xfe\x92\xaf\xd71\xee\x22\xac\x22-\
\x8c\xb6\xe06\x01\x8aVV\x8a`\x14\x84$\xe2\x82\x08\
\xc5\xc0\xdb\x0aM\xa8\xe1N\xa45\x05g\xd3\x88QI\
(\xf2\x04\xdf4\xa1F@\xa4,WN\x80\xe9Vq\
\x08\x0c\x06c\xa9\x10\x8brK\xb4+\xe8\xf84\x8a\x05\
\x88\xb70\xae2H\x07\x02\xa9\xb8\xb5\xc1\xa7\x00gy\
b\x8dt\xeb\x83N5cJ\x04<\x92\xda\xfd\xcc\x02\
\xdd\x00/\x83\xa1\x0am/\x13F\x0ae\xfb\xa6S\x17\
\xaa)\xf4\x0a\x9ci\xa6\xfa\x12\xab\xe0pgu\x00`\
bA\x04\xb5/\xb0\x0ac\x86\x88\xc0\x1dB\xad\xd1\x5c\
\xf4\x15V\xc1\x5c\xebl\x85\x85Th\x8d2\x15q\x8c\
\x03\x14\x03\x0a\xba\x87\x08<\x10i)\x04\xb7\x0a\x22%\
\x153Ng&|!\x0bf\x98j\x04\xca\x98\xe9;\
\xcb\x95\xd1\xc2Y\xe2\xf4g\x9cX\x0dm(\x13\x84\x13\
\xd4\x08\xe9^`)X'\xb0\xa2\xb6\x90\x06\xce\xdf\xdc\
\xb5\xee\x90\xe1\xbc\xabi\xa7\xa1\x81\x8a\x9c\xf5\xa1*J\
'<\x07z\x8aP\xd3\xa0\x16d\x0a\x87eM\xa0 \
\x8d\x10\x0b\xc1%d\x92\x19\x86\xd3Z\x175F\x14\xb3\
H$\x12\x11\xaa\xa9\x93\x08\x8a&\xcbz\x09g>\x8c\
\xd8\x93\xa0*{\x09e\xdd,\xab%\xf2\x12\xa5\xb8\x9b\
\xc3\xf0\xc4\xddR\xc3V3D\x7f1\xe1S\xa0#\x05\
\x00\x18\x8d\xdd\xa16E\x5c]\xfc\xac\x82\x1a\x02Vs\
n\xd0~Y\xc4O(\xed67\xae@\x0ai$\xdc\
\xc8`\xa0d\xda\x85\x14\x0d\x1e\xe2\x1f\xa0\xa4R\xb4\xb4\
\xc6\xb1p\xc4%\xd6\xa0\x81\xe3\x04J\x8c\x84\xbf\xad\xb7\
\x900\x18\xc8#\xc4\x99\x0b!=\xe2\xb44~\xa0F\
\xc0\x89\x07\x93\xe1B\x9b\xc0\x02\xb2\xc4\x81\xc9\x85\xc5\x0f\
\xe7\x88\x14\x14\x110\xcaY\xe30\xeaT\xe1\xc2\xe0\xec\
\xedF\x09\xa1P\xe4!2\xcc\x18\xd78\x02\xad\xd2\xa2\
-\xd9\xe8JVg!\xba\xd3\x9aTe\x1b6\xf9\x7f\
\xb3x\x96\xa5\xf9\xe3\xec\xa6\x13\xb8\x87\xc8\xe7z\x96\x14\
\x9d\xe4)\xa9\xf3\xa4l\xb7ds\xff\xdb\xe9\x96\x08\xec\
\x90\xb5\xa3\xe9R\xd6f\xcfm\x98\x97)\xb8\x0b4\xe1\
\xef\x92\x22\x9f\x94q\xd3&u\xdb\x09\xd2lT\xd5I\
\x9bWeG \x8e#\xc2\xe9\xd6\xd2E\xd6\x82\xa8\xc2\
\xc6QS9YJ\xe7U\x9d\xee\xca\xfc\x8a+\xf6^\
V\x91:\x1b\xf9\xf5\x8b\xb6\xbe\x19\x16\xd5\xe8>|\xa8\
\xabI\x9d5\xee\x97\xe1\xb8\x1d\xde\xcc\xeb\xbc\xc5*\xe1\
\xacJ\xb3\xb8\xa8C\x88:e\xcb\xd1\x14\x1c\xddi\xbb\
z?\x1cQys\xea\x04\xb9z\xb1\xbc\xc1\xd5]O\
\x89\xfeQ\x1f\xe5\xeaC\xac\xbcC\xc2oH\xb8]\x1c\
\xc7\xc9,/^\xe2\x9f\x86I\x99b\x934\xf8g\xde\
bI,\x16.\x7f\xea\x0f\xbb\x80?d\xa3|\x9c\x8f\
\xba\x00\xee\x8f_\xf2\xf7.q;J1Z)d\x97\
\xf0D\x88Te\x91\x84\x7f@)dA\x07JP%\
}\xaa\x83]\xf0!Qu\x1d\xd31\x8e\xe4A\x8an\
\x88\xa4\x0f\x04\x05\xadae\x8a\xdc\xc0<\xb7\x07.\xbd\
\xa8\x09\x8c\xb9C\x99\xe3\x01\xd0^G.\x84\x83\x8a\xdd\
F\x0a\x0c\xe5\xd7\x13\xa0 \xe9d~\xaa5A\x81\xae\
\x8auwF\xed&T\xd7V\xc9\xf7\xa4\xba@R\xf1\
7\xc9\xa9\x8bg\x0aZ\xce\xef\x91\x22\xffv\x15\x96h\
\x83\x0cA]\x01\xc6\xb9\x94\xae<\xa3\xeeHj\x85k\
i]\xef\x8a\xe2`#k\xac+I\xeee,\x13L\
\xa0\xaa8\xac\x12W~0\xd5\xb5\x0a\xf2@\x87\xa2\x0e\
t(f\xafCQd\xbfCq/\xf3\xf6:\x14\xb6\
\xdf\xa1\xa03yE\x87r\xb0x\x85\xfa=\xd3^\x9d\
i\x8cwG\x86S\x09w\xe8\xd5\xb0\x94\xe2\xfa\xbd\xaa\
})e\x95\xafj\xc65\xcf\xbe\xedC\xfdxo\xa9\
\xbf\xb5\xa5\xc6\xb1k\xd9R\xf3C-\xf5a\x82\xb0\xef\
\x04\xf1\xe7\x12\x84y'\x88\x05A\xdc\xba\xff\xb2\xfa\xf1\
\xea\x0f\xc1l\xa7\xb7\
\x00\x00\x04\xd3\
<\
?xml version=\x221.\
0\x22 encoding=\x22UTF\
-8\x22?>\x0a<!-- Creat\
ed with Inkscape\
(http://www.ink\
scape.org/) -->\x0a\
<svg id=\x22svg2\x22 w\
idth=\x2222\x22 height\
=\x2222\x22 version=\x221\
.0\x22 xmlns=\x22http:\
//www.w3.org/200\
0/svg\x22 xmlns:xli\
nk=\x22http://www.w\
3.org/1999/xlink\
\x22>\x0a <defs id=\x22de\
fs4\x22>\x0a <linearG\
radient id=\x22line\
arGradient2401\x22 \
x1=\x2223.878\x22 x2=\x22\
23.878\x22 y1=\x2218.5\
35\x22 y2=\x2227.495\x22 \
gradientTransfor\
m=\x22matrix(.89889\
0 0 .89347 -10.\
464 -9.566)\x22 gra\
dientUnits=\x22user\
SpaceOnUse\x22>\x0a \
<stop id=\x22stop37\
13\x22 style=\x22stop-\
color:#e35d4f\x22 o\
ffset=\x220\x22/>\x0a <\
stop id=\x22stop371\
5\x22 style=\x22stop-c\
olor:#c6262e\x22 of\
fset=\x221\x22/>\x0a </l\
inearGradient>\x0a \
<linearGradient\
id=\x22linearGradi\
ent2398\x22 x1=\x2224.\
139\x22 x2=\x2224.139\x22\
y1=\x226.5317\x22 y2=\
\x2245.69\x22 gradient\
Transform=\x22matri\
x(.53994 0 0 .53\
668 -1.8489 -1.5\
062)\x22 gradientUn\
its=\x22userSpaceOn\
Use\x22>\x0a <stop i\
d=\x22stop4224\x22 sty\
le=\x22stop-color:#\
fff\x22 offset=\x220\x22/\
>\x0a <stop id=\x22s\
top4226\x22 style=\x22\
stop-color:#fff;\
stop-opacity:0\x22 \
offset=\x221\x22/>\x0a <\
/linearGradient>\
\x0a </defs>\x0a <g id\
=\x22layer1\x22>\x0a <pa\
th id=\x22path2262\x22\
d=\x22m20.5 7.4948\
v7.0052h-19v-7.0\
052h19z\x22 style=\x22\
fill-rule:evenod\
d;fill:url(#line\
arGradient2401);\
stroke-linejoin:\
round;stroke:#a5\
3738\x22/>\x0a <path \
id=\x22path2272\x22 d=\
\x22m19.5 8.4948v5.\
0052h-17v-5.0052\
h17z\x22 style=\x22fil\
l:none;opacity:.\
4;stroke:url(#li\
nearGradient2398\
)\x22/>\x0a </g>\x0a</svg\
>\x0a\
\x00\x000\x07\
\x00\
\x01\xe4Px\x9c\xed}ms#\xb9\x91\xe6w\xff\x0a\
\x9e\xfa\xcbt\x9cT\xc2\xfb\x8bfz6|vxc\
/\xda\xbb\x17;v\xdc\xc7\x0b\x8a\xa4\xba\xe5Q\x8bZ\
\x92\xfd\xfa\xeb\xef\xc9D\x91D\xb1@\x8aR\x91-z\
\xbb\xd4v\x0c\x99\x05\x14\x80\xc4\x93\x99O\x02\xc5\xc2/\
\xff\xf2\xe5\xc3\xdd\xe0\xd3d6\xbf\x9d\xde\xbf9\x93\x95\
8\x1bL\xeeG\xd3\xf1\xed\xfd\xbb7g\x7f\xff\xdb_\
.\xc2\xd9`\xbe\x18\xde\x8f\x87w\xd3\xfb\xc9\x9b\xb3\xfb\
\xe9\xd9\xbf\xfc\xfa\x87_\xfe\xc7\xc5\xc5\xe0O\xb3\xc9p\
1\x19\x0f>\xdf.\xde\x0f\xfe\xed\xfe\xf7\xf9h\xf80\
\x19\xfc\xf4~\xb1x\xb8\xba\xbc\xfc\xfc\xf9su[\x0b\
\xab\xe9\xec\xdd\xe5\xeb\xc1\xc5\x05j\xce?\xbd\xfb\xc3`\
0@\xb3\xf7\xf3\xab\xf1\xe8\xcdY]\xfe\xe1\xe3\xec\x8e\
\xcb\x8dG\x97\x93\xbb\xc9\x87\xc9\xfdb~)+yy\
\xb6.>Z\x17\xff<\xb9\xaef\x93\xf9\xf4\xe3l\x94\
n?\x1a\xe5%g\xe3\x9buQ\xf4\xe4\xb3\xe6B2\
\xc6x)\xd4\xa5R\x17(q1\xffz\xbf\x18~\xb9\
\xb8\x9f\xbf\xca\xaa\xa2\x83\xa5\xaaJ\x08q\x89k\xeb\x92\
\xfb\x95\xba\xfar\x075l\xed\x0c_\xcd[\x87\xea\x1f\
\xf0\xffU\x85\xa5\xa0Jc\xbdA\xcdIu?Y\x5c\
\xfe\xf9o\x7f^]\xbc\x10\xd5x1\xcen\xb3\xd4|\
\xa3\xdd\xc6t\xdc\x0f?L\xe6\x0f\xc3\xd1d~\xb9\x94\
s\xfd\xcf\xb7\xe3\xc5\xfb7g&T\x82\xff\x1e\xbe\xb0\
\xf8\xfd\xe4\xf6\xdd\xfbE[~;~s\x86\xf1*\xad\
<\x7f_\xf6\xe8j\x85)Qi\x95\x8a\xd6\xcd\xe4\x97\
\x8cm\xd6\x1aOG\xd7\xc39\xba}\xf9~\xfaar\
9\x9e^O\xbe^\xfe\x9f\xd9\xf4\x1f\x93\x11\xe0\xf0\xee\
\x1e\xc2\x8b\xdb\xd1\xf4\xfeb\xf1\x1e\x10\xb9\xc4\xfd\xee\x86\
\xd7w\xf8\xb0\x18.>\xce[\xf7\xa2A\x02\xb3\x93\xc5\
\xe7\xe9\xec\xf7\x8b\xe9\xcd\x0d\xb4=\xa9\x96\xf3\xb3\xea\xd0\
\xf4\xe3\xe2\xe1\xe3\xe2\xffM\xbe,&\xf7\xa9g\xd0P\
\xa6.\xbeL\xd5V\xb2\xb3_q\x83_\xc6\x93\x9b9\
\xdd(\xa9\x81\xbei\xbe\x80K\xd4\xd0p\xf6\xaf\xb3\xe1\
\xf8\x16HN\x85R\xb1\xe6\x15+\x83\xa8\xeb\xa0\xd6|\
1}X\x96\xc5H\x16_\xef\xd0}\x12^\x8c\xa6w\
\xd3\xd9\xd5\xf5\xddp\xf4\xfb\xcf,\x98b\xeen\x17_\
\xaf\xc4\xcfg\xeb\x1a\x18\xe1|\x82Y\x12\x99\x8cg\x08\
5\xd0\x92:\x1b\x5cni++\x15J7\xac\xec\xd9\
\x13;&\x7f\xde\xde\xda\xf3G&\xcb#3\xab\xb6~\
\xb9l*x\xf7|,\x11\x80N\xdc\x01bo\xce\x86\
w\x9f\x87_\xe7g\xbb&\xcc\xb9N\x13&\xf7\x9f0\
\x17\xbe\x9f\x0a\xbdx\xa6\x0a[\x1a\xf2\xce?\x01\xd2\xaf\
\xb4qvh6t\xb4\xa7\x8a\xd0\xd4\x0eL\x17\x1aS\
\xc2\x0c\x83\x7f\xbc\xb1\x92\x8e\xd0\xd8\xf7\x83\x99\xd2\xc2?\
A\x89v\xe8\x87-%\xee\x0b4\xb4\x15\x9f\xa4\xc5R\
k{CMi)\x0f\x055)\x8d\xd8\x8a\xb5e\x83\
TH=\xa2\x8a\xc2 S\x98\xdb\xdf\xa7\xe5\xcd\x99G\
t\xb1gs\xe2\xe7C)\xca)\xb3\x1e\xeeV8>\
22\xdc\xe4\x19z\xbc\xe1\xbf\xa7\xeb\x11\xad=C\x8d\
\xa5\xd6\x0e\xa7F+\xf5V\xab\x5c\xbbR\x1d\x9f\xae\xa5\
\x09\xfd\x8d\x9e\xae%+\xcdc\xfe\xab\xd4\x9a\xc3?\xbd\
\xb5\xb5\xaeZR\xcav\x06\x1bn\xe2\x9f\xac\xc6'\xd2\
\x90\xac\xad\xd2\x94\xedVb9\xdc\x1eH\x85:\xc4\xd8\
Y\x85:\x8a\xc7\x02\xe9V$>\xae4\xdc]?\x1d\
yc=\xf6\xa3\x9b\x83\xa9)n\x8f\x92Y?\x9fa\
\x8f\xcf\xf5Z:\xcag\xd8\xe3q\xbd\x96\x91q+e\
]\xf6\x1be\x8a\xbc\xff\x18\xe6f\x90%\xbf\xa8\xb9=\
\x83\xf5\x0b\xd7-M\xdb\x9f\xf5\x0b\xf7$J\xdb\x89\xf5\
\x0b\xf7(\xa3\xa5o\xc3\xbbM\x15\xcep\xc3\x98\xe5\x84\
7_\xdf\x9c\x19_\x09c\xb5\x5c[\xdb\xcd\x977g\
JTQj\xeb\xd6\xe8\x1a\x15\xcb\x8e\x8ae\xdf\xd5\xed\
\xfem6\xbc\x9f\xdfLg\x1f\xde\x9c}\x18.f\xb7\
_~\x92\x95\x0e\xca\x09}\xae+\x1b\x10r\xc3\xe4\xc2\
\x9e\x8bsQI/-\xc4\xa6R^h\xe9\xce\x95\xac\
D\xf4\xeau\xeb\x9e\x7f\xbf\xbf]\xcc\xdf\x9c}\x9cO\
f\xbf\xd1B\xc8\x7f\xdc\xff}>i`\xa09v\xad\
3^\xc7\xeb6W\xefg\x93\x9b7g\xaf\x0aN\xe9\
Q\xac\xad\xd4\xde+8S\xb0\xea\x15|\x5c\x05\x8b^\
\xc1\xc7T\xb0\xca\xa2h\xaf\xe0\xa3(\xd8\xf5\x0a>\xae\
\x82\xfb wd\x05\xf7An\xad\xe0\x0bQy\xd8t\
8\xa4\x82}\xef\x22\x8e\xac\xe0\xdeE\x1cY\xc1\xbd\x8b\
8\xb2\x82{\x1e|\x5c\x05\xbb\x9e\x07\x1fY\xc1}\x90\
;\xb2\x82\xfb \x97)8$\x05\x9b\x03jXZ\xdb\
k\xf8\xc8\x1a\xee\xbd\xf0\x915\xecd\xaf\xe1#k\xb8\
\xf7\xc3\xc7\xd6\xb0\xef5|\x5c\x0d\xf7\xe9\xc6\xd15\xac\
{\x0d\x1fY\xc3}\xc2qd\x0d\x87~\xd5\xe7\xd8\x1a\
\xee3\x8eck\xb8\xcf8\x8e\xac\xe1\xd8g\x1c\xc7\xd6\
p\x9fq\x1c[\xc3}\xc6q\x5c\x0d\xab\xfeA\x9f\xa3\
k\xb8\xcf8\x8e\xad\xe1>\xe38\xb6\x86c\xaf\xe1\xe3\
jX\xf69\xdd\xb15\xdc\xe7t\xc7\xd6p\x9f\xd3\x1d\
Y\xc3\xaa\xcf\xe9\x8e\xad\xe1>\xa7;\xb2\x86u\x9fq\
\x1c[\xc3}\xc6ql\x0d\xf7\x19\xc7\xb15\xdcg\x1c\
G\xd6\xb0\xe93\x8eck\xb8\xcf8\x8e\xad\xe1cd\
\x1c\xe5\x1f\x83o\x1f\xb9\xa8\x9c\xf11\xf2h1^/\
\xbc\xb3\x96>h\xa3L<\xbf\x90\x951\xdez\xf9\xd4\
\x01\x7fUo\xce\xb4\xaf\xa2\xf6\x19\x8e S\xb1\x92\xd9\
\x8b3\xbeJL\x9f\xab\x84\x8a&\x9b\xbe/\x92\x0b\xda\
\x86\xfaZj\xd9\x0f\xa0{\xbdcb\x0f\x80J_I\
\xa3\x82k\xa248\xd2_h\xa2\xd4\x09[y\x14\x8e\
M\x94\xb6\xca\x8e\x8ae\xb7\xcf\x95\xaa\xbc7\x18\x10\xcf\
\x95\xac\xa2\x8b^8LQ\x88\xb2r\x9aP\xeaU\x15\
\x825\xdd\xd1\xc9oK\xdaC\xbd\xfc\xca\x80\xff&\xea\
\xbd(\xebWJ\xc8\x9d:\xb0z\xe5^\xc9\xf0\x13\xd5\
\xfb\x0c\xe3w\xc6JU\x1b\xbf\xc3G\xab\xcfax.\
\xa2\xe5\xc9\x85\x22\xfbWR\x08\xa5\x9fc\xff\x0a\x86\xad\
cc\xedJ\xb1_\x96\xf0\xcaN5\x9c\x80\x85[\x8d\
\x8e\xde<\xd4p\x02\xad\xb2\xc5\xb7>\xedEh\xf7{\
;\xd4#P\xddo\xf0\xbb4\x1e\xb4\xd3r\xe9n-\
\xa0)a\xc2\x06\x1e\xd0x%\xcf/t\xa5,\xe9\x01\
\xca_\xab<\x05H!\x84l\xda\x87VU\x04FU\
h\xd8\x87\xc2\xdd\x84\x93\xd9{\x1cF\xc5\xb2\xa3b\xd9\
6X\xed\x9e|\x96_\x89w\xdcH\x15\x104\x97`\
5\x86\xc0**\xa35< \xff:\xc2\x00CY\xa4\
\xe2\x18\x14\xaa\xe0\x9d\xc8\xde\x1fE\x18\xd4\xb1\x8a\x02\xa6\
\x1e\x1a\x18t\x95\xe6\xa2\xa6\x81Ax&\xcc\x8d\xd3\xf6\
\x19V\xbf\xf9b-\xb3\xd7\x96\xb0\xcd\xcd\xe0\xb9\x8a\xa4\
\xc1\x1b]\x05e}l\x8e]V\x013\xde\x8c\xc1\x08\
\xd5\xc1(\x15uc\xe8(\x0a\x87\xa8\xb3e\xa5\x1d\x14\
\xaa\x9e\x97\x18\x95\x0f\xf4\x85\xdc\xa9\x0cB\xdb\xe7\xb8\xcb\
\xe6\x98\x10\x8d\xf6\xe2J\x87S\x9c\x80\xdb\x8b.o\x95\
<\x17\xd8!\x11\x15\xd9T\x9d\xa9\xacW6gs\x92\
\xcbJkb\xf6\xae\x8f]\xecS\x00u\xc1\xa7\xb8C\
H\x96\xa0\xdf\x86\x9c\x81@`t\x07\xd1\xa0\xdck\xe5\
*\xbdk\xf0\xa8Fla\xb7v\x15q\x8c\xf3\x8aF\
l@\xbb\x03\x99\xb3\x8b\xc2i\xff,\xba\xa9\xc1\x82\xbc\
\x0f\xbe\x09w\x98o\xc4L4M\x1dnA9\xef]\
\x93s\xa2\xac\x09&\xe4 j\xbfgJ\xed\xf7\xfe\x16\
~\x1f\xd5\x01\x98\x11\x80\xe4m\xb4\x1b\xc4HU.d\
\xcc\x9a\xdd\xben\xb2\xedQ\xa1\xdc\xa8P\xeeq\x93f\
\xf7\x87O\x08!R\xbap\x10\xfa\xb3\xd7*\xdf~\xef\
\xd8{\x04\x8c\x8f\xbe\x08kG\x07\xf8\x05\xa5;\x1c:\
\xf8\xe6.=\xd2\x9b\x9b'0o\x87l\xca9\x8a\xf7\
\xd1[\x97\x05v\x02\xddE$7k\xb3'-\x08\xa0\
\x92\xdeA\xede\x16\xf0\x09\xcc\x17\x12e\x95\xb6y\xe0\
R\xa5\xc2\xbbg\xe6\x11\xd4u\xd1Xs\xca\xda\x5c\xc2\
)\xf1D\xf8t\xc0g\x09\xee%\xb3(\x99O\xc9\xcc\
rs<\x22\xea\x9a\xae\xa3\x8d:\xa7\xdaQ\xe5\xf9\x84\
\xf4\xa9\x0e\xb9\xec(\xb78\xd5\xb2\x03n8\xeb#*\
\xb2\xc9\x07J\x8al3\x9b\xae`\xdc\xc5\x7f\xca\x9cj\
\x0b\xff*R\xb5\x9c\xd4\x1dQq\x1b4\xa0\xa4\xb9}\
}\xcdA\xe9O\x99Ymaae\xc6\xd6`w/\
\x8a\xbdv\xe8\xe8`\xc4OL\x8d\xca\x89\xcd\x96$\xa8\
\x9c05\x92\xab#F\x94fZY\x88(\xfap\x11\
\xe5y\xe9y9\x91.'\xdd\xe5\x04\xbd\x9c\xcc\xe7i\
\xff\x11\x91\xda\x5c\x18) U\x1f4\xdc<}\xc5\xa9\
\xbc\x12\xb4e\xd5\xa8\xbc\xc2\xd4X\x8dzA\xc2\xe8\xf4\
\x01\x22\xce\xf7\xa7\x95\xc7$\x8c\x8d%\xce\x92y\x1f.\
\xd2<c\x01\xbd\xbc\x88\x5c^p./N\x97\x17\xb2\
\x1bk\xde/\xaa\xde\xc3\x85\xa1g,\xa0\x7f_\xf5\x1e\
\xde\xde\x9b\xdbK\x05{7\x87\x0cNO\xdd\xaak\xed\
\xa3m\xd9n+\xec\xcbe\xdbwGDgss\xb3\
\x80Ns\xb8\xd0\xd3a\x8f\xb7\xbc\x9d\x5c\xdez.o\
S\x97\xb7\xb4\x97\x1b\xdf/\xaa\xe1\x03\xa6@\xbd\x86\x8b\
\x1a>d\xaa\xd4k\xb8\xa4\xe1\xc3\xc5\xb0^\xc3E\x0d\
\xdb\x03\xae\xda\xf5\x1a.j\xb8\x8ft\xc7\xd6p\x1f\xe9\
\x8e\xad\xe1>\xd2\x1d[\xc3}\xa4;\xb2\x86]\x1f\xe9\
\x8e\xad\xe1>\xd2\x1d[\xc3}\xa4;\xb6\x86\xfbHw\
l\x0d\xf7\x91\xee\xc8\x1a\xf6}\xa4;\xb6\x86\xfbHw\
l\x0d\xf7\x91\xee\xd8\x1a\xee#\xdd\xb15\xdcG\xba#\
k8\xf4\x91\xee\xd8\x1a\xee#\xdd\xb15\xdcG\xbac\
k\xb8\x8ft\xc7\xd6p\x1f\xe9\x8e\xac\xe1\xd8G\xbac\
k\xb8\x8ft\xc7\xd6p\x1f\xe9\x8e\xad\xe1>\xd2\x1d[\
\xc3}\xa4;\xae\x86\xbd\xe8#\xdd\xb15\xfc\x1d#\xdd\
\xd6\xa3\x0d\xff[+\xf8;\x06\xba\x1fS\xc1\xdf1\xce\
\xfd\x98\x0a\xfe\x8ea\xee\x87T\xb0\xfc\x8eQ\xee\xc7T\
p\x1f\xe4\x8e\xac\xe0>\xc8\x1dY\xc1\xdf/\xc8\x99J\
y\xa1\xa5\xfb\xc1\x14\xfc\xfd\x82\xdc\x8f\xa9\xe0C\xbe\x12\
\xa4WpI\xc1\xdf/\xc8\xfd\xa0\x0a\xfe~A\xee\x07\
Up\x1f\xe4\x8e\xac\xe0>\xc8u|\xdd\x8bs\xbb\x14\
,\xb3g\x01i\x98\xd91\x07<>[5_G\x95\
]\xbf\xd9\xbcN\xaf\xa1\xc2\x10\x1b\xaf\xa6\xda5\x1d\xc6\
Yg\x96?\xcaWJj\xcf/\xd7\xd1\xce\x19y~\
!E%\xbc\xb2{\xbf\xd2\xfaE\x95xH7PT\
K0k\xbdHV\xcc\xeb\xa3\xcc\xdbQ\xdf<\x14v\
\xbe\xcfEf\x0f:\xf0\x1b\xa9\xd6\x86\xc2\xef\x07\x08\xf4\
\xca\x19\x91\xbd\xd7 \xbd\x00h]\x88_B\xf5$\xb4\
\xfcr9\x9e\xdc\xcc\xf9\xd3|:\xbe}\xc0\xff\xaf\xee\
\x87\x1f&\xe3O\xb7\x93\xcf\x7fXu\xf4z\xb8\x9a\xba\
\x87\xe1\xbb\x09\xc6>\x85\xce^\xdd\xf0_}\xe1z:\
\x1bOf\xcbK\xeb\x07\xf1\x93|\x8a\x96o\x17P\xfc\
\xf2\x8dO+E\xd2\x0dWW\x01\xf9\xd2\xf5\xf9\xfb\xe1\
x\xfa\x19\xb3\xb4y\xf1\xdbt\xfa\x81\xde(Qi\xa0\
d\xb5\xe8\xb5\x9e$L\xbc\x85\x7f\xd3vu\xe6\xe7\xfa\
\xdaW\x9aui}\xb4\xa1}\xf1\xe3l\x06\xd5]\xdc\
\x0d\xbfN0\x1c\xfe\xcf\xb2\xe7\xf3\xf7\xd3\xcf\xeff\xa4\
\x96\x9b\xe1\xddJ/\xab\xaat\xe9\xe2\xfaz\x8a\xa6\x17\
\xb3\x8f\xad\xcb\xe3\xe9\xe8\xe3\x07\xba\xf5\xc74-\x0f_\
6K|\xbe\xbd\xc7`/>\xdf\x8e\x17\xef16a\
[*\xabK\xbc\x9f\xdc\xbe{\x0f\x04\x86\xd5\xc1\xd2\x9b\
%h\xf8\xaa\xa5\xd1\xfa\x22M\x86n\x8d\x9cF\x97\xab\
|\xfb\x18\x1f\xa6\xb7\xf74\x82\xbc\x04\xd5N\x13\xbe\x94\
\xd78\xfb0Y\x0c\xc7\xc3\xc5p\x8d\xa9\xa5\xc4\x9c-\
\xdd\xd6\xf8\xe6\xea?\xff\xfc\x97_k\xfc\xfe2\x1a]\
\xfd\xdf\xe9\xec\xf7%\x9ca\xa6(0\xbc\x9e~\xc4\x90\
\xcf~]\x89\x7f\x19\x8f\xae\xc8\x81\x0c\x17\xbf\xde~@\
\xc7/\xe7\x9f\xde\xfd\xcf/\x1f\xee\x00\xed\xd5\x85F\xe1\
\xc5\xd7\x87\xc9\xfa\xa6\xe9\xb6\xb3\xc9|\xfaq6\x9a\xbc\
9{\xbfX<\x5c]^>|\x9c\xddU\xd3\xd9;\
\xdc\x04\xff\xfbpK\x95.\x7f[\xdc\xde\xdd\xfd\x1b5\
\xb2r\x13\xab\x9b\xde.\xee&\xbf\xfe\xfbd\xf1\x19]\
\x1e\xfc\xc7\xcd\x0d\xd96w!]i\x14\xc6\xb0'\xbf\
*!\xec\x85\xd0\x17\x22p1\x965J\x8df\x93\xe1\
b:\xfb5\xeb+\xe9\xe4\x8f\xef\x80\x9e\x5c\x98w\xe0\
\xed\xf0a:\xf8\xd3\xf0n\xf8ax?\x9eMnK\
= \xabo\xdf\x87K\xb6\x9a\xa4;\xcf?^\xff\x03\
\x9e\xaeq\x03\xd2\xd9\xff\x1a\xbekh\xe1\xb2T\x96z\
|w;\x9a\xdc\xcf\x1f\xd79\xb7~\xfb\x09\xee\xe5\xc3\
\x87\xe9\xfd\x9c\xd5_\xd7\x9d_^\x7f\xbd\x98\x0f/\xe1\
\xa6/[\xbaO\xb7\xd9\x94\x8e\xa6\xf7\x88'\xd7\x1f\x9f\
\xaa\xc2\xff=\xfc\xfd\xe3\xf5\xe0\xb7\xc5\x04\x138;\x1f\
\xbc\xfd8\x1a\x0e\xfe2\x81;X,\x9e\xac\xcev\x17\
\xb8,\xc1:\x87\xf9\xdbM\x05eH\x7f\xa2n\x9a\x9a\
\x7f\x98\xcc\x00\xdd\xf9\xa3\x9a\xff<\xb9\xae\x96B\xbe\xf5\
ht\xf9\x9f\x93\x87\xd9t\xfcq\xb4\xb8\x9d\xde7U\
\xde\xf1\xc6\x7f\xbe\x9d'\xad\x94n<\x9b\xfc\xd7\xc7[\
\xd4x\xd6\x9d\xff}\xba\x80J\x0e{\xcf?.\xb6w\
\xb6\x93\x16&\xb3\xdbO<\xa5\x84\x86\xf9a;\xfd\xdb\
\xfb\xe1l\xf2\xc7\xbb\xdb\xdfse0\xf4j\xa8\xd5.\
\xf72\xf3\xb9\xbf\x5c.=2\x7f{\xb7\xf6\xd4\x8d\xe8\
\xb7\x8a\x01w\xc3\xeb\xc9\xdd\x9b\xb3\xb7tq\xd0\xba\xfa\
n6\xfd\xf8\xf0a:\x9e\xd4\xd5\x97N\xfe]\xce\x7f\
\xdeY\x931\xa2\xc5\x9a\x0a\xf2\xc7;x\xc4\x9f\xa4\xa9\
b\xb0\xda\x9d\xcb\xd7+l\xafo\x82\x80\xb3\xf8z\x87\
6\xc6\xb7\xf3\x07\xb4su{O~\xf7l}}w\
w\xf3\x8e\xa8\x5cU\x1dZX\x0c\xe7\x8b\xdb\xc9lX\
h\xc2\xe4\xe6\xf9.\x9f\xd9\xe2\xd8/\xac\xa1\xe4,\x88\
\x90\xbd:\xb7\xd4\xe4o\x1c\xab\x9bE\xea&]\x13X\
\xefJe\xc2So\xbe\xb5\xb7\xd2V\xca8z\x9f\xe5\
\xa3\xadf\x0fD\x96\xe7\xe9\xd3t6m\x14yl\x22\
\x9e\xd8\xaf_.\xdf\x1dl\xba\xdbs\x1dU>\xd7\x0f\
\xc3\xc5\xfb-\x1d]\xbfCLk\xe3\xe3\xf2\x05\x97\xce\
Z\x81~WR\xc6\x18\xe9\x95\xa1JH\xadL\x13\x07\
h\xea\xaf\x03d\xd5\xfc:\xb7A\xfdR\xe4\xc1\x1f\x07\
\xcbw!\x0fl\x15\x82\xb6>\xe8\x81\x18H\xfc\x1bP\
\xae\xef\xa3\x11\xda\x9f\xefS\xbcu\xefo\xcd\x19Y\xa6\
\x0c\xb3\xaf\xf4\xca\xc7\xba\xf2\x96\x22_\x0a\xafLo\x94\
(\xbd\x00\xbaY\xa0\xf06\xe8\xa5\xcaI\xc3\xd0\xba)\
!\xa6\xce.\xaeD\xa5\x7f\xbe\x01\x83\xbb\x02\xb7\xfb\xe9\
\xd5\xe6\x93aJ\xbc\xe6\xab\x17\xcb\xe22}\x9d}\xbc\
\x9b\x5cM>M\xee\xa7\xe3\xf1\xcf\x88\x5c\xd3\xdf'W\
\xf7\xd3\xfbI\xfd9\xf1\xf4+\xf9\xf0e) \xc0\x00\
!W\x08\x1a\x8b\x5c\xf6\x0f\xf0\xe5+\xc4\x8b\xc9l)\
]\xb7\xb4\x03\xd4\xab\xe1\x13\x05E\xba9\x1b5\x8dk\
\x13[\xadA+\xe1\x83C\x06\x9f\xc6\xce]\xdfw\x9c\
\xac\xa8\xd6\xfbq\xd5\xeb\xe6\xd8\xe9\xdd\xca\xf4\x17\xd4\xd3\
U\xc0_\xeen\xf1\x9f+\xb3\x94\x8d\x87H>f3\
\xa8#\xd7\xf3^\xcab\x8bP\xbe\x12AE\x04\x0d\x85\
\x94\x19!F\xea\xc1\x9f\x06JV\x0e6\xe6xy\x8b\
2B\x13\x06ZV\xd1x\xc4\x97s\xe5*\xc0G\xc7\
\x81\x16U\xb0\xd6{u\xae\x0c\xb2C\x1d|\xa0\xca\xb1\
\xf2$VT9\x08\xa7\x9d\x19H_\x19\x11\x84\x8eT\
\x12\xb0V\x066\x13*/\xa2\xb5(GoT\xf5\xf9\
\x19\xe7\x1bP\xddb\x05P\xff\x84f\x1a\x89\xd5\xe8\xdb\
\xb7G\xa6zK\xa5--\x86\x82\xaa\x1c\xc6e\xbc\x82\
\x02`\xea^\x86\x10\x92\xaa0p\xa7\x03\x8d\xc2H\xe5\
\xb4$U!\x95\xb4\x8eUe\x827Z\x91\xae\xb47\
\xd2\xcas\x98%\xbdn\xd8\xfa\xa4+\xe1U][k\
\xe1\xa3'\xbd\xc8\x18\x9cq\xa4+\xe5\x1d\xb0B2h\
RI\xcf\xad\x08\x17\xa4-Y\xef\xd3Q\xfb\xca\xabx\
3\xba)\x834\x9a\x17\x01\xe9\xf6xX\x8a\x05V\x0b\
\xb7z\x17/\xbeD{\xae+\x09\x1dI\x87\x0f\x06\x9a\
\x96\xf2u!\xde[!\xf6\x08\x99\x8d\xdce\x13S[\
P5\x1f\xd1\xff\xce\x9a%W\xd8\xb2\xd9#\x89\x19\xba\
@\xde\xe0\xab)\xaaiUY\x1b\x10y\x80\x8f\x92\x14\
\x01\xdaF\xa5-\xc6\x09\xab\x82\xf9\x012\xf4\x9a\xe7(\
Yb\x11\xb0\x1c\xaa\xc2\x9c\x81P`\x8f\x85\x0a\xedz\
\xc8l K\xd3\xe7\x06\x10\x15\xd1\x07Y\x94\xbd\x1d\xe8\
\xc8/\xde\x86\xc1B\x0a\xb3w\x9a\xbaS\x92B\x16,\
\xd0\xec\xa9\x1d'qQ\x0f\xa8cRD\xa5\xb8m\xe7\
\x84%\xaf\x82!\x18\xa5M\xddq\x8f\xae\xc3\x85\x901\
\xc0/h\x1a \x0c\x1d\xbd/\xca\xde\x16U\xf1m\xb0\
\xa1\xca\x0d\x87.\x7f\xe65\xb6\xab\xeb\xbb\xe1\xe8\xf7,\
\x9e\xb5\xdc\xb4\xd9?\x9e\xbd\x82V\x87\xc1n\x844\x04\
t#\xa41\xf1\x09\x16\xf3a8\xfb}2K6\x91\
>_\xcc\x17\xc3\xd9\xa2!\xf9p;n|\x9f\xdc\x8f\
\x1bV\xb4\xbf\xbd\x91tzs3\x9f,\xaeD\xdb\x06\
?\xdd\xceo\xafo\xef\xe8\x0b\x7f\xbc\x9bl\x98\xe5\xcf\
\xd3O\x93\xd9\xcd\xdd\xf4\xf3\xf2z\xc3N\xf7\xb7\x0e\xfa\
\xdbn\x1b\xa6h\x1b 2\xcaI\xb2l]\x09\x05\xe4\
\xc8d\x1bB\x0a%`\xe6\xa0\xacT\xc8\x19Io\x93\
\xc5\xd7\xa8C SQ\x95\x0a\xd6ky\xae\xe1\xbc\x85\
rp\xa5\xc06j\x90\xebF9a\xbc\x10\x81\xae\x06\
Z\x15U\x8c='\x028\x1f]u\xf47\xd0h\x08\
\xf7\x0b\x9a\x9b\xe7\xb7\xf9\x13\x1e\x0b\x9dz\x0c\x8f0\xd3\
](t/\xc7\xaa\xf6\x98\xccg\x87\x980\xc6\xbf\x9b\
\xa2\xc1X\x11\xda\xddF:|?\xde\xaf\xdft<@\
\x14\xca\xe8P\xc4\x0d]\x06Y\x85\x1b\x0a\x98V\xe9\x04\
\xe3\xc6\xd3\xde\x80q6I\xc1\xc3\x07\xec\xa5\x10\x86}\
V\xb0 \xdb\x0e[\xb79\xefE\xe4?\xd7b\xb6\xb7\
[\x1e6q6\xa3$<e$4\xcb\xe0\xd3\xb0\xa5\
q\xde\xba$U\x12I\x0c\xe1\x1eT\x02\xb4b]\xb2\
+\x83\xfa\xe9H\xf8\xfc\x1esX\xc6\x81V\x9dp\
\xd0\xc4o\x96\xae\xee\xcb\x04\x97\x7fe>h\xa5h\xf3\
A\x8aRJ\xa9\x88\xcc\xd30[r\xf0\x0a`\xd1\x9a\
\x22\xb1\xc9\xa4L\xf3\xc0\x0b\xa3V\x99\x14\x84\xd0\x06$\
\x1b \x89\x08\x96\x11<\xcfd2M\x878H\xf0P\
\xf2W\xf4r|\xeb\xa9$os\x06\xc5\xdek-\xc5\
|;\x15\x8d%\x99\x82*\x93\x8c\x1b2\xd46H\xa4\
\x8f*\x93\xa1\x9f\x95W\x0a\x9aO\xd2\x80D\xd7\x12F\
\xaah\x83\x0e\xc2\xb3\xd4Ee\x1cd\x86\xdeU.S\
3\x11a\x9dD\x02U\x99\x8c\xca\xe0@\xf6\xc9\x1f\xa2\
\x1c\x880\xe8<2W\xe0'\x9a\xc8w\x5cJ\x15\x9d\
m\x82lz@\xc3\x95>(\x97+\xa3\xad\xcb\x0d_\
\x9a\x83\xae\xec=C\xe7\x18.\xf8O\xbb\xc3\xb0\xde\xfd\
\xf8\xedl2Z\xe4\xe3\xa4C4@\xc3\xe8\xafI\xf1\
i_6)\xc77\x1d\xd1rk\x0b\xbcP\xd211\
M\x9fPo\x8d)\x5c\xc4\xc8b{5\x80:\x00\x80\
\xab\xdd\xda\xde\x5c\x01\xd0OX\x01x\xa5\x84\x19\x06_\
\xce1l!\x00tX\x0dxz\x0e\xb8\xcb\xeaM\xdb\
\xea\xc1\x05\x22\xec\xc6\xa8s:PE\x18\xd0\x06\x80\x7f\
-\xc5,x\xa4\xd1\x91#\x0d(\x84B\xf2,\xcf\xc1\
U@\xb2%\xf2@\x19+)\xe9\xd6\xe7\x91\xacV\x9b\
\xa8\xe04\xce\x03\xd1\xe4\xfaf\xf8j\x1a\xf7\x5c\xb7T\
6\x8a\xfd\xe8\x85V\xa7\xb3h\xf3\x9ciJi\xd5\xf6\
\xc9r\xc5\xc9\xb2NDC.\x96\xd1F\xb3\xa2+\xa5\
#\xd8a.\xe5<\x03\xd4\x8e\x1c\xaf\x8a\x16\xa4\xd1e\
2\xdc\xc6S>\xc3\x0e>IA\x1a%\xadsD\xcf\
\x89Q\xba\x13qQr\x89N\xc0\xd9C\x86\xc4*j\
\xc1n[\xc1Q\x83|*Z8\xb0\xc4H(\xdf7\
4\xe3\x5c\x1baYJ!\xd3\x1a\x82G\x92\x03Y\xd4\
&H\xe0\x84d^[#\xb9\xe7^\xd8T\x1b \x0b\
\xa2\x0e9>h\x00\x85\xa4\xec9T\x0a9\x12\xbd\xb3\
!\x93B\xa6e\xa0\xd3\x94\xa8\xbeF\x06\xe5X\xe6\x82\
\xd1\x5c[\x07:\x82)\xd56\xe4\xd9eZ\x97\x90\x9e\
$`\xe7\x84e\x1a!\x5c\xba\x0f\x99\x0c\xbap`\xc0\
\x96CK-%\xdc\xa2-\x85\x00\x9f\xeeXK\xc1\x96\
}\x94\x14p\x14\x1d\xbd\x84\xf1\x86|&<Y\x0aB\
J&{[\x9a\xc8\x1d\xf1\xa1+)Y\xfb\xa6\xef\x19\
\x09\x0a+\xffv\xc3\x99\xb7\xd7?\x90\xb2\x1b\xad\xd2\xa9\
C\xf5C@\x92\x96\x18\xe8;\xa9\x1cI\x8f\xe6\xa5=\
\x09\xde\xf0\xfa\xa9\x0b\x1e\x9b\xc1\x89\xc3\xd3\x05X{\xe5\
\xa2\x0bv\x83\x17~\xe1k\x16|\xc3*\xbdqm\x19\
\xa4\x8c\x07Aj<\x8e\x9e\xfe\x96Oph\x22\x9dZ\
o\xdez\x1d\xa8\x94x,\xc52\x94\x13JD\xf7\xfd\
r\x7f\xbd+\xf7\x07\x92\xbeMf\xd3\x1dn\xf1)\xa4\
\xf5\x87M\xf8\xb7&0\xaa\xb8\x16v\xa1\xc8\x9f \xf1\
\xa0s\x8d\x08lAp\xc2^\x96gRM+\xb3p\
3\xb2,\xc4\x1d\xbcCx6\x88\xc9\x10#\xe6\x22\x83\
\x02\xc3\x059\xf2A\xd2\x82k\xf2\xab\xde.e\x1a\x91\
9T\xc6\x1a\xa5\x99\x1f\xaf\xa4\x17F\x13\x9f\xd5\xf0\xaa\
\x17\xb4f\x00\xd7\xa7\xb3^\xb9-}}|\x81\xe0\x11\
\xf4\xb6x\xd8\xae5\x83\x1e\xbdOCo\xd7\xb9\xd8\x95\
\x81\xf4s\x91\xcfE\xc9\xe8W\xa1c\xc3\xe8\x8b\xf2L\
\x9a\x19}IHw@\x14\xac\xa4\xa2\x15\x9f\x95\xd1_\
\xc8(\x0c\x8a(\x9bY}&\xcc\xcd>\x13\xe7v\x0f\
\xa2f)\xdc\xfa\x86\xdd\x17\xbb\xdb\xb2\xfb\xcc\x05n.\
ynu\xa2\xc7]\xe2P\x05\xfeLK\xaa\xda;\xc9\
[~\xc2\xd0n7\xf3\xcd\x08\xe7F{V+)\xf3\
;E\xabe&\x93\xf2N`p\xc6\xf2>XT\xce\
\xd1\x12\xc5\xb9\xf2\xb4\xa8\x01\xe6r\xae\x05\xf1qI\x0b\
m\xf4\x85v\xb6\x843$1\x81\x96k\xcf\xebk\xb4\
\x1c'\xb2/D\xc3-\x7f'b\xcb\xa9Vv\x15\x89\
<\xe6\x8ah%\xad\x87\x04I\xb7\xcce\xd4:u\x82\
S\x01I\xcf\xde\xcb\xbc\x83\xe0\xbbQ+\xda\xaa\x5c\x0d\
\xa4\xa0\x872\xfd\xdc\x8fr\x98]\xc9\xf3\x86\xa3(.\
U,\x09\xaao-U\xcc\xff\xeb\xe3p6iy\x8d\
\xe4K\x8e\xea5\x8e\xe6\xab\x0f\x83\xee\xc2\x86.%\xd8\
0V\x11iCWJ\x89\xe4$\xc1\x1b\x19\x9eOB\
\xb8\x7fN^(\xc5\x0a\x96\x92\x9f\xa5\x90r\x17\x05G\
\x122QD\x9e\xae\x917\x91\xc8\x18\xa4\x0f\xf5\xea\x9d\
A\x18\xb1\x8cFx\x04ocZ\xbd[KQG(\
\xdeNG\xb0\xf1A\xb1)\xa07\x82\xd3.\xa4\xaa\xb8\
\x9a\xc9\xder\x1a\x19\xa8)\x92\x92\x0d\xc8\x94F\x22-\
C\xce\x182)=\xb5\x8d|\x94V}9\xe1\x0cZ\
5e\x92N<\xe1]\x11\x98O\x88N)\xee\xbd\xb5\
\x9a\xbbYyZ\xcb\xa3}\xe7\x95\x8atE.\x0dM\
e\xb2\x92*w\xafU\x14\xb6\xe6v\xa7m\x1bv\xb1\
#ms\xadm\xeb\x1f\xd6*\xe6\xe9o\xb4\xfcP\x7f\
\xe3\xef+\x93i^k\x14\xdbnP\xba\xb0\x22n\xaa\
(t\x04\xbbN~RX~B\x82\xa42\xf2s\x13\
$U\xb8\xce2KD\x9ce\x92\x9f&i\xd6\x96\xf0\
\x92\x8ek\xd3RM\x8a*2\x04*\x16\xbc4B'\
\x91\x0a.z\x929\xe9<1x\xc8\xb4R\xbc\x88\x01\
\xa9\x0dF\x85\xe4\xb6\x91TJG2CG\xb6\xa7F\
\xb4\x08\xd2\xb2L\xd8\xda\xe1g\x95\xb5\xd7\xd2\xd9\xba\xb2\
7\xdc\xb4R6\xd674N\xa7\xa6e\x5cu\x074\
\x82)\x07I\xb5\x882\x95\xb4dd$\x13H\x12\xea\
\x92\xb8\x8d\x93\x90\xf9\x18\x94\xf3)\x96\x82A\xb2s \
\xa9UQ\xa4\xda\xceFO\xed\xf8 \xe8\xccK\x96y\
\xe8\x8eD\xb0NM\x16K\x228-\x9f*\xd3\x03'\
u\xb0\x82\x8f\xd2\x9ad\x16\x01U\xa6)\x88ZH\xbe\
!:\x81JIf\x0d\xef\x00@*5\x1d+NR\
G\xcfNP;\x0e\xe9~T\xac \xaf\x04X\x11\xc9\
\x04\x86\xe3Y=A\xbah\xf8\xa9\x81\x0a\xd3\x06\xcf\xe2\
\xcf\x99\x0eH0\x02v\x22\x0d)\xaa\xeb$3\x96\x06\
\x0e\x996\xec\xff\x1a\xe5\xb4\x87\xb6\x96\xb5C\xa4\x0d\x08\
H\x8d\x10N%\x19mF$Y\xc4\x9cn\xd4&`\
\xd4\x95\x01\x1b\x9d\x0a\xd2S\x03\x9ee\xb4=\x1cY\xe6\
\xe8\x94\x86\x8d\xca.\xc0\xff\xd7\xb5\x9d\xe6e'\xdaD\
!\xefI\xb2\xa0\xe1\xc5S\xc9h\xb4\xe5\x8e\xc3o\x1b\
Ro\xac\xb4\x10Z\xd6\x1d\x078\x01\xd6$\x05#5\
,\x03\x06\xb8i\x94\x92\x9bu%\xaf\x11s\xdd\x88X\
\x22y\x11G+\x03\x16Ed\x053\xc8+\x82\x918\
\x8ar\x91e\x0e9f\xba\xa1\xf1\xde\xb9\x9a\xd6(\xcf\
\xb1\x09R\xd0\x1b\xc7k\x87\xe0:R\xa5v0\xc9\x9e\
EAHb\xc0$\xa20S/\x8f\x0a\x0eb\x91~\
\x03)x\xb2\x94q\x84v\x96E\xa2y,\x8b\xd2\x84\
4:\xc0U&\x8d)+\xe9\xfc\xd6$\xf5\x9e\xf8\x14\
\xf0m\xbd\xe3V\x8c\x08\x96\xe7O\xd1!\xe1\x9ao\x08\
\x90%\xa6\x0f)\xb4%x\xcc\x06\x93e\x98\x8c\xa1\xa7\
\x92\x87g\x8c3\xca3?\x83\x06E\xba\x1f\x0c<\xb5\
\x0b\x0b\xa5\xe5r\x16:\x86(dV\xeaZ\xb3\xc6\xc3\
\xef8\x96\x19\x18\x5cj\x03v\x22R\xc3\xda\xd1\x02r\
\xea\x8e\x87\xd9r\xc9@\xdb\x22\xac\x07\x83)r,\x8b\
h\xc5\xa7;\xc2>kx\xc3\x0c\x04\xad\xd3\xd3\x82\x9b\
D\x10\xa5y\x01\xb4\x85Om[\xed\xf9Q\x17~h\
\xcc\x09\x99\xc9\xde\xd2\x93]\xc8S\x04=\x80\xd7\x94\xd2\
S9j\xe5I#\x85hK[\x070\xde\xd0\xf0\xaf\
\xb6\x12\xe8\x940\xb9\xd7\xa5\x8d\x07%Mr\x88&\x92\
u\xd2\xc3\xe1\xc1\xd0S\x0c\x1b\xce9\x80\xbc\xe8\xa5\xd4\
\xb1m\xc3\x1bx\xbfj\x9c\x06\x04\x99\xa1\xe7\xcf\xe4f\
m\xa0Y$/@\xb5\x05\xd7\xe6\x87!|-\xc3\x14\
\xb1\x0cY\x974\x9b\xb5\x95Vv)\x05\xd4L\x0a\x0c\
\x91\x9e\xaa\xe0\x8e\x03\x0f\xa1\x18V\xbe\x0d\xfeJ[)\
\x01P\xd0\x0d\x1d\x91\x10\x93\xa4\x99\x9d`\xfe\x90\xaa\x10\
a\xc2\x84Xm\xe8\xb6M\xa9\x0e\xc6\xc8\xd6\x0d6\xee\
J\x8d\xa9\x94\x00\xd6!fYXY\xda\xdf4\x09%\
\xab\xd9#\xba\x18\xd1\x8a\xc9\xa4L\x22\xc1\xe5c\xb2\xe9\
ZJ\xb9\x14F\xebc\x86\x1d\xa2\x91\xf0\x1b\xc15P\
FR(0YV\x8dGZ\xe3\x17\xb4\xa9\x9f\xe1\x96\
d\x92\x93\xd2\x1c\xe1,\xb5\xf4\x93\xd0\xb5-\xf0i\xc9\
\x84\x89\xccfH\xa6\xb5\x8a:7.\x12\x22.\xd6\x16\
\x9c\xac\x90d\x08]!7V\x92E\xd7\xb4i\x96\xc1\
\xb3\xf8\xcc\xfa\xc9qJ\xab\x94_;\x09\x16!\xad\xb2\
\x0do\xc2\xe1\x02\xa80\x99\xdf\xe1\xd0\x00\xa7m3\xff\
\xc42\xcc\xbal82\x0e\x0dv\xe9\x7f\xd9\xe3q\x10\
0\xe8d\xe6\x18\xd9\xe1\x0b\x99z\xb8r\xa1uh\xf0\
*s\xb6\xf5N\xba\xcd\x9d2\xc9\x02fZ5\xdc7\
K\x11\x82\xcd\xda\xcfse\x22\xa7Y4hV^\xc6\
\x0d\x96\xa2#.\x8b0\xe9\xba\xf3y$b\x19\xc2d\
h\xc4\xactw\x91f\xb0\x8en,K\x1e\xbd\x8e\x81\
\xcd\xbau\xb0L\xc2\x90z]G\xd5$\x13Ag\xd1\
\xb7Yy\x19\xa7\x93\xd4*\x9fE\xf4f\xc9\x14\xf97\
d\xcc\x11\xd8d\x905\xc8\xe4sj>\xc1&C4\
1\xf9,f\x1e\x9cx\xd1\xe3\xfd\xd2f\x0c\x05F\x8d\
\xd9\xaf\xfd\xe2\x92\xca\x90\xd0\x85\x9a\xa2\xd4\xa4\x87d\xb4\
\xbb\x97q#\x16\xc1M\x99\x06\x8b\x22)\x8c\xcd\xf85\
\xdd\x22\x91\x96l\x97+VF2\x15\xa2h\xf27\x96\
\xd2\xccfL\x8fd\x92\x0c c\x84$\x03\x93\xf0\xba\
\xc1\x1dYJ\x0fGf,\x93v\xc5B\xac\x9bNd\
\x94E\xce\xd5\xcb0K\xda\xcaR\xcd\xa1z\xc5oy\
K-\xf8z,\x89\x07\xb3\xcc\xf2V\xe2Fmz\xa0\
[g\xdc\x9ad@\xab\xf6k\x0aN\x22xn-\x1b\
L\x9d\xa4D;\xea\x82\xc6*\x12A\xa1*\xd6N\x93\
\x1e\xd5bY@\xb6\xd8L\x12H\xea\xe1ct\x1d\xb0\
\x047L\xcf\x88.\xd7\x99\x10\xcd\x02\xcb\x90y\x86\xd0\
\xaa,p{\xb9\x8ev\xbc\xfc\xe7T#^)~>\
5\xac\xa2U\xca\xcfK\x8e\x9d\x1d\xbe@\xbcF02\
\xb4}\x86\x020\x08*-*\xb0p/m\xd3\xe1#\
'\x8e\xe0\x8afS*\x104[7h\xddv\xcf\xb5\
\xa4Wc=\xf6\xa3\x9b\xfd\x97\x8e\xae\x87\xd7\xe3kW\
N\x92\x95\xfe\x11\x92\xe4\xcd\x0d\xcc\x9d\xcbt\x9b\xeb\xf9\
f\xd7\x03\x13O_\xcf\x7f9\xfd~\xe7\x05\xfd\xf5f\
\xadV\xa5g\x91\xb6<\xbe\xb4\xf9\xb4Ss\x0f\x98\xde\
@\xd0\x5c\xa1\xa6_I\x09\x0aq\x92\x9e\x10o^\xfa\
\xda\xb8t@H\x1cv\xb3\xf8G\x84\x84\xe9\x00\x09\x7f\
\x8a\x908\xec\x0e\xec\x8f\x08\x09\xd7\x01\x12\xf1\x14!q\
\xd8\x8d\xe0\x1f\x11\x12\xc5\x87X\xf7\x84\x84\x94'\x88\x09\
\xfb\x84=\xbf\x1e\x13%L\x18\xd1\x05\x13\xfa\x141\xd1\
\x13\xcc\xae\x98\xe8B0\xe5)2L\xdb3\xcc\xae\x98\
\xe8\xc20\xe5)RL\xdbS\xcc\xae\x98\xe8B1M\
\x0b\x12\xfe\xe5!\xd1S\xcc\xae\x90\xe8B1\xdd\x09B\
\xc2\xf5\x0c\xb3#$l\x17\x86\x19N\x11\x12=\xc1\xec\
\x0a\x89N\x04S\x9c\x22&z\x82\xd9\x15\x13\x9d\x08\xa6\
:EL\xf4\x04\xb3+&\xba\x10Ly\x8a\x0c\xd3\xf5\
\x0c\xb3+&:-b\x9e\x22\xc5\xf4=\xc5\xec\x88\x09\
\xd7i\x11\xf3\x149\xa6\xef9fWL\x1cv\x97<\
\xbc<$z\x8a\xd9\x15\x12\x87\xdd%?\x01H\xf4\x0c\
\xb3+$\x0e\xbbK~\x02\x90\xe8\x09fWH\x1cx\
\x97\xfc\xe51\x11z\x82\xd9\x11\x13\xfe\xc0\xbb\xe4'\x80\
\x89\x9e`v\xc5\xc4\x81w\xc9O\x00\x13=\xc3\xec\x8a\
\x89\x03\xef\x92\x9f\x00&z\x8a\xd9\x15\x13\x87\xdd%\x8f\
/\x0f\x89\x9ebv\x85\xc4aw\xc9_\x1e\x12\xb1g\
\x98\x1d!\x11\x0e\xbbK~\x02\x90\xe8\x09fWH\x1c\
x\x97\xfc\x040\xd1\x13\xcc\xae\x988\xf0.\xf9\x09`\
\xa2'\x98]1q\xe0]\xf2\x13\xc0D\xcf0\xbbb\
\xe2\xc0\xbb\xe4/\x8e\x09/z\x8a\xd9\x11\x13\xf1\xc0\xbb\
\xe4'\x80\x89\x9ecv\xc5D\x17\x8e\xa9N\xf0'\x81\
^\xf4\x1c\xb3+&\xbapLu\x82O\xe7z\xd1s\
\xcc\xae\x98\xe8\xc21\xd5\x09>\x9d\xebE\xcf1\xbbb\
\xa2\x0b\xc7,\xc4\x8e\x17\xdf\xec\xf0\xb2\xe7\x98\xdd0\xe1\
D\x17\x8e\xa9Np\xa3\xdc\xcb\x9ecv\xc5D'\x8e\
y\x82\xeb\x98^\xf6\x1c\xb3+&:q\xcc\x13\x5c\xc7\
\xf4\xb2\xe7\x98]1\xd1\x89c\xb6\xd7\xac^>\x17\x95\
=\xc7\xec\x8a\x89N\x1c\xb3\xfd\x90\xd5\xcb\xe7\x1d\xaa\xe7\
\x98\x1d1!;q\xcc\xf6CV'\x80\x89\x9ecv\
\xc5D'\x8e\xd9\x8e\x1d/\x9fw\xa8\x9ecv\xc5D\
'\x8e\xd9\xde\xef8\x01L\xf4\x1c\xb3+&:q\xcc\
6\x9fx\xf9\xbcc\xe7\x09\xed=&\xf6\xc1D'\x8e\
\xd9\xe6\x13\xdd1\xb1:\x87p\xfdau\x8eq:\xc3\
Xf\x9b\xb9\xad\x136\xdd\xea\xbc\xe1\x0dh\xa1=\x93\
w\x81\x1e0\xce1\xbd\x1ce.\xab\xd5\x91W[\xaa\
\xce(\x9f\x8f\xb5u\x16\xd9V\xd0\xca\xc2\xcf\x9b\xf7\x87\
\xe8\xf3\x8e\x8b~\x14b\xb2Rt\xee\xa1\xb0\xbe\x85\xb5\
|j\x9a\x1a\xcd\xce\x8e\x9e\x8f\x86w\x93\x9f.\xe4\xf9\
\x85\xcc\x8f\x84\xa6\xb3\x9d\x1bo\xc5\xa2\x03\x9d\x1b/\xe7\
}\xa2\xd6\xa1=\xf3L\xad\xb7\xc3\xc7?\xa7\xd6\x0b8\
\xdeW\xa3\xb1\xac\xd1\xb0\x9fFi\xe0\xd9\x99\x93\xb2\xfd\
\xeb\x88J\xfcs\xe8t\xabg\xd1\xd9\x1ba3t\xf3\
\xc7\xbb\xe1b\xf2\x93\xa8b\xb0\xda\xdas\xa9^\xaf\x1d\
\xcd\xbb\x96\xfe6\x8e@\xcf\xd4~\xff;l\xe5ar\
u7\xbc\x9e\xdc\xbd9{;\xfc:\x99\x0d\xe4\xc6\xc4\
POT\x0e\x81\x0e-,\x86\xf3\xc5\xedd6,4\
a\xb2\xb3\xd9\x9b\x87\xc5\x17\x07\x7fa\xcd9\x1d()\
\xc2\xc6\xb9\xef\x9bM\xfe\xf6~8\x9e~nE\x1cj\
\xd2=zB\xbd\xdex\x8d\xde\x1e7\xdf\xda[i\xe9\
\x9c\xb1\x08\x9f\xf4h\xab\x1bojk\xcf\xd3\xa7\xe9l\
\xda(\xf2\xd8D<\xb1_\xd9\xf9\xbb\x9d\xa7{c\xae\
\xef\x08d*\x9f\xec\xcd\xa3\x1e\xb3\x9e~\x18.f\xb7\
_\x80t\xa7\xb5\xf1\xf1\x5c\xd0?:\x9d\xc6\x0at\x9c\
\xce\xd4\x89QCBG\xc7!Ok\x0c8\x9d\xd9\xe8\
+\x17\xbdp\x03\xa3*\x17\xbc\x1d\xfc\x91\xce-\xf16\
Z7\xb0t\x96\x8c\xa5\xd3n\xc5@\xe2\xdf\x00x\xf2\
t\xbc\x92\xf6\xe7\xfb\x14o\xdd\xfb[sJ\x96'V\
\x12\xbbXU\xdeR\x847\xddRC\xe5\x12#r\xb5\
\xa9\x9d-\x05\xd2-\xa2\xde(\xb0:\xd3R[]\x82\
\xcc\x9a\xad\xe8\xad\x11Ki\xa9w\x90\xdb\xc9\xa7\xc9\xfd\
t<\xde\xe1o\x1f\xbe<\xdd\xe3\xae[\xda\x81\xea\xd5\
\xf0\xe9H\xd07g\xc3\xd9\xc69\xd2\xadcD7\x07\
\xad\x84\x0f\xc4\xfb\xd2\xd8\xb9\xeb\xfb\x8e\xb3\x10\x88\x8c\xa2\
'\xe1\x1ac_\x9eH\x13\xd41\x83\xce^\xcab\x8b\
P\xbe\x12AE:A.T\xc2\x04\x91N\xe5\x92\x95\
\x83\x8d9HaV\xdaF\x13\xe8\x9c\xeah<BL\
:\xa9\x89\x0e1\xd5\x22\x9d\xa0\xa4\xce\x95\xa1S\xd3\x82\
\xafOA\xf3$VT9\x08\xa7\xe9DjO\xe7\xa1\
\x09:v\xd4\xd0\xa1U\xcaH:\xf6\xcf\xd3A\xba(\
\x07\x8bv^\x89\xb8\x0d\xaa[@\x9e\x1f\xfe\xfa\xed\xdb\
3N\x8c\xfd\xf6m[\x8b\xbe\xa0*\x87q\x19>\xda\
\xd5\xf3\xe9\x86\xe98&Ya\xe0|2\x95\xa8\x8cT\
NK>\xe4[\x09[\x9f\xef\x14\xe8\x84R\xd2\x95\xf6\
FZy\x0e\xb3t\x02\x9f|}\xd0\x9dWum\xad\
\x85\x8f\x9e\xf4\x22cptF\x93\xa13\xd8\x80\x15\x92\
A\x93\x8a\xce\x19\xe4\x83\xc3\xc3\xc6\x0f\x95k ?\x1d\
\xb5\xaf\xbc\x8a7\xa3\x9b2Hc\xebl\xe1\xef\x02\xd2\
\xed\x01\xb1\x14\x0b\xac\x16N\xaae,\xd0@\xd4\xb9\xae\
$t$\x1d>\x18A\x87\xa6\xbd.\x05|\x1b\xf7\x88\
\x99\xbffEZ\x98\xdav\x0e\xf1\x88\xfew\xd6,\xb9\
\xc6\x96\x93\x1b\x97\x18]\xd2\x00\x1f\x96\xa2\x9aV\x15\x9d\
[\x17\xe9\x8c\xb2\x92\x14\x11\xdaFELO\xd3\xd9\xd5\
\xde\x032\x18\xb9\x8c\x92%HY=\x9dP\x06s\x06\
B\x81=\x16*\xc4H?H\x07m\x12\x84\x0d\x1d'\
\x1f}\x90E\xd9\xdb\x81\x8e\x95\xd1\xf0\xf3\x91\xa40{\
\xa7\xa9;%)d\xc1\x02\xcd\x9e\xdaq\x12\x17\xf5\x80\
:&ET\x8a\xdbvNX>#VUFiS\
w\x9c\x8e;\x83\x0b!c\x80_\xd04@\x13)\xe1\
.\xca\xde\x16U\xb1q\x96\xd8>\xe7n\x17\xdc\xb4\xd2\
;\x1f:\xdb\xb4\x98\xd2\x19\xf5pt\xc1\xd0\xa9m\xf1\
\x09\x16\xf3\xcf\xbel\x93\xdb\xe9\xfe\xd6\xd1:f\xbba\
\x1b\xbah\x1b 2\xca\xd19\xa9\x80\x86\xe0\x03W\x93\
m\x08)\x94\x80\x99\x83\xb3R!:&\x99\xd0e\xc1\
{B SQ\x95\x0a\xd6ky\xae\xe1\xbc\x85r\x81\
\x0f\x7fG\x0d>\x09\xd7\x22\xe2y!\x02]\x0dt\xf6\
\xaeb\xec9A\x87\xf5\xd2UG\x7f\x03\x8d\x86p\xbf\
\xa0\xb9y\x89|\xdd\x12\x1e\x0b\x9dz\x0c\x8f0\xd3\x1d\
(\xdc\xf9\x98\xdb\x91Y\xd5\x1e\x93\xf9\xec\x10\x13\xc6\xf8\
wS4\x18+B\xbb\xdbiQs\xaf~#\x18\xd2\
9\x8bf\xe3\x08\x9d%n\xe8\xb2\x03\x09\xc3|cZ\
\xa5\x13\x8c\x1bP\x1e!\x8c\xb3I\x8aX?`/E\
'\xaef\x05\x0b\xb2\xed\xb0\xb5\x9b\xf3^D\xfes-\
f{\xbb\xbe8l\xe2lFIx\xcaHh\x96\xc1\
\xa7a\xd3\xe9\xb9\xd6%\xa9\x92tB4p\x0f*\x01\
Z\xb1.Y\x90\x95A\xfdt$|~\x8f9,\xe3\
@\xabN8h\xe27\xcbW\xf7e\x82\xcb\xbf-|\
\xd0\xc56\x1f\xa4(\xa5\x94\x8a\xc8<\x0d\xb3\xa5\xfah\
WM\x91\xd8dR\xa6y\xe0\x85Q\xabL\x0aBh\
\x03\x92\x0d\x90D\x04\xcb\x08\x9eg2\x99\x86Cr\xd2\
D:\xb4\x93\xa4\xd1z*)\x88.\x06\xc5\xdek-\
\xc5|;\x15\xe9\x04qd0Pe\x92qCfy\
\x90oT\x99\x0c\xfd\xac\xbcR\xd0|\x92\x06$\xbat\
\x8a,(\xbe\x0d:\xd0\xf1\xc0\x90\xba\xa8\x8c\x83\xccH\
\xa3\xe8\xf4yj&\x22\xac\x93H\xa0*\x93Q\x19\x1c\
\xc8>\xf9C:\x1c\x1a}\x87\xb1U\x1e\xf8\x89|\x9c\
\xf2Z\x8a\xa0\xed\x15\xb2\xe9\x01\x0dW\xfa\xa0\x5c\xae\x8c\
\xb6.\xcb\xe7\x84n_\xf33\xbes\x0c\xe7\xd59\xa1\
\xddaX\xef~\xfcvs\x93\x09\x89\xbd\xa6\x93Yi\
\x9dpsSC'\xe5\xf8-{ \x16y\x03T[\
\xdc6Q\xb8\x88\x91\xc5\xf6j\x00\xaf\xb4\xea\x8d{\xb6\
\xb4\xbd\xb1f\xadt|\x82\xb6\x950\xc3\xe0\x9b\xda^\
\xe6\x18\xb6\x10\x00:\xac\x06<=\x07\xdca\xf5^\xb7\
\xad\x1e\x5c \xc2n\x8c:\xc7'\x0c\x8c\x8e\xb1~\x9b\
I1\x0b\x1eit\xe4HC'\xd3#y\x96\xe7\xe0\
* \xd9t^\xb6\xe4\xc3\xc9\xe9\x1c\xf8HV\xabM\
Tp\x1a\xe7\x81hr}3|5\x8d{\xae[*\
\x1b\xc5>\xf4\xc2\xa9\x9dg7~\xe7E\x9b\xe7LS\
J\xab\xb6O\x96-N\x96u\x22\x1ar\xb1\x8c6\x9a\
\x15]\x01\xbd`\x87\xb9\x94\xf3\x0cP;r\xbc\x8a\x0f\
\xcdv\x99\x0c\xb7\xf1\x94\xcf\xb0\x83OR\x90FI\xeb\
\x1c\xd1sb\x94\xeeD\x5c\x94\x5c\xa2\x13|\x08:%\
VQ\x0bv\xdb\x0a\x8e\x1a\xe4S\xd1\xc2\x81%FB\
\xf9\xbe\xa1\x19\xe7\xda\x08\xcbR\x0a\x99\xd6\x10<\x92\x1c\
\xc8\xa26A\xd2\xf9\xe4\x90ym\x8d\xe4\x9e\xd3\xd9\xcf\
\x5c\x1b \x0b\xa2\x0e9>h\x00\x85\xa4\xec9\xeaS\
\xd9%zgC&\xe5\xf3\xd9\x03\xc4\x5c\x9f\xce\x8e\xe7\
3\xdd\xb5\x0bFsm\x1dLHgc\xd3\xa9\xf6\xf0\
\xec2\xadKH>D\xdd\x0aCX\xa6\x11\xc2\xa5\xfb\
\x90\xc9\xa0\x0b\x07\x06l9\xb4\xd4R\xc2-\xdaR\x08\
\xf0\xe9\x8e\xb5\x14l\xd9GI\x01\x07=G\x82*\xe9\
t\xf4\xf5L\xd0Y\xdd\x14R2\xd9\xdb\xd2D\xee\x88\
\x0f]I\xc9\xda7}\xcfHPZ\xfa\xf7M\xee\xd5\
^\xff@\xcan4p7\xb9HK \xb2\x02\xb1\x03\
B\xe8;\xa9\x1cI\x8f\xe6\xa5=IO$<u\xc1\
c38\xa5mS\xb0\xf6\xcaE\xcc\xd1\x06/\xfc\xc2\
\xd7,\xf8\x86U\x9by\xdc2H\x19\x0f\x82DO\xe0\
n\x5c_\xee\xeek\x22\x9dZo\xdez\x19\xa8\x9c\xdf\
X$,\xf8@C9\xa1Dt\xdf'\xf7\xb7\xe6\xc0\
of\xddEZ\x7f\xd8\x84\x7fk\x02\xb3\xf1z\xa5\xda\
m_(\xf2'H<\xdcy\x02[\x10\x9c\xb0\x97\xe5\
\x99T\xd3\xca,\xdc\x8c,\x0bq\x07\xef\x10\x9e\x0db\
2\xc4\x88\xb9\xc8?\xc1pA\x8e|\x90\xb4\xe0\x9a\xfc\
\xaa\xb7K\x99Fd\x0e\x95\xb1Fi\xe6\xc7+\xe9\x85\
\xd1\xc4g5\xbc\xea\x05\xad\x19\xc0\xf5\xe9\xacWnK\
_\x1f_ x\x04\xbd\x9b;1b\x17\x0f\xeb\xd1\xfb\
4\xf4v\x9c\x0b)\xfb\xb9x\xc6\x13_K\xa3_\x85\
\x8e\x0d\xa3/\xca3if\xf4%!\xdd\x01Q\xb0\x92\
\x8aV|VF\x7f!\xa30(\xa2lf\xf5\x990\
7\xfbL\x9c\xdb=\x88\x9a\xa5p\xeb\x1bv_\xecn\
\xcb\xee3\x17\xa8\xf6Z;\xdaX=:\xfc\x12\xc7\xc6\
+\xac\x96K\x1c\xa0\x89N\xf2\x96\x9f0\xb4\xdb\xcd|\
3\xc2\xb9\xd1\x9e\xd5J\xca\xfcN\xd1j\x99\xc9\xa4\xbc\
\x13\x18\x9c\xb1\xbc\x0f\x16\x95s\xb4Dq\xae<-j\
\x80\xb9\x9ckA|\x5c\xd2B\x1b}\xa1\x9d-\xe1\x0c\
IL\xa0\xe5\xda\xf3\xfa\x1a-\xc7\x89\xec\x0b\xd1p\xcb\
\xdf\x89\xd8r\xaa\x95]E\x22\x8f\xb9\x22ZI\xeb!\
A\xd2-s\x19\xb5N\x9d\xe0T\x00\xfc\xde\xa6\x01.\
;\x08\xbe\x1b\xb5\xa2\xad\xca\xd5@\x0az(\xd3\xcf}\
(\x87\x8e\xc2\xee\xef(\x8aK\x15K\x82\xea[K\x15\
/\xf7\x9c\xe8\xd1|\xf5a\xd0\xed\xca\xa9<\xbd~'\
\xd2\x86\xae\x94\x12\xc9I\x8272<\x9f\x84p\xff\x9c\
\xbcP\x8a\x15,%?K!\xe5.\x0a\x8e$d\xa2\
\x88<]#o\x22\x911H\x1f\xea\xd5;\x830b\
\x19\x8d\xf0\x08\xde\xc6\xb4z\xb7\x96\xa2\x8eP\xbc\x9d\x8e\
`\xe3\x83bS@o\x04\xa7]HUq5\x93\xbd\
\xe542PS$%\x1b\x90)\x8d\xc40\x913\x86\
L\x0a\xbfg\x91\x8f\xd2\xaa/'\x9cA\xab\xa6L\xd2\
\x89\xbb\xbc+\x02\xf3\x09\xd1)\xc5\xbd\xb7Vs7+\
Oky\xb4\xef\xbcR\x91\xae\xc8\xa5\xa1\xa9LVR\
\xe5\xee\xb5\x8a\xc2\xd6\xdc\xee\xb4m\xc3.v\xa4m\xae\
\xb5m\xfd\xc3Z\xc5<\xfd\x8d\x96\x1f\xeao\xfc}e\
2\xcdk\x8db;\x0c*\x94\x0c*\x0a\x1d\xc1\xae\x93\
\x9f\x14\x96\x9f\x90 \xa9\x8c\xfc\xdc\x04I\x15\xae\xb3\xcc\
\x12\x11g\x99\xe4\xa7I\x9a\xb5\xa5C0\xe0\xda\xb4T\
\x93\xa2\x8a\x0c\x81\x8a\x05/\x8d\xd0I\xa4\x82\x8b\x9ed\
N:O\x0c\x1e2\xad\x14/b@j\x83Q!\xb9\
m$\x95\xd2\x91\x0c\x9e\xdc\x87\xd4\x88\x16AZ\x96\x09\
[;\xfc\xac\xb2\xf6Z:[W\xf6\x86\x9bV\xca\xc6\
\xfa\x86\xc6\xe9\xd4\xb4\x8c\xab\xee\x80F0\xe5 )\xb8\
\xb9L%-\x19\x19\xc9\x04\x92\x84\xba$n\xe3$d\
>\x06\xe5|\x8a\xa5N\x07v\x0e$\xb5*\x8aT\xdb\
\xd9\xe8\xa9\x1d\x1f\x04\xfc@\xaa\xed\xa1;\x12\xc1:5\
Y,\x89\xe0\xb4|\xaaL\x0f\x9c\xd4\xc1\x0a>Jk\
\x92Y\x04T\x99\xa6 j!\xf9\x86\xe8\x04*%\x99\
5\xbc\x03\x00\xa9\xd4\xd1\x09\xee\x90\xa3g'\xa8\x1d\x87\
t?*V\x90W\x02\xac\x88d\x02\xc3\xf1\xac\x9e \
]4\xfc\xd4@\x85i\x83g\xf1\xe7L\x07$\x18\x01\
;\x91\x86\x14\xd5u\x92\x19K\x03\x87L\x1b\xf6\x7f\x8d\
r\xdaC[\xcb\xda!\xd2\x06\x04\xa4F\x08\xa7\x92\x8c\
6#\x92,bN7j\x130\xea\xca\x80\x8dN\x05\
\xe9\xa9\x01\xcf2\xda\x1e\x8e,st\xf8\xdfFe\x17\
\xe0\xff\xeb\xdaN\xf3\xb2\x13m\xa2\x90\xf7$Y\xd0\xf0\
\xe2\xa9d4\xdar\xc7\xe1\xb7\x0d\xa97VZ\x08-\
\xeb\x8e\x03\x9c\x00k\x92\x82\x91\x1a\x96\x01\x03\xdc4J\
\xc9\xcd\xba\x92\xd7\x88\xb9nD,\x91\xbc\x88\xa3\x95\x01\
\x8b\x22\xb2\x82\x19\xe4\x15\xc1H\x1cE\xb9\xc82\x87\x1c\
3\xdd\xd0x\xef\x5cMk\x94\xe7\xd8\x04)\xfd\xba\x86\
\xd7\x0e\xc1u\xa4J\xed`\x92=\x8b\x82\x90\xc4\x80I\
Da\xa6^\x1e\x15\x1c\xc4 L\xf0\xd1\xf4H(\xa1\
\x9de\x91h\x1e\xcb\xa24!\x8d\x0ep\x95Ic\xca\
Jo\xeb1\x03\xb7\xc4\xa7\x80o\xeb\x1d\xb7bD\xb0\
<\x7f\x0a\xf4\x8ev\xd9y\xa1Q&\xa6\x0f)\xb4%\
x\xcc\x06\x93e\x98\x8c\xa1\xa7\x92\x87g\x8c3\xca3\
?\x83\x06E\xba\x1f\x0c<\xb5\x0b\x0b\xa5\xe5r\x16:\
\x86(dV\xeaZ\xb3\xc6\xc3\xef8\x96\x19\x18\x5cj\
\x03v\x22R\xc3\x1a\x09\xbct\xa9;\x1ef\xcb%\x03\
d\x8a\xf5`0E\x8ee\x11\xad\xf8tG\xd8g\x0d\
o\x98\x81\xa0uzZp\x93\x08\xa24/\x80\xb6\xf0\
\xa9mZ\x9b\xa6G]\xf8\xa11'd&{KO\
v!O\x11\xf4\x00^SJO\xe5\xa8\x95'\x8d\x14\
\xa2-m\x1d\xc0xC\xc3\xbf\xdaJ\xa0S\xc2\xe4^\
\x976\x1e\x944\xc9!\x9aH\xd6\x09\xf7\x1a\x82\xa1\xa7\
\x186\x9cs\x00y\xd1K\xa9c\xdb\x867\xf0~\xd5\
8\x0d\x082C\xcf\x9f\xc9\xcd\xda@\xb3H^\x80j\
\x0b\xae\xcd\x0fC\xf8Z\x86)b\x19\xb2.i6k\
+\xad\xecR\x0a\xa8\x99\x14\x18\x22=U\xc1\x1d\x07\x1e\
B1\xac|\x1b\xfc\x95\xb6R\x02\xa0\xa0\x1b:\x22!\
&I3;\xc1\xfc!U!\xc2\x84\x09\xb1\xda\xd0m\
\x9bR\xa4=F\xb6n\xb0qWjL\xa5\x04\xb0\x0e\
1\xcb\xc2\xca\xd2\xfe\xa6I(Y\xcd\x1e\xd1\xc5\x88V\
L&e\x12\x09.\x1f\x93M\xd7R\xca\xa50Z\x1f\
3\xec\x10\x8d\x84\xdf\x08\xae\x812\x92B\x81\xc9\xb2j\
<\xd2\x1a\xbf\xa0M\xfd\x0c\xb7$\x93\x9c\x94\xe6\x08g\
\xa9uFf\xb6@2E\x98\xc8l\x86dZ\xab\xa8\
s\xe3\x22!\xe2bm\xc1\xc9\x0aI\x86\xd0\x15rc\
%YtM\x9bf\x19<\x8b\xcf\xac\x9f\x1c\xa7\xb4J\
\xf9\xb5\x93`\x11\xd2*\xdb\xf0&\x1c.\x80\x0a\x93\xf9\
\x1d\x0e\x0dp\xda6\xf3O,\xc3\xac\xcb\x86#\xe3\xd0\
`\x97\xfe\x97=\x1e\x07\x01\x83Nf\x8e\x91\x1d\xbe\x90\
\xa9\x87+\x17Z\x87\x06\xaf2g[\xef\xa4\xdb\xdc)\
\x93,`\xa6U\xc3}\xb3\x14!\xd8\xac\xfd<W&\
r\x9aE\x83f\xe5e\xdc`):\xe2\xb2\x08\x93\xae\
;\x9fG\x22\x96!L\x86F\xccJw\x17i\x06\xeb\
\xe8\xc6\xb2\xe4\xd1\xeb\x18\xd8\xac[\x07\xcb$\x0c\xa9\xd7\
uTM2\x11t\x16}\x9b\x95\x97q:I\xad\xf2\
YDo\x96L\x91\x7fC\xc6\x1c\x81M\x06Y\x83L\
>\xa7\xe6\x13l2D\x13\x93\xcfb\xe6\xc1\x89\x17=\
\xde/m\xc6P`\xd4\x98\xfd\xda/.\xa9\x0c\x09]\
\xa8)JMzHF\xbb{\x197b\x11\xdc\x94i\
\xb0(\x92\xc2\xd8\x8c_\xd3-\x12i\xc9v\xb9be\
$S!\x8a&\x7fc)\xcdl\xc6\xf4H&\xc9\x00\
2FH20\x09\xaf\x1b\xdc\x91\xa5\xf4pd\xc62\
iW,\xc4\xba\xe9DFY\xe4\x5c\xbd\x0c\xb3\xa4\xad\
,\xd5\x1c\xaaW\xfc\x96\xb7\xd4\x82\xaf\xc7\x92x0\xcb\
,o%n\xd4\xa6\x07\xbau\xc6\xadI\x06\xb4j\xbf\
\xa6\xe0$\x82\xe7\xd6\xb2\xc1\xd4IJ\xb4\xa3.h\xac\
\x22\x11\x14\xaab\xed4\xe9Q-\x96\x05d\x8b\xcd$\
\x81\xa4\x1e>F\xd7\x01Kp\xc3\xf4\x8c\xe8r\x9d\x09\
\xd1,\xb0\x0c\x99g\x08\xad\xca\x02\xb7\x97\xebh\xc7\xcb\
\x7fN5\xe2\x95\xe2\xe7S\xc3*Z\xa5\xfc\xbc\xe4\xd8\
\xd9\xe1\x0b\xc4k\x04#C\xdbg(\x00\x83\xa0\xd2\xa2\
\x02\x0b\xf7\xd26\x1d>r\xe2\x08\xaeh6\xa5\x02A\
\xb3u\x83\xd6m\xf7\x5cKz5\xd6c?\xba\xd9\x7f\
\xe9\xe8zx=\xbev\xe5$Y\xe9\x1f!I\xee\xf0\
\x13nr\xf0\xfdO\xb8;\xfd\x84[wz\xb5u\xfb\
W\xfd/\xfd\xe6(@\xe2\x09+\xb7=$\x8a\x90\xe8\
\xf2F\x98\xf6\x8f\xfaO\x00\x12\xfdK\x82\xbaB\xa2\xcb\
\x0ba\xda\x87\xfd\xbe<$v>\x1c\xd9Cb\x1fH\
t:;\xe5\xf4\xde\x7f\x0fL\xf4\xef\x08\xea\x8a\x89N\
g\xa7\xb4\xdfa|\x02\x98\xd8\xf53\xea\x1e\x13\x8fc\
\xc2tz\xafu\xe1\xb0\xdf\x13\xc0D\xff\x1e\xca\xae\x98\
\xe8t>\xdf)R\xcc\xfe\x9d\x83\x9d1\xd1\x85b\xb6\
\x8fb{\xe9W\x93\x02\x12\x87}\xd6\xf0G\x84D\x17\
\x8a\xd9~3\xe9\xcbC\xa2\x7f\xabugHta\x98\
\xed\x17\x93\x9e\x00$\xfa\xe5\xaa\x8e\x90\xe8\xf4R\xeb\xc2\
a\xbf'\x80\x89~\xbd\xaa+&:\x11\xcc\xd3;t\
K\xab\x03\xff\x8a\xe4G\xc4D\xa7\x03\xa0O\x91a\xf6\
\x87\xf3u\xc6D\xa7E\xccS\xa4\x98\xa2_\xb0\xea\x8a\
\x89N\x8b\x98\xa7\xc81\xfb\x03\xa0\xbbbB\x1dv\x97\
\xfc\xa5\xcfC\xd02\xeez\x15K\x0f\x89} q\xd8\
]\xf2\x13\x80D\xbf\x84\xd9\x15\x12\x87\xdd%?\x01H\
\xf4K\x98]!q\xe0]\xf2\x97\xc7D\xe8\xd70\xbb\
b\xe2\xc0\xbb\xe4'\x80\x89~\x11\xb3#&\xf4\x81w\
\xc9O\x00\x13\xfd\x22fWL\x1cx\x97\xfc\xe51Q\
8F\xa7\xc7\xc4\xd30q\xd8]\xf2\x97>\x84\x0d\x90\
\xe8\xd7\xab\xbaB\xe2\xb0\xbb\xe4'\x00\x89~\xb9\xaa+\
$\x0e\xbbK\xfe\xf2\x90p\xfdrUGH\x98\x03\xef\
\x92\x9f\x00&\xfa\xf5\xaa\xae\x988\xf0.\xf9\x09`\xa2\
_\xb0\xea\x8a\x89\x03\xef\x92\xbf<&l\xbf`\xd5\x15\
\x13\x07\xde%?\x01L\xf4\x0bV]1q\xe0]\xf2\
\x17\xc7\x84\xea\xf9DWL\xd8.\x1cS\x9d\xe2O\x02\
]\xbf`\xd5\x15\x13]8\xa6:\xc5\xa7s]\xcf'\
\xbab\xa2\x0b\xc7T\xa7\xf8tn\xbfd\xd5\x19\x13]\
8f!v\xbc\xf8f\x87\xf2\xfd\x06XWLt\xe1\
\x98\xea\x047\xca\x95\xef9fGL\xb8N\x1c\xf3\x04\
\xd71U\xbf)\xda\x19\x13\x9d8\xe6\x09\xaec\xaa\xd8\
\xc7\x8e\xae\x98\xe8\xc41\xdbkV/\x9f\x8b\xf6\xcf\xe7\
v\xc6D'\x8e\xd9~\xc8\xea\xe5\xf3\x8e\xd8\xc7\x8e\xae\
\x98\xe8\xc41\xdb\x0fY\x9d\x00&\xfa\xf5\x89\x8e\x98\xf0\
\x9d8f;v\xbcx\xde\xa1\xfb_\x80u\xc6D'\
\x8e\xd9\xde\xef8\x01L\xf4\x1c\xb3+&:q\xcc6\
\x9fx\xf1\xbcC\x8b\x9ecv\xc5D'\x8e\xd9\xe6\x13\
\xdd1\xb1:\x87p\xfdau\x8e1\xf5\x1bc\x9c\xcc\
\xd6H\xbe\xbd\xff}\x8e\xb9\x9b\x5c\xdd\x0d\xaf'wo\
\xce\xfe\xfaq1\x19\xaf\xae\x96O$^^\xcd\x0e?\
\xe6\x8fw\xc3\xc5\xe4'{.\xc3\xeb\xd5\xa1\xc5\xf9\x19\
\xca|\x82\xb2q\x8d\x97\xcb\x16oA''\xbf>\xdb\
\x01r\x1a<\x9dH\xe8\x82\xb6\xb6\xa5\xb1\xf2\x15\xaa\xc2\
\x07\x98(c7'\x82\x8e7l_X\xcd\x9b\xaa\x84\
\xd0V\x16g\xbatm\x09\x0f%6V\xaa['\xae\
\xb1i\xbe\x9a\xdc\xa8\xa8\xe2\xa6%\xd6v\xf7j$D\
\xd3\xee\xe8\x85\xf1t\x00\x93\xdf\xb4\xbf\xf2\xf9\xa4\x0d\xe9\
\xb3\x0cHV\x8aNu\x14\xd6\xb7-i\xd7\xf9\xd9\xa5\
i\xa3\x83=\x9d\xd4\xb6\x04\xf4\xd2\x15\x9a6_\x1f\x97\
\xde\x9a6U\x95g,\xa6W\xea\x97\x17\x8a\xca\x17\x97\
s\xa6}\xb4\xbb\xe6LT:\xcd\x1a\xeb\xaa<e;\
N\xc2\x0b\xad\x03\xccOm\xd2\xf6;\xdb\xb1\xa5;\xaa\
\x86\xec\xaa\xa9\xd4td\xa9=Wt\xc2\xa7\x11\xe7\xaa\
\xa8\xda\x0du\xeey$\xbc\xac\xec~\xaa|\xf40\xf8\
\xb2*W\xda:\xa4z\x0a'\xba\x92Z\xf8\x00T\xfb\
\xcf\xa9\x9e\xed`\xda\x8cD\xfc\x9f_.\xe7\x9f\xf0\x9f\
\xff\x0fx\xd1\xc5\xf8\
"
qt_resource_name = b"\
\x00\x0a\
\x0a\xc8\xf6\x87\
\x00f\
\x00o\x00l\x00d\x00e\x00r\x00.\x00s\x00v\x00g\
\x00\x10\
\x07\xac\xa7\xc7\
\x00c\
\x00l\x00o\x00s\x00e\x00-\x00t\x00u\x00n\x00n\x00e\x00l\x00.\x00s\x00v\x00g\
\x00\x11\
\x0cu\xee\x87\
\x00e\
\x00d\x00i\x00t\x00-\x00s\x00e\x00t\x00t\x00i\x00n\x00g\x00s\x00.\x00s\x00v\x00g\
\
\x00\x09\
\x0alxC\
\x00r\
\x00e\x00s\x00o\x00u\x00r\x00c\x00e\x00s\
\x00\x0f\
\x0c\x97\xeb\xc7\
\x00o\
\x00p\x00e\x00n\x00-\x00t\x00u\x00n\x00n\x00e\x00l\x00.\x00s\x00v\x00g\
\x00\x08\
\x06|W\x87\
\x00c\
\x00o\x00p\x00y\x00.\x00s\x00v\x00g\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x13\
\x03\xf4cg\
\x00s\
\x00y\x00s\x00t\x00e\x00m\x00-\x00s\x00h\x00u\x00t\x00d\x00o\x00w\x00n\x00.\x00s\
\x00v\x00g\
\x00\x10\
\x08\x15\x1e\xe7\
\x00v\
\x00i\x00e\x00w\x00-\x00r\x00e\x00f\x00r\x00e\x00s\x00h\x00.\x00s\x00v\x00g\
\x00\x0f\
\x020\x86g\
\x00l\
\x00i\x00s\x00t\x00-\x00r\x00e\x00m\x00o\x00v\x00e\x00.\x00s\x00v\x00g\
\x00\x14\
\x06\x1c\x9a\x87\
\x00n\
\x00m\x00-\x00n\x00o\x00-\x00c\x00o\x00n\x00n\x00e\x00c\x00t\x00i\x00o\x00n\x00.\
\x00s\x00v\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x06\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\xa4\x00\x01\x00\x00\x00\x01\x00\x00\x8c\xae\
\x00\x00\x01tJ?\xf4m\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x17t\
\x00\x00\x01tJ?\xf4q\
\x00\x00\x00h\x00\x02\x00\x00\x00\x01\x00\x00\x00\x07\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01tJ?\xf4m\
\x00\x00\x00@\x00\x00\x00\x00\x00\x01\x00\x00\x22\xcf\
\x00\x00\x01tJ?\xf4q\
\x00\x00\x00\x80\x00\x01\x00\x00\x00\x01\x00\x00_\x08\
\x00\x00\x01tJ?\xf4m\
\x00\x00\x00\xba\x00\x02\x00\x00\x00\x04\x00\x00\x00\x08\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01\x1c\x00\x00\x00\x00\x00\x01\x00\x00\xa4(\
\x00\x00\x01tJ?\xf4m\
\x00\x00\x00\xca\x00\x00\x00\x00\x00\x01\x00\x00\x92\xc2\
\x00\x00\x01tJ?\xf4q\
\x00\x00\x01@\x00\x01\x00\x00\x00\x01\x00\x00\xa8\xff\
\x00\x00\x01tJ?\xf4m\
\x00\x00\x00\xf6\x00\x01\x00\x00\x00\x01\x00\x00\x9b\x8d\
\x00\x00\x01tJ?\xf4q\
"
def qInitResources():
QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources() | /remote_ssh_tunnel_controller_qt-0.6.0-py3-none-any.whl/rssht_controller_qt/resources.py | 0.435421 | 0.247294 | resources.py | pypi |
import asyncio
import aiohttp
import hashlib
import hmac
import json
from remote_server import exceptions
DEFAULT_SERVER_URL = "https://oauth.accounts.firefox.com/v1"
VERSION_SUFFIXES = ("/v1",)
DEFAULT_CACHE_EXPIRY = 300
TOKEN_HMAC_SECRET = 'PyFxA Token Cache Hmac Secret'
def get_hmac(data, secret, algorithm=hashlib.sha256):
"""Generate an hexdigest hmac for given data, secret and algorithm."""
return hmac.new(secret.encode('utf-8'),
data.encode('utf-8'),
algorithm).hexdigest()
@asyncio.coroutine
def verify_token(server_url, token, scope, cache):
"""Verify an OAuth token, and retrieve user id and scopes.
:param token: the string to verify.
:param scope: optional scope expected to be provided for this token.
:returns: a dict with user id and authorized scopes for this token.
:raises fxa.errors.ClientError: if the provided token is invalid.
:raises fxa.errors.TrustError: if the token scopes do not match.
"""
if server_url is None:
server_url = DEFAULT_SERVER_URL
server_url = server_url.rstrip('/')
if not server_url.endswith(VERSION_SUFFIXES):
server_url += VERSION_SUFFIXES[0]
key = 'fxa.oauth.verify_token:%s:%s' % (
get_hmac(token, TOKEN_HMAC_SECRET), scope)
body = None
if cache is not None:
body = yield from cache.get(key)
if body is None:
url = server_url + '/verify'
request_body = {
'token': token
}
resp = yield from aiohttp.request(
'POST', url,
data=json.dumps(request_body),
headers={'content-type': 'application/json'})
body = yield from resp.json()
missing_attrs = ", ".join([
k for k in ('user', 'scope', 'client_id') if k not in body
])
if missing_attrs:
error_msg = '{0} missing in OAuth response'.format(
missing_attrs)
raise exceptions.OutOfProtocolError(error_msg)
if scope is not None:
authorized_scope = body['scope']
if not scope_matches(authorized_scope, scope):
raise exceptions.ScopeMismatchError(authorized_scope, scope)
if cache is not None:
cache.set(key, json.dumps(body).encode('utf-8'))
else:
body = json.loads(resp.decode('utf-8'))
return body
@asyncio.coroutine
def authenticate(authorization, server_url, oauth_scope, cache=None):
if not authorization:
raise exceptions.NotAuthenticatedError('Authorization is missing')
authmeth, auth = authorization.split(' ', 1)
if authmeth.lower() != 'bearer':
raise exceptions.NotAuthenticatedError(
'Authorization does not contains a Bearer Token.')
try:
profile = yield from verify_token(server_url=server_url,
cache=cache,
token=auth,
scope=oauth_scope)
user_id = profile['user']
except exceptions.BackendError as e:
raise exceptions.NotAuthenticatedError(e)
return user_id
def scope_matches(provided, required):
"""Check that required scopes match the ones provided. This is used during
token verification to raise errors if expected scopes are not met.
:note:
Sub-scopes are expressed using semi-colons.
A required sub-scope will always match if its root-scope is among those
provided (e.g. ``profile:avatar`` will match ``profile`` if provided).
:param provided: list of scopes provided for the current token.
:param required: the scope required (e.g. by the application).
:returns: ``True`` if all required scopes are provided, ``False`` if not.
"""
if not isinstance(required, (list, tuple)):
required = [required]
def split_subscope(s):
return tuple((s.split(':') + [None])[:2])
provided = set([split_subscope(p) for p in provided])
required = set([split_subscope(r) for r in required])
root_provided = set([root for (root, sub) in provided])
root_required = set([root for (root, sub) in required])
if not root_required.issubset(root_provided):
return False
for (root, sub) in required:
if (root, None) in provided:
provided.add((root, sub))
return required.issubset(provided) | /remote-worker-0.1.0.zip/remote-worker-0.1.0/remote_server/authentication.py | 0.70028 | 0.202956 | authentication.py | pypi |
# Remotecall
The module provides functionality to expose Python functions to be called remotely over ethernet.
The implementation uses Python's
[http.server.HTTPServer](https://docs.python.org/3/library/http.server.html) to implement a server
side functionality and [Request](https://pypi.org/project/requests/) for client implementation.
## Getting started
Exposing a function to be called remotely.
### Server
Create a server and expose a function.
```python
from remotecall import Server
def hello() -> str:
return "Hello World"
with Server(("localhost", 8000)) as server:
server.expose(hello)
server.serve_forever()
```
### Client
Subclass a client from BaseClient to call exposed functions.
```python
from remotecall import Server
class Client(BaseClient):
def hello(self) -> str:
return self.call("hello")
client = Client(("localhost", 8000))
client.hello()
```
### Code generation
Use the module to generate client code instead of writing the client code.
```sh
python -m remotecall generate_client http://localhost:8000
```
# Usage
## Basic example
Server side implementation to expose hello() function:
```python
from remotecall import Server
def hello() -> str:
return "Hello World"
with Server(server_address=("localhost", 8000)) as server:
server.register(hello)
server.serve_forever()
```
Client side implementation is based on BaseClient class which is a thin wrapper around Requests
module.
```python
from remotecall import BaseClient
client = BaseClient(server_address=("localhost", 8000))
client.call("hello")
```
Subclassing BaseClient makes accessing the available functions more convenient - for example, by
enabling IDE autocompletion and providing the type information.
```python
from remotecall import BaseClient
class Client(BaseClient):
def hello(self) -> str:
return self.call("hello")
client = Client(server_address=("localhost", 8000))
client.hello()
```
Usage of `python -m remotecall generate_client` automates the client code generation.
For more example, see examples directory.
## Passing arguments
Module uses Python type annotations to determine type of parameter and return
values.
The type information is used to generate API definition. The API definition is provided in JSON
format and can be used to generate client code.
The type information is also used to encode and decode data - like parameter and return values -
send between client and server.
The implementation support basic Python data types including `bytes`, `int`, `float`, `bool`, `str`,
`list`, `tuple` and `dict`. It is possible to add support for addition types. See examples
directory for an implementation for _PIL Images_ and _NumPy_ arrays.
#### Server
Exposing a function with parameters and return value.
```python
def echo(message: str) -> str:
return message
```
#### Client
Calling the function from the client side.
```python
class Client(BaseClient):
def echo(self, message: str) -> str:
return self.call("echo", message=message)
```
## HTTPS and SSL
By default, the server uses HTTP for the communication. HTTPS communication can be enabled by
providing SSL certificate. The certificate is used to create a SSLContext to wrap
the underlying socket instance.
See _examples_ folder for an example and instructions to generate a self-signed certificate.
#### Server
```python
server = Server(...)
server.use_ssl(cert_file="server.crt", key_file="server.key")
```
#### Client
```python
client = BaseClient(...)
client.use_ssl(cert_file="server.pem")
```
## Authentication
By default, the server does not require authentication.
Server provides an implementation for HTTP basic authenticator that can be set in use as
demonstrated below. However, it's also possible to subclass
`remotecall.authentication.Authenticator` and implement other type of authenticators.
Client implementation wraps _Requests_ library and makes it possible to use the
authentication methods available for that library. More documentation can be found from the
library documentation https://requests.readthedocs.io/en/latest/user/authentication/.
#### Server
```python
server = Server(...)
server.set_authenticator(BasicAuthenticator("user", "pass"))
```
#### Client
```python
from requests.auth import HTTPBasicAuth
client = BaseClient(...)
client.set_authentication(HTTPBasicAuth("user", "pass"))
```
### Implementing authenticator
Implementing a custom authenticator begins with subclassing `remotecall.authentication.
Authenticator`.
Authenticator is expected to implement `authenticate` method to carry out the authentication. It
is called right after the HTTP server's `do_*` method gets called.
`authenticate` method receives an instance of
[`BaseHTTPRequestHandler`](https://docs.python.org/3/library/http.server.html#http.server.BaseHTTPRequestHandler)
that represents the HTTP request received by the HTTP server and to handle it.
`Authenticator` is expected to handle the actions required to carry out the authentication.
It may raise `AuthenticationError` if the server should not continue processing the request.
Server expects the authenticator to resolves the request by sending the corresponding headers and
related data.
```python
class Authenticator(ABC):
@abstractmethod
def authenticate(self, request: BaseHTTPRequestHandler):
"""Do the authentication."""
```
## Code generation
The module provides an easy way to generate a Python client for a service provided by a server.
The API definition can be fetched from the server with the following command line command. The
command returns an API definition in JSON format.
```sh
$ python -m remotecall fetch_api http://localhost:8000
```
For example, in case of _echo_ example the output would be:
```json
{
"endpoints": [
{
"name": "echo",
"documentation": "",
"parameters": [
{
"name": "message",
"type": "str"
}
],
"return_type": "str"
}
],
"ssl_enabled": false,
"address": {
"hostname": "localhost",
"port": 8000
}
}
```
The API definition can be written in a file with `output` option.
```sh
$ python -m remotecall fetch_api http://localhost:8000 --output api.json
```
Client code can be generated based on the definition:
```sh
$ python -m remotecall generate_client api.json --output client.py
```
Where, `output` defines the output file.
By default, the client class is named as `Client`. This can be changed by using `name`
option. For example, `--name EchoClient`.
The above steps can be combined if the intermediate results are not needed.
```sh
$ python -m remotecall generate_client http://localhost:8000 --output client.py
```
## Extending supported data type
See _examples/image_ and _examples/numpy_ for image and numpy codec examples.
# Development
## Set up virtual environment
Create virtual environment:
```sh
python3 -m venv venv
```
Activate virtual environment:
```sh
source venv/bin/activate
```
## Running from source
```sh
pip install -e .
```
### Installing from source
```sh
pip install .
python -m remotecall
```
# Running tests
```sh
pip install .[test]
pytest
py.test -s
```
# Building
## Creating a wheel
```sh
$ pip install wheel
$ python setup.py bdist_wheel
```
| /remotecall-0.1.4.tar.gz/remotecall-0.1.4/README.md | 0.569015 | 0.833934 | README.md | pypi |
import argparse
import asyncio
import traceback
import signal
import sys
from getpass import getpass
from pathlib import Path
from shlex import quote
from typing import Dict, List, Union, Optional
from asciimatics.screen import Screen # type: ignore
import aiofiles # type: ignore
import aiofiles.base # type: ignore
import asyncssh # type: ignore
Number = Union[int, float]
def sizeof_fmt(num: Number, suffix: str = "B") -> str:
"""
Takes a number (size in bytes), returns the human readable representation of that (B, KB, MB, GB, etc).
Thanks Fred!
https://web.archive.org/web/20111010015624/http://blogmag.net/blog/read/38/Print_human_readable_file_size
"""
for unit in ("", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi"):
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, "Yi", suffix)
async def run_client(
user: str,
host: str,
command: str,
cap_file: Path,
keys: List[Path],
*,
shutdown_event: asyncio.Event,
password: str = None,
known_hosts: Path = None,
sudo_password: bytes = b"",
semaphore: asyncio.Semaphore,
):
log_file = cap_file.with_suffix(".log")
async with semaphore:
async with aiofiles.open(str(cap_file), mode="wb") as cap_fd, aiofiles.open(str(log_file), mode="wb") as log_fd:
client_keys = list(map(str, keys))
port = 22
if ":" in host:
port = int(host.split(":")[1])
host = host.split(":")[0]
# noinspection PyUnusedLocal
connection: asyncssh.SSHClientConnection
async with asyncssh.connect(
host, port, client_keys=client_keys, username=user, known_hosts=str(known_hosts), password=password
) as connection:
stdin = b""
if len(sudo_password) > 0:
stdin = sudo_password
running_command = asyncio.create_task(
connection.run(command, input=stdin, stdout=cap_fd, stderr=log_fd, encoding=None)
)
await shutdown_event.wait()
running_command.cancel()
class FileSize(object):
"""
Class that calculates the file size of capture files and rate at which those files are growing. Is terminal-size
aware so changing the size of your window won't screw things up.
"""
def __init__(
self,
host_count: int,
semaphore: asyncio.Semaphore,
*args: Path,
shutdown_event: asyncio.Event = None,
refresh_interval: int = 5,
):
self.host_count = host_count
self.semaphore = semaphore
self.capture_files = args
assert shutdown_event is not None
self.shutdown_event = shutdown_event
self.refresh_interval = refresh_interval
self._file_header_string = "Capture file:"
longest_file_name = max(len(str(capture_file.name)) for capture_file in args)
self.separator_width = max(longest_file_name, len(self._file_header_string)) + 4 # For padding
self.old_values: Dict[Path, int] = {}
self.size = (0, 0)
@property
def terminal_width(self) -> int:
return self.size[1]
@property
def terminal_height(self) -> int:
return self.size[0]
@property
def file_names(self) -> str:
file_names = self._file_header_string.ljust(self.separator_width)
file_names += "".join(str(capture_file.name).ljust(self.separator_width) for capture_file in self.capture_files)
return file_names
@property
def file_sizes(self) -> Dict[Path, int]:
return {file: (file.stat().st_size if file.exists() else 0) for file in self.capture_files}
@property
def file_growth_rates(self) -> str:
capture_files = self.file_sizes
if len(self.old_values) == 0:
return "Rate:".ljust(self.separator_width)
capture_growth_rates = []
for capture_file, capture_file_size in capture_files.items():
size_delta = (capture_file_size - self.old_values.get(capture_file, 0)) / self.refresh_interval
file_growth = sizeof_fmt(size_delta, suffix="Bps")
capture_growth_rates.append(file_growth)
rate_string = "Rate:".ljust(self.separator_width)
rate_string += "".join(rate.ljust(self.separator_width) for rate in capture_growth_rates)
return rate_string
@property
def capture_file_sizes(self) -> str:
capture_files = self.file_sizes
size_string = "File size:".ljust(self.separator_width)
size_string += "".join(
str(sizeof_fmt(capture_file_size)).ljust(self.separator_width)
for capture_file_size in capture_files.values()
)
return size_string
async def file_size_worker(self):
"""
Prints the sizes and rates 1 and 2 rows up from the bottom, respectively.
:return:
:rtype:
"""
# Want to give the other coroutines a chance to increment the semaphore
screen = Screen.open()
self.size = screen.dimensions
try:
# I wish asyncio.Semaphore made the value public. I know it's probably to prevent people from doing stupid
# things, but sometimes it's nice to be dumb. Doing this so we can die when all of our captures have died.
# noinspection PyProtectedMember
while not self.shutdown_event.is_set() or self.semaphore._value < self.host_count:
if screen.has_resized():
screen.close(restore=False)
screen = Screen.open()
self.size = screen.dimensions
screen.print_at(self.file_names, 0, self.terminal_height - 3)
screen.print_at(self.capture_file_sizes, 0, self.terminal_height - 2)
screen.print_at(self.file_growth_rates, 0, self.terminal_height - 1)
self.old_values = self.file_sizes.copy()
screen.refresh()
await asyncio.wait([self.shutdown_event.wait()], timeout=self.refresh_interval)
finally:
screen.close()
print("\n")
print(self.file_names)
print(self.capture_file_sizes)
print(self.file_growth_rates)
print("\n")
async def main():
home_directory = Path("~").expanduser()
parser = argparse.ArgumentParser(prog="remotecap", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
help_string = (
"File to write to if performing the capture on a single host. Folder to put captures in if "
"capturing from multiple hosts. Required."
)
parser.add_argument("-w", "--filename", type=Path, help=help_string, required=True)
help_string = """Hosts to perform the capture on. Required."""
parser.add_argument("hosts", nargs="+", type=str, help=help_string)
help_string = """Filter to pass to tcpdump on the remote host(s)."""
parser.add_argument("-f", "--filter", default="not port 22", type=str, help=help_string)
default_key_location = home_directory / ".ssh" / "id_rsa"
help_string = """Location of SSH private keys to use. Can be specified multiple times."""
parser.add_argument("-k", "--key", action="append", help=help_string, type=Path)
help_string = """Interface to perform the capture with on the remote host(s)."""
parser.add_argument("-i", "--interface", default="any", type=str, help=help_string)
help_string = "Prompt for password to use for SSH. SSH keys are recommended instead."
parser.add_argument("-p", "--password-prompt", default=False, action="store_true", help=help_string)
help_string = "Length of packets to capture."
parser.add_argument("-s", "--packet-length", type=int, help=help_string, default=0)
help_string = "User to SSH as. The user must have sufficient rights."
parser.add_argument("-u", "--user", type=str, help=help_string, default="root")
help_string = "Interval to refresh file size and growth rates at."
parser.add_argument("-r", "--refresh-interval", type=int, help=help_string, default=5)
help_string = 'Known hosts file to use. Specify "None" if you want to disable known hosts.'
default_known_hosts_location = home_directory / ".ssh" / "known_hosts"
parser.add_argument("-n", "--known-hosts", default=default_known_hosts_location, help=help_string)
help_string = "Escalate privileges (sudo) and prompt for password"
parser.add_argument("-e", "--sudo", action="store_true", default=False, help=help_string)
help_string = "Path to tcpdump on the system. Needed if tcpdump isn't in your path."
parser.add_argument("-c", "--command-path", default="tcpdump", type=str, help=help_string)
help_string = "Do not take over the screen."
parser.add_argument("-q", "--quiet", action="store_true", default=False, help=help_string)
parser.add_argument("-d", "--debugger", action="store_true", default=False)
args = parser.parse_args()
if args.debugger:
breakpoint()
# Janky hack to override this issue: https://bugs.python.org/issue16399
# Basically, if you have a default option and append action, your default will be included instead of being
# clobbered. Gyar!
if args.key is None:
keys: List[Path] = [default_key_location]
else:
keys: List[Path] = [key for key in args.key if key is not default_key_location]
if not any(key.exists() for key in keys):
print("One of the specified private keys doesn't exist!")
print(*keys)
sys.exit(1)
hosts: List[str] = args.hosts
# Using shlex.quote to prevent anyone from injecting random shell commands.
# No ; rm -rf --no-preserve-root / ; for us here!
interface: str = quote(args.interface)
capture_filter: str = quote(args.filter)
command_path: str = args.command_path
if command_path != "tcpdump":
command_path = quote(command_path)
# No need to quote this as argparse is already enforcing int type
packet_length: int = args.packet_length
user: str = args.user
refresh_interval: int = args.refresh_interval
known_hosts: Optional[Union[str, Path]] = args.known_hosts
should_sudo: bool = args.sudo
be_quiet: bool = args.quiet
if known_hosts == "None":
known_hosts = None
elif isinstance(known_hosts, str):
known_hosts = Path(known_hosts)
if not known_hosts.exists():
print(f"Known hosts file {known_hosts} does not exist.")
sys.exit(1)
password: Optional[str] = None
if args.password_prompt:
password = getpass(prompt="SSH password: ")
file_path: Path = args.filename
file_path = file_path.expanduser().resolve()
# If we're capturing from more than one host, we want to create a folder containing our capture files.
if len(hosts) > 1:
file_path.mkdir(exist_ok=True)
capture_files = {host: file_path / f"{host}.cap" for host in hosts}
else:
capture_files = {hosts[0]: file_path}
# Appending to existing capture files makes for invalid capture files.
for capture_file in capture_files.values():
if capture_file.exists():
capture_file.unlink()
sudo = ""
sudo_password: bytes = b""
if should_sudo:
sudo = "sudo -S "
sudo_password = getpass(prompt="Sudo password: ").encode() + b"\n"
command = f"{sudo}{command_path} -i {interface} -s {packet_length} -U -w - '{capture_filter}'"
shutdown_event = asyncio.Event()
semaphore = asyncio.Semaphore(len(capture_files))
task_list = []
for host, file in capture_files.items():
task_list.append(
asyncio.create_task(
run_client(
user,
host,
command,
file,
keys,
semaphore=semaphore,
password=password,
known_hosts=known_hosts,
sudo_password=sudo_password,
shutdown_event=shutdown_event,
)
)
)
# To let one of the tasks above increment the semaphore. Janky, but eh.
await asyncio.sleep(0)
file_size = FileSize(
len(capture_files),
semaphore,
*capture_files.values(),
refresh_interval=refresh_interval,
shutdown_event=shutdown_event,
)
if not be_quiet:
task_list.append(file_size.file_size_worker())
tasks = asyncio.gather(*task_list)
def shutdown_signal_handler(_, __):
shutdown_event.set()
for shutdown_signal in (signal.SIGINT, signal.SIGTERM):
signal.signal(shutdown_signal, shutdown_signal_handler)
try:
await tasks
except Exception:
print("Hit an unexpected exception!")
traceback.print_exc()
raise
finally:
print("Done.")
# Hack to make entrypoints work
def run():
asyncio.run(main())
if __name__ == "__main__":
asyncio.run(main()) | /remotecap-3.0.1.tar.gz/remotecap-3.0.1/remotecap.py | 0.610337 | 0.257788 | remotecap.py | pypi |

[](https://pypi.org/project/remotecodes/)
# remotecodes
`remotecodes` is a curated repository of crowd-sourced IR / RF remote codes intended to be useful in home automation scenarios.
Thus codes are not just an arbitrary list of key-value pairs, but must adhere to a strict schema per device domain, that gives
functional meaning to each code, which can later be interpreted by a higher level integration (for instance in
[Home Assistant](https://www.home-assistant.io/)).
All codes are written using [remoteprotocols](https://github.com/ianchi/remoteprotocols#remote-command-strings) command notation
and all it's [supported protocols](https://github.com/ianchi/remoteprotocols/blob/master/PROTOCOLS.md#list-of-supported-protocols)
are available. See links for details.
A codes file is always referenced as `brand.category.seq_number`
## Usage
Codes files can be used directly from their yaml form.
If consumed from _python_, there are some helper functions available
```python
from remotecodes import get_codes
from remotecodes.schema import media_player, validate_source
#intended as voluptuous validator for source string format
source = validate_source("acme.tv.001")
# find the correct codes file, validate it and return it as a dict
# an optional <domain> can be marked as required
# optional extra root folders can be added to the search. They take priority over built-in ones.
codes = get_codes(source, "media_player", ["my_extra_codes_root"])
```
### Command line
To batch validate all codes files and their folder structure:
```bash
remotecodes validate <codes_root_folder>
```
# Contributing Codes
Codes comes from user contribution, so you are encouraged to share your files.
## Hierarchy
- Codes files are organized in folders that must adhere to the pattern:
`brand/category/`
- Files within each folder must be named according to the pattern: `brand.category.seq_number.yaml`
- The brand and category must be consistent among folder, filename, file content
## Schema
All files must have a required `info` section and at least one domain section that follows it's respective schema. A device can have commands for more than one domain (i.e. a `fan` and a `light`).
Additionally a free `custom` section can be added to include extra commands not tied to a well defined function.
### Section `info`
```yaml
info: #required
brand: Acme #required
models: #required, min length: 1
- Model A
category: tv #required
notes: < #optional
This codes where learned and tested
# At least one domain section required
```
_brand_ and _category_ are validated against filename and path.
_category_ must be one of:
- air_conditioner
- audio_player
- av_receiver
- fan
- light
- projector
- settopbox
- speaker
- switch
- tuner
- tv
- video_player
### Commands
All codes are written using [remoteprotocols](https://github.com/ianchi/remoteprotocols#remote-command-strings) command notation
and all it's [supported protocols](https://github.com/ianchi/remoteprotocols/blob/master/PROTOCOLS.md#list-of-supported-protocols)
are available. See links for details.
This is often done as a single string, but if a multi command sequence is needed it can be expressed as an array of commands.
After validation all entries are converted to arrays.
```yaml
#single command
some_function: nec:0xFE:0x7E
#multi sequence
other_function:
- nec:0x7E:0xA2
- nec:0x7E:0xA3
```
### Section `media_player`
Used for any kind of media device. All subsections are optional, but at least one must be present.
```yaml
media_player: #cannot be empty
power: #optional, but cannot be empty
power_on: <command> #optional, required if 'off' is set
power_off: <command> #optional, required if 'on' is set
power_toggle: <command> #optional
volume: #optional, but cannot be empty
up: <command> #optional, required if 'down' is set
down: <command> #optional, required if 'up' is set
mute_on: <command> #optional, required if 'off' is set
mute_off: <command> #optional, required if 'on' is set
mute_toggle: <command> #optional
sources: #optional, min length: 1
# source names are device dependant. Valid characters: [azAZ09_- ], cannot start with a symbol
- source_1: <command>
- source_2: <command>
sound_modes: #optional, min length: 1
# modes names are device dependant. Valid characters: [azAZ09_- ], cannot start with a symbol
- mode_1: <command>
- mode_2: <command>
numbers: #optional, if set ALL numbers are required
0: <command> #required
1: <command> #required
2: <command> #required
3: <command> #required
4: <command> #required
5: <command> #required
6: <command> #required
7: <command> #required
8: <command> #required
9: <command> #required
media: #optional, but cannot be empty
play: <command> #optional
pause: <command> #optional
play_pause: <command> #optional
stop: <command> #optional
next_track: <command> #optional, required if 'prev' is set
prev_track: <command> #optional, required if 'next' is set
fast_forward: <command> #optional, required if 'rewind' is set
rewind: <command> #optional, required if 'fast_forward' is set
navigate: #optional, but cannot be empty
up: <command> #optional, required if 'down' is set
down: <command> #optional, required if 'up' is set
left: <command> #optional, required if 'right' is set
right: <command> #optional, required if 'left' is set
select: <command> #optional
back: <command> #optional
# clear_playlist, shuffle_set, repeat_set,
```
### Section `fan`
Used for fan device.
```yaml
#TBD
```
### Section `air_conditioner`
Used for air conditioner device.
```yaml
#TBD
```
### Section `light`
Used for light control functions.
```yaml
#TBD
```
### Section `switch`
Used for to control simple switching functions.
```yaml
#TBD
```
### Section `custom`
Used for arbitrary functions that not fall into any other category.
It is discuraged as it's unstandardized nature cannot be used by higher order components, but can be useful for manual use cases.
```yaml
custom:
# function names are completely custom. Valid characters: [azAZ09_- ], cannot start with a symbol
function1: <command>
function2: <command>
```
| /remotecodes-0.0.1.tar.gz/remotecodes-0.0.1/README.md | 0.467575 | 0.898188 | README.md | pypi |
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from azure.core.exceptions import ResourceExistsError, HttpResponseError
from azure.storage.blob import BlobServiceClient
from remotedict.azure.index.azure_index import AzureIndex
from remotedict.exceptions.item_locked_exception import ItemLockedException
from remotedict.utils.serialization import serialize, unserialize
class AzureDictionary:
def __init__(self, connection_string, container_name, folder_name, background_pool_size=16):
self._service_client = BlobServiceClient.from_connection_string(connection_string)
self._container_name = container_name
self._folder_name = folder_name
self._container = self._service_client.get_container_client(container_name)
try:
self._container.create_container()
except ResourceExistsError:
pass
self._index = AzureIndex(self._container.get_blob_client(f"index/{folder_name}/main_index"))
self._background = ThreadPoolExecutor(background_pool_size)
self._name_prefix = "default/" if folder_name is None else folder_name + "/"
self._leases = {}
@property
def index(self):
return self._index
def lock_item(self, item, duration=15, wait=True):
"""
Locks an item for the given duration in seconds.
This ensures that no other process can ever lock this same item while it is locked, effectively "reserving" the property of the item.
Note that the item can still be read by other processes, but they cannot modify or lock it.
:param item:
Item to block
:param duration:
Duration in seconds of the lock
:param wait:
Waits for the lock to be released (in case lease was acquired by other process)
"""
item_index = self.index.get(item)
done = False
while not done:
try:
try:
self._leases[item_index] = self._container.get_blob_client(item_index).acquire_lease(duration)
done = True
except HttpResponseError as e:
if 'There is already a lease present' in str(e):
raise ItemLockedException("Item is already locked by other process", item)
else:
raise e
except ItemLockedException as e:
if wait:
sleep(0.5)
else:
raise e from None
def unlock_item(self, item):
item_index = self.index.get(item)
self._leases[item_index].release()
del self._leases[item_index]
def __delitem__(self, item):
self.index.remove(item)
def __setitem__(self, item, value):
if type(item) is not list:
item = [item]
value = [value]
real_items = [self._name_prefix + i for i in item]
results = [self._background.submit(self._put_single_item, i, v) for i, v in zip(real_items, value)] \
+ [self._background.submit(self._index.insert, item, real_items)]
results = [result.result() for result in results]
def __getitem__(self, item):
was_single_item = type(item) is not list
items = [item] if was_single_item else item
# Obtenemos los índices de los ítems
indexes = self.index.get(items).to_numpy()
results = [self._background.submit(self._get_single_item, i) for i in indexes]
results = [result.result() for result in results]
if len(results) == 1 and was_single_item:
results = results[0]
return results
def _put_single_item(self, item, value):
blob_client = self._container.get_blob_client(item)
blob_client.upload_blob(serialize(value), overwrite=True, lease=self._leases.get(item, None))
def _get_single_item(self, item):
blob_client = self._container.get_blob_client(item)
return unserialize(blob_client.download_blob().readall())
def __iter__(self):
index = self.index
yield from index.keys
def keys(self):
return list(self)
def values(self):
for item in self:
yield self[item]
def items(self):
for k in self:
yield k, self[k]
def clear(self):
self.index.clear()
def __len__(self):
index = self._index
return len(index)
def __str__(self):
return f"Azure Blob Storage. Container: \"{self._container_name}\"; Folder: \"{self._folder_name}\"; Num elements: {len(self)}"
def __repr__(self):
return str(self) | /azure/azure_dictionary.py | 0.66769 | 0.159185 | azure_dictionary.py | pypi |
# remoteprotocols
`remoteprotocols` is a command line utility and a Python's library to convert between more than 20 encoded IR and 5 RF remote protocols (nec, jvc, RC switch, see [full protocol list](https://github.com/ianchi/remoteprotocols/tree/master/PROTOCOLS.md)) and 4 raw formats (pronto, Broadlink, Xiaomi miio, raw durations) and between those. The goal is to be able to use any existing code listing with any transmitter, and to be able to decode raw signal received by any device into the proper encoded format.
## Remote command strings
To interact with _remoteprotocols_ you'll be using remote commands to encode/decode/convert using the following syntax:
```bash
protocol:<arg 1>:<arg 2>: ... :<arg n>
# Example signatures
sony:<data>:<nbits?=12>
toshiba_ac:<rc_code_1>:<rc_code_2?=0>
# Example usage
nec:0x7A:0x57
```
You can get a list of all supported protocols and their command signatures [here](https://github.com/ianchi/remoteprotocols/tree/master/PROTOCOLS.md), and from the command line.
Optional arguments can be omitted, with empty `::` in the middle or completely omitting the colon at the end.
## Command line
You can use _remoteprotocols_ from the command line:
```
usage: remoteprotocols [-h] [-v] command ...
remoteprotocols v0.0.1
positional arguments:
command Command to run:
validate-protocol
Validate a protocol definition file.
validate-command
Validate a send command string(s).
encode Encodes command string(s) into raw signal (durations).
convert Converts command string(s) to other protocols.
list List supported protocols.
optional arguments:
-h, --help show this help message and exit
-v, --version Show version information.
```
## API usage
To interact with _remoteprotocols_ from your own program you import it and interact with it thru the registry, which has all built-in protocol definitions
```python
from remoteprotocols import ProtocolRegistry
protocols = ProtocolRegistry()
matches = protocols.convert("nec:0x7A:0x57", 0.2, ["durations", "broadlink"])
```
Main _ProtocolRegistry_ methods:
- **convert**(_command_:str, _tolerance_: float, _codec_filter_:list[str]) -> list[DecodeMatch]
Converts a given command into other protocols (all or filtered).
Returns a list of _DecodeMatch_ objects with the successful conversions, or an empty list if it couldn't convert.
The match has the following attributes:
- _protocol_: the matched protocol as a _ProtocolDef_ object
- _args_: an array of parsed arguments (including default ones)
- _tolerance_: the tolerance needed to match this protocol (lower better match)
- _uniquematch_: boolean indicating if the match is unique for this protocol. It should always be true for well defined protocols. When _false_, it means that the returned match is one out of multiple results that would encode in the same signal
- _missing_bits_: array of bitmasks of bits for each argument, that could not be decoded, and thus any value in those bits would be a valid result. If any mask is non zero, then the match is not unique for the protocol
- _toggle_bit_: state of the toggle bit (internal argument). Only relevant for protocols that use it (like RC5)
- **decode**(signal: SignalData, tolerance: float, protocol: Optional[list[str]])-> list[DecodeMatch]
Decodes a signal (optional frequency & array of durations) and returns a list of all matching protocols and corresponding decoded arguments. It decodes into all known protocols or a filtered subset.
- **parse_command**(command: str)-> RemoteCommand
Parses and validates a command string into a _RemoteCommand_ object.
It raises `voluptuous.Invalid` exception if there is any parsing problem.
## Example Protocol Definition
Encoded protocols are easily defined using an intuitive declarative syntax in the definitions yaml file, which is then used to both encode and decode.
```yaml
nec:
desc: NEC IR code (in LSB form)
type: IR
args:
- name: address
max: 16bits
desc: Address to send
print: 04X
- name: command
max: 16bits
desc: NEC command to send
print: 04X
timings:
frequency: 38000
unit: 1
one: [560, -1690]
zero: [560, -560]
header: [9000, -4500]
footer: [560]
pattern: header {address LSB 16} {command LSB 16} footer
```
# Acknowledgments
Thanks to all of the following sites and projects from where I obtained information about different codec definitions:
- [ESPHome](https://github.com/esphome/esphome/tree/2022.3.0/esphome/components/remote_base)
- [IRremoteESP8266](https://crankyoldgit.github.io/IRremoteESP8266/)
- [IRMP](https://github.com/ukw100/IRMP)
- [MakeHex](https://github.com/probonopd/MakeHex)
- [IrScrutinizer](https://github.com/bengtmartensson/IrScrutinizer)
- [python-miio](https://github.com/rytilahti/python-miio/blob/master/miio/chuangmi_ir.py) for Xiaomi miio raw format
- [python-broadlink](https://github.com/mjg59/python-broadlink/blob/master/protocol.md) for Broadlink raw format
- [harctoolbox](http://www.harctoolbox.org/Glossary.html#ProntoSemantics) for Pronto
| /remoteprotocols-0.0.7.tar.gz/remoteprotocols-0.0.7/README.md | 0.750278 | 0.909546 | README.md | pypi |
import rasterio
from PIL.JpegImagePlugin import JpegImageFile
from affine import Affine
from rasterio import plot
from rslearn.utils import *
from ._typing import *
class ImageFrame:
"""
图像类
Note
----
这里的图像采用BSQ排列
"""
def __init__(self, data: MatrixLike, dispose: ['BSQ', "BIL", "BIP"], bands_name: Sequence = ...) -> None:
'''该函数使用数据、数据排列方法和 band 名称初始化一个对象。
Parameters
----------
data : MatrixLike
“data”参数是一个类似矩阵的对象,表示图像的数据。它可以是 NumPy 数组或任何其他可以转换为 NumPy 数组的对象。
dispose : ['BSQ', "BIL", "BIP"]
`dispose` 参数用于指定输入矩阵中数据的排列。它可以采用以下值之一:“BSQ”(波段顺序)、“BIL”(波段按行交错)或“BIP”(波段按像素交错)
bands_name : Sequence
数据中每个波段的名称序列。
'''
if dispose == "BSQ":
self.__data = np.array(data)
elif dispose == "BIL":
self.__data = bil_to_bsq(np.array(data))
elif dispose == "BIP":
self.__data = bip_to_bsq(np.array(data))
else:
raise Exception()
self.__shape = self.__data.shape
self.__profile = {}
@classmethod
def read(cls, fp):
'''该函数使用 rasterio 读取光栅文件,使用数据创建图像对象,并设置图像的配置文件。
Parameters
----------
cls
参数“cls”是对“read”方法所属类的引用。它用于使用“data”和“dispose”参数创建类“cls”的实例。
fp
参数“fp”代表“文件路径”。它是您要读取的光栅文件的路径。
Returns
-------
类“cls”的实例,它是使用“data”和“dispose”参数创建的。然后为“img”实例分配“src”对象的配置文件并返回。
'''
with rasterio.open(fp) as src:
data = src.read()
img = cls(data, dispose="BSQ")
img.set_profile(src.profile)
return img
@classmethod
def read_tif(cls, fp):
"""
Parameters
----------
fp
Returns
-------
"""
raise NotImplementedError()
pass
@classmethod
def read_jpg(cls, fp):
img = JpegImageFile(fp=fp)
data = np.array(img)
return cls(data, dispose="BIP")
@property
def profile(self):
return self.__profile
def set_profile(self, profile: DictLike):
# TODO 这里应该加一些安全措施
self.__profile = profile
@property
def shape(self):
return self.__data.shape
@property
def n_bands(self) -> int:
return self.shape[0]
@property
def bands(self) -> int:
return self.to_bsq_mat()
@property
def transform(self) -> Affine:
return self.profile.get("transform", Affine.identity())
def to_bil_mat(self):
return bsq_to_bil(self.__data)
def to_bip_mat(self):
return bip_to_bsq(self.__data)
def to_bsq_mat(self):
return self.__data
def display(self, bidx: list[int] = ...):
'''`display` 函数用于显示图像的指定波段。
Parameters
----------
bidx : list[int]
`bidx` 参数是一个整数列表,指定要显示的波段。如果未提供“bidx”,则默认为“Ellipsis”。如果“bidx”是“Ellipsis”,则该函数检查波段数(“n_bands”)并设置“
'''
if bidx is Ellipsis:
if self.n_bands < 3:
bidx = list[range(self.n_bands)]
else:
bidx = [0, 1, 2]
plot.show(self.bands[bidx, :, :], transform=self.transform) | /remotesense_learn-0.0.4-py3-none-any.whl/rslearn/rsimage.py | 0.590425 | 0.50177 | rsimage.py | pypi |
from sklearn.svm import SVC
from sklearn.base import ClassifierMixin
import numpy as np
from sklearn.metrics import accuracy_score
from typing import *
import decimal
import io
import numpy as np
import pandas as pd
from scipy.sparse import spmatrix
import numpy.typing
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from .._typing import *
class ClassifierWrapper(ClassifierMixin):
"""分类器包装器
`ClassifierWrapper`类是一个包装器,允许分类器模型在 3 维输入矩阵(例如图像数据)上进行拟合、预测和评分。
"""
def __init__(self,model) -> None:
'''将分类器进行包装
Parameters
----------
model
“model”参数是分类器类的实例。它用于存储和操作将用于分类任务的分类器模型。
'''
self.__model:Classifier=model
pass
def fit(self,X:MatrixLike,y:MatrixLike,**fit_params):
'''该函数接收输入数据和标签,重新调整它们的形状,并将模型拟合到数据。
Parameters
----------
X : MatrixLike
输入数据矩阵,其中每行代表一个样本,每列代表一个特征。它应该是BSQ格式排列的图像矩阵。
y : MatrixLike
参数“y”表示数据集中的目标变量或因变量。它是一个类似矩阵的对象,包含数据集中每个样本的目标变量的值。
Returns
-------
使用给定的 X 和 y 数据拟合模型后类本身 (self) 的实例。
'''
SHAPE=X.shape
#self.n_features_in_=SHAPE[0]
X=np.transpose(X)
X=np.reshape(X,[-1,SHAPE[0]])
y=np.transpose(y)
y=np.reshape(y ,[-1])
self.__model.fit(X,y,**fit_params)
return self
def predict(self,X:MatrixLike):
'''“predict”函数接受 3 维输入矩阵,对其进行整形和转置,使用模型进行预测,然后返回预测的输出矩阵。
Parameters
----------
X : MatrixLike
参数 X,为BSQ排列的图像矩阵
Returns
-------
预测值,Y。
'''
SHAPE=X.shape
assert len(SHAPE)==3
X= np.transpose(X) #变换维度
X=np.reshape(X,[-1,SHAPE[0]]) #变换形状
Y=self.__model.predict(X) #预测
Y=np.reshape(Y,[SHAPE[2],SHAPE[1]])
Y=np.transpose(Y)
return Y
def score(self, X: MatrixLike, y: MatrixLike | ArrayLike, sample_weight: ArrayLike | None = None) -> Float:
'''“score”函数计算预测标签与真实标签相比的准确度得分。
Parameters
----------
X : MatrixLike
输入数据矩阵。它应该是一个类似矩阵的对象,例如 NumPy 数组或 Pandas DataFrame。
y : MatrixLike | ArrayLike
参数“y”表示给定输入数据“X”的真实标签或目标值。它应当是类似矩阵的对象。
sample_weight : ArrayLike | None
“sample_weight”参数是一个可选的类似数组的对象,用于为各个样本分配权重。它允许您在评分过程中更加重视某些样本。如果未提供,则假定所有样本具有相同的权重。
Returns
-------
准确度得分。
'''
y_pred=self.predict(X)
y_pred=np.reshape(y_pred,[-1])
y=np.reshape(y,[-1])
return accuracy_score(y,y_pred,sample_weight=sample_weight)
@property
def n_features_in_(self)-> int:
"""返回模型中的特征数量,对于遥感影像,特征数量即影像层数"""
return self.__model.n_features_in_
def get_params(self)->dict:
'''函数“get_params”返回一个包含模型参数的字典以及模型类的名称。
'''
d= self.__model.get_params()
d["model"]=self.__model.__class__.__name__
return d | /remotesense_learn-0.0.4-py3-none-any.whl/rslearn/wraper/classfier.py | 0.764276 | 0.496277 | classfier.py | pypi |
import io
import zipfile
from itertools import tee
import requests
__all__ = ['RemoteIOError', 'RemoteZip']
class RemoteZipError(Exception):
pass
class OutOfBound(RemoteZipError):
pass
class RemoteIOError(RemoteZipError):
pass
class RangeNotSupported(RemoteZipError):
pass
class PartialBuffer:
"""An object with buffer-like interface but containing just a part of the data.
The object allows to seek and read like this buffer contains the full data
however, any attempt to read data outside the partial data is going to fail
with OutOfBound error.
"""
def __init__(self, buffer, offset, size, stream):
self.buffer = buffer if stream else io.BytesIO(buffer.read())
self._offset = offset
self._size = size
self._position = offset
self._stream = stream
def __len__(self):
"""Returns the data size contained in the buffer"""
return self._size
def __repr__(self):
return "<PartialBuffer off=%s size=%s stream=%s>" % (self._offset, self._size, self._stream)
def read(self, size=0):
"""Read data from the buffer from the current position"""
if size == 0:
size = self._offset + self._size - self._position
content = self.buffer.read(size)
self._position = self._offset + self.buffer.tell()
return content
def close(self):
"""Ensure memory and connections are closed"""
if not self.buffer.closed:
self.buffer.close()
if hasattr(self.buffer, 'release_conn'):
self.buffer.release_conn()
def tell(self):
"""Returns the current position on the virtual buffer"""
return self._position
def seek(self, offset, whence):
"""Change the position on the virtual buffer"""
if whence == 2:
self._position = self._size + self._offset + offset
elif whence == 0:
self._position = offset
else:
self._position += offset
relative_position = self._position - self._offset
if relative_position < 0 or relative_position >= self._size:
raise OutOfBound("Position out of buffer bound")
if self._stream:
buff_pos = self.buffer.tell()
if relative_position < buff_pos:
raise OutOfBound("Negative seek not supported")
skip_bytes = relative_position - buff_pos
if skip_bytes == 0:
return self._position
self.buffer.read(skip_bytes)
else:
self.buffer.seek(relative_position)
return self._position
class RemoteIO(io.IOBase):
"""Exposes a file-like interface for zip files hosted remotely. It requires the remote server to
support the Range header."""
def __init__(self, fetch_fun, initial_buffer_size=64*1024):
self._fetch_fun = fetch_fun
self._initial_buffer_size = initial_buffer_size
self.buffer = None
self._file_size = None
self._seek_succeeded = False
self._member_position_to_size = None
self._last_member_pos = None
def set_position_to_size(self, position_to_size):
self._member_position_to_size = position_to_size
def read(self, size=0):
position = self.tell()
if size == 0:
size = self._file_size - position
if not self._seek_succeeded:
if self._member_position_to_size is None:
fetch_size = size
stream = False
else:
try:
fetch_size = self._member_position_to_size[position]
self._last_member_pos = position
except KeyError:
if self._last_member_pos and self._last_member_pos < position:
fetch_size = self._member_position_to_size[self._last_member_pos]
fetch_size -= (position - self._last_member_pos)
else:
raise OutOfBound("Attempt to seek outside boundary of current zip member")
stream = True
self._seek_succeeded = True
self.buffer.close()
self.buffer = self._fetch_fun((position, position + fetch_size - 1), stream=stream)
return self.buffer.read(size)
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence == 2 and self._file_size is None:
size = self._initial_buffer_size
self.buffer = self._fetch_fun((-size, None), stream=False)
self._file_size = len(self.buffer) + self.buffer.tell()
try:
pos = self.buffer.seek(offset, whence)
self._seek_succeeded = True
return pos
except OutOfBound:
self._seek_succeeded = False
return self.tell() # we ignore the issue here, we will check if buffer is fine during read
def tell(self):
return self.buffer.tell()
def close(self):
if self.buffer:
self.buffer.close()
self.buffer = None
class RemoteFetcher:
"""Represent a remote file to be fetched in parts"""
def __init__(self, url, session=None, support_suffix_range=True, **kwargs):
self._kwargs = kwargs
self._url = url
self._session = session
self._support_suffix_range = support_suffix_range
@staticmethod
def parse_range_header(content_range_header):
range = content_range_header[6:].split("/")[0]
if range.startswith("-"):
return int(range), None
range_min, range_max = range.split("-")
return int(range_min), int(range_max) if range_max else None
@staticmethod
def build_range_header(range_min, range_max):
if range_max is None:
return "bytes=%s%s" % (range_min, '' if range_min < 0 else '-')
return "bytes=%s-%s" % (range_min, range_max)
def _request(self, kwargs):
if self._session:
res = self._session.get(self._url, stream=True, **kwargs)
else:
res = requests.get(self._url, stream=True, **kwargs)
res.raise_for_status()
if 'Content-Range' not in res.headers:
raise RangeNotSupported("The server doesn't support range requests")
return res.raw, res.headers['Content-Range']
def prepare_request(self, data_range=None):
kwargs = dict(self._kwargs)
kwargs['headers'] = headers = dict(kwargs.get('headers', {}))
if data_range is not None:
headers['Range'] = self.build_range_header(*data_range)
return kwargs
def get_file_size(self):
if self._session:
res = self._session.head(self._url, **self.prepare_request())
else:
res = requests.head(self._url, **self.prepare_request())
try:
res.raise_for_status()
return int(res.headers['Content-Length'])
except IOError as e:
raise RemoteIOError(str(e))
except KeyError:
raise RemoteZipError("Cannot get file size: Content-Length header missing")
def fetch(self, data_range, stream=False):
"""Fetch a part of a remote file"""
# Handle the case suffix range request is not supported. Fixes #15
if data_range[0] < 0 and data_range[1] is None and not self._support_suffix_range:
size = self.get_file_size()
data_range = (max(0, size + data_range[0]), size - 1)
kwargs = self.prepare_request(data_range)
try:
res, range_header = self._request(kwargs)
range_min, range_max = self.parse_range_header(range_header)
return PartialBuffer(res, range_min, range_max - range_min + 1, stream)
except IOError as e:
raise RemoteIOError(str(e))
def pairwise(iterable):
# pairwise('ABCDEFG') --> AB BC CD DE EF FG
a, b = tee(iterable)
next(b, None)
return zip(a, b)
class RemoteZip(zipfile.ZipFile):
def __init__(self, url, initial_buffer_size=64*1024, session=None, fetcher=RemoteFetcher, support_suffix_range=True,
**kwargs):
fetcher = fetcher(url, session, support_suffix_range=support_suffix_range, **kwargs)
rio = RemoteIO(fetcher.fetch, initial_buffer_size)
super(RemoteZip, self).__init__(rio)
rio.set_position_to_size(self._get_position_to_size())
def _get_position_to_size(self):
ilist = [info.header_offset for info in self.infolist()]
if len(ilist) == 0:
return {}
ilist.sort()
ilist.append(self.start_dir)
return {a: b-a for a, b in pairwise(ilist)} | /remotezip-0.12.1.tar.gz/remotezip-0.12.1/remotezip.py | 0.659076 | 0.232898 | remotezip.py | pypi |
import json
from math import ceil
import boto3
from .models import RenderParams, RenderProgress, RenderResponse, RenderProgressParams
class RemotionClient:
"""A client for interacting with the Remotion service."""
# pylint: disable=too-many-arguments
def __init__(self, region, serve_url, function_name, access_key=None, secret_key=None):
"""
Initialize the RemotionClient.
Args:
region (str): AWS region.
serve_url (str): URL for the Remotion service.
function_name (str): Name of the AWS Lambda function.
access_key (str): AWS access key (optional).
secret_key (str): AWS secret key (optional).
"""
self.access_key = access_key
self.secret_key = secret_key
self.region = region
self.serve_url = serve_url
self.function_name = function_name
def _serialize_input_props(self, input_props, render_type):
"""
Serialize inputProps to a format compatible with Lambda.
Args:
input_props (dict): Input properties to be serialized.
type (str): Type of the render (e.g., 'still' or 'video-or-audio').
Raises:
ValueError: If the inputProps are too large or cannot be serialized.
Returns:
dict: Serialized inputProps.
"""
try:
payload = json.dumps(input_props, separators=(',', ':'))
max_inline_payload_size = 5000000 if render_type == 'still' else 200000
if len(payload) > max_inline_payload_size:
raise ValueError(
(
f"InputProps are over {round(max_inline_payload_size / 1000)}KB "
f"({ceil(len(payload) / 1024)}KB) in size. This is not currently supported."
)
)
return {
'type': 'payload',
'payload': payload if payload not in ('', 'null') else json.dumps({})
}
except ValueError as error:
raise ValueError(
'Error serializing InputProps. Check for circular ' +
'references or reduce the object size.'
) from error
def _create_lambda_client(self):
if self.access_key and self.secret_key and self.region:
return boto3.client('lambda',
aws_access_key_id=self.access_key,
aws_secret_access_key=self.secret_key,
region_name=self.region)
return boto3.client('lambda', region_name=self.region)
def _invoke_lambda(self, function_name, payload):
client = self._create_lambda_client()
response = client.invoke(
FunctionName=function_name, Payload=payload)
result = response['Payload'].read().decode('utf-8')
decoded_result = json.loads(result)
if 'errorMessage' in decoded_result:
raise ValueError(decoded_result['errorMessage'])
if 'type' in decoded_result and decoded_result['type'] == 'error':
raise ValueError(decoded_result['message'])
if not 'type' in decoded_result or decoded_result['type'] != 'success':
raise ValueError(result)
return decoded_result
def construct_render_request(self, render_params: RenderParams) -> str:
"""
Construct a render request in JSON format.
Args:
render_params (RenderParams): Render parameters.
Returns:
str: JSON representation of the render request.
"""
render_params.serve_url = self.serve_url
render_params.region = self.region
render_params.function_name = self.function_name
render_params.input_props = self._serialize_input_props(
input_props=render_params.data,
render_type="video-or-audio"
)
return json.dumps(render_params.serialize_params())
def construct_render_progress_request(self, render_id: str, bucket_name: str) -> str:
"""
Construct a render progress request in JSON format.
Args:
render_id (str): ID of the render.
bucket_name (str): Name of the bucket.
Returns:
str: JSON representation of the render progress request.
"""
progress_params = RenderProgressParams(
render_id=render_id,
bucket_name=bucket_name,
function_name=self.function_name,
region=self.region
)
return json.dumps(progress_params.serialize_params())
def render_media_on_lambda(self, render_params: RenderParams) -> RenderResponse:
"""
Render media using AWS Lambda.
Args:
render_params (RenderParams): Render parameters.
Returns:
RenderResponse: Response from the render operation.
"""
params = self.construct_render_request(render_params)
body_object = self._invoke_lambda(
function_name=self.function_name, payload=params)
if body_object:
return RenderResponse(body_object['bucketName'], body_object['renderId'])
return None
def get_render_progress(self, render_id: str, bucket_name: str) -> RenderProgress:
"""
Get the progress of a render.
Args:
render_id (str): ID of the render.
bucket_name (str): Name of the bucket.
Returns:
RenderProgress: Progress of the render.
"""
params = self.construct_render_progress_request(render_id, bucket_name)
progress_response = self._invoke_lambda(
function_name=self.function_name, payload=params)
if progress_response:
render_progress = RenderProgress()
render_progress.__dict__.update(progress_response)
return render_progress
return None | /remotion_lambda-4.1.0a12-py3-none-any.whl/remotion_lambda/remotionclient.py | 0.879348 | 0.19431 | remotionclient.py | pypi |
from typing import Optional, List, Dict
from dataclasses import dataclass, field
from .version import VERSION
# pylint: disable=too-many-instance-attributes
@dataclass
class RenderParams:
"""
Parameters for video rendering.
"""
data: Optional[List] = None
bucket_name: Optional[str] = None
region: Optional[str] = None
out_name: Optional[str] = None
composition: str = ""
serve_url: str = ""
frames_per_lambda: Optional[int] = None
input_props: Optional[Dict] = None
codec: str = 'h264'
version: str = ""
image_format: str = 'jpeg'
crf: Optional[int] = None
env_variables: Optional[List] = None
quality: Optional[int] = None
max_retries: int = 1
privacy: str = 'public'
log_level: str = 'info'
frame_range: Optional[str] = None
timeout_in_milliseconds: Optional[int] = 30000
chromium_options: Optional[Dict] = None
scale: Optional[int] = 1
every_nth_frame: Optional[int] = 1
number_of_gif_loops: Optional[int] = 0
concurrency_per_lambda: Optional[int] = 1
download_behavior: Dict = field(default_factory=lambda: {
'type': 'play-in-browser'})
muted: bool = False
overwrite: bool = False
audio_bitrate: Optional[int] = None
video_bitrate: Optional[int] = None
webhook: Optional[str] = None
force_height: Optional[int] = None
force_width: Optional[int] = None
audio_codec: Optional[str] = None
renderer_function_name: Optional[str] = None
pro_res_profile: Optional[str] = None
pixel_format: Optional[str] = None
def serialize_params(self) -> Dict:
"""
Convert instance attributes to a dictionary for serialization.
"""
parameters = {
'rendererFunctionName': self.renderer_function_name,
'framesPerLambda': self.frames_per_lambda,
'composition': self.composition,
'serveUrl': self.serve_url,
'inputProps': self.input_props,
'codec': self.codec,
'imageFormat': self.image_format,
'maxRetries': self.max_retries,
'privacy': self.privacy,
'logLevel': self.log_level,
'frameRange': self.frame_range,
'outName': self.out_name,
'timeoutInMilliseconds': self.timeout_in_milliseconds,
'chromiumOptions': self.chromium_options if self.chromium_options is not None else {},
'scale': self.scale,
'everyNthFrame': self.every_nth_frame,
'numberOfGifLoops': self.number_of_gif_loops,
'concurrencyPerLambda': self.concurrency_per_lambda,
'downloadBehavior': self.download_behavior,
'muted': self.muted,
'version': VERSION,
'overwrite': self.overwrite,
'audioBitrate': self.audio_bitrate,
'videoBitrate': self.video_bitrate,
'webhook': self.webhook,
'forceHeight': self.force_height,
'forceWidth': self.force_width,
'bucketName': self.bucket_name,
'audioCodec': self.audio_codec,
'type': 'start'
}
if self.crf is not None:
parameters['crf'] = self.crf
if self.env_variables is not None:
parameters['envVariables'] = self.env_variables
if self.pixel_format is not None:
parameters['pixelFormat'] = self.pixel_format
if self.pro_res_profile is not None:
parameters['proResProfile'] = self.pro_res_profile
if self.quality is not None:
parameters['quality'] = self.quality
return parameters
# pylint: disable=too-many-instance-attributes
class RenderResponse:
"""
Response data after rendering.
"""
def __init__(self, bucketName, renderId):
self.bucketName = bucketName
self.renderId = renderId
@dataclass
class RenderProgressParams:
"""
Parameters for checking the progress of video rendering.
"""
render_id: str
bucket_name: str
function_name: str
region: str
def serialize_params(self) -> Dict:
"""
Convert instance attributes to a dictionary for serialization.
"""
parameters = {
'renderId': self.render_id,
'bucketName': self.bucket_name,
'type': 'status',
"version": VERSION,
"s3OutputProvider": None
}
return parameters
class RenderProgress:
"""
Progress of video rendering.
"""
def __init__(self):
self.overallProgress = float()
self.chunks = int()
self.done = bool()
self.encodingStatus = None
self.costs = None
self.renderId = str()
self.renderMetadata = None
self.outputFile = None
self.outKey = None
self.timeToFinish = None
self.errors = []
self.fatalErrorEncountered = bool()
self.currentTime = int()
self.renderSize = int()
self.outputSizeInBytes = None
self.lambdasInvoked = int()
self.framesRendered = None | /remotion_lambda-4.1.0a12-py3-none-any.whl/remotion_lambda/models.py | 0.928457 | 0.219484 | models.py | pypi |
from remotior_sensus.core.bandset_catalog import BandSetCatalog
class OutputManager(object):
"""Manages output.
This class manages several types of output, mainly intended for tools
that have several outputs.
Check argument is False if output failed.
Single output raster or multiple file paths can be defined as arguments.
Additional output files or tables are managed with an extra argument.
The type of the extra argument can be flexible depending on the process
output.
Attributes:
check: True if output is as expected, False if process failed.
path: path of the first output.
paths: list of output paths in case of multiple outputs.
extra: additional output elements depending on the process.
Examples:
Output failed
>>> OutputManager(check=False)
Output is checked and file path is provided
>>> OutputManager(path='file.tif')
""" # noqa: E501
def __init__(
self, check: bool = True, path: str = None, paths: list = None,
extra=None
):
"""Initializes an Output.
Initializes an Output.
Args:
check: True if output is as expected, False if process failed.
path: path of the first output.
paths: list of output paths in case of multiple outputs.
extra: additional output elements depending on the process.
Examples:
Create an object with a single file path
>>> OutputManager(path='file.tif')
Create an object with several output file paths in a list and an extra argument for a dictionary
>>> OutputManager(
... paths=['file1.tif', 'file2.tif'],
... extra={'additional_output': 'file.csv'}
... )
)
""" # noqa: E501
self.check = check
self.paths = paths
if path is None:
if paths is None:
self.path = None
elif len(paths) == 0:
self.path = None
else:
self.path = paths[0]
else:
self.path = path
self.extra = extra
def add_to_bandset(
self, bandset_catalog: BandSetCatalog, bandset_number=None,
band_number=None, raster_band=None, band_name=None, date=None,
unit=None, root_directory=None, multiplicative_factor=None,
additive_factor=None, wavelength=None
):
"""Adds output to BandSet.
Adds the OutputManager.path as a band to a BandSet in a BandSetCatalog.
Args:
bandset_catalog: BandSetCatalog object.
band_name: raster name used for identifying the bands.
wavelength: center wavelengths of band.
unit: wavelength unit as string
multiplicative_factor: multiplicative factor for bands during calculations.
additive_factor: additive factors for band during calculations.
date: date string (format YYYY-MM-DD).
bandset_number: number of the BandSet; if None, the band is added to the current BandSet.
root_directory: root directory for relative path.
raster_band: raster band number.
band_number: number of band in BandSet.
Examples:
Add the output to BandSet 1 as band 1.
>>> catalog = BandSetCatalog()
>>> OutputManager.add_to_bandset(
... bandset_catalog=catalog, bandset_number=1, band_number=1
... )
""" # noqa: E501
if type(bandset_catalog) is BandSetCatalog:
bandset_catalog.add_band_to_bandset(
path=self.path, bandset_number=bandset_number,
band_number=band_number, raster_band=raster_band,
band_name=band_name, date=date, unit=unit,
root_directory=root_directory,
multiplicative_factor=multiplicative_factor,
additive_factor=additive_factor, wavelength=wavelength
)
else:
raise Exception('bandset catalog not found') | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/core/output_manager.py | 0.915053 | 0.632446 | output_manager.py | pypi |
import logging
from types import FunctionType
from typing import Optional
from remotior_sensus.core import configurations, messages, table_manager
from remotior_sensus.core.bandset_catalog import BandSet, BandSetCatalog
from remotior_sensus.core.log import Log
from remotior_sensus.core.multiprocess_manager import Multiprocess
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.progress import Progress
from remotior_sensus.core.spectral_signatures import (
SpectralSignaturesCatalog, SpectralSignaturePlotCatalog,
SpectralSignaturePlot
)
from remotior_sensus.core.temporary import Temporary
from remotior_sensus.tools import (
band_calc, band_classification, band_clip, band_combination, band_dilation,
band_erosion, band_neighbor_pixels, band_pca, band_sieve, band_resample,
band_stack, band_mask, raster_split,
cross_classification, download_products, mosaic, preprocess_products,
raster_reclassification, raster_report, raster_to_vector, vector_to_raster
)
from remotior_sensus.util import (
dates_times, system_tools, files_directories, download_tools, shared_tools
)
class Session(object):
"""Manages system parameters.
This module allows for managing Remotior Sensus' session,
setting fundamental processing parameters and exposing core functions
and tools.
Attributes:
configurations: module containing shared variables and functions
bandset: access :func:`~remotior_sensus.core.bandset_catalog.BandSet` class
bandset_catalog: access :func:`~remotior_sensus.core.bandset_catalog.BandSetCatalog` class
spectral_signatures_catalog: access :func:`~remotior_sensus.core.spectral_signatures.SpectralSignaturesCatalog` class
spectral_signatures_plot_catalog: access :func:`~remotior_sensus.core.spectral_signatures.SpectralSignaturePlotCatalog` class
spectral_signatures_plot: access :func:`~remotior_sensus.core.spectral_signatures.SpectralSignaturePlot` class
output_manager: access :func:`~remotior_sensus.core.output_manager.OutputManager` class
table_manager: access functions of :func:`~remotior_sensus.core.table_manager` module
dates_times: access dates and times utilities
download_tools: access download utilities
shared_tools: access shared tools
files_directories: access files directories utilities
band_calc: tool :func:`~remotior_sensus.tools.band_calc`
band_classification: tool :func:`~remotior_sensus.tools.band_classification`
classifier: tool :func:`~remotior_sensus.tools.band_classification.Classifier`
band_combination: tool :func:`~remotior_sensus.tools.band_combination`
band_dilation: tool :func:`~remotior_sensus.tools.band_dilation`
band_erosion: tool :func:`~remotior_sensus.tools.band_erosion`
band_mask: tool :func:`~remotior_sensus.tools.band_mask`
band_neighbor_pixels: tool :func:`~remotior_sensus.tools.band_neighbor_pixels`
band_pca: tool :func:`~remotior_sensus.tools.band_pca`
band_resample: tool :func:`~remotior_sensus.tools.band_resample`
band_sieve: tool :func:`~remotior_sensus.tools.band_sieve`
band_stack: tool :func:`~remotior_sensus.tools.band_stack`
cross_classification: tool :func:`~remotior_sensus.tools.cross_classification`
download_products: tool :func:`~remotior_sensus.tools.download_products`
mosaic: tool :func:`~remotior_sensus.tools.mosaic`
preprocess_products: tool :func:`~remotior_sensus.tools.preprocess_products`
raster_reclassification: tool :func:`~remotior_sensus.tools.preprocess_products`
raster_report: tool :func:`~remotior_sensus.tools.raster_report`
raster_split: tool :func:`~remotior_sensus.tools.raster_split`
raster_to_vector: tool :func:`~remotior_sensus.tools.raster_to_vector`
vector_to_raster: tool :func:`~remotior_sensus.tools.vector_to_raster`
Examples:
Start a session
>>> import remotior_sensus
>>> rs = remotior_sensus.Session()
Start a session defining number of parallel processes. and available RAM
>>> import remotior_sensus
>>> rs = remotior_sensus.Session(n_processes=4,available_ram=4096)
Create a :func:`~remotior_sensus.core.bandset_catalog.BandSetCatalog`
>>> catalog = rs.bandset_catalog()
Run the tool for raster report
>>> output = rs.raster_report(raster_path='file.tif', output_path='output.txt')
Stop a session at the end to clear temporary directory
>>> rs.close()
""" # noqa: E501
def __init__(
self, n_processes: Optional[int] = 2, available_ram: int = 2048,
temporary_directory: str = None,
directory_prefix: str = None, log_level: int = 20,
log_time: bool = True, progress_callback=None,
multiprocess_module=None, messages_callback=None,
smtp_server=None, smtp_user=None, smtp_password=None,
smtp_recipients=None, smtp_notification=None,
sound_notification=None
):
"""Starts a session.
Starts a new session setting fundamental parameters for processing.
It sets the number of parallel processes (default 2) and available RAM
(default 2048MB) to be used in calculations.
It starts the class Temporary to manage temporary files by creating a
temporary directory with an optional name prefix.
It starts the class Log for logging (with a default level INFO) and
creates a logging formatter with the option to hide time.
It starts the class Progress for displaying progress with a default
callback function.
A custom progress callback function can be passed optionally.
The sessions also allows for accessing to the core functions
and tools.
In the end, the close() function should be called to clear
the temporary directory and stop the parallel processes.
Args:
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
temporary_directory: path to a temporary directory.
directory_prefix: prefix of the name of the temporary directory.
log_level: level of logging (10 for DEBUG, 20 for INFO).
log_time: if True, logging includes the time.
progress_callback: function for progress callback.
multiprocess_module: multiprocess module, useful if Remotior Sensus' session is started from another Python module.
messages_callback: message module, useful if Remotior Sensus' session is started from another Python module.
smtp_server: optional server for SMTP notification.
smtp_user: user for SMTP authentication.
smtp_password: password for SMTP authentication.
smtp_recipients: string of one or more email addresses separated by comma for SMTP notification.
smtp_notification: optional, if True send SMTP notification.
sound_notification: optional, if True play sound notification.
Examples:
Start a session
>>> import remotior_sensus
>>> rs = remotior_sensus.Session()
""" # noqa: E501
configurations.n_processes = n_processes
configurations.available_ram = available_ram
if sound_notification is not None:
configurations.sound_notification = sound_notification
if smtp_notification is not None:
configurations.smtp_notification = smtp_notification
if smtp_server is not None:
configurations.smtp_server = smtp_server
configurations.smtp_user = smtp_user
configurations.smtp_password = smtp_password
configurations.smtp_recipients = smtp_recipients
# create temporary directory
temp = Temporary()
if directory_prefix is None:
directory_prefix = configurations.root_name
configurations.temp = temp.create_root_temporary_directory(
prefix=directory_prefix, directory=temporary_directory
)
# create logger
if log_level is None:
log_level = logging.INFO
self.log_level = log_level
configurations.logger = Log(
directory=configurations.temp.dir, level=self.log_level,
time=log_time
)
# start progress
if progress_callback is None:
progress_callback = Progress.print_progress_replace
configurations.progress = Progress(callback=progress_callback)
if messages_callback is None:
configurations.messages = messages
else:
configurations.messages = messages_callback
system_tools.get_system_info()
check = _check_dependencies(configurations)
if check:
self.configurations = configurations
# create multiprocess instance
self.configurations.multiprocess = Multiprocess(
n_processes, multiprocess_module
)
# available core tools
self.bandset = BandSet
self.bandset_catalog = BandSetCatalog
self.spectral_signatures_catalog = SpectralSignaturesCatalog
self.spectral_signatures_plot_catalog = (
SpectralSignaturePlotCatalog
)
self.spectral_signatures_plot = SpectralSignaturePlot
self.output_manager = OutputManager
self.table_manager = table_manager
# available tools
self.band_calc = band_calc.band_calc
self.configurations.band_calc = band_calc.band_calc
self.band_classification = band_classification.band_classification
self.configurations.band_classification = (
band_classification.band_classification
)
self.classifier = band_classification.Classifier
self.band_combination = band_combination.band_combination
self.configurations.band_combination = (
band_combination.band_combination
)
self.band_dilation = band_dilation.band_dilation
self.configurations.band_dilation = band_dilation.band_dilation
self.band_erosion = band_erosion.band_erosion
self.configurations.band_erosion = band_erosion.band_erosion
self.band_mask = band_mask.band_mask
self.configurations.band_mask = band_mask.band_mask
self.mosaic = mosaic.mosaic
self.band_neighbor_pixels = (
band_neighbor_pixels.band_neighbor_pixels
)
self.configurations.band_neighbor_pixels = (
band_neighbor_pixels.band_neighbor_pixels
)
self.band_pca = band_pca.band_pca
self.band_clip = band_clip.band_clip
self.configurations.band_pca = band_pca.band_pca
self.band_sieve = band_sieve.band_sieve
self.configurations.band_sieve = band_sieve.band_sieve
self.band_resample = band_resample.band_resample
self.configurations.band_resample = band_resample.band_resample
self.band_stack = band_stack.band_stack
self.configurations.band_stack = band_stack.band_stack
self.cross_classification = (
cross_classification.cross_classification
)
self.download_products = download_products
self.preprocess_products = preprocess_products
self.raster_reclassification = (
raster_reclassification.raster_reclassification
)
self.raster_report = raster_report.raster_report
self.raster_split = raster_split.raster_split
self.raster_to_vector = raster_to_vector.raster_to_vector
self.vector_to_raster = vector_to_raster.vector_to_raster
self.dates_times = dates_times
self.download_tools = download_tools
self.shared_tools = shared_tools
self.files_directories = files_directories
else:
self.configurations = None
def close(self, log_path: str = None):
"""Closes a Session.
This function closes current session by deleting the temporary files
and stopping parallel processes.
Args:
log_path: path where the log file is saved
Examples:
Given that a session was previously started
>>> import remotior_sensus
>>> rs = remotior_sensus.Session()
Set the number of parallel processes and available RAM
>>> rs.set(n_processes=8, available_ram=20480)
Set the logging level to DEBUG
>>> rs.set(log_level=10)
"""
if log_path:
try:
files_directories.copy_file(
self.configurations.logger.file_path, log_path
)
except Exception as err:
str(err)
self.configurations.temp.clear()
self.configurations.multiprocess.stop()
def set(
self, n_processes: int = None, available_ram: int = None,
temporary_directory: str = None,
directory_prefix: str = None, log_level: int = None,
log_time: bool = None, progress_callback: FunctionType = None,
smtp_server=None, smtp_user=None, smtp_password=None,
smtp_recipients=None, sound_notification=None,
smtp_notification=None
):
"""Sets or changes the parameters of an existing Session.
Sets the parameters of an existing Session such as number
of processes or temporary directory.
Args:
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
temporary_directory: path to a temporary directory.
directory_prefix: prefix of the name of the temporary directory.
log_level: level of logging (10 for DEBUG, 20 for INFO).
log_time: if True, logging includes the time.
progress_callback: function for progress callback.
smtp_server: optional server for SMTP notification.
smtp_user: user for SMTP authentication.
smtp_password: password for SMTP authentication.
smtp_recipients: string of one or more email addresses separated by comma for SMTP notification.
smtp_notification: optional, if True send SMTP notification.
sound_notification: optional, if True play sound notification.
Examples:
Given that a session was previously started
>>> import remotior_sensus
>>> rs = remotior_sensus.Session()
Stop a session
>>> rs.close()
Stop a session saving also the log to a file
>>> rs.close(log_path='file.txt')
""" # noqa: E501
if n_processes:
self.configurations.n_processes = n_processes
check = _check_dependencies(self.configurations)
if check:
self.configurations.multiprocess.stop()
self.configurations.multiprocess = Multiprocess(n_processes)
else:
self.configurations = None
return
if available_ram:
self.configurations.available_ram = available_ram
if temporary_directory:
self.configurations.temp.clear()
# create temporary directory
temp = Temporary()
if directory_prefix is None:
directory_prefix = self.configurations.root_name
self.configurations.temp = temp.create_root_temporary_directory(
prefix=directory_prefix, directory=temporary_directory
)
if log_level:
self.log_level = log_level
if log_time is None:
log_time = True
# create logger
self.configurations.logger = Log(
directory=self.configurations.temp.dir, level=self.log_level,
time=log_time
)
elif log_time:
# create logger
self.configurations.logger = Log(
directory=self.configurations.temp.dir, level=self.log_level,
time=log_time
)
if sound_notification:
self.configurations.sound_notification = sound_notification
if smtp_notification:
self.configurations.smtp_notification = smtp_notification
if smtp_server:
self.configurations.smtp_server = smtp_server
if smtp_user:
self.configurations.smtp_user = smtp_user
if smtp_password:
self.configurations.smtp_password = smtp_password
if smtp_recipients:
self.configurations.smtp_recipients = smtp_recipients
if progress_callback:
# start progress
self.configurations.progress = Progress(callback=progress_callback)
self.configurations.logger.log.info(
'n_processes: %s; ram: %s; temp.dir: %s'
% (self.configurations.n_processes,
self.configurations.available_ram,
self.configurations.temp.dir)
)
def _check_dependencies(configuration_module: configurations) -> bool:
"""Checks the dependencies.
Checks the dependencies and returns a boolean.
Args:
configuration_module: module configurations used for logging.
"""
check = True
try:
import os
try:
import numpy
except Exception as err:
configuration_module.logger.log.error(str(err))
configuration_module.messages.error('dependency error: numpy')
check = False
try:
from scipy import signal
except Exception as err:
configuration_module.logger.log.error(str(err))
configuration_module.messages.error('dependency error: scipy')
check = False
# optional Matplotlib for spectral signature plots
try:
import matplotlib.pyplot as plt
except Exception as err:
configuration_module.logger.log.warning(str(err))
configuration_module.messages.warning(
'dependency error: matplotlib; spectral signature plots '
'are not available'
)
try:
import torch
except Exception as err:
configuration_module.logger.log.warning(str(err))
configuration_module.messages.warning('dependency error: pytorch')
try:
from sklearn import svm
except Exception as err:
configuration_module.logger.log.warning(str(err))
configuration_module.messages.warning('dependency error: sklearn')
if configuration_module.gdal_path is not None:
os.add_dll_directory(configuration_module.gdal_path)
try:
from osgeo import gdal
except Exception as err:
configuration_module.logger.log.error(str(err))
configuration_module.messages.error('dependency error: gdal')
check = False
except Exception as err:
configuration_module.logger.log.error(str(err))
configuration_module.messages.error(str(err))
check = False
return check | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/core/session.py | 0.89227 | 0.296209 | session.py | pypi |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from remotior_sensus.core.multiprocess_manager import Multiprocess
root_name = 'remotior_sensus'
version = None
# type hint for Multiprocess
multiprocess: Multiprocess
# shared classes
logger = messages = band_calc = band_classification = band_combination = None
band_dilation = band_erosion = band_neighbor_pixels = band_pca = None
band_sieve = None
# shared Temporary class
temp = None
# variable to stop processes
action = True
# variables used in Progress class
progress = None
process = root_name
message = 'starting'
refresh_time = 1.0
# operating system information
sys_64bit = None
file_sys_encoding = None
sys_name = None
# notification options
sound_notification = None
smtp_notification = None
smtp_server = ''
smtp_user = ''
smtp_password = ''
smtp_recipients = ''
# optional GDAL path
gdal_path = None
# variables used in BandSet class
band_name_suf = '#b'
date_auto = 'auto'
# memory units used in Multiprocess class for calculating block size
memory_unit_array_12 = 0.000016
memory_unit_array_8 = 0.000010
memory_unit_array_4 = 0.000006
# number of parallel processes. used for Multiprocess calculations
n_processes = 2
# available RAM that should be used by processes
available_ram = 2048
# parameters for raster files
raster_data_type = 'Float32'
raster_compression = True
raster_compression_format = 'LZW'
# nodata values for data types
nodata_val = -32768
nodata_val_UInt16 = 65535
nodata_val_Int32 = 2147483647
nodata_val_Int64 = -9223372036854775808
nodata_val_Float32 = -3.4028235e+38
nodata_val_UInt32 = 4294967295
nodata_val_UInt64 = 2 ** 64 - 1
nodata_val_Byte = 255
# predefined suffixes
csv_suffix = '.csv'
dbf_suffix = '.dbf'
tif_suffix = '.tif'
vrt_suffix = '.vrt'
shp_suffix = '.shp'
gpkg_suffix = '.gpkg'
txt_suffix = '.txt'
xml_suffix = '.xml'
rsmo_suffix = '.rsmo'
# text delimiters
comma_delimiter = ','
tab_delimiter = '\t'
new_line = '\n'
# product variables used for download and preprocessing
sentinel2 = 'Sentinel-2'
landsat = 'Landsat'
sensor_oli = 'oli_tirs'
sensor_etm = 'etm'
sensor_tm = 'tm'
sensor_mss = 'mss'
# NASA CMR Search
# https://cmr.earthdata.nasa.gov/search/site/search_api_docs.html
landsat_hls = 'Landsat_HLS'
landsat_hls_collection = 'C2021957657-LPCLOUD'
sentinel2_hls = 'Sentinel-2_HLS'
sentinel2_hls_collection = 'C2021957295-LPCLOUD'
product_list = [sentinel2, landsat_hls, sentinel2_hls]
# satellites bands for center wavelength definition
no_satellite = 'Band order'
satGeoEye1 = 'GeoEye-1 [bands 1, 2, 3, 4]'
satGOES = 'GOES [bands 1, 2, 3, 4, 5, 6]'
satLandsat9 = 'Landsat 9 OLI [bands 1, 2, 3, 4, 5, 6, 7]'
satLandsat8 = 'Landsat 8 OLI [bands 1, 2, 3, 4, 5, 6, 7]'
satLandsat7 = 'Landsat 7 ETM+ [bands 1, 2, 3, 4, 5, 7]'
satLandsat45 = 'Landsat 4-5 TM [bands 1, 2, 3, 4, 5, 7]'
satLandsat13 = 'Landsat 1-3 MSS [bands 4, 5, 6, 7]'
satRapidEye = 'RapidEye [bands 1, 2, 3, 4, 5]'
satSentinel1 = 'Sentinel-1 [bands VV, VH]'
satSentinel2 = 'Sentinel-2 [bands 1, 2, 3, 4, 5, 6, 7, 8, 8A, 9, 10, 11, 12]'
satSentinel3 = 'Sentinel-3 [bands 1, 2, 3, 4, 5, 6, 7, 8, 9, ' \
'10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]'
satASTER = 'ASTER [bands 1, 2, 3N, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]'
satMODIS = 'MODIS [bands 3, 4, 1, 2, 5, 6, 7]'
satMODIS2 = 'MODIS [bands 1, 2]'
satSPOT4 = 'SPOT 4 [bands 1, 2, 3, 4]'
satSPOT5 = 'SPOT 5 [bands 1, 2, 3, 4]'
satSPOT6 = 'SPOT 6 [bands 1, 2, 3, 4]'
satPleiades = 'Pleiades [bands 1, 2, 3, 4]'
satQuickBird = 'QuickBird [bands 1, 2, 3, 4]'
satWorldView23 = 'WorldView-2 -3 Multispectral [bands 1, 2, 3, 4, 5, 6, 7, 8]'
# satellite list used in BandSet class
sat_band_list = [
no_satellite, satASTER, satGeoEye1, satGOES, satLandsat8, satLandsat7,
satLandsat45, satLandsat13, satMODIS, satMODIS2, satPleiades, satQuickBird,
satRapidEye, satSentinel2, satSentinel3, satSPOT4, satSPOT5, satSPOT6,
satWorldView23
]
# units used for center wavelength
no_unit = 'band number'
wl_micro = 'µm (1 E-6m)'
wl_nano = 'nm (1 E-9m)'
# list of units
unit_list = [no_unit, wl_micro, wl_nano]
unit_nano = 'E-9m'
unit_micro = 'E-6m'
# wavelength center and thresholds in micrometers used in BandSet class
blue_center = 0.475
blue_threshold = 0.2
green_center = 0.56
green_threshold = 0.03
red_center = 0.65
red_threshold = 0.04
nir_center = 0.85
nir_threshold = 0.15
swir_1_center = 1.61
swir_1_threshold = 0.2
swir_2_center = 2.2
swir_2_threshold = 0.2
# dictionary of satellite bands center wavelengths
satellites = {
# ASTER center wavelength calculated from USGS, 2015.
# Advanced Spaceborne Thermal Emission and Reflection
# Radiometer (ASTER) Level 1 Precision Terrain Corrected Registered
# At-Sensor Radiance Product (AST_L1T)
satASTER: [
[0.560, 0.660, 0.810, 1.650, 2.165, 2.205, 2.260, 2.330, 2.395,
8.300, 8.650, 9.100, 10.600, 11.300],
wl_micro,
['01', '02', '3N', '04', '05', '06', '07', '08', '09', '10',
'11', '12', '13', '14']],
# Landsat center wavelength calculated from
# http://landsat.usgs.gov/band_designations_landsat_satellites.php
satLandsat8: [[0.44, 0.48, 0.56, 0.655, 0.865, 1.61, 2.2],
wl_micro, ['1', '2', '3', '4', '5', '6', '7']],
satLandsat7: [[0.485, 0.56, 0.66, 0.835, 1.65, 2.22],
wl_micro, ['1', '2', '3', '4', '5', '7']],
satLandsat45: [[0.485, 0.56, 0.66, 0.83, 1.65, 2.215],
wl_micro, ['1', '2', '3', '4', '5', '7']],
satLandsat13: [[0.55, 0.65, 0.75, 0.95], wl_micro, ['4', '5', '6', '7']],
# MODIS center wavelength calculated from
# https://lpdaac.usgs.gov/dataset_discovery/modis
satMODIS: [[0.469, 0.555, 0.645, 0.858, 1.24, 1.64, 2.13],
wl_micro, ['03', '04', '01', '02', '05', '06', '07']],
satMODIS2: [[0.645, 0.858], wl_micro, ['01', '02']],
# RapidEye center wavelength calculated from
# http://www.blackbridge.com/rapideye/products/ortho.htm
satRapidEye: [[0.475, 0.555, 0.6575, 0.71, 0.805],
wl_micro, ['01', '02', '03', '04', '05']],
# SPOT center wavelength calculated from
# http://www.astrium-geo.com/en/194-resolution-and-spectral-bands
satSPOT4: [[0.545, 0.645, 0.835, 1.665], wl_micro,
['01', '02', '03', '04']],
satSPOT5: [[0.545, 0.645, 0.835, 1.665], wl_micro,
['01', '02', '03', '04']],
satSPOT6: [[0.485, 0.56, 0.66, 0.825], wl_micro, ['01', '02', '03', '04']],
# Pleiades center wavelength calculated from
# http://www.astrium-geo.com/en/3027-pleiades-50-cm-resolution-products
satPleiades: [[0.49, 0.56, 0.65, 0.84], wl_micro,
['01', '02', '03', '04']],
# QuickBird center wavelength calculated from
# http://www.digitalglobe.com/resources/satellite-information
satQuickBird: [[0.4875, 0.543, 0.65, 0.8165], wl_micro,
['01', '02', '03', '04']],
# WorldView-2 center wavelength calculated from
# http://www.digitalglobe.com/resources/satellite-information
satWorldView23: [
[0.425, 0.48, 0.545, 0.605, 0.66, 0.725, 0.8325, 0.95],
wl_micro, ['01', '02', '03', '04', '05', '06', '07', '08']],
# GeoEye-1 center wavelength calculated from
# http://www.digitalglobe.com/resources/satellite-information
satGeoEye1: [[0.48, 0.545, 0.6725, 0.85], wl_micro,
['01', '02', '03', '04']],
# Sentinel-1
satSentinel1: [[1, 2], no_unit, ['1', '2']],
# Sentinel-2 center wavelength from
# https://sentinel.esa.int/documents/247904/685211/Sentinel-2A
# +MSI+Spectral+Responses
satSentinel2: [
[0.443, 0.490, 0.560, 0.665, 0.705, 0.740, 0.783, 0.842, 0.865,
0.945, 1.375, 1.610, 2.190], wl_micro,
['01', '02', '03', '04', '05', '06', '07', '08', '8a', '09',
'10', '11', '12']],
# Sentinel-3 center wavelength from Sentinel-3 xfdumanifest.xml
satSentinel3: [
[0.400, 0.4125, 0.4425, 0.490, 0.510, 0.560, 0.620, 0.665,
0.67375, 0.68125, 0.70875, 0.75375, 0.76125,
0.764375, 0.7675, 0.77875, 0.865, 0.885, 0.900, 0.940, 1.020],
wl_micro,
['01', '02', '03', '04', '05', '06', '07', '08', '09', '10',
'11', '12', '13', '14', '15', '16', '17', '18', '19', '20',
'21']],
# GOES center wavelength from GOES-R, 2017.PRODUCT DEFINITION
# AND USER’S GUIDE (PUG) VOLUME 3: LEVEL 1B PRODUCTS
satGOES: [[0.47, 0.64, 0.87, 1.38, 1.61, 2.25], wl_micro,
['01', '02', '03', '04', '05', '06']]
}
# variable used for array name placeholder in expressions for calculations
array_function_placeholder = '_array_function_placeholder'
# reclassification name variables
old_value = 'old_value'
new_value = 'new_value'
variable_raster_name = 'raster'
# calculation band name alias
variable_bandset_name = 'bandset'
variable_band_name = 'b'
variable_band_quotes = '"'
variable_band = '#BAND#'
variable_current_bandset = '#'
variable_output_separator = '@'
variable_bandset_number_separator = '%'
variable_all = '*'
variable_blue_name = '#BLUE#'
variable_green_name = '#GREEN#'
variable_red_name = '#RED#'
variable_nir_name = '#NIR#'
variable_swir1_name = '#SWIR1#'
variable_swir2_name = '#SWIR2#'
variable_ndvi_name = '#NDVI#'
variable_ndvi_expression = '("#NIR#" - "#RED#") / ("#NIR#" + "#RED#")'
variable_evi_name = '#NDVI#'
variable_evi_expression = (
'2.5 * ("#NIR#" - "#RED#") / ("#NIR#" + 6 * "#RED#" - 7.5 * "#BLUE#" + 1)'
)
expression_alias = [[variable_ndvi_name, variable_ndvi_expression],
[variable_evi_name, variable_evi_expression]]
variable_output_name_bandset = '#BANDSET#'
variable_output_name_date = '#DATE#'
variable_output_temporary = 'temp'
forbandsinbandset = 'forbandsinbandset'
forbandsets = 'forbandsets'
calc_function_name = '!function!'
calc_date_format = '%Y-%m-%d'
default_output_name = 'output'
stat_percentile = '@stat_percentile@'
statistics_list = [
['Count', 'np.count_nonzero(~np.isnan(array))'],
['Max', 'np.nanmax(array)'], ['Mean', 'np.nanmean(array)'],
['Median', 'np.nanmedian(array)'], ['Min', 'np.nanmin(array)'],
['Percentile', 'np.nanpercentile(array, %s)' % stat_percentile],
['StandardDeviation', 'np.nanstd(array)'], ['Sum', 'np.nansum(array)']
]
# calculation data types used in calculations
float64_dt = 'Float64'
float32_dt = 'Float32'
int32_dt = 'Int32'
uint32_dt = 'UInt32'
int16_dt = 'Int16'
uint16_dt = 'UInt16'
byte_dt = 'Byte'
datatype_list = [float64_dt, float32_dt, int32_dt, uint32_dt, int16_dt,
uint16_dt, byte_dt]
# spectral catalog table dtype
spectral_dtype_list = [('signature_id', 'U64'), ('macroclass_id', 'int16'),
('class_id', 'int16'), ('class_name', 'U512'),
('selected', 'byte'), ('min_dist_thr', 'float64'),
('max_like_thr', 'float64'),
('spec_angle_thr', 'float64'),
('geometry', 'byte'), ('signature', 'byte'),
('color', 'U64'), ('pixel_count', 'int16'),
('unit', 'U64')]
# spectral signature dtype
signature_dtype_list = [('value', 'float64'), ('wavelength', 'float64'),
('standard_deviation', 'float64')]
# variables used in spectral signatures
uid_field_name = 'roi_id'
macroclass_field_name = 'macroclass_id'
class_field_name = 'class_id'
macroclass_default = 'macroclass'
class_default = 'class'
# input normalization for classification
z_score = 'z score'
linear_scaling = 'linear scaling'
# classification frameworks
classification_framework = 'classification_framework'
scikit_framework = 'scikit'
pytorch_framework = 'pytorch'
spectral_signatures_framework = 'spectral_signatures'
model_classifier_framework = 'model_classifier'
normalization_values_framework = 'normalization_values'
covariance_matrices_framework = 'covariance_matrices'
algorithm_name_framework = 'algorithm_name'
input_normalization_framework = 'input_normalization'
# classification algorithm names
minimum_distance = 'minimum distance'
maximum_likelihood = 'maximum likelihood'
spectral_angle_mapping = 'spectral angle mapping'
random_forest = 'random forest'
random_forest_ovr = 'random forest ovr'
support_vector_machine = 'support vector machine'
multi_layer_perceptron = 'multi-layer perceptron'
pytorch_multi_layer_perceptron = 'pytorch multi-layer perceptron'
classification_algorithms = [
minimum_distance, maximum_likelihood, spectral_angle_mapping,
random_forest, random_forest_ovr, support_vector_machine,
multi_layer_perceptron, pytorch_multi_layer_perceptron
]
# name used in raster conversion to vector for area field
area_field_name = 'area'
not_available = 'n/a' | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/core/configurations.py | 0.595375 | 0.263924 | configurations.py | pypi |
try:
import matplotlib.pyplot as plt
import matplotlib.pyplot as mpl_plot
except Exception as error:
str(error)
print('plot tools: matplotlib error')
# prepare plot
def prepare_plot(x_label=None, y_label=None):
if x_label is None:
x_label = 'Wavelength'
if y_label is None:
y_label = 'Values'
figure, ax = plt.subplots()
# Set empty ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('auto')
ax.grid('on')
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return ax
# prepare plot
def prepare_scatter_plot(x_label=None, y_label=None):
if x_label is None:
x_label = 'Band X'
if y_label is None:
y_label = 'Band Y'
figure, ax = plt.subplots()
# Set empty ticks
ax.set_xticks([])
ax.set_yticks([])
ax.set_aspect('auto')
ax.grid('on')
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return ax
# add list of values to plot
def add_lines_to_plot(
name_list, wavelength_list, value_list, color_list,
legend_max_chars=15
):
plots = []
plot_names = []
v_lines = []
wavelength_min = 1000000
wavelength_max = 0
value_min = 10000000
value_max = 0
for _id in range(len(name_list)):
plot, = plt.plot(
wavelength_list[_id], value_list[_id], color_list[_id]
)
v_lines.extend(wavelength_list[_id])
wavelength_min = min(min(wavelength_list[_id]), wavelength_min)
wavelength_max = max(max(wavelength_list[_id]), wavelength_max)
value_min = min(min(value_list[_id]), value_min)
value_max = max(max(value_list[_id]), value_max)
plots.append(plot)
plot_names.append(name_list[_id][:legend_max_chars])
x_min = wavelength_min
x_ticks = [x_min]
for x in range(10):
x_min += (wavelength_max - wavelength_min) / 10
x_ticks.append(x_min)
y_min = value_min
y_ticks = [y_min]
for y in range(10):
y_min += (value_max - value_min) / 10
y_ticks.append(y_min)
return plots, plot_names, x_ticks, y_ticks, set(v_lines)
# create plot
def create_plot(
ax, plots, plot_names, x_ticks=None, y_ticks=None, v_lines=None
):
if x_ticks is None:
x_ticks = [0, 1]
if y_ticks is None:
y_ticks = [0, 1]
if v_lines is not None:
for x in v_lines:
ax.axvline(x, color='black', linestyle='dashed')
ax.legend(
plots, plot_names, bbox_to_anchor=(0.0, 0.0, 1.1, 1.0), loc=1,
borderaxespad=0.
).set_draggable(True)
ax.set_xticks(x_ticks)
ax.set_yticks(y_ticks)
plt.show()
# create plot
def create_scatter_plot(
ax, plots, plot_names, x_ticks=None, y_ticks=None
):
if x_ticks is None:
x_ticks = [0, 1]
if y_ticks is None:
y_ticks = [0, 1]
ax.legend(
plots, plot_names, bbox_to_anchor=(0.0, 0.0, 1.1, 1.0), loc=1,
borderaxespad=0.
).set_draggable(True)
ax.set_xticks(x_ticks)
ax.set_yticks(y_ticks)
plt.show()
# add values to plot
def add_values_to_scatter_plot(histogram, ax):
pal = mpl_plot.get_cmap('rainbow')
pal.set_under('w', 0.0)
plot = ax.imshow(
histogram[0].T, origin='lower', interpolation='none',
extent=[histogram[1][0], histogram[1][-1], histogram[2][0],
histogram[2][-1]], cmap=pal, vmin=0.001
)
return plot | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/util/plot_tools.py | 0.551332 | 0.553988 | plot_tools.py | pypi |
import numpy as np
from sklearn.model_selection import train_test_split
from remotior_sensus.core import configurations as cfg
nn_module = None
try:
import torch
from torch import nn
nn_module = nn.Module
from torch.utils.data import DataLoader, TensorDataset
except Exception as error:
# empty class
class Module:
pass
nn_module = Module
if cfg.logger is not None:
cfg.logger.log.error(str(error))
else:
print(str(error))
class PyTorchNeuralNetwork(nn_module):
def __init__(
self, columns, classes_number, hidden_layer_sizes=None,
activation=None
):
# call parent class
super(PyTorchNeuralNetwork, self).__init__()
if hidden_layer_sizes is not None:
if activation == 'logistic' or activation == 'sigmoid':
activation_function = nn.Sigmoid()
elif activation == 'tanh':
activation_function = nn.Tanh()
elif activation == 'relu':
activation_function = nn.ReLU()
else:
activation_function = nn.ReLU()
layers = []
for x in range(0, len(hidden_layer_sizes)):
if x == 0:
layers.append(nn.Linear(columns, hidden_layer_sizes[x]))
layers.append(activation_function)
else:
layers.append(
nn.Linear(
hidden_layer_sizes[x - 1], hidden_layer_sizes[x]
)
)
layers.append(activation_function)
layers.append(
nn.Linear(hidden_layer_sizes[-1], int(classes_number))
)
self.sequential = nn.Sequential(*layers)
else:
self.sequential = nn.Sequential(
nn.Linear(columns, 100), nn.ReLU(),
nn.Linear(100, classes_number)
)
# forward
def forward(self, training_data):
logits = self.sequential(training_data)
return logits
# noinspection PyTypeChecker,PyUnresolvedReferences
def train_pytorch_model(
x_matrix, y_matrix, pytorch_model=None, activation='relu',
batch_size=None, n_processes=0, training_portion=None,
pytorch_optimizer=None, hidden_layer_sizes=None,
loss_function=None, learning_rate_init=None,
optimization_n_iter_no_change=None, optimization_tol=None,
weight_decay=None, max_iterations=None, device=None, min_progress=0,
max_progress=100
):
cfg.logger.log.debug('start')
# get device
if device == 'cuda':
device = 'cuda' if torch.cuda.is_available() else 'cpu'
else:
device = 'cpu'
if device == 'cpu':
torch.set_num_threads(n_processes)
if optimization_n_iter_no_change is None:
optimization_n_iter_no_change = 5
if optimization_tol is None:
optimization_tol = 0.0001
if max_iterations is None:
max_iterations = 200
if batch_size is None or batch_size == 'auto':
batch_size = int(
min(
2000, min(x_matrix.shape[0], max(200, x_matrix.shape[0] / 100))
)
)
cfg.logger.log.debug(
'device: %s; n_processes: %s; batch_size: %s' % (
device, n_processes, batch_size)
)
# number of classes
classes_number = np.max(y_matrix) + 1
if pytorch_model is None:
# move model to device
model = PyTorchNeuralNetwork(
columns=x_matrix.shape[1], classes_number=classes_number,
hidden_layer_sizes=hidden_layer_sizes, activation=activation
).to(device)
else:
model = pytorch_model(
columns=x_matrix.shape[1], classes_number=classes_number,
hidden_layer_sizes=hidden_layer_sizes, activation=activation
).to(device)
data_type = eval('torch.%s' % x_matrix.dtype)
if training_portion is not None and training_portion < 1:
x_train, x_test, y_train, y_test = train_test_split(
x_matrix, y_matrix, test_size=1 - training_portion,
train_size=training_portion, random_state=0,
stratify=y_matrix
)
x_train = torch.tensor(x_train, dtype=data_type)
y_train = torch.tensor(y_train, dtype=torch.long)
x_test = torch.tensor(x_test, dtype=data_type)
y_test = torch.tensor(y_test, dtype=torch.long)
training_data = DataLoader(
TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True
)
test_data = DataLoader(
TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=True
)
else:
x_train, x_test, y_train, y_test = train_test_split(
x_matrix, y_matrix, test_size=0.1,
train_size=0.9, random_state=0, stratify=y_matrix
)
x_train = torch.tensor(x_matrix, dtype=data_type)
y_train = torch.tensor(y_matrix, dtype=torch.long)
x_test = torch.tensor(x_test, dtype=data_type)
y_test = torch.tensor(y_test, dtype=torch.long)
training_data = DataLoader(
TensorDataset(x_train, y_train), batch_size=batch_size,
shuffle=True
)
test_data = DataLoader(
TensorDataset(x_test, y_test), batch_size=batch_size, shuffle=True
)
if learning_rate_init is None:
learning_rate_init = 0.001
if weight_decay is None:
weight_decay = 0.0001
# optimizer
if pytorch_optimizer is None:
pytorch_optimizer = torch.optim.Adam(
model.parameters(), lr=learning_rate_init,
weight_decay=weight_decay
)
# loss function
if loss_function is None:
loss_function = nn.CrossEntropyLoss()
epoch = 0
test_loss_list = []
while True:
if cfg.action is True:
epoch += 1
# train
model.train()
training_size = len(training_data.dataset)
training_loss = 0
for batch, (x, y) in enumerate(training_data):
x, y = x.to(device), y.to(device)
# compute prediction loss
prediction = model(x)
loss = loss_function(prediction, y)
# backpropagation
pytorch_optimizer.zero_grad()
loss.backward()
pytorch_optimizer.step()
training_loss += loss.item()
# progress
count_progress = batch * len(x)
cfg.progress.update(
percentage=int(100 * count_progress / training_size)
)
training_loss /= training_size
# test
model.eval()
test_size = len(test_data.dataset)
test_loss, correct = 0, 0
with torch.no_grad():
for x, y in test_data:
x, y = x.to(device), y.to(device)
prediction = model(x)
test_loss += loss_function(prediction, y).item()
correct += (prediction.argmax(1) == y).type(
torch.float
).sum().item()
test_loss /= test_size
test_loss_list.append(test_loss)
accuracy = correct / test_size * 100
# progress
if max_iterations is not None:
increment = (max_progress - min_progress) / max_iterations
step = int(min_progress + epoch * increment)
else:
step = None
cfg.progress.update(
message=f'epoch: {epoch}, training loss: {training_loss:>8f}, '
f'test loss: {test_loss:>8f}, '
f'accuracy: {accuracy :>0.1f}%', step=step
)
cfg.logger.log.debug(
f'epoch: {epoch}, training loss: {training_loss:>8f}, '
f'test loss: {test_loss:>8f}, '
f'accuracy: {accuracy :>0.1f}%'
)
# check optimization tolerance
if epoch > optimization_n_iter_no_change:
loss_difference = []
for o in range(1, optimization_n_iter_no_change + 1):
diff = test_loss_list[-1 * o] - test_loss_list[
-1 * (o + 1)]
loss_difference.append(diff ** 2 < optimization_tol ** 2)
if all(loss_difference):
cfg.logger.log.debug(
'optimization_tol: %s' % str(optimization_tol)
)
break
if max_iterations is not None and epoch == max_iterations:
cfg.logger.log.debug('max_iterations: %s'
% str(max_iterations))
break
else:
cfg.logger.log.error('cancel')
return None, None, None, None
cfg.logger.log.debug('end')
return model, training_loss, test_loss, accuracy | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/util/pytorch_tools.py | 0.836688 | 0.44746 | pytorch_tools.py | pypi |
# noqa: E501
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util import (raster_vector, shared_tools)
def band_stack(
input_bands: Union[list, int, BandSet],
output_path: Optional[str] = None,
overwrite: Optional[bool] = False,
extent_list: Optional[list] = None,
bandset_catalog: Optional[BandSetCatalog] = None,
n_processes: Optional[int] = None,
virtual_output: Optional[bool] = None
):
"""Stack single bands.
This tool allows for stacking single bands in a multiband raster.
Args:
input_bands: list of paths of input rasters, or number of BandSet, or BandSet object.
output_path: string of output path.
overwrite: if True, output overwrites existing files.
extent_list: list of boundary coordinates left top right bottom.
bandset_catalog: BandSetCatalog object required if input_bands is a BandSet number.
n_processes: number of parallel processes.
virtual_output: if True (and output_path is directory), save output
as virtual raster.
Returns:
object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- path = output path
Examples:
Perform band stack
>>> stack = band_stack(input_bands=['path_1', 'path_2'],
... output_path='output_path')
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=input_bands, output_path=output_path, overwrite=overwrite,
n_processes=n_processes, bandset_catalog=bandset_catalog,
box_coordinate_list=extent_list, virtual_output=virtual_output
)
input_raster_list = prepared['input_raster_list']
out_path = prepared['output_path']
if input_bands is BandSet:
bandset_x = input_bands
elif input_bands is int:
bandset_x = bandset_catalog.get(input_bands)
else:
bandset_x = BandSet.create(paths=input_raster_list)
if virtual_output:
virtual_path = out_path
else:
virtual_path = cfg.temp.temporary_file_path(name_suffix=cfg.vrt_suffix)
raster_vector.create_virtual_raster(output=virtual_path, bandset=bandset_x)
cfg.progress.update(message='stack', step=2, steps=2, minimum=1,
maximum=99, percentage=50)
if virtual_output is not True:
raster_vector.gdal_copy_raster(input_raster=virtual_path,
output=out_path)
cfg.progress.update(end=True)
cfg.logger.log.info('end; band stack: %s' % str(out_path))
return OutputManager(path=out_path) | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/band_stack.py | 0.869479 | 0.273287 | band_stack.py | pypi |
import os
from xml.dom import minidom
from typing import Optional
import numpy as np
from remotior_sensus.core import configurations as cfg, table_manager as tm
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import (
band_calculation, raster_unique_values_with_sum
)
from remotior_sensus.util import files_directories
# create product table and preprocess
def preprocess(
input_path, output_path, metadata_file_path=None, add_bandset=None,
product=None, nodata_value=None, sensor=None, acquisition_date=None,
dos1_correction=False, output_prefix='', n_processes: int = None,
bandset_catalog: Optional[BandSetCatalog] = None,
available_ram: int = None, progress_message=True
) -> OutputManager:
"""Create table and preprocess products.
Perform image conversion to reflectance of several products.
Can calculate DOS1 corrected reflectance (Sobrino, J. et al., 2004. Land
surface temperature retrieval from LANDSAT TM 5. Remote Sensing of
Environment, Elsevier, 90, 434-440) approximating path radiance
to path reflectance for level 1 data:
TOA reflectance = DN * reflectance_scale + reflectance_offset
path reflectance p = DNm - Dark Object reflectance = DNm * reflectance_scale + reflectance_offset - 0.01
land surface reflectance = TOA reflectance - p = (DN * reflectance_scale) - (DNm * reflectance_scale - 0.01)
Landsat's data Collection 1 and 2
Level 1T
Landsat 8-9 TOA reflectance proportional to exo-atmospheric solar
irradiance in each band and the Earth-Sun distance
(USGS, 2021. Landsat 8-9 Calibration and Validation (Cal/Val) Algorithm
Description Document (ADD). Version 4.0. Department of the Interior
U.S. Geological Survey, South Dakota)
TOA reflectance with correction for the sun angle =
DN * Reflectance multiplicative scaling factor + Reflectance additive
scaling factor / sin(Sun elevation)
Level 2S
Surface reflectance = DN * Reflectance multiplicative scaling factor +
Reflectance additive scaling factor
Sentinel-2 data
Level 1C
TOA reflectance = DN / QUANTIFICATION VALUE + OFFSET
Level 2S
Surface reflectance = DN / QUANTIFICATION VALUE + OFFSET
Args:
input_path:
output_path: string of output path directory.
metadata_file_path:
dos1_correction: if True, perform DOS1 correction.
add_bandset: if True, create a new bandset and add output bands to it; if False, add output bands to current bandset.
product:
nodata_value:
sensor:
acquisition_date:
output_prefix: optional string for output name prefix.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
bandset_catalog: BandSetCatalog object.
progress_message: progress message.
Returns:
Object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- paths = output list
""" # noqa: E501
table = create_product_table(
input_path=input_path, metadata_file_path=metadata_file_path,
product=product, nodata_value=nodata_value, sensor=sensor,
acquisition_date=acquisition_date
)
output = perform_preprocess(
product_table=table, output_path=output_path, add_bandset=add_bandset,
dos1_correction=dos1_correction, output_prefix=output_prefix,
n_processes=n_processes, available_ram=available_ram,
progress_message=progress_message, bandset_catalog=bandset_catalog
)
return output
# preprocess products
def perform_preprocess(
product_table, output_path, dos1_correction=False, add_bandset=None,
output_prefix='', n_processes: int = None, available_ram: int = None,
bandset_catalog: Optional[BandSetCatalog] = None, progress_message=True
) -> OutputManager:
"""Preprocess products.
Perform preprocessing based on product table.
Args:
product_table: product table object.
output_path: string of output path directory.
dos1_correction: if True, perform DOS1 correction.
add_bandset: if True, create a new bandset and add output bands to it; if False, add output bands to current bandset.
output_prefix: optional string for output name prefix.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
bandset_catalog: BandSetCatalog object.
progress_message: progress message.
Returns:
Object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- paths = output list
""" # noqa: E501
if progress_message:
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '),
message='starting', start=True
)
cfg.logger.log.debug('product_table: %s' % str(product_table))
if n_processes is None:
n_processes = cfg.n_processes
input_list = []
input_dos1_list = []
dos1_nodata_list = []
nodata_list = []
calculation_datatype = []
scale_list = []
offset_list = []
output_nodata = []
output_datatype = []
output_raster_path_list = []
expressions = []
dos1_expressions = []
# create process string list
# Sentinel-2
sentinel_product = product_table[product_table.product == cfg.sentinel2]
# Landsat
landsat_product = product_table[product_table.product == cfg.landsat]
# Sentinel-2
if len(sentinel_product) > 0:
if dos1_correction:
# exclude Level-2A
sentinel_product_2a = sentinel_product[
sentinel_product.processing_level != 'level-2a']
# calculate DOS1 corrected reflectance approximating path
# radiance to path reflectance
# land surface reflectance = TOA reflectance - p =
# (DN * reflectance_scale) - (DNm * reflectance_scale - 0.01)
# raster and dnm are variables in the calculation
string_1 = np.char.add(
'np.clip(( %s * ' % cfg.array_function_placeholder,
sentinel_product_2a.scale.astype('<U16')
)
string_2 = np.char.add(string_1, ' - (')
string_3 = np.char.add(
string_2, sentinel_product_2a.scale.astype('<U16')
)
dos1_expressions.extend(
np.char.add(string_3, ' * dnm - 0.01)), 0, 1)').tolist()
)
input_dos1_list.extend(sentinel_product_2a.product_path.tolist())
# output raster list
output_string_1 = np.char.add(
'%s/%s' % (output_path, output_prefix),
sentinel_product_2a.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_1, cfg.tif_suffix).tolist()
)
nodata_list.extend(sentinel_product_2a.nodata.tolist())
dos1_nodata_list.extend(sentinel_product_2a.nodata.tolist())
calculation_datatype.extend(
[np.float32] * len(sentinel_product_2a)
)
output_datatype.extend([cfg.uint16_dt] * len(sentinel_product_2a))
scale_list.extend([0.0001] * len(sentinel_product_2a))
offset_list.extend([0] * len(sentinel_product_2a))
output_nodata.extend(
[cfg.nodata_val_UInt16] * len(sentinel_product_2a)
)
else:
# calculate reflectance = DN / quantificationValue = DN * scale
# raster is interpreted as variable in the calculation
string_1 = np.char.add(
'np.clip( ( %s * ' % cfg.array_function_placeholder,
sentinel_product.scale.astype('<U16')
)
expressions.extend(np.char.add(string_1, ') , 0, 1)').tolist())
input_list.extend(sentinel_product.product_path.tolist())
# output raster list
output_string_1 = np.char.add(
'%s/%s' % (output_path, output_prefix),
sentinel_product.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_1, cfg.tif_suffix).tolist()
)
nodata_list.extend(sentinel_product.nodata.tolist())
calculation_datatype.extend([np.float32] * len(sentinel_product))
output_datatype.extend([cfg.uint16_dt] * len(sentinel_product))
scale_list.extend([0.0001] * len(sentinel_product))
offset_list.extend([0] * len(sentinel_product))
output_nodata.extend(
[cfg.nodata_val_UInt16] * len(sentinel_product)
)
# Landsat
elif len(landsat_product) > 0:
# temperature
landsat_temperature_product = landsat_product[landsat_product.k1 != 0]
string_0 = np.char.add(
landsat_temperature_product.k2.astype('<U16'), ' / ( log( 1 + '
)
string_1 = np.char.add(
string_0, landsat_temperature_product.k1.astype('<U16')
)
string_2 = np.char.add(
string_1, ' / (%s * ' % cfg.array_function_placeholder
)
string_3 = np.char.add(
string_2, landsat_temperature_product.scale.astype('<U16')
)
string_4 = np.char.add(string_3, ' + ')
string_5 = np.char.add(
string_4, landsat_temperature_product.offset.astype('<U16')
)
expressions.extend(np.char.add(string_5, ') ) )').tolist())
input_list.extend(landsat_temperature_product.product_path.tolist())
calculation_datatype.extend(
[np.float32] * len(landsat_temperature_product)
)
output_datatype.extend(
[cfg.float32_dt] * len(landsat_temperature_product)
)
scale_list.extend([1] * len(landsat_temperature_product))
offset_list.extend([0] * len(landsat_temperature_product))
output_nodata.extend(
[cfg.nodata_val_Float32] * len(landsat_temperature_product)
)
# output raster list
output_string_temperature_1 = np.char.add(
'%s/%s' % (output_path, output_prefix),
landsat_temperature_product.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_temperature_1, cfg.tif_suffix).tolist()
)
nodata_list.extend(landsat_temperature_product.nodata.tolist())
landsat_temperature_product_10 = landsat_product[
landsat_product.band_number == '10']
string_1 = np.char.add(
'%s * ' % cfg.array_function_placeholder,
landsat_temperature_product_10.scale.astype('<U16')
)
string_2 = np.char.add(string_1, ' + ')
string_3 = np.char.add(
string_2, landsat_temperature_product_10.offset.astype('<U16')
)
expressions.extend(string_3.tolist())
input_list.extend(landsat_temperature_product_10.product_path.tolist())
calculation_datatype.extend(
[np.float32] * len(landsat_temperature_product_10)
)
output_datatype.extend(
[cfg.float32_dt] * len(landsat_temperature_product_10)
)
scale_list.extend([1] * len(landsat_temperature_product_10))
offset_list.extend([0] * len(landsat_temperature_product_10))
output_nodata.extend(
[cfg.nodata_val_Float32] * len(landsat_temperature_product_10)
)
# output raster list
output_string_temperature_10 = np.char.add(
'%s/%s' % (output_path, output_prefix),
landsat_temperature_product_10.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_temperature_10, cfg.tif_suffix).tolist()
)
nodata_list.extend(landsat_temperature_product_10.nodata.tolist())
if dos1_correction:
# exclude level 2 products and temperature
landsat_product_l1 = landsat_product[
(landsat_product.processing_level == 'l1tp') & (
landsat_product.band_number != '10') &
(landsat_product.k1 == 0)]
# calculate DOS1 corrected reflectance approximating path
# radiance to path reflectance
# land surface reflectance = TOA reflectance - p =
# (DN * reflectance_scale) - (DNm * reflectance_scale - 0.01)
# raster and dnm are variables in the calculation
string_1 = np.char.add(
'np.clip(( %s * ' % cfg.array_function_placeholder,
landsat_product_l1.scale.astype('<U16')
)
string_2 = np.char.add(string_1, ' - (')
string_3 = np.char.add(
string_2, landsat_product_l1.scale.astype('<U16')
)
dos1_expressions.extend(
np.char.add(string_3, ' * dnm - 0.01)), 0, 1)').tolist()
)
input_dos1_list.extend(landsat_product_l1.product_path.tolist())
# output raster list
output_string_1 = np.char.add(
'%s/%s' % (output_path, output_prefix),
landsat_product_l1.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_1, cfg.tif_suffix).tolist()
)
nodata_list.extend(landsat_product_l1.nodata.tolist())
dos1_nodata_list.extend(landsat_product_l1.nodata.tolist())
calculation_datatype.extend([np.float32] * len(landsat_product_l1))
output_datatype.extend([cfg.uint16_dt] * len(landsat_product_l1))
scale_list.extend([0.0001] * len(landsat_product_l1))
offset_list.extend([0] * len(landsat_product_l1))
output_nodata.extend(
[cfg.nodata_val_UInt16] * len(landsat_product_l1)
)
else:
# level 1 products
landsat_1_product = landsat_product[
(landsat_product.processing_level == 'l1tp') & (
landsat_product.band_number != '10')
& (landsat_product.k1 == 0)]
# calculate reflectance = (raster * scale
# + offset) / sin(Sun elevation)
# raster is interpreted as variable in the calculation
string_1 = np.char.add(
'np.clip( ( ( %s * ' % cfg.array_function_placeholder,
landsat_1_product.scale.astype('<U16')
)
string_2 = np.char.add(string_1, ' + ')
string_3 = np.char.add(
string_2, landsat_1_product.offset.astype('<U16')
)
string_4 = np.char.add(string_3, ') / sin(')
string_5 = np.char.add(
string_4, landsat_1_product.sun_elevation.astype(
'<U16'
)
)
expressions.extend(np.char.add(string_5, ') ) , 0, 1)').tolist())
input_list.extend(landsat_1_product.product_path.tolist())
# output raster list
output_string_1 = np.char.add(
'%s/%s' % (output_path, output_prefix),
landsat_1_product.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_1, cfg.tif_suffix).tolist()
)
nodata_list.extend(landsat_1_product.nodata.tolist())
calculation_datatype.extend([np.float32] * len(landsat_1_product))
output_datatype.extend([cfg.uint16_dt] * len(landsat_1_product))
scale_list.extend([0.0001] * len(landsat_1_product))
offset_list.extend([0] * len(landsat_1_product))
output_nodata.extend(
[cfg.nodata_val_UInt16] * len(landsat_1_product)
)
# level 2 products
landsat_2_product = landsat_product[
(landsat_product.processing_level == 'l2sp') & (
landsat_product.band_number != '10')]
# calculate reflectance = (raster * scale
# + offset) / sin(Sun elevation)
# raster is interpreted as variable in the calculation
string_1 = np.char.add(
'np.clip( ( %s * ' % cfg.array_function_placeholder,
landsat_2_product.scale.astype('<U16')
)
string_2 = np.char.add(string_1, ' + ')
string_3 = np.char.add(
string_2, landsat_2_product.offset.astype('<U16')
)
expressions.extend(np.char.add(string_3, ') , 0, 1)').tolist())
input_list.extend(landsat_2_product.product_path.tolist())
# output raster list
output_string_2 = np.char.add(
'%s/%s' % (output_path, output_prefix),
landsat_2_product.band_name
)
output_raster_path_list.extend(
np.char.add(output_string_2, cfg.tif_suffix).tolist()
)
nodata_list.extend(landsat_2_product.nodata.tolist())
calculation_datatype.extend([np.float32] * len(landsat_product))
output_datatype.extend([cfg.uint16_dt] * len(landsat_product))
scale_list.extend([0.0001] * len(landsat_product))
offset_list.extend([0] * len(landsat_product))
output_nodata.extend(
[cfg.nodata_val_UInt16] * len(landsat_product)
)
files_directories.create_directory(output_path)
# dummy bands for memory calculation
dummy_bands = 2
# conversion
if dos1_correction:
# get min dn values
cfg.multiprocess.run_separated(
raster_path_list=input_dos1_list,
function=raster_unique_values_with_sum, dummy_bands=dummy_bands,
use_value_as_nodata=dos1_nodata_list, n_processes=n_processes,
available_ram=available_ram, keep_output_argument=True,
progress_message='unique values', min_progress=1, max_progress=30
)
cfg.multiprocess.find_minimum_dn()
min_dn = cfg.multiprocess.output
for i in range(len(dos1_expressions)):
expressions.append(
dos1_expressions[i].replace('dnm', str(min_dn[i]))
)
input_list.append(input_dos1_list[i])
# dummy bands for memory calculation
dummy_bands = 2
# run calculation
cfg.multiprocess.run_separated(
raster_path_list=input_list, function=band_calculation,
function_argument=expressions,
calculation_datatype=calculation_datatype,
use_value_as_nodata=nodata_list, dummy_bands=dummy_bands,
output_raster_list=output_raster_path_list,
output_data_type=output_datatype, output_nodata_value=output_nodata,
compress=cfg.raster_compression, n_processes=n_processes,
available_ram=available_ram, scale=scale_list, offset=offset_list,
progress_message='processing', min_progress=30, max_progress=99
)
if len(output_raster_path_list) == 0:
cfg.logger.log.error('unable to process files')
cfg.messages.error('unable to process files')
return OutputManager(check=False)
else:
for i in output_raster_path_list:
if not files_directories.is_file(i):
cfg.logger.log.error('unable to process file: %s' % str(i))
cfg.messages.error('unable to process file: %s' % str(i))
return OutputManager(check=False)
# add output to BandSet
if add_bandset is not None and bandset_catalog is not None:
product = product_table.product[0]
# get Landsat satellite
if product == cfg.landsat:
spacecraft = product_table.spacecraft[0]
if '1' in spacecraft or '2' in spacecraft or '3' in spacecraft:
product = cfg.satLandsat13
elif '4' in spacecraft or '5' in spacecraft:
product = cfg.satLandsat45
elif '7' in spacecraft:
product = cfg.satLandsat7
elif '8' in spacecraft:
product = cfg.satLandsat8
elif '9' in spacecraft:
product = cfg.satLandsat9
if add_bandset is True:
# create bandset
bandset_catalog.create_bandset(
paths=output_raster_path_list,
wavelengths=[product],
date=str(product_table.date[0])
)
else:
for path in output_raster_path_list:
# add to current bandset
try:
bandset_catalog.add_band_to_bandset(
path=path,
bandset_number=bandset_catalog.current_bandset
)
bandset_catalog.set_satellite_wavelength(
satellite_name=product,
bandset_number=bandset_catalog.current_bandset)
except Exception as err:
cfg.logger.log.error(str(err))
cfg.messages.error(str(err))
cfg.progress.update(end=True)
cfg.logger.log.info(
'end; preprocess products: %s' % str(output_raster_path_list)
)
return OutputManager(paths=output_raster_path_list)
# create product table
def create_product_table(
input_path, metadata_file_path=None, product=None, nodata_value=None,
sensor=None, acquisition_date=None
):
band_names = []
band_number_list = []
product_path_list = []
product_name_list = []
scale_value_list = []
scale_offset_dict = {}
k_dict = {}
offset_value_list = []
k2_list = []
k1_list = []
e_sun_list = []
sun_elevation_list = []
earth_sun_distance_list = []
spacecraft_list = []
metadata = metadata_doc = metadata_type = product_date = product_name = \
processing_level = sun_elevation = None
earth_sun_distance = None
if product == cfg.sentinel2:
product_name = cfg.sentinel2
elif product == cfg.landsat:
product_name = cfg.landsat
# get metadata
if metadata_file_path is None:
for f in os.listdir(input_path):
# Sentinel-2 metadata
if f.lower().endswith('.xml') and (
'mtd_msil1c' in f.lower() or 'mtd_safl1c' in f.lower()
or 'mtd_msil2a' in f.lower()):
metadata = '%s/%s' % (input_path, str(f))
product_name = cfg.sentinel2
metadata_type = 'xml'
# Landsat metadata
elif f[0].lower() == 'l' and f.lower().endswith(
'.xml'
) and 'mtl' in f.lower():
metadata = '%s/%s' % (input_path, str(f))
product_name = cfg.landsat
metadata_type = 'xml'
else:
metadata = metadata_file_path
if files_directories.file_extension(metadata) == cfg.xml_suffix:
metadata_type = 'xml'
if metadata_type == 'xml':
# open metadata
try:
metadata_doc = minidom.parse(metadata)
# Sentinel-2
try:
spacecraft_name = \
metadata_doc.getElementsByTagName('SPACECRAFT_NAME')[
0].firstChild.data
if spacecraft_name:
product_name = cfg.sentinel2
except Exception as err:
str(err)
# Landsat
try:
spacecraft_id = \
metadata_doc.getElementsByTagName('SPACECRAFT_ID')[
0].firstChild.data
if spacecraft_id:
product_name = cfg.landsat
except Exception as err:
str(err)
except Exception as err:
cfg.messages.error('unable to open metadata')
cfg.logger.log.error(str(err))
return OutputManager(check=False)
# Sentinel-2
if product_name == cfg.sentinel2:
cfg.logger.log.debug(cfg.sentinel2)
scale_value = 1 / 10000
offset_value = 0
sentinel2_bands = cfg.satellites[cfg.satSentinel2][2]
# open metadata
if metadata_doc:
try:
# get date in the format YYYY-MM-DD
product_date = \
metadata_doc.getElementsByTagName('PRODUCT_START_TIME')[
0].firstChild.data.split('T')[0]
processing_level = \
metadata_doc.getElementsByTagName('PROCESSING_LEVEL')[
0].firstChild.data
# L2A products
if '2a' in processing_level.lower():
scale_value = 1 / int(
metadata_doc.getElementsByTagName(
'BOA_QUANTIFICATION_VALUE'
)[0].firstChild.data
)
offset = metadata_doc.getElementsByTagName(
'BOA_ADD_OFFSET'
)
# L1C products
else:
scale_value = 1 / int(
metadata_doc.getElementsByTagName(
'QUANTIFICATION_VALUE'
)[0].firstChild.data
)
offset = metadata_doc.getElementsByTagName(
'RADIO_ADD_OFFSET'
)
for n in range(len(sentinel2_bands)):
if offset_value:
offset_value_list.append(offset[n].firstChild.data)
cfg.logger.log.debug('metadata')
except Exception as err:
cfg.logger.log.error(str(err))
cfg.messages.error(str(err))
# use default values
else:
scale_value = 1 / 10000
offset_value_list = [0] * len(sentinel2_bands)
cfg.messages.warning('using default values without metadata')
cfg.logger.log.debug('no metadata')
# get bands
file_list = files_directories.files_in_directory(
input_path, sort_files=True, suffix_filter=cfg.tif_suffix
)
file_list.extend(
files_directories.files_in_directory(
input_path, sort_files=True, suffix_filter='.jp2'
)
)
for f in file_list:
# check band number
if f[-6:-4].lower() in sentinel2_bands:
band_names.append(files_directories.file_name(f))
product_path_list.append(f)
band_number_list.append(f[-6:-4].lower())
product_name_list = [product_name] * len(band_names)
spacecraft_list = [product_name] * len(band_names)
scale_value_list = [scale_value] * len(band_names)
if len(offset_value_list) == 0:
offset_value_list = [0] * len(band_names)
elif product_name == cfg.landsat:
cfg.logger.log.debug(cfg.landsat)
# open metadata
if metadata_doc:
sensor_id = metadata_doc.getElementsByTagName('SENSOR_ID')[
0].firstChild.data
processing_level = \
metadata_doc.getElementsByTagName('PROCESSING_LEVEL')[
0].firstChild.data.lower()
spacecraft_id = metadata_doc.getElementsByTagName('SPACECRAFT_ID')[
0].firstChild.data.lower()
# get date in the format YYYY-MM-DD
product_date = metadata_doc.getElementsByTagName('DATE_ACQUIRED')[
0].firstChild.data
sun_elevation = metadata_doc.getElementsByTagName('SUN_ELEVATION')[
0].firstChild.data
earth_sun_distance = \
metadata_doc.getElementsByTagName('EARTH_SUN_DISTANCE')[
0].firstChild.data
if sensor_id.lower() == cfg.sensor_oli:
band_list = cfg.satellites[cfg.satLandsat8][2]
elif sensor_id.lower() == cfg.sensor_etm:
band_list = cfg.satellites[cfg.satLandsat7][2]
elif sensor_id.lower() == cfg.sensor_tm:
band_list = cfg.satellites[cfg.satLandsat45][2]
elif sensor_id.lower() == cfg.sensor_mss:
band_list = cfg.satellites[cfg.satLandsat13][2]
else:
band_list = []
for b in band_list:
if processing_level == 'l2sp':
reflectance_tag = metadata_doc.getElementsByTagName(
'LEVEL2_SURFACE_REFLECTANCE_PARAMETERS'
)[0]
else:
reflectance_tag = metadata_doc.getElementsByTagName(
'LEVEL1_RADIOMETRIC_RESCALING'
)[0]
try:
reflectance_mult = reflectance_tag.getElementsByTagName(
'REFLECTANCE_MULT_BAND_%s' % str(b)
)[0].firstChild.data
reflectance_add = reflectance_tag.getElementsByTagName(
'REFLECTANCE_ADD_BAND_%s' % str(b)
)[0].firstChild.data
scale_offset_dict[str(b)] = [float(reflectance_mult),
float(reflectance_add)]
except Exception as err:
str(err)
scale_offset_dict[str(b)] = [1, 0]
# temperature
# Landsat 8-9
if sensor_id.lower() == cfg.sensor_oli and processing_level == \
'l2sp':
scale_offset_dict['10'] = [float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_MULT_BAND_ST_B10'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_ADD_BAND_ST_B10'
)[0].firstChild.data
)]
# Landsat 7 level 1
elif sensor_id.lower() == cfg.sensor_etm and processing_level == \
'l1tp':
k_dict['6_VCID_1'] = [float(
metadata_doc.getElementsByTagName(
'K1_CONSTANT_BAND_6_VCID_1'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'K2_CONSTANT_BAND_6_VCID_1'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'RADIANCE_MULT_BAND_6_VCID_1'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'RADIANCE_ADD_BAND_6_VCID_1'
)[0].firstChild.data
)]
k_dict['6_VCID_2'] = [float(
metadata_doc.getElementsByTagName(
'K1_CONSTANT_BAND_6_VCID_2'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'K2_CONSTANT_BAND_6_VCID_2'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'RADIANCE_MULT_BAND_6_VCID_2'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'RADIANCE_ADD_BAND_6_VCID_2'
)[0].firstChild.data
)]
# Landsat 7 level 2
elif sensor_id.lower() == cfg.sensor_tm and processing_level == \
'l2sp':
scale_offset_dict['6'] = [float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_MULT_BAND_ST_B6'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_ADD_BAND_ST_B6'
)[0].firstChild.data
)]
# Landsat 5 level 1
elif sensor_id.lower() == cfg.sensor_tm and processing_level == \
'l1tp':
k_dict['6'] = [float(
metadata_doc.getElementsByTagName('K1_CONSTANT_BAND_6')[
0].firstChild.data
), float(
metadata_doc.getElementsByTagName('K2_CONSTANT_BAND_6')[
0].firstChild.data
), float(
metadata_doc.getElementsByTagName('RADIANCE_MULT_BAND_6')[
0].firstChild.data
), float(
metadata_doc.getElementsByTagName('RADIANCE_ADD_BAND_6')[
0].firstChild.data
)]
# Landsat 5 level 2
elif sensor_id.lower() == cfg.sensor_tm and processing_level == \
'l2sp':
scale_offset_dict['6'] = [float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_MULT_BAND_ST_B6'
)[0].firstChild.data
), float(
metadata_doc.getElementsByTagName(
'TEMPERATURE_ADD_BAND_ST_B6'
)[0].firstChild.data
)]
# use default values
else:
spacecraft_id = cfg.landsat
if sensor:
sensor_id = sensor
else:
sensor_id = 'oli_tirs'
processing_level = 'l2sp'
if acquisition_date:
product_date = acquisition_date
else:
product_date = '2000-01-01'
cfg.messages.warning('using default values without metadata')
cfg.logger.log.debug('no metadata')
cfg.logger.log.debug('sensor_id: %s' % sensor_id)
# get bands
if sensor_id.lower() == cfg.sensor_oli:
landsat_bands = cfg.satellites[cfg.satLandsat8][2]
elif sensor_id.lower() == cfg.sensor_etm:
landsat_bands = cfg.satellites[cfg.satLandsat7][2]
elif sensor_id.lower() == cfg.sensor_tm:
landsat_bands = cfg.satellites[cfg.satLandsat45][2]
elif sensor_id.lower() == cfg.sensor_mss:
landsat_bands = cfg.satellites[cfg.satLandsat13][2]
else:
landsat_bands = []
file_list = files_directories.files_in_directory(
input_path, sort_files=True, suffix_filter=cfg.tif_suffix
)
for f in file_list:
# check band number for multispectral bands
if f[-5:-4] in landsat_bands:
band_names.append(files_directories.file_name(f))
band_number_list.append(f[-5:-4])
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if f[-5:-4] in scale_offset_dict:
scale_value_list.append(scale_offset_dict[f[-5:-4]][0])
offset_value_list.append(scale_offset_dict[f[-5:-4]][1])
k1_list.append(0)
k2_list.append(0)
# temperature bands Landsat 5 band 6
elif sensor_id.lower() == cfg.sensor_tm:
if processing_level == 'l2sp' and f[-5:-4] == '6':
band_names.append(files_directories.file_name(f))
band_number_list.append('6')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '6' in scale_offset_dict:
scale_value_list.append(scale_offset_dict['6'][0])
offset_value_list.append(scale_offset_dict['6'][1])
k1_list.append(0)
k2_list.append(0)
elif processing_level == 'l1tp' and f[-5:-4] == '6':
band_names.append(files_directories.file_name(f))
band_number_list.append('6')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '6' in k_dict:
k1_list.append(k_dict['6'][0])
k2_list.append(k_dict['6'][1])
scale_value_list.append(k_dict['6'][2])
offset_value_list.append(k_dict['6'][3])
# temperature bands Landsat 7 band 6
elif sensor_id.lower() == cfg.sensor_etm:
if processing_level == 'l2sp' and f[-5:-4] == '6':
band_names.append(files_directories.file_name(f))
band_number_list.append('6')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '6' in scale_offset_dict:
scale_value_list.append(scale_offset_dict['6'][0])
offset_value_list.append(scale_offset_dict['6'][1])
k1_list.append(0)
k2_list.append(0)
elif processing_level == 'l1tp' and f[-12:-4] == '6_VCID_1':
band_names.append(files_directories.file_name(f))
band_number_list.append('6_VCID_1')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '6_VCID_1' in k_dict:
k1_list.append(k_dict['6_VCID_1'][0])
k2_list.append(k_dict['6_VCID_1'][1])
scale_value_list.append(1)
offset_value_list.append(0)
elif processing_level == 'l1tp' and f[-12:-4] == '6_VCID_2':
band_names.append(files_directories.file_name(f))
band_number_list.append('6_VCID_2')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '6_VCID_2' in k_dict:
k1_list.append(k_dict['6_VCID_2'][0])
k2_list.append(k_dict['6_VCID_2'][1])
scale_value_list.append(1)
offset_value_list.append(0)
# temperature band 10 Landsat 8-9
elif sensor_id.lower() == cfg.sensor_oli and f[-6:-4] == '10':
band_names.append(files_directories.file_name(f))
band_number_list.append('10')
product_path_list.append(f)
sun_elevation_list.append(sun_elevation)
earth_sun_distance_list.append(earth_sun_distance)
if '10' in scale_offset_dict:
scale_value_list.append(scale_offset_dict['10'][0])
offset_value_list.append(scale_offset_dict['10'][1])
k1_list.append(0)
k2_list.append(0)
product_name_list = [cfg.landsat] * len(band_names)
spacecraft_list = [spacecraft_id] * len(band_names)
if len(scale_value_list) == 0:
scale_value_list = [0.0000275] * len(band_names)
if len(offset_value_list) == 0:
offset_value_list = [-0.2] * len(band_names)
if len(k2_list) == 0:
k2_list = [0] * len(band_names)
if len(k1_list) == 0:
k1_list = [0] * len(band_names)
if len(e_sun_list) == 0:
e_sun_list = [0] * len(band_names)
if len(sun_elevation_list) == 0:
sun_elevation_list = [0] * len(band_names)
if len(earth_sun_distance_list) == 0:
earth_sun_distance_list = [0] * len(band_names)
processing_level_list = [processing_level] * len(band_names)
product_date_list = [product_date] * len(band_names)
if len(offset_value_list) == 0:
offset_value_list = [0] * len(band_names)
if nodata_value:
nodata_value_list = [nodata_value] * len(band_names)
else:
nodata_value_list = [np.nan] * len(band_names)
product_table = tm.add_product_to_preprocess(
product_list=product_name_list, spacecraft_list=spacecraft_list,
processing_level=processing_level_list,
band_name_list=band_names, product_path_list=product_path_list,
scale_list=scale_value_list,
offset_list=offset_value_list, nodata_list=nodata_value_list,
date_list=product_date_list,
k1_list=k1_list, k2_list=k2_list, band_number_list=band_number_list,
e_sun_list=e_sun_list, sun_elevation_list=sun_elevation_list,
earth_sun_distance_list=earth_sun_distance_list
)
return product_table | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/preprocess_products.py | 0.823612 | 0.343397 | preprocess_products.py | pypi |
# noqa: E501
from typing import Union, Optional
import numpy
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import raster_resample
from remotior_sensus.util import files_directories, shared_tools, raster_vector
def vector_to_raster(
vector_path, align_raster: Union[str, BandSet, int],
vector_field: Optional[str] = None,
constant: Optional[int] = None,
pixel_size: Optional[int] = None,
output_path: Optional[str] = None,
method: Optional[str] = None,
area_precision: Optional[int] = 20, resample='mode',
nodata_value: Optional[int] = None,
minimum_extent: Optional[bool] = True,
extent_list: Optional[list] = None, output_format='GTiff',
compress=None, compress_format=None,
n_processes: Optional[int] = None, available_ram: Optional[int] = None,
bandset_catalog: Optional[BandSetCatalog] = None,
) -> OutputManager:
"""Performs the conversion from vector to raster.
This tool performs the conversion from vector polygons to raster.
Args:
vector_path: path of vector used as input.
align_raster: optional string path of raster used for aligning output pixels and projections; it can also be a BandSet or an integer number of a BandSet in a Catalog.
output_path: string of output path.
vector_field: the name of the field used as reference value.
constant: integer value used as reference for all the polygons.
pixel_size: size of pixel of output raster.
minimum_extent: if True, raster has the minimum vector extent; if False, the extent is the same as the align raster.
extent_list: list of boundary coordinates left top right bottom.
output_format: output format, default GTiff
method: method of conversion, default pixel_center, other methods are all_touched for burning all pixels touched or area_based for burning values based on area proportion.
area_precision: for area_based method, the higher the value, the more is the precision in area proportion calculation.
resample: type for resample when method is area_based.
compress: if True, compress the output raster.
compress_format: compress format.
nodata_value: value to be considered as nodata.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
bandset_catalog: BandSetCatalog object.
Returns:
object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- path = output path
Examples:
Perform the conversion to raster of a vector
>>> vector_to_raster(vector_path='file.gpkg',output_path='vector.tif')
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
vector_path = files_directories.input_path(vector_path)
if type(align_raster) is str:
input_bands = [align_raster]
else:
input_bands = align_raster
cfg.logger.log.debug('input_bands: %s' % str(input_bands))
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=input_bands, output_path=output_path,
n_processes=n_processes, box_coordinate_list=extent_list,
bandset_catalog=bandset_catalog
)
reference_path = prepared['temporary_virtual_raster'][0]
# prepare output
temp_path = cfg.temp.temporary_file_path(name_suffix=cfg.tif_suffix)
if n_processes is None:
n_processes = cfg.n_processes
# perform conversion
if compress is None:
compress = cfg.raster_compression
if compress_format is None:
compress_format = 'DEFLATE21'
if pixel_size is None:
(gt, reference_crs, unit, xy_count, nd, number_of_bands, block_size,
scale_offset, data_type) = raster_vector.raster_info(align_raster)
x_y_size = (round(gt[1], 3), round(gt[1], 3))
else:
x_y_size = [pixel_size, pixel_size]
t_pixel_size = x_y_size
if vector_field is None and constant is None:
constant = 1
nodata_value_set = nodata_value
if nodata_value_set is None:
nodata_value_set = cfg.nodata_val_Int32
min_progress = 1
if method is None or method.lower() == 'pixel_center':
all_touched = None
max_progress = 100
elif method.lower() == 'all_touched':
all_touched = True
max_progress = 100
elif method.lower() == 'area_based':
all_touched = None
compress = True
minimum_extent = False
max_progress = 50
# calculate pixel size precision
size_precision = round(x_y_size[0] / area_precision, 2)
if size_precision == 0:
size_precision = 0.1
ratio = size_precision.as_integer_ratio()
# greatest common divisor
try:
area_precision = numpy.gcd(
ratio[1], x_y_size[0]) * 10**(len(str(area_precision)) - 1)
except Exception as err:
str(err)
area_precision = numpy.gcd(
ratio[1], int(x_y_size[0] * 100)) * 10**(
len(str(area_precision)) - 1)
temp_px_size = x_y_size[0] / area_precision
t_pixel_size = [temp_px_size, temp_px_size]
else:
all_touched = None
max_progress = 100
cfg.progress.update(message='processing', step=1)
# open input with GDAL
cfg.logger.log.debug('vector_path: %s' % vector_path)
vector_crs = raster_vector.get_crs(vector_path)
reference_crs = raster_vector.get_crs(reference_path)
# check crs
same_crs = raster_vector.compare_crs(vector_crs, reference_crs)
cfg.logger.log.debug('same_crs: %s' % str(same_crs))
if not same_crs:
input_vector = cfg.temp.temporary_file_path(
name_suffix=files_directories.file_extension(vector_path)
)
vector_path = raster_vector.reproject_vector(
vector_path, input_vector, input_epsg=vector_crs,
output_epsg=reference_crs
)
cfg.logger.log.debug('t_pixel_size: %s' % str(t_pixel_size))
# perform conversion
cfg.multiprocess.multiprocess_vector_to_raster(
vector_path=vector_path, field_name=vector_field,
output_path=temp_path, reference_raster_path=reference_path,
output_format=output_format, nodata_value=nodata_value_set,
background_value=nodata_value_set, burn_values=constant,
compress=compress, compress_format=compress_format,
x_y_size=t_pixel_size, all_touched=all_touched,
available_ram=available_ram, minimum_extent=minimum_extent,
min_progress=min_progress, max_progress=max_progress
)
cfg.logger.log.debug('temp_path: %s' % temp_path)
if output_path is None:
output_path = cfg.temp.temporary_file_path(name_suffix=cfg.tif_suffix)
output_path = files_directories.output_path(output_path, cfg.tif_suffix)
files_directories.create_parent_directory(output_path)
# resample raster
if method is not None and method.lower() == 'area_based_experimental':
min_progress = 51
max_progress = 100
(gt, crs, crs_unit, xy_count, nd, number_of_bands, block_size,
scale_offset, data_type) = raster_vector.raster_info(temp_path)
left = gt[0]
top = gt[3]
t_x_size = gt[1]
t_y_size = abs(gt[5])
cfg.logger.log.debug('t_x_size, t_y_size: %s, %s'
% (t_x_size, t_y_size))
value_list = [t_x_size, t_y_size]
# calculate output size
specific_output = {}
resize_factor = t_y_size / x_y_size[0]
cfg.logger.log.debug('resize_factor: %s' % resize_factor)
specific_output['geo_transform'] = (left, x_y_size[0], 0, top, 0,
-x_y_size[1])
specific_output['resize_factor'] = resize_factor
cfg.multiprocess.run(
raster_path=temp_path, function=raster_resample,
function_argument=x_y_size, n_processes=n_processes,
available_ram=available_ram, calculation_datatype=numpy.int32,
function_variable=value_list, output_raster_path=output_path,
use_value_as_nodata=nodata_value_set,
specific_output=specific_output,
output_data_type='Int32', output_nodata_value=cfg.nodata_val_Int32,
compress=cfg.raster_compression,
progress_message='resampling', multiple_block=1/resize_factor,
min_progress=min_progress, max_progress=max_progress
)
elif method is not None and method.lower() == 'area_based':
(gt, crs, crs_unit, xy_count, nd, number_of_bands, block_size,
scale_offset, data_type) = raster_vector.raster_info(temp_path)
# copy raster
left = gt[0]
top = gt[3]
right = gt[0] + gt[1] * xy_count[0]
bottom = gt[3] + gt[5] * xy_count[1]
if compress_format == 'DEFLATE21':
compress_format = 'DEFLATE -co PREDICTOR=2 -co ZLEVEL=1'
extra_params = ' -te %s %s %s %s -tr %s %s' % (
left, bottom, right, top, x_y_size[0], x_y_size[1])
min_progress = 51
max_progress = 100
raster_vector.gdal_warping(
input_raster=temp_path, output=output_path, output_format='GTiff',
resample_method=resample, compression=True,
compress_format=compress_format, additional_params=extra_params,
n_processes=n_processes, dst_nodata=nodata_value,
min_progress=min_progress, max_progress=max_progress)
else:
if files_directories.is_file(temp_path):
files_directories.move_file(
in_path=temp_path, out_path=output_path
)
cfg.progress.update(end=True)
cfg.logger.log.info('end; output_path: %s' % output_path)
return OutputManager(path=output_path) | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/vector_to_raster.py | 0.840095 | 0.441733 | vector_to_raster.py | pypi |
# noqa: E501
from typing import Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util import files_directories, shared_tools
def raster_to_vector(
raster_path, output_path: Optional[str] = None,
dissolve: Optional[bool] = None, field_name: Optional[str] = None,
extent_list: Optional[list] = None,
n_processes: Optional[int] = None, available_ram: Optional[int] = None
) -> OutputManager:
"""Performs the conversion from raster to vector.
This tool performs the conversion from raster to vector.
Parallel processes are used for the conversion, resulting in a vector output
which is split as many in portions as the process numbers.
The argument dissolve allows for merging these portions,
but it requires additional processing time depending on vector size.
Args:
raster_path: path of raster used as input.
output_path: string of output path.
dissolve: if True, dissolve adjacent polygons having the same values;
if False, polygons are not dissolved and the process is rapider.
field_name: name of the output vector field to store raster values (default = DN).
extent_list: list of boundary coordinates left top right bottom.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
Returns:
object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- path = output path
Examples:
Perform the conversion to vector of a raster
>>> raster_to_vector(raster_path='file.tif',output_path='vector.gpkg')
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
raster_path = files_directories.input_path(raster_path)
if extent_list is not None:
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=[raster_path], output_path=output_path,
n_processes=n_processes, box_coordinate_list=extent_list
)
input_raster_list = prepared['input_raster_list']
raster_path = input_raster_list[0]
if output_path is None:
output_path = cfg.temp.temporary_file_path(name_suffix=cfg.gpkg_suffix)
output_path = files_directories.output_path(output_path, cfg.gpkg_suffix)
files_directories.create_parent_directory(output_path)
if n_processes is None:
n_processes = cfg.n_processes
# perform conversion
cfg.multiprocess.multiprocess_raster_to_vector(
raster_path=raster_path, output_vector_path=output_path,
field_name=field_name, n_processes=n_processes,
dissolve_output=dissolve, min_progress=1, max_progress=100,
available_ram=available_ram
)
cfg.progress.update(end=True)
cfg.logger.log.info('end; output_path: %s' % output_path)
return OutputManager(path=output_path) | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/raster_to_vector.py | 0.886451 | 0.367497 | raster_to_vector.py | pypi |
from typing import Union, Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.bandset_catalog import BandSet
from remotior_sensus.core.bandset_catalog import BandSetCatalog
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import raster_dilation
from remotior_sensus.util import shared_tools
def band_dilation(
input_bands: Union[list, int, BandSet], value_list: list, size: int,
output_path: Union[list, str] = None,
overwrite: Optional[bool] = False,
circular_structure: Optional[bool] = None,
prefix: Optional[str] = '', extent_list: Optional[list] = None,
n_processes: Optional[int] = None,
available_ram: Optional[int] = None,
bandset_catalog: Optional[BandSetCatalog] = None,
virtual_output: Optional[bool] = None
) -> OutputManager:
"""Perform dilation of band pixels.
This tool performs the dilation of pixels identified by a list of values.
A new raster is created for each input band.
Args:
input_bands: input of type BandSet or list of paths or integer
number of BandSet.
output_path: string of output path directory or list of paths.
overwrite: if True, output overwrites existing files.
value_list: list of values for dilation.
size: size of dilation in pixels.
virtual_output: if True (and output_path is directory), save output
as virtual raster of multiprocess parts
circular_structure: if True, use circular structure; if False, square structure.
prefix: optional string for output name prefix.
extent_list: list of boundary coordinates left top right bottom.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
bandset_catalog: optional type BandSetCatalog for BandSet number
Returns:
Object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- paths = output list
Examples:
Perform the dilation of size 5 for value 1 and 2
>>> dilation = band_dilation(input_bands=['path_1', 'path_2'],value_list=[1, 2],size=5,output_path='directory_path',circular_structure=True)
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=input_bands, output_path=output_path, overwrite=overwrite,
n_processes=n_processes, box_coordinate_list=extent_list,
bandset_catalog=bandset_catalog, prefix=prefix,
multiple_output=True, multiple_input=True,
virtual_output=virtual_output
)
input_raster_list = prepared['input_raster_list']
raster_info = prepared['raster_info']
n_processes = prepared['n_processes']
nodata_list = prepared['nodata_list']
output_list = prepared['output_list']
vrt_list = prepared['vrt_list']
if not circular_structure:
structure = shared_tools.create_base_structure(size * 2 + 1)
else:
structure = shared_tools.create_circular_structure(size)
# process calculation
n = 0
min_p = 1
max_p = int((99 - 1) / len(input_raster_list))
# dummy bands for memory calculation as the number of values
dummy_bands = len(value_list) + 4
for i in input_raster_list:
out = output_list[n]
nd = nodata_list[n]
data_type = raster_info[n][8]
cfg.multiprocess.run(
raster_path=i, function=raster_dilation,
function_argument=structure, n_processes=n_processes,
available_ram=available_ram,
function_variable=value_list, output_raster_path=out,
output_data_type=data_type, output_nodata_value=nd,
compress=cfg.raster_compression, dummy_bands=dummy_bands,
boundary_size=structure.shape[0] + 1, virtual_raster=vrt_list[n],
progress_message='processing raster %s' % (n + 1),
min_progress=min_p + max_p * n,
max_progress=min_p + max_p * (n + 1)
)
n += 1
cfg.progress.update(end=True)
cfg.logger.log.info('end; band dilation: %s' % output_list)
return OutputManager(paths=output_list) | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/band_dilation.py | 0.908925 | 0.327453 | band_dilation.py | pypi |
# noqa: E501
import io
from typing import Optional
import numpy as np
from remotior_sensus.core import configurations as cfg, table_manager as tm
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.core.processor_functions import (
raster_unique_values_with_sum
)
from remotior_sensus.util import (
files_directories, raster_vector, read_write_files, shared_tools
)
def raster_report(
raster_path: str, output_path: Optional[str] = None,
nodata_value: Optional[int] = None, extent_list: Optional[list] = None,
n_processes: Optional[int] = None, available_ram: Optional[int] = None
):
"""Calculation of a report providing information extracted from a raster.
This tool allows for the calculation of a report providing information
such as pixel count, area per class and percentage of the total area.
The output is a csv file.
This tool is intended for integer rasters.
Args:
raster_path: path of raster used as input.
output_path: string of output path.
nodata_value: value to be considered as nodata.
extent_list: list of boundary coordinates left top right bottom.
n_processes: number of parallel processes.
available_ram: number of megabytes of RAM available to processes.
Returns:
object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- path = output path
Examples:
Perform the report of a raster
>>> raster_report(raster_path='file.tif',output_path='report.csv')
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
raster_path = files_directories.input_path(raster_path)
if extent_list is not None:
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=[raster_path], output_path=output_path,
n_processes=n_processes, box_coordinate_list=extent_list
)
n_processes = prepared['n_processes']
raster_path = prepared['temporary_virtual_raster']
if output_path is None:
output_path = cfg.temp.temporary_file_path(name_suffix=cfg.csv_suffix)
output_path = files_directories.output_path(output_path, cfg.csv_suffix)
files_directories.create_parent_directory(output_path)
(gt, crs, crs_unit, xy_count, nd, number_of_bands, block_size,
scale_offset, data_type) = raster_vector.raster_info(raster_path)
pixel_size_x = abs(gt[1])
pixel_size_y = abs(gt[5])
if n_processes is None:
n_processes = cfg.n_processes
# dummy bands for memory calculation
dummy_bands = 2
# multiprocess calculate unique values and sum
cfg.multiprocess.run(
raster_path=raster_path, function=raster_unique_values_with_sum,
use_value_as_nodata=nodata_value, n_processes=n_processes,
available_ram=available_ram, keep_output_argument=True,
dummy_bands=dummy_bands,
progress_message='unique values', min_progress=2, max_progress=99
)
cfg.progress.update(message='output table', step=99)
# calculate sum of values
cfg.multiprocess.multiprocess_sum_array(nodata_value)
unique_val = cfg.multiprocess.output
# create table
table = _report_table(
table=unique_val, crs_unit=crs_unit, pixel_size_x=pixel_size_x,
pixel_size_y=pixel_size_y
)
# save combination to table
read_write_files.write_file(table, output_path)
cfg.progress.update(end=True)
cfg.logger.log.info('end; raster report: %s' % output_path)
return OutputManager(path=output_path)
def _report_table(table, crs_unit, pixel_size_x, pixel_size_y):
"""Create text for tables."""
cfg.logger.log.debug('start')
total_sum = table['sum'].sum()
text = []
cv = cfg.comma_delimiter
nl = cfg.new_line
# table
if 'degree' not in crs_unit:
output_field_names = ['RasterValue', 'PixelSum', 'Percentage %',
'Area [%s^2]' % crs_unit]
input_field_names = ['new_val', 'sum', 'percentage', 'area']
cross_class = tm.calculate_multi(
matrix=table, expression_string_list=[
'"sum" * %s * %s' % (pixel_size_x, pixel_size_y),
'100 * "sum" / %s' % total_sum],
output_field_name_list=['area', 'percentage'],
progress_message=False
)
else:
output_field_names = ['RasterValue', 'PixelSum', 'Percentage %',
'Area not available']
input_field_names = ['new_val', 'sum', 'percentage', 'area']
# area is set to nan
cross_class = tm.calculate_multi(
matrix=table,
expression_string_list=['100 * "sum" / %s' % total_sum,
'np.nan * "sum"'],
output_field_name_list=['percentage', 'area'],
progress_message=False
)
redefined = tm.redefine_matrix_columns(
matrix=cross_class, input_field_names=input_field_names,
output_field_names=output_field_names, progress_message=False
)
# create stream handler
stream1 = io.StringIO()
np.savetxt(stream1, redefined, delimiter=cv, fmt='%1.2f')
matrix_value = stream1.getvalue()
for c in output_field_names:
text.append(c)
text.append(cv)
text.pop(-1)
text.append(nl)
text.append(matrix_value.replace('.00', ''))
text.append(nl)
joined_text = ''.join(text)
return joined_text | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/raster_report.py | 0.819099 | 0.331282 | raster_report.py | pypi |
# noqa: E501
from typing import Optional
from remotior_sensus.core import configurations as cfg
from remotior_sensus.core.output_manager import OutputManager
from remotior_sensus.util import (
files_directories, raster_vector, shared_tools
)
def raster_split(
raster_path: str, output_path: str = None,
prefix: Optional[str] = None,
extent_list: Optional[list] = None,
n_processes: Optional[int] = None,
virtual_output: Optional[bool] = None
):
"""Split a multiband raster to single bands.
This tool allows for splitting a multiband raster to single bands.
Args:
raster_path: path of raster used as input.
output_path: string of output directory path.
prefix: optional string for output name prefix.
extent_list: list of boundary coordinates left top right bottom.
n_processes: number of parallel processes.
virtual_output: if True (and output_path is directory), save output
as virtual raster.
Returns:
object :func:`~remotior_sensus.core.output_manager.OutputManager` with
- path = output path
Examples:
Perform the split of a raster
>>> split = raster_split(raster_path='input_path',
... output_path='output_path')
""" # noqa: E501
cfg.logger.log.info('start')
cfg.progress.update(
process=__name__.split('.')[-1].replace('_', ' '), message='starting',
start=True
)
raster_path = files_directories.input_path(raster_path)
# prepare process files
prepared = shared_tools.prepare_process_files(
input_bands=[raster_path], output_path=output_path,
n_processes=n_processes, box_coordinate_list=extent_list
)
raster_info = prepared['raster_info']
output_list = []
bands = raster_info[0][5]
output_path = output_path.replace('\\', '/').replace('//', '/')
if output_path.endswith('/'):
output_path = output_path[:-1]
if prefix is None:
prefix = 'band'
for band in range(bands):
files_directories.create_parent_directory(output_path)
out_path = '%s/%s%s' % (output_path, prefix, str(band + 1))
if virtual_output is True:
virtual_path = files_directories.output_path(out_path,
cfg.vrt_suffix)
output = virtual_path
else:
virtual_path = cfg.temp.temporary_file_path(
name_suffix=cfg.vrt_suffix)
output = files_directories.output_path(out_path,
cfg.tif_suffix)
raster_vector.create_virtual_raster(
input_raster_list=[raster_path], output=virtual_path,
band_number_list=[[band + 1]], box_coordinate_list=extent_list,
relative_to_vrt=False
)
if virtual_output is not True:
raster_vector.gdal_copy_raster(
input_raster=virtual_path, output=output
)
output_list.append(output)
cfg.progress.update(
message='splitting', step=band, steps=bands, minimum=1,
maximum=99, percentage=int(100 * band / bands)
)
cfg.progress.update(end=True)
cfg.logger.log.info('end; raster split: %s' % str(output_list))
return OutputManager(paths=output_list) | /remotior_sensus-0.0.79-py3-none-any.whl/remotior_sensus/tools/raster_split.py | 0.842847 | 0.263759 | raster_split.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.