code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
r"""rna_rosetta_run.py - prepare & run ROSETTA simulations
Based on C. Y. Cheng, F. C. Chou, and R. Das, Modeling complex RNA tertiary folds with Rosetta, 1st ed., vol. 553. Elsevier Inc., 2015.
http: // www.sciencedirect.com / science / article / pii / S0076687914000524
The script makes(1) a folder for you job, with seq.fa, ss.fa, input file is copied as input.fa to the folder(2) make helices(3) prepare rosetta input files(4) sends jobs to the cluster.
The header is take from the fast file(`` > /header / ``) not from the filename of your Fasta file.
I discovered this::
qstat -xml | tr '\n' ' ' | sed 's#<job_list[^>]*>#\n#g' \
> | sed 's#<[^>]*>##g' | grep " " | column -t
(https://stackoverflow.com/questions/26104116/qstat-and-long-job-names) so there is now need to shorted my job ids.
Helix
-------------------------------------------------------
Run::
rna_rosetta_run.py -i -e -r -g -c 200 cp20.fa
`-i`::
# prepare a folder for a run
>cp20
AUUAUCAAGAAUCUCAAAGAGAGAUAGCAACCUGCAAUAACGAGCAAGGUGCUAAAAUAGAUAAGCCAAAUUCAAUUGGAAAAAAUGUUAA
.(((((....(((((.....)))))(((..(((((..[[[[..)).))).)))......))))).((((......)))).......]]]].
[peyote2] ~ rna_rosetta_run.py -i cp20.fa
run rosetta for:
cp20
AUUAUCAAGAAUCUCAAAGAGAGAUAGCAACCUGCAAUAACGAGCAAGGUGCUAAAAUAGAUAAGCCAAAUUCAAUUGGAAAAAAUGUUAA
.(((((....(((((.....)))))(((..(((((..[[[[..)).))).)))......))))).((((......)))).......]]]].
/home / magnus // cp20 / created
Seq & ss created
Troubleshooting.
If one of the helices is missing you will get::
IOError: [Errno 2] No such file or directory: 'helix1.out'
rosetta_submit.py README_FARFAR o 500 100 taf
Could not find: README_FARFAR
and the problem was a1 and g8 pairing::
outputting command line to: helix0.RUN # previous helix #0
Sequence: AUGG CCGG
Secstruc: (((())))
Not complementary at positions a1 and g8!
Sequence: GUGGG CCCAU
Secstruc: ((((()))))
Writing to fasta file: helix2.fasta # next helix #2
My case with a modeling of rp12
Sequence: cc gc
Secstruc: (())
Not complementary at positions 1 and 4!
edit the secondary structure, run the program with -i(init, to overwrite seq.fa, ss.fa) and then it works.
Notes::
rp17hc 6 characters
"""
from __future__ import print_function
import argparse
import textwrap
import os
import glob
import subprocess
import shutil
import math
import os
import sys
try:
from rna_tools.rna_tools_config import RNA_ROSETTA_RUN_ROOT_DIR_MODELING
except:
RNA_ROSETTA_RUN_ROOT_DIR_MODELING = ''
print ('Set up rna_rosetta_run_root_dir_for_modeling in rpt_config_local.py')
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawTextHelpFormatter):
    # Combined help formatter: shows each option's default value (from
    # ArgumentDefaultsHelpFormatter) while keeping the raw line breaks of the
    # long module docstring used as the parser description
    # (from RawTextHelpFormatter).
    pass
def get_parser():
    """Build the command-line parser for the ROSETTA modeling pipeline.

    Returns:
        argparse.ArgumentParser: parser with stage flags (-i/-e/-r/-g),
        resource options (-c/-n), motif/sandbox options and the input file.
    """
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=CustomFormatter)
    parser.add_argument('-i', '--init', help='prepare _folder with seq and ss',
                        action='store_true')
    parser.add_argument('-e', '--helices', help='produce h(E)lices',
                        action='store_true')
    parser.add_argument('-r', '--rosetta',
                        help='prepare rosetta files (still you need `go` to send jobs to a cluster)',
                        action='store_true')
    # reuse go()'s docstring so the help text never drifts from the implementation
    parser.add_argument('-g', '--go', help=go.__doc__,
                        action='store_true')
    # clarified: the motif file is forwarded to rna_denovo_setup.py as its -s option
    parser.add_argument('-m', '--motif',
                        help="include a motif file (passed to rna_denovo_setup.py as -s), "
                             "e.g. -m E-loop_1q9a_mutated_no_flanks_renumber.pdb")
    parser.add_argument('-n', '--nstruc', help="# of structures you want to get",
                        default=10000, type=int)
    parser.add_argument('-c', '--cpus', help='# of cpus to be used', default=200,
                        type=int)
    parser.add_argument('--sandbox',
                        help="where to run it (default: RNA_ROSETTA_RUN_ROOT_DIR_MODELING)",
                        default=RNA_ROSETTA_RUN_ROOT_DIR_MODELING)
    parser.add_argument('file', help=textwrap.dedent(
        """file: \n > a04\nUAUAACAUAUAAUUUUGACAAUAUGGGUCAUAAGUUUCUACCGGAAUACCGUAAAUAUUCUGACUAUGUAUA\n((((.((((...((.((((.......)))).))........(.(((((.......))))).)..))))))))"""))
    return parser
def prepare_folder(args, header, seq, ss, path):
    """Create the job folder with seq.fa, ss.fa and a copy of the input file.

    ``ss`` is written lower-cased: lowercase is needed when the structure is
    later combined with motifs, otherwise rna_denovo_setup.py rejects the
    sequence/structure pair (see the module docstring for an example trace).

    Args:
        args: parsed command-line arguments; only ``args.file`` is used here
        header (str): job name taken from the FASTA header (``>name``)
        seq (str): target sequence
        ss (str): target secondary structure in dot-bracket notation
        path (str): job directory; expected to end with ``os.sep``
    """
    d = path
    try:
        os.mkdir(d)
        print(d, 'created')
    except OSError:
        # fixed message (was "created is already created"); reuse the folder
        print(d, 'already exists')
    with open(d + "seq.fa", "w") as f:
        f.write(header + '\n')
        f.write(seq)
    with open(d + "ss.fa", "w") as f:
        # lowercase required for motif mode (see docstring above)
        f.write(ss.lower())
    print('Seq & ss created')
    shutil.copyfile(args.file, d + 'input.fa')
def prepare_helices():
    """Generate helices (wrapper around ``helix_preassemble_setup.py``).

    Collects every generated ``*RUN`` file into one ``HRUNS`` shell script
    (each helix job backgrounded with ``&``) and runs it via subprocess,
    waiting for the script to finish before returning.

    .. warning:: multiprocessing of helixX.run jobs is not reliable here;
       the original author observed that combining ``-e`` with ``-r`` in one
       invocation may fail if helices are not done in time — TODO confirm.
    """
    os.system('helix_preassemble_setup.py -secstruct ss.fa -fasta seq.fa')
    # merge all generated RUN files into a single script
    helix_runs = glob.glob('*RUN')
    print(helix_runs)
    with open('HRUNS', 'w') as script:
        for run_fn in helix_runs:
            with open(run_fn) as run_file:
                script.write(run_file.read().strip() + ' & \n')
    # subprocess + wait() blocks until the shell script itself exits
    # (os.system('bash HRUNS') behaved the same but gave no stderr capture)
    with open('HRUNS') as script:
        p = subprocess.Popen(script.read(), shell=True, stderr=subprocess.PIPE)
    p.wait()
    stderr = p.stderr.read().strip()
    if stderr:
        print(stderr)
def prepare_rosetta(header, cpus, motif, nstruc):
    """Prepare ROSETTA input via ``rna_denovo_setup.py`` and ``rosetta_submit.py``.

    The per-job structure count is derived so that the full run yields
    ``nstruc`` structures in total, e.g. 10000 total / 200 cpus = 50 per job.

    Args:
        header (str): job name, used as the cluster job-id prefix
        cpus (int): number of cluster jobs; also the divisor for nstruct per job
        motif (str): optional motif PDB file, forwarded as ``-s`` to
            rna_denovo_setup.py (or None)
        nstruc (int): total number of structures to obtain across all jobs
    """
    # the last line of CMDLINES lists the helix .out files (commented with '#')
    with open('CMDLINES') as fh:
        helices = fh.readlines()[-1].replace('#', '')
    njobs = cpus
    nstruct = int(math.floor(nstruc / cpus))  # e.g. 20000 / 500 -> 40 per job
    cmd_motif = ' -s ' + motif if motif else ''
    cmd = 'rna_denovo_setup.py -fasta seq.fa -secstruct_file ss.fa -cycles 20000 -no_minimize -nstruct ' + \
        str(nstruct) + ' ' + cmd_motif + ' ' + helices
    print(cmd)
    os.system(cmd)
    # usage: rosetta_submit.py <rosetta command file> <outdir> <# jobs> <# hours> <job name>
    # keep '100' hours: the original author notes the cluster may reject
    # extreme values like 99999
    cmd = 'rosetta_submit.py README_FARFAR o ' + \
        str(njobs) + ' 100 ' + header + '_'
    print(cmd)
    os.system(cmd)
def go():
    """send jobs to a cluster(run qsubMINI)"""
    # make the generated submit script executable, then launch it
    for command in ('chmod +x ./qsubMINI', './qsubMINI'):
        os.system(command)
def main():
    """Pipeline for modeling RNA.

    Parses the command line, reads the FASTA input (header, sequence,
    secondary structure), then runs the requested stages inside the job
    folder: -i init, -e helices, -r rosetta setup, -g submit to cluster.
    """
    args = get_parser().parse_args()
    if args.file:
        f = open(args.file)
        # job name comes from the FASTA header (`>name`), not the filename
        header = f.readline().replace('>', '').replace(' ', '').strip()
        seq = f.readline().strip()
        ss = f.readline().strip()
        cpus = int(args.cpus)
        print('run rosetta for:')
        print(header)
        print(seq)
        print(ss)
        # refuse to run without a configured modeling root directory
        if RNA_ROSETTA_RUN_ROOT_DIR_MODELING.strip() == '':
            print('Set RNA_ROSETTA_RUN_ROOT_DIR_MODELING in your rpt_config file.')
            return
        path = args.sandbox + os.sep + header + \
            os.sep  # RNA_ROSETTA_RUN_ROOT_DIR_MODELING
        curr = os.getcwd()
        if args.init:
            prepare_folder(args, header, seq, ss, path)
        # all later stages run inside the job folder
        try:
            os.chdir(path)
        except OSError:
            print('You have to make a folder first! use --init')
            sys.exit(1)
        if args.helices:
            prepare_helices()
        if args.rosetta:
            prepare_rosetta(header, cpus, args.motif, args.nstruc)
        if args.go:
            go()
        # restore the caller's working directory
        os.chdir(curr)
# main
if __name__ == '__main__':
main() | /rna_tools-3.13.7-py3-none-any.whl/rna_tools-3.13.7.data/scripts/rna_rosetta_run.py | 0.547222 | 0.426979 | rna_rosetta_run.py | pypi |
r"""rna_plot_density.py - generate a density plot
Don't open Excel, Jupyter. Simple plot a density of one column and save it to a file.
Example::
# file
fn rmsd_all
0 19_Bujnicki_Human_4_rpr_n0-000001.pdb-000001_A... 14.73
1 19_Bujnicki_Human_4_rpr_n0-000001.pdb.trafl_19... 0.46
2 19_Bujnicki_Human_4_rpr_n0-000001.pdb.trafl_19... 14.73
3 19_Bujnicki_Human_4_rpr_n0-000001.pdb_thrs0.50... 0.73
4 19_Bujnicki_Human_4_rpr_n0-000001.pdb_thrs0.50... 0.83
$ rna_plot_hist.py rmsds.csv --column rmsd_all
.. image:: ../../rna_tools/tools/plotting/test_data/rmsds_dens.png
"""
from __future__ import print_function
import pandas as pd
import matplotlib.pyplot as plt
import argparse
import sys
plt.style.use('ggplot')
plt.rc('figure', figsize=(10, 6))
def get_parser():
    """Return the command-line parser for the box/swarm plot script."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('file', help="rmsd.txt")
    parser.add_argument('x', help="column of file to plot on the x axis")
    parser.add_argument('y', help="column of file to plot on the y axis")
    # fixed: the help text used to claim the default separator was a tab,
    # but the actual default is a comma
    parser.add_argument('--sep', help="separator, by default ','", default=",")
    parser.add_argument('-o', '--output')
    return parser
if __name__ == '__main__':
    args = get_parser().parse_args()
    df = pd.read_csv(args.file, sep=args.sep)
    print(df.head())

    import seaborn as sns

    plt.figure(figsize=(6, 4))
    sns.set(style="white")
    # boxplot draws the distribution outline; swarmplot overlays the raw points
    # fixed: the plotted columns used to be hard-coded ("growthb"/"rmsd_all"),
    # silently ignoring the x and y command-line arguments
    ax = sns.boxplot(data=df, x=args.x, y=args.y, color="black")
    ax = sns.swarmplot(data=df, x=args.x, y=args.y, color="orange", ax=ax)
    plt.tight_layout()

    # fixed: outfn was undefined (NameError) whenever --output was supplied
    if args.output:
        outfn = args.output
    else:
        outfn = args.file.replace('.txt', '').replace('.csv', '') + '_bx.png'
    print('Save plot %s' % outfn)
    plt.savefig(outfn, dpi=100)

    import os
    os.system('open %s' % outfn)
import logging
import argparse
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
from Bio.PDB import PDBParser
from Bio.PDB import PDBIO
from Bio.PDB.Atom import PDBConstructionWarning
import warnings
warnings.simplefilter('ignore', PDBConstructionWarning)
# logger
logger = logging.getLogger()
handler = logging.StreamHandler()
logger.addHandler(handler)
def get_seq(alignfn, seqid):
    """Get seq from an alignment with gaps.

    Args:
        alignfn (str): a path to an alignment
        seqid (str): seq id in an alignment

    Usage::

        >>> get_seq('test_data/ALN_OBJ1_OBJ2.fa', 'obj1')
        SeqRecord(seq=SeqRecord(seq=Seq('GUUCAG-------------------UGAC-', SingleLetterAlphabet()), id='obj1', name='obj1', description='obj1', dbxrefs=[]), id='<unknown id>', name='<unknown name>', description='<unknown description>', dbxrefs=[])

    Returns:
        SeqRecord
    """
    # index the FASTA lazily and wrap the requested entry in a SeqRecord
    indexed = SeqIO.index(alignfn, 'fasta')
    return SeqRecord(indexed[seqid])
def open_pdb(pdbfn):
    """Open pdb with Biopython.

    Args:
        pdbfn (str): a path to a pdb structure

    Returns:
        PDB Biopython object: with a pdb structure
    """
    return PDBParser().get_structure('struc', pdbfn)
def renumber(seq_with_gaps, struc, residue_index_start):
    """Renumber the residues of a structure to follow an alignment.

    Gap characters ('-') in ``seq_with_gaps`` are skipped, so the first
    non-gap position receives ``residue_index_start``, the next one
    ``residue_index_start + 1``, and so on. Works only for a single chain.

    Args:
        seq_with_gaps: target sequence extracted from the alignment
            (iterated character by character)
        struc: BioPython structure object to renumber (modified in place)
        residue_index_start (int): number assigned to the first residue

    Returns:
        BioPython Structure object (the same ``struc``, renumbered)
    """
    logger = logging.getLogger()
    new_numbering = []
    for nt in seq_with_gaps:
        if nt != '-':
            logger.info([residue_index_start, nt])
            new_numbering.append(residue_index_start)
            residue_index_start += 1
    logger.info(new_numbering)
    # fixed: the loop previously iterated over the module-level global `pdb`
    # (``for struc in pdb``), shadowing the `struc` parameter and ignoring
    # the argument entirely; iterate over the passed-in structure instead
    for model in struc:
        for chain in model:
            for residue, resi in zip(chain, new_numbering):
                residue.id = (residue.id[0], resi, residue.id[2])
    return struc
def write_struc(struc, outfn):
    """Write renumbered pdb with Biopython.

    Args:
        struc (pdb): a renumbered structure
        outfn (str): a path to a new, renumbered pdb file

    Returns:
        none: writes to a file
    """
    writer = PDBIO()
    writer.set_structure(struc)
    writer.save(outfn)
    logger.info('Structure written to %s' % outfn)
def get_parser():
    """Return the command-line parser (verbosity, numbering start, files)."""
    parser = argparse.ArgumentParser(description=__doc__,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("-v", "--verbose", help="increase output verbosity",
                        action="store_true")
    parser.add_argument("--residue_index_start",
                        help="renumber starting number (default: 1)",
                        default=1, type=int)
    parser.add_argument("--outfn", help="output pdb file (default: pdbfn .pdb -> _out.pdb)")
    # fixed: "alignemnt" typos in the two help strings below
    parser.add_argument("seqid", help="seq id in the alignment")
    parser.add_argument("alignfn", help="alignment in the Fasta format")
    parser.add_argument("pdbfn", help="pdb file")
    return parser
# main
if __name__ == '__main__':
args = get_parser().parse_args()
if args.verbose:
logger.setLevel(logging.INFO)
if not args.outfn:
args.outfn = args.pdbfn.replace('.pdb', '_out.pdb')
seq_with_gaps = get_seq(args.alignfn, args.seqid)
pdb = open_pdb(args.pdbfn)
struc = renumber(seq_with_gaps, pdb, args.residue_index_start)
write_struc(struc, args.outfn) | /rna_tools-3.13.7-py3-none-any.whl/rna_tools-3.13.7.data/scripts/renum_pdb_to_aln.py | 0.62223 | 0.210401 | renum_pdb_to_aln.py | pypi |
# RNAfbinv 2.0
RNAfbinv is a fragment based RNA design tool. It uses a simulated annealing process to optimize a 2D RNA structure.<br/>
The similarity is based on fragment based design. A tree alignment is done based on nodes (structural motifs).<br/>
Nodes are comparable if they are both bounded motifs (stems) or unbounded motifs (multi loop, interior loops, bulges ect...).<br/>
Each iteration the target motif tree will be aligned to the current candidate tree.<br/>
The best alignment with the addition of other valuable features will generate a design score.<br/>
Design score of 0 is exact fit but even higher scores can generate a good candidate.<br/><br/>
RNAfbinv 2.0 can be easily installed as it is available on pypi (python 3 compatible). To install it simply run ```pip install rnafbinv```.
## Attaching Vienna RNA
[Vienna RNA package](https://www.tbi.univie.ac.at/RNA/ "Vienna RNA home") is required for RNAfbinv to work. This must be installed separately.<br/>
Current version was tested with Vienna 2.4 and above. RNAfbinv will identify Vienna package if it's bin directory is in PATH.<br/>
If you wish to link a specific installation of Vienna set the VIENNA_PATH environment variable to the correct bin directory.
You can set Vienna location in python
```python
import os
os.environ['VIENNA_PATH'] = "VIENNA_BIN_DIR_PATH"
```
or directly via the vienna script
```python
from rnafbinv import vienna
vienna.set_vienna_path("VIENNA_BIN_DIR_PATH")
```
## Usage
The design process can be ran using the following code:
```python
from rnafbinv import RNAfbinvCL
RNAfbinvCL.main(command_line_arguments)
```
To generate a tree for a specific sequence / structure:<br/>
Structure is a dot bracket notation structure and sequence is an IUPAC string with the same length
```python
from rnafbinv import shapiro_tree_aligner
shapiro_tree_aligner.get_tree(sructure, sequence)
```
To compare two trees and score them:
alignment_rules has a default value and is optional
```python
from rnafbinv import shapiro_tree_aligner
shapiro_tree_aligner.align_trees(source_tree, tree_target, alignment_rules)
```
## GUI / Command line
You can download the RNAfbinv wrapper from [RNAfbinv2.0 git repository](https://github.com/matandro/RNAsfbinv/)<br/>
The main folder includes python code to run the GUI / command line and a configuration file:
* RNAfbinv.py - A GUI wrapper for RNAfbinv2.0
* RNAfbinvCL.py - A command line wrapper for RNAfbinv2.0
* **Required** varna_generator.py - Used to generate images based on [VARNA](http://varna.lri.fr/ "VARNA rna homepage")
* **Required** config.ini - Configuration file with paths to required software (information below).
* **Required** img folder with NoImage.png - used in GUI as a placeholder
If you remove the VARNA jar or do not have java installed, images will not be generated but the design process will proceed normally.<br/><br/>
To specify [vienna package](https://www.tbi.univie.ac.at/RNA/ "The ViennaRNA Package homepage") binary folder please update the 'VIENNA' parameter in config.ini (or set VIENNA_PATH environment variable)<br/>
To specify Java binary folder please update the 'JAVA' parameter in config.ini (or set JAVA_PATH environment variable)<br/>
To specify [VARNA](http://varna.lri.fr/ "VARNA rna homepage")'s jar file please update the 'VARNA' parameter in config.ini (or set VARNA_PATH environment variable)<br/>
Note that if the java or vienna package binaries are in your environment variables you may leave it empty.
Example to a valid config.ini file which has java installed and within the system's path:
```
[PATH]
VIENNA=~/ViennaRNA/bin/
#JAVA=
VARNA=~/VARNA/VARNAv3-93.jar
```
### Command line arguments:
```
usage: RNAfbinvCL.py [-h] [-l LOG_OUTPUT] [--verbose | --debug]
[-p {MFE,centroid}] [-i ITERATIONS] [--seed SEED]
[-t LOOK_AHEAD] [--reduced_bi REDUCED_BI] [-e]
[--seq_motif] [-m MOTIF_LIST] [-s STARTING_SEQUENCE | -r]
[--length LENGTH] [-f INPUT_FILE]
optional arguments:
-h, --help show this help message and exit
-l LOG_OUTPUT, --log_output LOG_OUTPUT
Path to output log file. (default: None)
--verbose Increase output verbosity. (default: False)
--debug Debug level logging. (default: False)
-p {MFE,centroid}, --structure_type {MFE,centroid}
uses RNAfold centroid or MFE folding. (default: MFE)
-i ITERATIONS, --iterations ITERATIONS
Sets the number of simulated annealing iterations.
(default: 100)
--seed SEED Random seed used in the random number generator.
(default: None)
-t LOOK_AHEAD, --look_ahead LOOK_AHEAD
Number of look head mutation attempts for each
iteration. (default: 4)
--reduced_bi REDUCED_BI
Remove extra penalty for removal or addition of bulges
and interior loops under the given size. Alignment
penalties still occur. (default: 0)
-e, --circular Designs a circular RNA. (default: False)
--seq_motif Enables increased penalty for insertion or deletions
within marked regions (lower case characters in
sequence constraint). The feature was added to control
multi base sequence constraints (sequence motifs).
Only valid within a specific structural motif.
(default: False)
-m MOTIF_LIST, --motif_list MOTIF_LIST
A comma separated list of motifs that are targeted for
preservation with size.Single motif format: <motif
No>[M|H|E|I|S|B]<motif No of bases>. Use
rnafbinv.ListMotifs.list_motifs(structure) to retrieve
a list of legal motifs for a given structure.
(default: [])
-s STARTING_SEQUENCE, --starting_sequence STARTING_SEQUENCE
The initial sequence for the simulated annealing
process in IUPAC nucleotide codes. (default: None)
-r, --random_start Start simulated annealing with a random sequence.
(default: False)
--length LENGTH Maximum variation in result length compared to target
structure. (default: 0)
-f INPUT_FILE Path of ini file that includes mandatory information.
Some options can also be set via file. command line
options take precedence. (default: None)
```
### Input file format (the '-f' parameter):
```
# mandatory
TARGET_STRUCTURE=<target structure>
TARGET_SEQUENCE=<target sequence>
# optional
TARGET_ENERGY=<target energy>
TARGET_MR=<target mutational robustness>
SEED=<random seed>
STARTING_SEQUENCE=<starting sequence>
ITERATION=<number of simulated annealing iterations>
```
## Webserver
RNAfbinv2.0 can be found in a web server combined with incaRNAtion. The webserver generates starting seeds using incaRNAtion global sampling algorithm.<br/>
The seed sequences are then sent to RNAfbinv2.0 for design. [incaRNAfbinv web server](https://www.cs.bgu.ac.il/incaRNAfbinv/ "incaRNAtion & RNAfbinv")
| /rnafbinv-2.0.3.tar.gz/rnafbinv-2.0.3/README.md | 0.409811 | 0.842928 | README.md | pypi |
<p align="center">
<h1 align="center">
RNAIndel
</h1>
<p align="center">
<a href="https://github.com/stjude/RNAIndel" target="_blank">
<img alt="Status"
src="https://img.shields.io/badge/status-active-success.svg" />
</a>
<a href="https://github.com/stjude/RNAIndel/issues" target="_blank">
<img alt="Github Issues"
src="https://img.shields.io/github/issues/stjude/RNAIndel" />
</a>
<a href="https://github.com/stjude/RNAIndel/pulls" target="_blank">
<img alt="Pull Requests"
src="https://img.shields.io/github/issues-pr/stjude/RNAIndel" />
</a>
<a href="https://github.com/stjude/RNAIndel/blob/master/LICENSE.md" target="_blank">
<img alt="License: MIT"
src="https://img.shields.io/badge/License-MIT-blue.svg" />
</a>
<a href="https://badge.fury.io/py/rnaindel" target="_blank">
<img alt="PyPI version"
src="https://badge.fury.io/py/rnaindel.png" />
</a>
<br />
<a href="https://github.com/stjude/RNAIndel/actions?query=workflow%3ADocumentation" target="_blank">
<img alt="Actions: Documentation Status"
src="https://github.com/stjude/RNAIndel/workflows/Documentation/badge.svg" />
</a>
<a href="https://github.com/stjude/RNAIndel/actions?query=workflow%3APackage" target="_blank">
<img alt="Actions: Package Status"
src="https://github.com/stjude/RNAIndel/workflows/Package/badge.svg" />
</a>
</p>
<p align="center">
RNAIndel calls coding indels from tumor RNA-Seq data and classifies them as somatic, germline, and artifactual. RNAIndel supports GRCh38 and 37. <br>
<br />
<a href="https://stjude.github.io/RNAIndel/"><strong>Explore the docs »</strong></a>
<br />
<a href="https://doi.org/10.1093/bioinformatics/btz753"><strong>Read the paper »</strong></a>
<br />
<br />
<a href="https://github.com/stjude/RNAIndel/issues/new?assignees=&labels=&template=feature_request.md&title=Descriptive%20Title&labels=enhancement">Request Feature</a>
|
<a href="https://github.com/stjude/RNAIndel/issues/new?assignees=&labels=&template=bug_report.md&title=Descriptive%20Title&labels=bug">Report Bug</a>
<br />
⭐ Consider starring the repo! ⭐
<br />
</p>
</p>
## What's new in Version 3
New implementation with [indelpost](https://github.com/stjude/indelPost), an indel realigner/phaser.
* [faster analysis](#benchmarking) (typically < 20 min with 8 cores)
* somatic complex indel calling in RNA-Seq
* ensemble calling with your own caller (e.g., GATK HaplotypeCaller/MuTect2)
* improved sensitivity for homopolymer indels by error-profile outlier analysis
## Quick Start
RNAIndel can be executed via Docker or run locally, downloadable via PyPI.
### Docker
We publish our latest docker builds on GitHub. You can run the latest code base by running the following command
```
> docker run --rm -v $(pwd):/data ghcr.io/stjude/rnaindel:latest
```
If you want to have a more native feel, you can add an alias to your shell's rc file.
```
> alias rnaindel='docker run --rm -v $(pwd):/data ghcr.io/stjude/rnaindel:latest'
```
Note: if its the first time you are executing the `docker run` command, you will see the output of docker downloading the image
### PyPI
RNAIndel depends on [python>=3.6.0](https://www.python.org/downloads/) and [java>=1.8.0](https://www.java.com/en/download/).<br>
Installing via the pip command will install the following packages:
* [indelpost>=0.0.4](https://github.com/stjude/indelPost)
* [pysam>=0.15.0](https://github.com/pysam-developers)
* [cython>=0.29.12](https://cython.org/)
* [numpy>=1.16.0](https://numpy.org/)
* [ssw-py==0.2.6](https://github.com/Wyss/ssw-py)
* [pandas>=0.23.0](https://pandas.pydata.org/)
* [scikit-learn>=0.22.0](http://scikit-learn.org/stable/install.html#)
```
> pip install indelpost --no-binary indelpost --no-build-isolation
> pip install rnaindel
```
Test the installation.
```
> rnaindel -h
usage: rnaindel <subcommand> [<args>]
subcommands are:
PredictIndels Predict somatic/germline/artifact indels from tumor RNA-Seq data
CalculateFeatures Calculate and report features for training
Train Perform model training
CountOccurrence Count occurrence within cohort to filter false somatic predictions
positional arguments:
subcommand PredictIndels, CalculateFeatures, Train, CountOccurrence
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
```
### DataPackage
Download data package (version 3 is not compatible with the previous data package).
```
#GRCh38
curl -LO http://ftp.stjude.org/pub/software/RNAIndel/data_dir_grch38.v3.tar.gz
tar -zxf data_dir_grch38.v3.tar.gz
#GRCh37
curl -LO http://ftp.stjude.org/pub/software/RNAIndel/data_dir_grch37.v3.tar.gz
tar -zxf data_dir_grch37.v3.tar.gz
```
## Usage
RNAIndel has 4 subcommands:
* ```PredictIndels``` analyze RNA-Seq data for indel discovery
* ```CalculateFeatures``` calculate features for training
* ```Train``` train models with user's dataset
* ```CountOccurrence``` annotate over-represented somatic predictions
Subcommands are invoked:
```
> rnaindel subcommand [subcommand-specific options]
```
### Discover somatic indels
#### Input BAM file
RNAIndel expects [STAR](https://academic.oup.com/bioinformatics/article/29/1/15/272537) 2-pass mapped BAM file with sorted by coordinate
and [MarkDuplicates](https://broadinstitute.github.io/picard/command-line-overview.html#MarkDuplicates). Further preprocessing such as
indel realignment may prevent desired behavior.
#### Standard calling
This mode uses the built-in caller to analyze simple and complex indels.
```
> rnaindel PredictIndels -i input.bam -o output.vcf -r ref.fa -d data_dir -p 8 (default 1)
```
#### Ensemble calling
Indels in the exernal VCF (supplied by -v) are integrated to the callset by the built-in caller to boost performance.<br>
See [demo](./docs/walkthrough/README.md).
```
> rnaindel PredictIndels -i input.bam -o output.vcf -r ref.fa -d data_dir -v gatk.vcf.gz -p 8
```
#### Options
* ```-i``` input [STAR](https://academic.oup.com/bioinformatics/article/29/1/15/272537)-mapped BAM file (required)
* ```-o``` output VCF file (required)
* ```-r``` reference genome FASTA file (required)
* ```-d``` [data directory](#datapackage) contains trained models and databases (required)
* ```-v``` VCF file (must be .vcf.gz + index) from user's caller. (default: None)
* ```-p``` number of cores (default: 1)
* <details>
<summary>other options (click to open)</summary><p>
* ```-q``` STAR mapping quality MAPQ for unique mappers (default: 255)
* ```-m``` maximum heap space (default: 6000m)
* ```--region``` target genomic region. specify by chrN:start-stop (default: None)
* ```--pon``` user's defined list of non-somatic calls such as PanelOfNormals. Supply as .vcf.gz with index (default: None)
* ```--include-all-external-calls``` set to include all indels in VCF file supplied by -v. (default: False. Use only calls with PASS in FILTER)
* ```--skip-homopolyer-outlier-analysis``` no outlier analysis for homopolymer indels (repeat > 4) performed if set. (default: False)
</p></details>
#### Benchmarking
Using pediatric tumor RNA-Seq samples ([SJC-DS-1003](https://platform.stjude.cloud/data/cohorts#), n=77),
the time and memory consumption was benchmarked for ensemble calling with 8 cores (i.e., -p 8)
on a server with 32-core AMD EPYC 7542 CPU @2.90 GHz.
| | Run time (wall) | Max memory |
|------ | ------------- | ---------- |
|median | 374 sec | 18.6 GB |
|max | 1388 sec | 23.5 GB |
### Train RNAIndel
Users can [train](./docs/training) RNAIndel with their own training set.
### Annotate over-represented putative somatic indels
Check [occurrence](./docs/filtering) to filter probable false positives.
## Contact
* kohei.hagiwara[AT]stjude.org
Please let me know what your experience with RNAIndel was like (even bad comments are welcome)!
## Citation
Published in [Bioinformatics](https://doi.org/10.1093/bioinformatics/btz753)
| /rnaindel-3.0.9.tar.gz/rnaindel-3.0.9/README.md | 0.431944 | 0.93611 | README.md | pypi |
import json
import logging
from pathlib import Path
from typing import Union, List
import jsonschema
import randname.error
from . import set_logging_level
set_logging_level("error")
class Database:
schema_info_json = {
"type": "object",
"title": "info.json schema",
"description": "Schema for info.json file",
"properties": {
"country": {"type": "string"},
"first_names": {
"type": "array",
"items": {"type": "string"},
"minItems": 1,
},
"last_names": {"type": "array", "items": {"type": "string"}, "minItems": 1},
},
"required": ["country", "first_names", "last_names"],
"additionalProperties": False,
}
schema_name_json = {
"type": "object",
"title": "first_names and last_names schema",
"description": "Schema for last and first names files",
"properties": {
"Names": {"type": "array", "items": {"type": "string"}, "minItems": 1},
"Totals": {"type": "array", "items": {"type": "number"}, "minItems": 1},
},
"required": ["Names", "Totals"],
"additionalProperties": False,
}
draft_validator_info = jsonschema.Draft7Validator(schema_info_json)
draft_validator_name = jsonschema.Draft7Validator(schema_name_json)
    def __init__(self, path_to_database: Union[Path, str]):
        """Database container.

        Stores the path without validating it; call :meth:`validate`
        explicitly (validation can be slow on large databases).

        :param path_to_database: path to directory with database
        :type path_to_database: Union[Path, str]
        """
        # validation is deliberately skipped here — see class docs/validate()
        # self.validate_database(path_to_database)
        self._path = Path(path_to_database)
    @property
    def path(self) -> Path:
        """Path to the database directory.

        :return: path to database
        :rtype: Path
        """
        return self._path
    @path.setter
    def path(self, new_path: Union[Path, str]) -> None:
        """Set a new database path, validating its structure first.

        Unlike ``__init__``, assigning to ``path`` runs :meth:`validate`
        and will raise if the new directory is not a valid database.
        """
        self.validate(new_path)
        self._path = Path(new_path)
def validate(self, path_to_database: Union[Path, str] = None) -> None:
"""Check if database has valid structure and it's files are
correctly formatted.
..warning::
Validating database might take some time, depends how large is the database.
:param path_to_database: path to database
:type path_to_database: Union[Path, str]
:raises randname.error.DirectoryDoesNotExist: raise when directory with database does not exist.
:raises randname.error.MissingInfoFile: raise when info.json is missing.
:raises randname.error.GenderMismatch: raise when gender information in info.json does not match to what is in directories.
:raises randname.error.FileNameDoesNotMatchPattern: raise when file with names doesn't mach naming convention.
:raises jsonschema.ValidationError: raise when json file doesn't match pattern.
"""
invalid_name_pattern = []
invalid_json_files = []
if path_to_database:
path = Path() / path_to_database
else:
path = self._path
if not path.is_dir():
raise randname.error.DirectoryDoesNotExist(path)
# traverse directory
for country_directory in path.iterdir():
path_to_info_file = Path() / country_directory / "info.json"
first_names_dir = Path() / country_directory / "first_names"
last_names_dir = Path() / country_directory / "last_names"
# check for required files
if not path_to_info_file.exists():
raise randname.error.MissingInfoFile(path_to_info_file)
if not first_names_dir.exists():
raise randname.error.DirectoryDoesNotExist(first_names_dir)
if not last_names_dir.exists():
raise randname.error.DirectoryDoesNotExist(last_names_dir)
# check info.json
with open(path_to_info_file, "r", encoding="utf-8") as info_file:
json_file = json.load(info_file)
first_names_sex = set(json_file["first_names"])
last_names_sex = set(json_file["last_names"])
try:
self._validate_json_schema(self.schema_info_json, path_to_info_file)
except jsonschema.ValidationError:
logging.error(f"Invalid info file: {info_file}")
invalid_json_files.append(info_file)
# check if content fo info.json match the content of first_names and last_names directories
sex_in_first_names_dir = set(
[path.name.split("_")[1] for path in first_names_dir.iterdir()]
)
sex_in_last_names_dir = set(
[path.name.split("_")[1] for path in last_names_dir.iterdir()]
)
diff = first_names_sex.difference(sex_in_first_names_dir)
if diff:
raise randname.error.GenderMismatch(
f"Info file: {path_to_info_file}, defines: {first_names_sex}, but there is {sex_in_first_names_dir} in firs_names directory"
)
diff = last_names_sex.difference(sex_in_last_names_dir)
if diff:
raise randname.error.GenderMismatch(
f"Info file: {path_to_info_file}, defines: {last_names_sex}, but there is {sex_in_last_names_dir} in firs_names directory"
)
# TODO: refactor into smaller functions
# check first_names
glob_pattern = f"[1-9]*_[{''.join(first_names_sex)}]"
for f in first_names_dir.iterdir():
match = f.match(glob_pattern)
if not match:
logging.error(f"Invalid name pattern: {f}")
invalid_name_pattern.append(f)
try:
self._validate_json_schema(self.schema_name_json, f)
except jsonschema.ValidationError:
logging.error(f"Invalid content pattern: {f}")
invalid_json_files.append(f)
# check last_names
glob_pattern = f"[1-9]*_[{''.join(last_names_sex)}]"
for f in last_names_dir.iterdir():
if not f.match(glob_pattern):
logging.error(f"Invalid name pattern: {f}")
invalid_name_pattern.append(f)
try:
self._validate_json_schema(self.schema_name_json, f)
except jsonschema.ValidationError:
logging.error(f"Invalid content pattern: {f}")
invalid_json_files.append(f)
if invalid_json_files:
raise jsonschema.ValidationError(invalid_json_files)
if invalid_name_pattern:
raise randname.error.FileNameDoesNotMatchPattern(invalid_name_pattern)
def _validate_json_schema(
self, schema, path_to_json: Union[Path, str] = None
) -> None:
if path_to_json:
path = path_to_json
else:
path = self._path
with open(path, "r", encoding="utf-8") as f:
json_content = json.load(f)
if schema is self.schema_name_json:
self.draft_validator_name.validate(json_content)
if schema is self.schema_info_json:
self.draft_validator_info.validate(json_content)
jsonschema.validate(json_content, schema) | /rname-0.3.7.tar.gz/rname-0.3.7/randname/database.py | 0.568296 | 0.236946 | database.py | pypi |
<p align="center"><a href="https://rnasamba.lge.ibi.unicamp.br/"><img src="https://raw.githubusercontent.com/apcamargo/RNAsamba/master/logo.png" width="350rem"></a></p>
- [Overview](#overview)
- [Documentation](#documentation)
- [Installation](#installation)
- [Download the pre-trained models](#download-the-pre-trained-models)
- [Usage](#usage)
- [`rnasamba train`](#rnasamba-train)
- [`rnasamba classify`](#rnasamba-classify)
- [Examples](#examples)
- [Using the Docker image](#using-the-docker-image)
- [Citation](#citation)
## Overview
RNAsamba is a tool for computing the coding potential of RNA sequences using a neural network classification model. A description of the algorithm and benchmarks comparing RNAsamba to other tools can be found in our [article](#citation).
## Web version
RNAsamba can be used through a minimal web interface that is freely available online at [https://rnasamba.lge.ibi.unicamp.br/](https://rnasamba.lge.ibi.unicamp.br/).
## Documentation
A complete documentation for RNAsamba can be found at [https://apcamargo.github.io/RNAsamba/](https://apcamargo.github.io/RNAsamba/).
## Installation
There are two ways to install RNAsamba:
- Using pip:
```
pip install rnasamba
```
- Using conda:
```
conda install -c bioconda rnasamba
```
## Download the pre-trained models
We provide two HDF5 files containing the weights of classification models trained with human transcript sequences. The first model (`full_length_weights.hdf5`) was trained exclusively with full-length transcripts and can be used in datasets comprised mostly or exclusively of complete transcript sequences. The second model (`partial_length_weights.hdf5`) was trained with both complete and truncated transcripts and is preferred in cases where there is a significant fraction of partial-length sequences, such as transcriptomes assembled using *de novo* approaches.
Both models achieve high classification performance in transcripts from a variety of different species (see [reference](#citation)).
You can download the files by executing the following commands:
```
curl -O https://raw.githubusercontent.com/apcamargo/RNAsamba/master/data/full_length_weights.hdf5
curl -O https://raw.githubusercontent.com/apcamargo/RNAsamba/master/data/partial_length_weights.hdf5
```
In case you want to train your own model, you can follow the steps shown in the [Examples](#examples) section.
## Usage
RNAsamba provides two commands: `rnasamba train` and `rnasamba classify`.
### `rnasamba train`
`rnasamba train` is the command for training a new classification model from a training dataset and saving the network weights into an HDF5 file. The user can specify the batch size (`--batch_size`) and the number of training epochs (`--epochs`). The user can also choose to activate early stopping (`--early_stopping`), which reduces training time and can help avoid overfitting.
```
usage: rnasamba train [-h] [-s EARLY_STOPPING] [-b BATCH_SIZE] [-e EPOCHS]
[-v {0,1,2,3}]
output_file coding_file noncoding_file
Train a new classification model.
positional arguments:
output_file output HDF5 file containing weights of the newly
trained RNAsamba network.
coding_file input FASTA file containing sequences of protein-
coding transcripts.
noncoding_file input FASTA file containing sequences of noncoding
transcripts.
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-s EARLY_STOPPING, --early_stopping EARLY_STOPPING
number of epochs after lowest validation loss before
stopping training (a fraction of 0.1 of the training
set is set apart for validation and the model with the
lowest validation loss will be saved). (default: 0)
-b BATCH_SIZE, --batch_size BATCH_SIZE
number of samples per gradient update. (default: 128)
-e EPOCHS, --epochs EPOCHS
number of epochs to train the model. (default: 40)
-v {0,1,2,3}, --verbose {0,1,2,3}
print the progress of the training. 0 = silent, 1 =
current step, 2 = progress bar, 3 = one line per
epoch. (default: 0)
```
### `rnasamba classify`
`rnasamba classify` is the command for computing the coding potential of transcripts contained in an input FASTA file and classifying them into coding or non-coding. Optionally, the user can specify an output FASTA file (`--protein_fasta`) in which RNAsamba will write the translated sequences of the predicted coding ORFs. If multiple weight files are provided, RNAsamba will ensemble their predictions into a single output.
```
usage: rnasamba classify [-h] [-p PROTEIN_FASTA] [-v {0,1}]
output_file fasta_file weights [weights ...]
Classify sequences from a input FASTA file.
positional arguments:
output_file output TSV file containing the results of the
classification.
fasta_file input FASTA file containing transcript sequences.
weights input HDF5 file(s) containing weights of a trained
RNAsamba network (if more than a file is provided, an
ensembling of the models will be performed).
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-p PROTEIN_FASTA, --protein_fasta PROTEIN_FASTA
output FASTA file containing translated sequences for
the predicted coding ORFs. (default: None)
-v {0,1}, --verbose {0,1}
print the progress of the classification. 0 = silent,
1 = current step. (default: 0)
```
## Examples
- Training a new classification model using *Mus musculus* data downloaded from GENCODE:
```
rnasamba train -v 2 mouse_model.hdf5 gencode.vM21.pc_transcripts.fa gencode.vM21.lncRNA_transcripts.fa
```
- Classifying sequences using our pre-trained model (`partial_length_weights.hdf5`) and saving the predicted proteins into a FASTA file:
```
rnasamba classify -p predicted_proteins.fa classification.tsv input.fa partial_length_weights.hdf5
head classification.tsv
sequence_name coding_score classification
ENSMUST00000054910 0.99022 coding
ENSMUST00000059648 0.84718 coding
ENSMUST00000055537 0.99713 coding
ENSMUST00000030975 0.85189 coding
ENSMUST00000050754 0.02638 noncoding
ENSMUST00000008011 0.14949 noncoding
ENSMUST00000061643 0.03456 noncoding
ENSMUST00000059704 0.89232 coding
ENSMUST00000036304 0.03782 noncoding
```
## Using the Docker image
```
docker pull antoniopcamargo/rnasamba
# Training example:
docker run -ti --rm -v "$(pwd):/app" antoniopcamargo/rnasamba train -v 2 mouse_model.hdf5 gencode.vM21.pc_transcripts.fa gencode.vM21.lncRNA_transcripts.fa
# Classification example:
docker run -ti --rm -v "$(pwd):/app" antoniopcamargo/rnasamba classify -p predicted_proteins.fa classification.tsv input.fa full_length_weights.hdf5
```
## Citation
> Camargo, Antonio P., Vsevolod Sourkov, and Marcelo F. Carazzolle. "[RNAsamba: coding potential assessment using ORF and whole transcript sequence information](https://www.biorxiv.org/content/10.1101/620880v1)" *BioRxiv* (2019).
| /rnasamba-0.2.3.tar.gz/rnasamba-0.2.3/README.md | 0.724968 | 0.974988 | README.md | pypi |
import os
import shutil
import textwrap
from multiprocessing import cpu_count
from subprocess import call
import pandas as pd
from rnaseq_lib.docker import fix_directory_ownership
from rnaseq_lib.tissues import get_tumor_samples, get_gtex_samples, get_normal_samples, map_genes
from rnaseq_lib.utils import mkdir_p
def run_deseq2(df_path, tissue, output_dir, gtex=True, cores=None):
    """
    Runs DESeq2 on a specific tissue

    Writes sample/condition vectors and a generated R script into
    ``<output_dir>/work_dir``, runs the script inside the ``jvivian/deseq2``
    Docker image, fixes file ownership, maps gene IDs in the output TSV, and
    removes the workspace. Requires Docker on the host.

    :param str df_path: Path to samples by genes dataframe
    :param str tissue: Tissue to run
    :param str output_dir: Full path to output directory
    :param bool gtex: If True uses GTEx as normal tissue. Otherwise uses TCGA Normal
    :param int cores: Number of cores to use. Defaults to # of cores on machine.
    """
    # Make workspace directories
    work_dir = os.path.join(output_dir, 'work_dir')
    mkdir_p(work_dir)

    # Get samples for tissue
    # NOTE(review): '-' is replaced with '.' — presumably to match R's column
    # name mangling of the dataframe header; confirm against the input TSV.
    tumor = [x.replace('-', '.') for x in get_tumor_samples(tissue)]
    normal = get_gtex_samples(tissue) if gtex else get_normal_samples(tissue)
    normal = [x.replace('-', '.') for x in normal]

    # Write out vectors: tissue.vector lists all samples (tumor first),
    # disease.vector the matching T/N condition labels.
    tissue_vector = os.path.join(work_dir, 'tissue.vector')
    with open(tissue_vector, 'w') as f:
        f.write('\n'.join(tumor + normal))
    disease_vector = os.path.join(work_dir, 'disease.vector')
    with open(disease_vector, 'w') as f:
        f.write('\n'.join(['T' if x in tumor else 'N' for x in tumor + normal]))

    # Write out script ({cores} / {tissue} are filled by str.format below)
    cores = cores if cores else int(cpu_count())
    script_path = os.path.join(work_dir, 'deseq2.R')
    with open(script_path, 'w') as f:
        f.write(
            textwrap.dedent("""
            library('DESeq2'); library('data.table'); library('BiocParallel')
            register(MulticoreParam({cores}))

            # Argument parsing
            args <- commandArgs(trailingOnly = TRUE)
            df_path <- args[1]
            tissue_path <- args[2]
            disease_path <- args[3]
            tissue <- '{tissue}'
            output_dir <- '/data/'

            # Read in vectors
            tissue_vector <- read.table(tissue_path)$V1
            disease_vector <- read.table(disease_path)$V1

            # Read in table and process
            n <- read.table(df_path, sep='\\t', header=1, row.names=1)
            sub <- n[, colnames(n)%in%tissue_vector]
            setcolorder(sub, as.character(tissue_vector))

            # Preprocessing
            countData <- round(sub)
            colData <- data.frame(disease=disease_vector, row.names=colnames(countData))
            y <- DESeqDataSetFromMatrix(countData = countData, colData = colData, design = ~ disease)

            # Run DESeq2
            y <- DESeq(y, parallel=TRUE)
            res <- results(y, parallel=TRUE)
            summary(res)

            # Write out table
            resOrdered <- res[order(res$padj),]
            res_name <- paste(tissue, '.tsv', sep='')
            res_path <- paste(output_dir, res_name, sep='/')
            write.table(as.data.frame(resOrdered), file=res_path, col.names=NA, sep='\\t', quote=FALSE)

            # MA Plot
            ma_name <- paste(tissue, '-MA.pdf', sep='')
            ma_path <- paste(output_dir, ma_name, sep='/')
            pdf(ma_path, width=7, height=7)
            plotMA(res, main='DESeq2')
            dev.off()

            # Dispersion Plot
            disp_name <- paste(tissue, '-dispersion.pdf', sep='')
            disp_path <- paste(output_dir, disp_name, sep='/')
            pdf(disp_path, width=7, height=7)
            plotDispEsts( y, ylim = c(1e-6, 1e1) )
            dev.off()

            # PVal Hist
            hist_name <- paste(tissue, '-pval-hist.pdf', sep='')
            hist_path <- paste(output_dir, hist_name, sep='/')
            pdf(hist_path, width=7, height=7)
            hist( res$pvalue, breaks=20, col="grey" )
            dev.off()

            # Ratios plots
            qs <- c( 0, quantile( res$baseMean[res$baseMean > 0], 0:7/7 ) )
            bins <- cut( res$baseMean, qs )
            levels(bins) <- paste0("~",round(.5*qs[-1] + .5*qs[-length(qs)]))
            ratios <- tapply( res$pvalue, bins, function(p) mean( p < .01, na.rm=TRUE ) )
            ratio_name <- paste(tissue, '-ratios.pdf', sep='')
            ratio_path <- paste(output_dir, ratio_name, sep='/')
            pdf(ratio_path, width=7, height=7)
            barplot(ratios, xlab="mean normalized count", ylab="ratio of small $p$ values")
            dev.off()
            """.format(cores=cores, tissue=tissue)))

    # Call DESeq2: output_dir is mounted as /data, the dataframe's directory as /df
    docker_parameters = ['docker', 'run',
                         '-v', '{}:/data'.format(output_dir),
                         '-v', '{}:/df'.format(os.path.dirname(df_path)),
                         'jvivian/deseq2']
    parameters = ['/data/work_dir/deseq2.R',
                  '/df/{}'.format(os.path.basename(df_path)),
                  '/data/{}'.format(os.path.join('work_dir', 'tissue.vector')),
                  '/data/{}'.format(os.path.join('work_dir', 'disease.vector'))]
    print '\nCalling: {}\n'.format(' '.join(docker_parameters + parameters))
    call(docker_parameters + parameters)

    # Fix output of files (docker writes files as root)
    fix_directory_ownership(output_dir=output_dir, tool='jvivian/deseq2')

    # Add gene names to output
    output_tsv = os.path.join(output_dir, '{}.tsv'.format(tissue))
    df = map_genes(pd.read_csv(output_tsv, index_col=0, sep='\t'))
    df.to_csv(output_tsv, sep='\t')

    # Clean up
    shutil.rmtree(work_dir)
from collections import defaultdict
import gzip
import pandas as pd
import re
# Standard first eight columns of a GTF record; column nine (the attribute
# field) is handled separately by parse().
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score',
              'strand', 'frame']
# Pre-compiled patterns for splitting the attribute field.
R_SEMICOLON = re.compile(r'\s*;\s*')       # separates attribute entries
R_COMMA = re.compile(r'\s*,\s*')           # separates multi-valued entries
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')  # separates a key from its value
def dataframe_from_gtf(filename):
    """Open an optionally gzipped GTF file and return a pandas.DataFrame.

    Columns appear in the order their keys are first encountered; records
    missing a key get None for that column.
    """
    columns = defaultdict(list)

    for row_count, record in enumerate(lines(filename)):
        # Back-fill None for any column first seen on this record.
        for key in record:
            if key not in columns:
                columns[key] = [None] * row_count

        # Append this record's value (or None) to every known column.
        for key in columns:
            columns[key].append(record.get(key, None))

    return pd.DataFrame(columns)
def lines(filename):
    """Open an optionally gzipped GTF file and generate a dict for each line."""
    opener = gzip.open if filename.endswith('.gz') else open
    with opener(filename) as handle:
        for raw_line in handle:
            # Skip comment/header lines.
            if raw_line.startswith('#'):
                continue
            yield parse(raw_line)
def parse(line):
    """Parse a single GTF line and return a dict of columns and attributes."""
    result = {}
    fields = line.rstrip().split('\t')

    # The first eight tab-separated fields are fixed-position columns.
    for position, column in enumerate(GTF_HEADER):
        result[column] = _get_value(fields[position])

    # Field nine holds the attributes, e.g. key="value"; key="value"; ...
    attributes = [piece for piece in re.split(R_SEMICOLON, fields[8]) if piece.strip()]
    for index, attribute in enumerate(attributes, 1):
        try:
            key, _, value = re.split(R_KEYVALUE, attribute, 1)
        except ValueError:
            # Bare value with no key: synthesize a positional INFO key.
            key = 'INFO{}'.format(index)
            value = attribute
        # Ignore the attribute entirely if there is no value.
        if value:
            result[key] = _get_value(value)

    return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value | /rnaseq-lib-1.0a21.tar.gz/rnaseq-lib-1.0a21/src/rnaseq_lib/gtf/__init__.py | 0.57821 | 0.303919 | __init__.py | pypi |
import os
import pickle
import pandas as pd
from rnaseq_lib.utils import flatten
# Absolute directory of this module; used below to locate the bundled data/ files.
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
def return_samples():
    """
    Returns sample dictionary which maps TCGA and GTEx samples to a tissue.

    Synapse ID: syn10296681

    :return: Tissues are keys are list of samples are values
    :rtype: dict(str, list(str))
    """
    # Context manager closes the pickle file handle (the original left it open).
    with open(os.path.join(os.path.dirname(__location__), 'data/samples.pickle'), 'rb') as f:
        return pickle.load(f)
def get_gene_map():
    """
    Dictionary mapping gene ID to gene name

    :return: Gene map
    :rtype: dict
    """
    # Context manager closes the pickle file handle (the original left it open).
    with open(os.path.join(os.path.dirname(__location__), 'data/gene_map.pickle'), 'rb') as f:
        return pickle.load(f)
def map_genes(genes, strict=True):
"""
Maps gene IDs to gene names
:param list genes: ENSEMBL gene IDs to be mapped to gene names
:param bool strict: If true, raies a KeyError if gene is not found in the gene_map
:return: Mapped genes
:rtype: list
"""
gene_map = get_gene_map()
if strict:
return [gene_map[x.split('.')[0]] for x in genes]
else:
mapped = []
for g in genes:
try:
mapped.append(gene_map[g.split('.')[0]])
except KeyError:
print '{} not found in gene_map, leaving as is'
mapped.append(g)
return mapped
def get_mab_targets():
    """
    Returns sorted list of MAB cancer drug targets

    :return: Sorted gene list
    :rtype: list
    """
    path = os.path.join(os.path.dirname(__location__), 'data/cancer-MAB-gene-targets.txt')
    # Context manager closes the file handle (the original left it open).
    with open(path, 'r') as f:
        return sorted(x.strip() for x in f)
def get_ucsf_genes():
    """
    Returns sorted list of UCSF genes

    :return: Sorted gene list
    :rtype: list
    """
    path = os.path.join(os.path.dirname(__location__), 'data/UCSF-genes.csv')
    # Context manager closes the file handle (the original left it open).
    with open(path, 'r') as f:
        return sorted(x.strip() for x in f)
def get_civic_genes():
    """
    Returns sorted list of genes from CIViC

    :return: Sorted gene list
    :rtype: list
    """
    path = os.path.join(os.path.dirname(__location__), 'data/civic-genes.txt')
    # Context manager closes the file handle (the original left it open).
    with open(path, 'r') as f:
        return sorted(x.strip() for x in f)
def get_ucsf_subset(df):
    """
    Subset a "Genes by Samples" DataFrame down to the UCSF gene list.

    :param pd.DataFrame df: Input Dataframe in the format of "Genes by Samples"
    :return: Subset of Dataframe that only includes UCSF genes
    :rtype: pd.DataFrame
    """
    # Translate the index from ENSEMBL IDs to gene names first.
    df.index = map_genes(df.index)

    # Keep only UCSF genes that actually appear in the DataFrame.
    present = [gene for gene in get_ucsf_genes() if gene in df.index]
    return df.loc[present]
def get_tumor_samples(tissue):
    """
    Returns TCGA tumor samples for a tissue

    :param str tissue: Tissue to grab TCGA tumor samples from
    :return: List of tumor samples
    :rtype: list
    """
    # Tumor samples are those whose barcode ends in "-01".
    return [sample for sample in return_samples()[tissue] if sample.endswith('-01')]
def get_gtex_samples(tissue):
    """
    Returns GTEx samples for a tissue

    :param str tissue: Tissue to grab GTEx samples from
    :return: List of GTEx samples
    :rtype: list
    """
    # GTEx samples are everything that is not a TCGA barcode.
    return [sample for sample in return_samples()[tissue] if not sample.startswith('TCGA')]
def get_normal_samples(tissue):
    """
    Returns TCGA normal samples for a tissue

    :param str tissue: Tissue to grab TCGA normal samples from
    :return: List of TCGA normal samples
    :rtype: list
    """
    # Normal samples are those whose barcode ends in "-11".
    return [sample for sample in return_samples()[tissue] if sample.endswith('-11')]
def identify_tissue_from_str(content):
    """
    Identifies possible tissue(s) referenced by a given string

    :param str content: Text to examine for terms associated with tissues
    :return: Possible tissues referenced in input string
    :rtype: set(str)
    """
    lowered = content.lower()
    # A tissue matches if any of its associated terms occurs in the text.
    return {tissue for tissue, terms in tissue_disease_mapping().items()
            if any(term in lowered for term in terms)}
def tissue_disease_mapping():
    """
    Maps tissue types to words associated with cancers of that tissue

    :return: Tissue / disease term mapping
    :rtype: dict(str, list(str))
    """
    mapping = {
        'Adrenal': ['adrenal', 'adrenocortical', 'cortical', 'oncocytic', 'myxoid'],
        'Bladder': ['bladder'],
        'Blood': ['blood', 'leukemia', 'lymphoma', 'myeloma', 'hemato'],
        'Bone': ['bone', 'osteosarcoma', 'ewing'],
        'Brain': ['anaplastic', 'astrocytoma', 'neurocytoma', 'choroid', 'plexus',
                  'neuroepithelial', 'ependymal', 'fibrillary', 'giant-cell',
                  'glioblastoma', 'multiforme', 'gliomatosis', 'cerebri',
                  'gliosarcoma', 'hemangiopericytoma', 'medulloblastoma',
                  'medulloepithelioma', 'meningeal', 'neuroblastoma', 'neurocytoma',
                  'oligoastrocytoma', 'optic', 'ependymoma', 'pilocytic',
                  'pinealoblastoma', 'pineocytoma', 'meningioma', 'subependymoma',
                  'retinoblastoma', 'neuro'],
        'Breast': ['breast'],
        'Cervix': ['cervix', 'cervical'],
        'Colon-Small_intestine': ['colon', 'rectal', 'colorectal', 'intestine',
                                  'intestinal', 'bowel'],
        'Esophagus': ['esophagus', 'esophogeal'],
        'Kidney': ['kidney', 'renal', 'nephron', 'nephroma', 'wilm', 'chromophobe'],
        'Liver': ['liver', 'hepatic', 'hepato', 'parenchymal', 'cholang'],
        'Lung': ['lung', 'small-cell', 'non-small-cell', 'small cell',
                 'non small cell', 'non small-cell'],
        'Ovary': ['ovary', 'ovarian', 'endometrioid', 'fallopian', 'cord', 'stromal'],
        'Pancreas': ['pancreas', 'pancreatic', 'cystadenocarcinomas'],
        'Prostate': ['prostate'],
        'Skin-Head': ['head', 'neck', 'skin', 'basal', 'melanoma'],
        'Stomach': ['stomach', 'gastric'],
        'Testis': ['testis', 'testicular', 'testes', 'gonad'],
        'Thyroid': ['thyroid'],
        'Uterus': ['uterus', 'uterine', 'endometrial', 'ureteral', 'gestational'],
    }
    return mapping
def grep_cancer_terms(content, replace_newlines_with_periods=True, comprehensive=False):
    """
    Returns sentences with cancer terms

    :param str content: String containing sentences to check for cancer terms
    :param bool replace_newlines_with_periods: If True, replaces newlines with periods so they count as "sentences"
    :param bool comprehensive: if True, adds all values from tissue_disease_mapping
    :return: Sentences with matches
    :rtype: list(str)
    """
    terms = {'cancer', 'leukemia', 'carcinoma', 'squamous', 'lymphoma',
             'malignant', 'metastasis', 'metastatic', 'sarcoma', 'tumor'}

    # Optionally fold in every tissue-specific disease term as well.
    if comprehensive:
        terms = terms.union(set(flatten(tissue_disease_mapping().values())))

    # Treat line breaks as sentence boundaries if requested.
    if replace_newlines_with_periods:
        content = content.replace('\n', '.')

    # Case-insensitive match: keep a sentence if any term appears in it.
    hits = []
    for sentence in content.split('.'):
        upper_sentence = sentence.upper()
        if any(term.upper() in upper_sentence for term in terms):
            hits.append(sentence)
    return hits
import argparse
import os
import requests
def usage():
    """Build the command-line parser and return the parsed arguments.

    :return: parsed arguments with .manifest and .output attributes
    :rtype: argparse.Namespace
    """
    # Long-form problem statement shown in --help output.
    description = '''
    Problem:
    I choose project BRCA on GDC(https://gdc-portal.nci.nih.gov/projects/t)
    and forward to WXS(1,050 cases, 2,175 bam files). Download manifest file from the summary tab.
    In fact the manifest only contains the file_id, file_name and md5.
    I can't tell sample information like TCGA barcode.
    On the Files tab in the search result of GDC-Portal,
    we can download sample information in form of json in some condition.
    However, I can't get sample information from the BRCA-WXS json file. Use GDC-API files endpoint.
    Task:
    Map file_id to sample sample id through GDC-API
    (https://gdc-docs.nci.nih.gov/API/Users_Guide/Search_and_Retrieval/#filters-specifying-the-quer
    '''
    use = """%(prog)s -i <manifest.txt>"""
    parser = argparse.ArgumentParser(description=description, usage=use)
    # Manifest downloaded from the GDC portal; only the first column (file_id) is used.
    parser.add_argument("-i", "--input", dest='manifest', type=str,
                        help="Input manifest file downloaded from gdc-portal or file contains first column as file_id",
                        required=True)
    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
    parser.add_argument('-o', '--output', type=str, default='./', help='Output location for metadata.')
    args = parser.parse_args()
    return args
def load_manifest(manifest):
    """Extract GDC file_ids from the first column of a manifest file.

    Rows whose first column is not exactly 36 characters long (e.g. the
    header row) are skipped.

    :param str manifest: path to a tab-separated GDC manifest
    :return: list of file_id strings
    :rtype: list
    """
    file_ids = []
    with open(manifest, 'r') as handle:
        for row in handle:
            first_column = row.rstrip().split("\t")[0]
            # GDC file UUIDs are exactly 36 characters.
            if len(first_column) == 36:
                file_ids.append(first_column)
    return file_ids
def make_params(file_ids):
    """Build the GDC files-endpoint POST payload for a list of file_ids.

    Requests TSV output containing paired-end flags, aliquot submitter IDs
    and sample types for every file_id given.

    :param list file_ids: GDC file UUIDs to query
    :return: JSON-serializable request payload
    :rtype: dict
    """
    # The field list must be comma-separated with no spaces after commas.
    fields = (
        "analysis.metadata.read_groups.is_paired_end,"
        "cases.samples.portions.analytes.aliquots.submitter_id,"
        "cases.samples.sample_type"
    )
    return {
        "filters": {
            "op": "in",
            "content": {"field": "files.file_id", "value": file_ids},
        },
        "format": "TSV",
        "fields": fields,
        "size": len(file_ids),
    }
def gdc_api(file_ids, manifest):
    """POST the file_ids to the GDC files endpoint and cache the response.

    The response text is written next to the manifest as
    '<manifest>.map2submitterID' and also returned.

    :param list file_ids: GDC file UUIDs to query
    :param str manifest: path of the source manifest (used to name the cache file)
    :return: response body from the GDC API
    :rtype: str
    """
    files_endpt = "https://gdc-api.nci.nih.gov/files"
    response = requests.post(files_endpt, json=make_params(file_ids))

    # Context manager guarantees the cache file is flushed and closed; opening
    # after the request also avoids leaving an empty file if the request fails.
    output = manifest + '.map2submitterID'
    with open(output, 'w') as output_handle:
        output_handle.write(response.text)
    return response.text
def metadata_from_manifest(manifest, output):
    """Fetch GDC sample metadata for a manifest and write it to *output*.

    :param str manifest: path to the GDC manifest file
    :param str output: path for the metadata TSV
    """
    ids = load_manifest(manifest)
    # gdc_api also caches the same response next to the manifest.
    text = gdc_api(ids, manifest)
    with open(output, 'w') as handle:
        handle.write(text)
def main():
    """CLI entry point: parse arguments, validate paths, write metadata.tsv."""
    args = usage()
    # Explicit errors instead of assert: asserts are stripped under `python -O`.
    if not os.path.isdir(args.output):
        raise IOError('Output location is not a directory: {}'.format(args.output))
    output = os.path.join(args.output, 'metadata.tsv')
    if os.path.exists(output):
        raise IOError('Refusing to overwrite existing file: {}'.format(output))
    metadata_from_manifest(args.manifest, output)


if __name__ == '__main__':
    main()
import os
from subprocess import call
import logging
# Configure the root logger at import so the helpers below can report progress.
logging.basicConfig(level=logging.INFO)
def filter_aligned_reads(bam_path, output_name=None, paired=True):
    """
    Filters bams for aligned reads

    Runs `samtools view` inside the quay.io/ucsc_cgl/samtools Docker image,
    so Docker must be available on the host. -F 0x04 drops unmapped reads;
    -f 0x02 (when paired=True) keeps only properly paired reads.

    :param str bam_path: Path to bam file
    :param str output_name: Defaults to input bam with '.filtered' inserted before the .bam extension
    :param bool paired: Only keep paired reads
    :return: Path to filtered bam
    :rtype: str
    """
    if not output_name:
        output_name = os.path.basename(os.path.splitext(bam_path)[0]) + '.filtered.bam'

    # Mount the bam's directory into the container as /data
    work_dir = os.path.dirname(os.path.abspath(bam_path))
    parameters = ['docker', 'run',
                  '-v', '{}:/data'.format(work_dir),
                  'quay.io/ucsc_cgl/samtools',
                  'view',
                  '-h',
                  '-o', '/data/{}'.format(output_name),
                  '-F', '0x04']  # BUGFIX: dropped stray trailing space in the flag value
    if paired:
        parameters.extend(['-f', '0x02'])
    parameters.append(os.path.join('/data', os.path.basename(bam_path)))

    # Skip the (slow) samtools run if the filtered bam already exists
    output_path = os.path.join(work_dir, output_name)
    if not os.path.exists(output_path):
        logging.info('Filtering bam')
        call(parameters)
    else:
        logging.info('Skipping. Filtered bam exists: {}'.format(output_path))
    return output_path
def make_test_bam(bam_path, region='chr6', output_name='chr6.test.bam', clean_input_bam=False):
    """
    Makes a smaller bam based on the region argument passed

    Runs samtools inside the quay.io/ucsc_cgl/samtools Docker image, so
    Docker must be available on the host.

    :param str bam_path: Path to bam to use to make test bam
    :param str region: Region of the genome to subset from
    :param str output_name: Output file name test bam
    :param bool clean_input_bam: Cleans the input bam before starting
    :return: Path to test bam
    :rtype: str
    """
    if clean_input_bam:
        bam_path = filter_aligned_reads(bam_path)

    # Mount the bam's directory into the container as /data
    work_dir = os.path.dirname(os.path.abspath(bam_path))
    docker_base = ['docker', 'run',
                   '-v', '{}:/data'.format(work_dir),
                   'quay.io/ucsc_cgl/samtools']

    # Regional queries need an index; build one if neither .bai form exists
    stem = bam_path.split('.bam')[0]
    if not os.path.exists(stem + '.bai') and not os.path.exists(stem + '.bam.bai'):
        call(docker_base + ['index', os.path.join('/data', os.path.basename(bam_path))])

    # Subset the requested region (with header) into the output bam
    call(docker_base + ['view', '-b', '-h',
                        '-o', os.path.join('/data', output_name),
                        os.path.join('/data', os.path.basename(bam_path)),
                        region])
    return os.path.join(work_dir, output_name)
import holoviews as hv
import pandas as pd
from rnaseq_lib.dim_red import run_tsne, run_tete
from rnaseq_lib.utils import flatten
def plot_boxplot(df,
                 plot_info,
                 feature,
                 norm_func=None,
                 title=None,
                 value_label='counts', group_label='dataset'):
    """
    Return holoviews boxplot object for a "samples by feature" DataFrame

    :param pd.DataFrame df: Input DataFrame
    :param dict(str, list(str)) plot_info: Dict in the form "Label: [Samples]"
    :param str feature: Feature (column label) to use
    :param func norm_func: Normalization function for dataframe
    :param str title: Title of plot
    :param str value_label: Label to use for values in boxplot
    :param str group_label: Label to use for groups in dataset
    :return: Holoviews boxplot object
    :rtype: hv.BoxWhisker
    """
    # Apply normalization function if provided
    if norm_func:
        df = df.apply(norm_func)

    # Repeat each label once per sample, in sorted-label order so the labels
    # line up with the flattened value vector below
    group = []
    for label in sorted(plot_info):
        group.extend([label for _ in plot_info[label]])

    # Create dictionary with plot info
    plot = {value_label: flatten([df.loc[plot_info[x]][feature].tolist() for x in sorted(plot_info)]),
            group_label: group}

    # BUGFIX: dimensions now follow the label parameters instead of being
    # hard-coded to 'dataset'/'counts', which broke any non-default labels
    return hv.BoxWhisker(pd.DataFrame.from_dict(plot),
                         kdims=[group_label], vdims=[value_label], group=title)
def tsne_of_dataset(df, title, perplexity=50, learning_rate=1000, plot_info=None):
    """
    t-SNE plot of a dataset

    :param pd.DataFrame df: Samples by features DataFrame
    :param str title: Title of plot
    :param int perplexity: Perplexity hyperparamter for t-SNE
    :param int learning_rate: Learning rate hyperparameter for t-SNE
    :param dict plot_info: Additional information to include in plot
    :return: Holoviews scatter object
    :rtype: hv.Scatter
    """
    # Embed into two dimensions, then hand off to the shared scatter helper.
    embedding = run_tsne(df, num_dims=2, perplexity=perplexity, learning_rate=learning_rate)
    return _scatter_dataset(z=embedding, title=title, info=plot_info)
def tete_of_dataset(df, title, num_neighbors=30, plot_info=None):
    """
    t-ETE plot of a dataset

    :param pd.DataFrame df: Samples by features DataFrame
    :param str title: Title of plot
    :param int num_neighbors: Number of neighbors in t-ETE algorithm
    :param dict plot_info: Additional information to include in plot
    :return: Holoviews scatter object
    :rtype: hv.Scatter
    """
    # Embed into two dimensions, then hand off to the shared scatter helper.
    embedding = run_tete(df, num_dims=2, num_neighbors=num_neighbors)
    return _scatter_dataset(embedding, title=title, info=plot_info)
def _scatter_dataset(z, title, info=None):
    """
    Internal helper for scattering a 2-D embedding

    :param np.array z: An [n x 2] matrix of values to plot
    :param str title: Title of plot
    :param dict info: Additional info for plotting. Lengths of values must match x and y vectors derived from z
    :return: Holoviews scatter object
    :rtype: hv.Scatter
    """
    # Copy so the caller's dict is not mutated (the original added x/y in place)
    info = {} if info is None else dict(info)
    info['x'] = z[:, 0]
    info['y'] = z[:, 1]

    # Every extra key becomes a value dimension alongside y
    return hv.Scatter(pd.DataFrame.from_dict(info),
                      kdims=['x'],
                      vdims=['y'] + [k for k in info.keys() if k not in ('x', 'y')],
                      group=title)
import urllib2
import requests
import xmltodict
from bs4 import BeautifulSoup
def get_drug_target_from_wiki(drug):
    """
    Scrape wikipedia for the target of a drug

    :param str drug: Drug to lookup
    :return: Drug target, or None if the page / table / target is missing
    :rtype: str
    """
    # Look for wiki page
    url = 'https://en.wikipedia.org/wiki/'
    try:
        page = urllib2.urlopen(url + drug)
    except urllib2.HTTPError:
        # print() call form works identically on py2 for a single argument
        print('Page not found for: {}'.format(drug))
        return None
    # Parse page
    soup = BeautifulSoup(page, 'html.parser')
    # The drug infobox is rendered as an HTML table with class "infobox"
    name_box = soup.find('table', {'class': 'infobox'})
    if not name_box:
        print('No table found for {}'.format(drug))
        return None
    # Look for "Target"; the next entry in the newline-split text is the target
    name = name_box.text.strip()
    if 'Target' not in name:
        print('{} has no listed Target'.format(drug))
        return None
    rows = name.split('\n')  # split once instead of twice
    return rows[rows.index('Target') + 1]
def get_info_from_wiki(drug):
    """
    Scrape wikipedia for all information about a drug

    :param str drug: Drug to lookup
    :return: Text content of the wikipedia page, or None if not found
    :rtype: str
    """
    # Look for wiki page
    url = 'https://en.wikipedia.org/wiki/'
    try:
        page = urllib2.urlopen(url + drug)
    except urllib2.HTTPError:
        print('Page not found for: {}'.format(drug))
        return None
    # Parse page
    soup = BeautifulSoup(page, 'html.parser')
    # The main article body lives in the "mw-content-ltr" div
    name_box = soup.find('div', {'class': 'mw-content-ltr'})
    if name_box:
        return name_box.text.strip()
    print('No table found for {}'.format(drug))
    return None
def get_drug_usage_nih(drug):
    """
    Gets drug usage information from the NIH DailyMed API

    :param str drug: Drug to look up
    :return: Usage section from NIH, or None if unavailable
    :rtype: str
    """
    # Find the drug's "set ID" via the SPLS listing endpoint
    url = 'https://dailymed.nlm.nih.gov/dailymed/services/v2/spls.json'
    r = _rget(url, params={'drug_name': drug})
    if r is None:
        return None
    # Last matching title wins, mirroring the original scan order
    setid = None
    for data in r.json()['data']:
        if drug in data['title'] or drug.upper() in data['title']:
            print('Found set ID for: {}'.format(data['title']))
            setid = data['setid']
    if setid is None:
        print('No set ID found for {}'.format(drug))
        return None
    # Request the full structured product label (SPL) for that set ID
    url = 'https://dailymed.nlm.nih.gov/dailymed/services/v2/spls/{}.xml'.format(setid)
    r = _rget(url)
    if r is None:
        return None
    # The SPL document is XML; walk its components for the usage section
    xml = xmltodict.parse(r.content)
    comp = xml['document']['component']['structuredBody']['component']
    content = None
    for sec in comp:
        if 'USAGE' in sec['section']['code']['@displayName']:
            content = str(sec['section'])
    if content is None:
        print('No content found')
        return None
    # Strip the OrderedDict repr noise down to readable text
    remove = ['[', '(', ')', ']', "u'", "'", '#text', 'OrderedDict',
              'u@styleCode', 'uitalics', 'ulinkHtml', 'href', ',']
    for item in remove:
        content = content.replace(item, '')
    return content.replace('displayName', '\n\n')
def _rget(url, params=None):
    """
    Wrapper for requests.get that checks the response status code

    :param str url: Request URL
    :param dict params: Parameters for request
    :return: Response for URL, or None if status code != 200
    :rtype: requests.models.Response
    """
    r = requests.get(url, params=params)
    if r.status_code != 200:
        # print() call form works identically on py2 for a single argument
        print('Error Status Code {}'.format(r.status_code))
        return None
    return r
from typing import List, Tuple
import pandas as pd
from pandas import DataFrame
# These functions operate on the expression/metadata HDF5 dataset stored in
# Synapse (dataset ID not recorded here — see project docs), loaded as:
# Expression: pd.read_hdf(data_path, key='exp')
# Metadata: pd.read_hdf(data_path, key='met')
def add_metadata_to_exp(exp: DataFrame, met: DataFrame) -> DataFrame:
    """Add metadata columns to the expression dataframe and return the combined object.

    NOTE(review): both the membership test ``x in met.id`` and the column
    assignments below align on the *index* of ``met``, so this assumes ``met``
    is indexed by sample id to match ``exp`` — confirm against the source HDF5.
    """
    # Copy gene names from expression DataFrame
    genes = exp.columns.tolist()
    # Keep only metadata rows for samples present in the expression matrix,
    # dropping duplicate metadata entries
    samples = [x for x in exp.index if x in met.id]
    met = met[met.id.isin(samples)].drop_duplicates('id')
    # Ensure index dims are the same length
    assert len(exp) == len(met), 'Expression dataframe and metadata do not match index lengths'
    # Work on a copy so the caller's dataframe is not mutated
    # (the original assigned the metadata columns directly onto ``exp``)
    df = exp.copy()
    df.loc[:, 'id'] = met.id
    df.loc[:, 'tissue'] = met.tissue
    df.loc[:, 'type'] = met.type
    df.loc[:, 'tumor'] = met.tumor
    df.loc[:, 'label'] = _label_vector_from_samples(df.index)
    return df[['id', 'tissue', 'type', 'label', 'tumor'] + genes]
def _label_vector_from_samples(samples: List[str]) -> List[str]:
"""Produce a vector of TCGA/GTEx labels for the sample vector provided"""
vector = []
for x in samples:
if x.startswith('TCGA'):
if x.endswith('11'):
vector.append('tcga-normal')
elif x.endswith('01'):
vector.append('tcga-tumor')
else:
vector.append('tcga-other')
else:
vector.append('gtex')
return vector
def sample_counts_df(df: DataFrame, groupby: str = 'tissue') -> DataFrame:
    """Return a dataframe of sample counts based on groupby of 'tissue' or 'type'"""
    # Count labels within each group; cast the resulting Series to a frame
    counts = df.groupby(groupby).label.value_counts()
    counts_df = counts.to_frame()
    # Rename the single column before reset_index to avoid a name collision
    # with the 'label' index level
    counts_df.columns = ['counts']
    counts_df = counts_df.reset_index()
    return counts_df.sort_values([groupby, 'label'])
def subset_by_dataset(df: DataFrame) -> Tuple[DataFrame, DataFrame, DataFrame]:
    """Subset expression/metadata table by label into (tumor, normal, gtex)."""
    by_label = {label: df[df.label == label]
                for label in ('tcga-tumor', 'tcga-normal', 'gtex')}
    return by_label['tcga-tumor'], by_label['tcga-normal'], by_label['gtex']
from collections import defaultdict
from typing import List, Set
import pandas as pd
from rnaseq_lib3.math import log2fc
# TCGA Mapping
subtype_abbrev = {
'LAML': 'Acute Myeloid Leukemia',
'ACC': 'Adrenocortical carcinoma',
'BLCA': 'Bladder Urothelial Carcinoma',
'LGG': 'Brain Lower Grade Glioma',
'BRCA': 'Breast invasive carcinoma',
'CESC': 'Cervical squamous cell carcinoma and endocervical adenocarcinoma',
'CHOL': 'Cholangiocarcinoma',
'LCML': 'Chronic Myelogenous Leukemia',
'COAD': 'Colon adenocarcinoma',
'COADRED': 'Colorectal adenocarcinoma',
'CNTL': 'Controls',
'ESCA': 'Esophageal carcinoma',
'FPPP': 'FFPE Pilot Phase II',
'GBM': 'Glioblastoma multiforme',
'HNSC': 'Head and Neck squamous cell carcinoma',
'KICH': 'Kidney Chromophobe',
'KIRC': 'Kidney renal clear cell carcinoma',
'KIRP': 'Kidney renal papillary cell carcinoma',
'LIHC': 'Liver hepatocellular carcinoma',
'LUAD': 'Lung adenocarcinoma',
'LUSC': 'Lung squamous cell carcinoma',
'DLBC': 'Lymphoid Neoplasm Diffuse Large B-cell Lymphoma',
'MESO': 'Mesothelioma',
'MISC': 'Miscellaneous',
'OV': 'Ovarian serous cystadenocarcinoma',
'PAAD': 'Pancreatic adenocarcinoma',
'PCPG': 'Pheochromocytoma and Paraganglioma',
'PRAD': 'Prostate adenocarcinoma',
'READ': 'Rectum adenocarcinoma',
'SARC': 'Sarcoma',
'SKCM': 'Skin Cutaneous Melanoma',
'STAD': 'Stomach adenocarcinoma',
'TGCT': 'Testicular Germ Cell Tumors',
'THYM': 'Thymoma',
'THCA': 'Thyroid carcinoma',
'UCS': 'Uterine Carcinosarcoma',
'UCEC': 'Uterine Corpus Endometrial Carcinoma',
'UVM': 'Uveal Melanoma',
}
# General Information
def patient_tissue(met: pd.DataFrame, patient_id: str) -> str:
    """Return the disease tissue of origin for *patient_id*.

    Looks the patient up by index after de-duplicating metadata rows on 'id'.
    """
    deduplicated = met.drop_duplicates('id')
    return deduplicated.loc[patient_id].tissue
def patient_subtype(met: pd.DataFrame, patient_id: str) -> str:
    """Return the disease subtype for *patient_id*.

    Looks the patient up by index after de-duplicating metadata rows on 'id'.
    """
    deduplicated = met.drop_duplicates('id')
    return deduplicated.loc[patient_id].type
def patients_from_subtype(df: pd.DataFrame, subtype: str) -> List[str]:
    """List the ids of all patients whose disease subtype equals *subtype*."""
    matches = df.loc[df.type == subtype, 'id']
    return list(matches)
def patients_from_tissue(df: pd.DataFrame, tissue: str) -> List[str]:
    """List the ids of all patients whose tissue of origin equals *tissue*."""
    matches = df.loc[df.tissue == tissue, 'id']
    return list(matches)
# Differential Expression
def find_de_genes(df1: pd.DataFrame, df2: pd.DataFrame, genes: List[str], normalization=False) -> pd.DataFrame:
    """Return DataFrame of differentially expressed genes between two groups.

    For each gene, the log2 fold-change of the (optionally normalized)
    medians between the two groups is computed; the result is sorted by
    L2FC, largest first. ``normalization`` is either False or a callable
    applied element-wise before taking the median.
    """
    records = []
    for gene in genes:
        series1, series2 = df1[gene], df2[gene]
        if normalization:
            series1 = series1.apply(normalization)
            series2 = series2.apply(normalization)
        records.append((gene, log2fc(series1.median(), series2.median())))
    result = pd.DataFrame(records, columns=['genes', 'L2FC'])
    return result.sort_values('L2FC', ascending=False)
# TCGA SNV/Driver Functions
def mutations_for_gene(driver_mutations_path: str, gene: str) -> List[str]:
    """Return the sorted, de-duplicated mutations recorded for a TCGA
    cancer driver *gene* in the tab-separated mutations table."""
    table = pd.read_csv(driver_mutations_path, sep='\t')
    gene_rows = table[table.Gene == gene]
    return sorted(gene_rows.Mutation.unique())
def subtypes_for_gene(driver_consensus_path: str, gene: str) -> List[str]:
    """Return the TCGA cancer subtypes associated with *gene*.

    Study codes found in the consensus table are expanded via
    ``subtype_abbrev`` when possible (codes with no mapping are kept
    as-is), then reformatted as Capitalized_Words_Joined_By_Underscores.
    """
    consensus = pd.read_csv(driver_consensus_path, sep='\t')
    codes = consensus[consensus.Gene == gene].Cancer.unique()
    full_names = [subtype_abbrev.get(code, code) for code in codes]
    return ['_'.join(word.capitalize() for word in name.split())
            for name in full_names]
def subtype_filter(metadata: pd.DataFrame, samples: List[str], subtypes: List[str]) -> Set[str]:
    """Return the subset of *samples* whose metadata subtype is in *subtypes*."""
    valid_ids = set(metadata.loc[metadata.type.isin(subtypes), 'id'])
    return valid_ids.intersection(samples)
def pathway_from_gene(driver_pathway_path: str, gene: str) -> str:
    """Return the TCGA cancer driver pathway for a given gene.

    Returns the pathway name (str) when exactly one pathway is recorded.
    When zero or multiple pathways are found, a diagnostic is printed and
    the raw array of pathways is returned (kept for backward compatibility,
    despite the ``-> str`` annotation).
    """
    path = pd.read_csv(driver_pathway_path, sep='\t')
    pathway = path[path.Gene == gene].Pathway.unique()
    if len(pathway) != 1:
        # Original message claimed "More than 1" even when none were found.
        print(f'Expected exactly 1 pathway for {gene}, found {len(pathway)}: {pathway}')
        return pathway
    else:
        return pathway[0]
# TCGA MC3 Mutation Table
def mutation_sample_map(snv: pd.DataFrame, gene: str, mutations: List[str]) -> pd.DataFrame:
    """Identify samples carrying any of *mutations* (SNPs) in *gene*.

    Tumor sample barcodes are truncated to 15 characters (the TCGA
    sample-level prefix). Returns a DataFrame indexed by sample with a
    single 'Mutation' column; a sample appearing under several mutations
    keeps only one of them.
    """
    # Restrict to SNP records for the requested gene
    snp_rows = snv[(snv.SYMBOL == gene) & (snv.Variant_Type == 'SNP')]
    # Gather the truncated barcodes per mutation
    samples = defaultdict(set)
    for mutation in mutations:
        barcodes = snp_rows[snp_rows.HGVSp_Short == mutation].Tumor_Sample_Barcode
        samples[mutation].update(barcode[:15] for barcode in barcodes)
    # Invert to sample -> mutation and build the result frame
    sample_to_mutation = {sample: mutation
                          for mutation, members in samples.items()
                          for sample in members}
    result = pd.DataFrame(list(sample_to_mutation.items()), columns=['Sample', 'Mutation'])
    result = result.set_index('Sample')
    result.index.name = None
    return result
from genericdiff.generic_diff import *
import math
class sin(GenericDiff):
    """Sine with automatic differentiation: val = sin(x), der = cos(x) * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.sin(val)
        self.der = math.cos(val) * der
class cos(GenericDiff):
    """Cosine with automatic differentiation: val = cos(x), der = -sin(x) * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.cos(val)
        self.der = -math.sin(val) * der
class tan(GenericDiff):
    """Tangent with automatic differentiation: val = tan(x), der = x' / cos(x)**2."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.tan(val)
        self.der = der / (math.cos(val) ** 2.0)
class sinh(GenericDiff):
    """Hyperbolic sine: val = sinh(x), der = cosh(x) * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.sinh(val)
        self.der = math.cosh(val) * der
class cosh(GenericDiff):
    """Hyperbolic cosine: val = cosh(x), der = sinh(x) * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.cosh(val)
        self.der = math.sinh(val) * der
class tanh(GenericDiff):
    """Hyperbolic tangent: val = tanh(x), der = x' / cosh(x)**2."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.tanh(val)
        self.der = der / (math.cosh(val) ** 2.0)
class acos(GenericDiff):
    """Arccosine: val = acos(x), der = -x' / sqrt(1 - x**2)."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.acos(val)
        self.der = -der / math.sqrt(1.0 - val ** 2.0)
class asin(GenericDiff):
    """Arcsine: val = asin(x), der = x' / sqrt(1 - x**2)."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.asin(val)
        self.der = der / math.sqrt(1.0 - val ** 2.0)
class atan(GenericDiff):
    """Arctangent: val = atan(x), der = x' / (1 + x**2)."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.atan(val)
        # d/dx atan(x) = 1 / (1 + x^2); the original incorrectly used
        # 1 / sqrt(1 + x^2).
        self.der = der / (1.0 + val ** 2.0)
#exponential for base e
class exp(GenericDiff):
    """Natural exponential: val = e**x, der = e**x * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.exp(val)
        # Constants short-circuit to an exact zero derivative.
        self.der = 0 if der == 0 else math.exp(val) * der
# will handle any base with default = e
class log(GenericDiff):
    """Logarithm of arbitrary base (default e): val = log_base(x),
    der = x' / (x * ln(base))."""
    def __init__(self, obj, base=math.e):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        self.val = math.log(val, base)
        # Constants short-circuit to an exact zero derivative.
        self.der = 0 if der == 0 else der / (val * math.log(base))
#logistic function
class logit(GenericDiff):
    """Logistic (sigmoid) function: val = e**x / (1 + e**x),
    der = sigma * (1 - sigma) * x'."""
    def __init__(self, obj):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        sigma = math.exp(val) / (1 + math.exp(val))
        self.val = sigma
        # d/dx sigma(x) = sigma * (1 - sigma) = e^-x / (1 + e^-x)^2, which is
        # positive; the original multiplied by (-obj.der), flipping the sign.
        self.der = sigma * (1 - sigma) * der
#sqrt function
class sqrt(GenericDiff):
    """Square root: val = sqrt(x), der = x' / (2 * sqrt(x)).

    The ``base`` parameter is unused (copy-paste leftover from ``log``);
    it is kept only for backward compatibility with existing callers.
    """
    def __init__(self, obj, base=math.e):
        try:
            val, der = obj.val, obj.der
        except AttributeError:
            # Plain numbers are promoted to constants (derivative 0).
            obj = Constant(obj)
            val, der = obj.val, obj.der
        if val <= 0:
            raise ValueError("Cannot take the derivative for sqrt of 0 or negative number.\n"
                             "This package only outputs real numbers.")
        self.val = math.sqrt(val)
        # d/dx sqrt(x) = 1 / (2 * sqrt(x)); chain rule multiplies by der.
        # The original divided by der instead of multiplying, which is wrong.
        self.der = 0 if der == 0 else der / (2 * math.sqrt(val))
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float): mean value of the distribution
        stdev (float): standard deviation of the distribution
        data (list of floats): data extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)
    def calculate_mean(self):
        """Calculate and store the mean of the data set.

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean
    def calculate_stdev(self, sample=True):
        """Calculate and store the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample
                (n - 1 denominator, Bessel's correction) or a population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev
    def plot_histogram(self):
        """Output a histogram of the instance variable data using
        matplotlib's pyplot library."""
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
    def pdf(self, x):
        """Probability density function for the Gaussian distribution.

        Args:
            x (float): point at which to evaluate the density

        Returns:
            float: probability density at x
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and the probability
        density function over the same range.

        Args:
            n_spaces (int): number of points on the pdf curve

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)
        # evenly spaced evaluation points across the observed data range
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: label the pdf subplot (the original re-labeled axes[0]).
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y
    def __add__(self, other):
        """Add together two independent Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance to add

        Returns:
            Gaussian: sum distribution (means add, variances add)
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result
    def __repr__(self):
        """Return a string describing this Gaussian instance."""
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
from __future__ import print_function
import hashlib
import hmac
import sys
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Protocol import KDF
__all__ = ('RNCryptor', 'decrypt', 'encrypt')
__version__ = '3.3.0'
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
    # Python 2: native str is already bytes; unicode must be encoded.
    def to_bytes(s):
        if isinstance(s, str):
            return s
        if isinstance(s, unicode):
            return s.encode('utf-8')
    # On Python 2 the "native" text type and the byte type coincide.
    to_str = to_bytes
    def bchr(s):
        # int -> single-byte string
        return chr(s)
    def bord(s):
        # single-byte string -> int
        return ord(s)
elif PY3:
    unicode = str  # hack for pyflakes (https://bugs.launchpad.net/pyflakes/+bug/1585991)
    def to_bytes(s):
        # Coerce str (via UTF-8) or bytes to bytes.
        if isinstance(s, bytes):
            return s
        if isinstance(s, str):
            return s.encode('utf-8')
    def to_str(s):
        # Coerce bytes (via UTF-8) or str to str.
        if isinstance(s, bytes):
            return s.decode('utf-8')
        if isinstance(s, str):
            return s
    def bchr(s):
        # int -> length-1 bytes object
        return bytes([s])
    def bord(s):
        # indexing bytes on py3 already yields an int
        return s
if hasattr(hmac, 'compare_digest'):
    # Preferred: the stdlib constant-time comparison (py >= 2.7.7 / 3.3).
    def compare_in_constant_time(left, right):
        return hmac.compare_digest(left, right)
else:
    # Fallback: run time depends on len(right) only, so the timing does
    # not reveal where the first mismatching byte occurs.
    def compare_in_constant_time(left, right):
        length_left = len(left)
        length_right = len(right)
        # A length mismatch already makes the result non-zero.
        result = length_left - length_right
        for i, byte in enumerate(right):
            result |= bord(left[i % length_left]) ^ bord(byte)
        return result == 0
compare_in_constant_time.__doc__ = """\
Compare two values in time proportional to the second one.
Return True if the values are equal, False otherwise.
"""
class RNCryptorError(Exception):
    """Base error for when anything goes wrong with RNCryptor."""
class DecryptionError(RNCryptorError):
    """Raised when bad data is inputted (e.g. the HMAC check fails during decryption)."""
class RNCryptor(object):
    """Cryptor implementing the RNCryptor v3 data format
    (AES-CBC encryption, PBKDF2 key derivation, HMAC-SHA256 integrity)."""
    # Length in bytes of both the encryption salt and the HMAC salt.
    SALT_SIZE = 8
    def pre_decrypt_data(self, data):
        """Handle data before decryption (hook; coerces input to bytes)."""
        data = to_bytes(data)
        return data
    def post_decrypt_data(self, data):
        """Remove useless symbols which appear over padding for AES (PKCS#7)."""
        # The last plaintext byte encodes how many padding bytes to strip.
        data = data[:-bord(data[-1])]
        return to_str(data)
    def decrypt(self, data, password):
        """Decrypt `data` using `password`.

        Raises DecryptionError when the HMAC trailer does not verify
        (wrong password or corrupted/tampered data).
        """
        data = self.pre_decrypt_data(data)
        password = to_bytes(password)
        n = len(data)
        # Message layout: [0] version, [1] options, [2:10] encryption salt,
        # [10:18] HMAC salt, [18:34] AES IV, [34:n-32] ciphertext,
        # [n-32:] HMAC-SHA256 over everything before it.
        # version = data[0]  # unused now
        # options = data[1]  # unused now
        encryption_salt = data[2:10]
        hmac_salt = data[10:18]
        iv = data[18:34]
        cipher_text = data[34:n - 32]
        hmac = data[n - 32:]
        # Separate keys for encryption and authentication, each derived
        # from the password with its own salt.
        encryption_key = self._pbkdf2(password, encryption_salt)
        hmac_key = self._pbkdf2(password, hmac_salt)
        # Verify integrity before decrypting; constant-time compare to
        # avoid leaking the mismatch position through timing.
        if not compare_in_constant_time(self._hmac(hmac_key, data[:n - 32]), hmac):
            raise DecryptionError("Bad data")
        decrypted_data = self._aes_decrypt(encryption_key, iv, cipher_text)
        return self.post_decrypt_data(decrypted_data)
    def pre_encrypt_data(self, data):
        """Do padding for the data for AES (PKCS#7)."""
        data = to_bytes(data)
        aes_block_size = AES.block_size
        # Each padding byte holds the padding length; a full extra block
        # is added when the data is already block-aligned.
        rem = aes_block_size - len(data) % aes_block_size
        return data + bchr(rem) * rem
    def post_encrypt_data(self, data):
        """Handle data after encryption (hook; returns data unchanged)."""
        return data
    def encrypt(self, data, password):
        """Encrypt `data` using `password`."""
        data = self.pre_encrypt_data(data)
        password = to_bytes(password)
        # Fresh random salts and IV for every message.
        encryption_salt = self.encryption_salt
        encryption_key = self._pbkdf2(password, encryption_salt)
        hmac_salt = self.hmac_salt
        hmac_key = self._pbkdf2(password, hmac_salt)
        iv = self.iv
        cipher_text = self._aes_encrypt(encryption_key, iv, data)
        # Header: format version 3, options flag 1 (password-based keys).
        version = b'\x03'
        options = b'\x01'
        new_data = b''.join([version, options, encryption_salt, hmac_salt, iv, cipher_text])
        # Append the HMAC-SHA256 over the whole message as the trailer.
        encrypted_data = new_data + self._hmac(hmac_key, new_data)
        return self.post_encrypt_data(encrypted_data)
    @property
    def encryption_salt(self):
        # Fresh random salt each access.
        return Random.new().read(self.SALT_SIZE)
    @property
    def hmac_salt(self):
        # Fresh random salt each access.
        return Random.new().read(self.SALT_SIZE)
    @property
    def iv(self):
        # Fresh random AES initialization vector each access.
        return Random.new().read(AES.block_size)
    def _aes_encrypt(self, key, iv, text):
        # AES in CBC mode; `text` must already be block-aligned (padded).
        return AES.new(key, AES.MODE_CBC, iv).encrypt(text)
    def _aes_decrypt(self, key, iv, text):
        return AES.new(key, AES.MODE_CBC, iv).decrypt(text)
    def _hmac(self, key, data):
        # HMAC-SHA256 used as the message authentication trailer.
        return hmac.new(key, data, hashlib.sha256).digest()
    def _prf(self, secret, salt):
        # HMAC-SHA1 pseudo-random function used inside PBKDF2.
        return hmac.new(secret, salt, hashlib.sha1).digest()
    def _pbkdf2(self, password, salt, iterations=10000, key_length=32):
        # Derive a 32-byte key (AES-256 / HMAC key size) from the password.
        return KDF.PBKDF2(password, salt, dkLen=key_length, count=iterations, prf=self._prf)
def decrypt(data, password):
    """Module-level convenience wrapper around RNCryptor.decrypt."""
    return RNCryptor().decrypt(data, password)
decrypt.__doc__ = RNCryptor.decrypt.__doc__
def encrypt(data, password):
    """Module-level convenience wrapper around RNCryptor.encrypt."""
    return RNCryptor().encrypt(data, password)
encrypt.__doc__ = RNCryptor.encrypt.__doc__
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float): mean value of the distribution
        stdev (float): standard deviation of the distribution
        data (list of floats): data extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)
    def calculate_mean(self):
        """Calculate and store the mean of the data set.

        Returns:
            float: mean of the data set
        """
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean
    def calculate_stdev(self, sample=True):
        """Calculate and store the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample
                (n - 1 denominator, Bessel's correction) or a population

        Returns:
            float: standard deviation of the data set
        """
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev
    def plot_histogram(self):
        """Output a histogram of the instance variable data using
        matplotlib's pyplot library."""
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')
    def pdf(self, x):
        """Probability density function for the Gaussian distribution.

        Args:
            x (float): point at which to evaluate the density

        Returns:
            float: probability density at x
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
    def plot_histogram_pdf(self, n_spaces=50):
        """Plot the normalized histogram of the data and the probability
        density function over the same range.

        Args:
            n_spaces (int): number of points on the pdf curve

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)
        # evenly spaced evaluation points across the observed data range
        interval = 1.0 * (max_range - min_range) / n_spaces
        x = []
        y = []
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))
        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # Bug fix: label the pdf subplot (the original re-labeled axes[0]).
        axes[1].set_ylabel('Density')
        plt.show()
        return x, y
    def __add__(self, other):
        """Add together two independent Gaussian distributions.

        Args:
            other (Gaussian): Gaussian instance to add

        Returns:
            Gaussian: sum distribution (means add, variances add)
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result
    def __repr__(self):
        """Return a string describing this Gaussian instance."""
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)

[](https://codecov.io/gh/davips/rndqts)
# rndqts
Random stock market quotes
<img src="https://raw.githubusercontent.com/davips/rndqts/main/chart.png">
[Latest version](https://github.com/davips/rndqts)
## Installation
### as a standalone lib.
```bash
# Set up a virtualenv.
python3 -m venv venv
source venv/bin/activate
# Install from PyPI...
pip install --upgrade pip
pip install -U rndqts
# ...or, install from updated source code.
pip install git+https://github.com/davips/rndqts
```
### as an editable lib inside your project.
```bash
cd your-project
source venv/bin/activate
git clone https://github.com/davips/rndqts ../rndqts
pip install --upgrade pip
pip install -e ../rndqts
```
## Examples
**Fetching from Yahoo**
<details>
<p>
```python3
from rndqts import Real
print(Real("VALE3.sa").data)
"""
Fetching VALE3.SA ...
[*********************100%***********************] 1 of 1 completed
Open High Low Close Volume
Date
2020-12-17 82.771173 84.158663 82.455396 83.440994 21367800
2020-12-18 83.842888 84.570121 83.661076 84.015129 23843100
2020-12-21 82.436261 83.632377 81.125317 83.115654 31877300
2020-12-22 83.115654 83.240048 81.747298 83.192207 23157000
2020-12-23 82.799879 83.756771 82.675485 83.594101 17710200
2020-12-28 84.005561 84.761506 83.326168 83.546249 26001300
2020-12-29 84.177801 84.397882 82.780740 83.316597 19727500
2020-12-30 83.431427 83.814178 82.914705 83.680214 30102700
"""
```
</p>
</details>
**Random stock quotes**
<details>
<p>
```python3
from rndqts import Realistic
from rndqts import Real
# Real quotes to fetch from Yahoo.
r1 = Real("PETR4.sa")
r2 = Real("CSNA3.sa")
r3 = Real("VALE3.sa")
r4 = Real("USIM5.sa")
# Generating random quotes.
print(Realistic([r1, r2, r3, r4]).data)
"""
Fetching PETR4.SA ...
[*********************100%***********************] 1 of 1 completed
Fetching CSNA3.SA ...
[*********************100%***********************] 1 of 1 completed
Fetching USIM5.SA ...
[*********************100%***********************] 1 of 1 completed
Open High Low Close Volume
Date
0 99.71 105.87 99.28 104.65 12499
1 105.44 105.72 103.69 104.36 9484
2 103.58 105.33 103.31 104.65 11855
3 104.25 104.72 99.10 99.80 5155
4 98.36 98.99 97.81 98.99 6444
5 99.65 100.64 99.10 99.94 2998
6 100.43 101.34 99.62 99.88 3748
7 97.59 100.78 93.42 98.84 2410
8 99.58 100.67 96.47 97.76 1495
9 97.76 97.91 96.15 97.85 1087
10 99.29 99.85 98.66 98.66 805
11 98.12 98.41 95.27 96.29 764
12 96.75 97.59 96.54 96.95 853
13 98.14 98.21 93.27 95.50 851
14 95.05 96.15 94.91 95.96 651
15 94.51 94.58 89.68 91.43 444
16 91.43 92.96 91.29 91.35 555
17 91.81 92.07 91.09 91.81 488
18 93.59 95.77 92.71 94.87 510
19 94.72 97.32 94.43 96.96 630
20 96.50 96.70 95.67 96.30 565
21 98.56 102.95 95.44 97.31 707
22 98.35 99.30 96.96 97.88 390
23 98.16 98.59 92.45 93.53 239
24 93.66 94.09 93.08 93.94 299
25 92.80 97.65 92.73 95.36 300
26 96.04 96.21 94.72 95.66 247
27 93.86 95.22 92.37 94.64 309
28 94.12 94.85 91.24 92.95 229
29 92.38 93.21 88.42 89.13 169
30 89.27 89.54 86.88 87.21 137
31 87.38 87.61 83.80 84.49 150
32 87.32 89.52 86.36 87.87 68
33 87.62 88.09 87.19 87.65 55
34 86.72 87.96 85.90 87.14 69
35 86.94 89.12 86.42 88.72 60
36 88.60 89.15 88.19 88.33 40
37 88.15 91.93 87.92 91.17 37
38 91.60 91.74 90.56 90.73 47
39 87.79 88.76 85.63 87.24 59
40 86.63 87.83 86.47 86.96 72
41 88.29 93.05 88.23 91.27 90
42 91.48 92.03 89.24 89.64 104
43 89.89 90.34 89.42 89.86 129
44 90.36 93.21 89.66 91.49 162
45 93.24 94.75 91.91 92.48 122
46 90.72 91.58 88.65 89.50 117
47 89.85 94.52 89.44 93.85 147
48 93.23 93.75 92.31 92.96 184
49 92.50 93.26 91.68 93.01 126
50 92.36 95.63 91.72 94.81 158
51 95.33 98.18 95.05 97.14 167
52 96.66 97.42 96.38 96.66 190
53 95.94 99.03 94.91 97.73 238
54 98.34 102.73 97.46 101.92 298
55 102.64 103.35 99.13 99.99 143
"""
```
```python3
```
</p>
</details>
**Synthetic (non-realistic) quotes**
<details>
<p>
```python3
from rndqts import Synthetic
print(Synthetic()[:5].data)
"""
Open High Low Close Volume
Date
0 112.22 117.64 104.00 111.43 11868
1 121.24 122.02 100.54 108.78 11689
2 118.44 124.60 110.35 123.54 12208
3 129.35 141.99 127.66 136.83 11958
4 114.05 145.78 102.64 136.04 14673
"""
```
</p>
</details>
**Saving as a CSV file**
<details>
<p>
```python3
from rndqts import Real
Real("VALE3.sa").data.to_csv("/tmp/myfile.csv")
```
</p>
</details>
**Plotting**
<details>
<p>
```python3
from rndqts import Real
print((Real("VALE3.sa").data))
# Real("VALE3.sa").plot()
"""
Fetching VALE3.sa ...
[*********************100%***********************] 1 of 1 completed
"""
"""
Open High Low Close Volume
Date
2020-12-17 82.771173 84.158663 82.455396 83.440994 21367800
2020-12-18 83.842888 84.570121 83.661076 84.015129 23843100
2020-12-21 82.436261 83.632377 81.125317 83.115654 31877300
2020-12-22 83.115654 83.240048 81.747298 83.192207 23157000
2020-12-23 82.799879 83.756771 82.675485 83.594101 17710200
2020-12-28 84.005561 84.761506 83.326168 83.546249 26001300
2020-12-29 84.177801 84.397882 82.780740 83.316597 19727500
2020-12-30 83.431427 83.814178 82.914705 83.680214 30102700
"""
```
</p>
</details>
<p><a href="https://github.com/davips/rndqts/blob/main/examples/plotvale3.png">
<img src="https://raw.githubusercontent.com/davips/rndqts/main/examples/plotvale3.png" alt="Output as a browser window" width="200" height="200">
</a></p>
## Features (current or planned)
* [x] Fetch from yahoo
* [x] Automatic local caching
* [x] Slicing
* [x] Plot candle sticks
* [x] Cacheable and identified by hash id
* [x] **Distinct kinds of quotes**
* [x] ***Real***
* market quotes
* [x] ***Realistic***
* random, based on real quotes
* [x] ***Synthetic***
* entirely based on Gaussian distributions from a pseudo random number generator
* good for software test
* lazy / infinite
* [ ] ***Holding***
* combination of real quotes, without randomness
* useful for dataset augmentation with fictional tickers
* [ ] **News fetching**
* [ ] https://blog.datahut.co/scraping-nasdaq-news-using-python
| /rndqts-0.2105.28.tar.gz/rndqts-0.2105.28/README.md | 0.444324 | 0.684178 | README.md | pypi |
from collections import OrderedDict
from functools import reduce, partial
def make_form(rng, root_name='ArchiveTransfer'):
    """Render an HTML form from a parsed RNG schema.

    Walks the nested dict produced by ``rng.to_form()`` and emits nested
    <div>/<h*>/<input>/<textarea> markup.  The ``root_name`` subtree
    (``inside``) is used only to resolve dotted input names; note that the
    *whole* ``to_form()`` result is rendered, not just the subtree.

    :param rng: object exposing ``to_form()`` -> nested dict of the schema
    :param root_name: key of the subtree used for input-name path lookups
    :returns: the form body as an HTML string
    """
    results = rng.to_form()
    inside = results[root_name]
    def make_input(value):
        """Extract the displayable value from an rng leaf tuple."""
        what = value[0]
        # NOTE(review): .replace("'", "\'") is a no-op since "\'" == "'";
        # presumably an escaping attempt — confirm intended behavior.
        if what.startswith('not editable'):
            what = what.replace('not editable:', '').replace("'", "\'")
        if what.startswith('attribute:value:'):
            what = what.replace('attribute:value:', '').replace("'", "\'")
        return what
    def walk_dict(target_dict, depth=1):
        """Recursively render ``target_dict``; ``depth`` picks the <h{depth}> level."""
        stuff = ""
        def metadata_in_name(target_string, values):
            """True if any of ``values`` occurs as a prefix of ``target_string``."""
            # str.find(...) == 0 means the value occurs at position 0 (a prefix).
            return 0 in [target_string.find(value) for value in values]
        for rng_key, rng_val in sorted(target_dict.items(), key=lambda x: x[0]):
            if isinstance(rng_val, dict):
                # Branch node: render a togglable section header and recurse.
                cssclass = ""
                if metadata_in_name(rng_key, ['zeroOrMore', 'oneOrMore']):
                    cssclass = "class='multiple'"
                # Strip RNG cardinality/metadata markers from the displayed name.
                clean_name = rng_key.replace('optional.', '').replace(
                    'oneOrMore.', '').replace('.data', '').replace(
                        'zeroOrMore.', '')
                stuff +="<div class=\"{0}\" >".format(clean_name)
                stuff += "<h{0} {2} rel='togglable' class=\"{3}_rel\">{1}<span class=\"nice-span glyphicon glyphicon-minus\"></span></h{0}>".format(depth, rng_key, cssclass, clean_name)
                stuff += "<div class='holder{}'>".format(depth)
                stuff += walk_dict(rng_val, depth + 1)
            else:
                def find_key(a_dict, key):
                    """Return the key path (list) leading to ``key`` in ``a_dict``, or None."""
                    for his_key, his_val in a_dict.items():
                        if isinstance(his_val, dict):
                            found = find_key(his_val, key)
                            if found:
                                return [his_key] + found
                        elif his_val == key:
                            return [his_key]
                def make_input_name(value):
                    """Build the dotted input name for ``value`` with metadata stripped."""
                    values = ['optional', 'value',
                              'oneOrMore', 'data', "zeroOrMore"]
                    def strip_meta(this_string):
                        """Remove RNG metadata markers and dots from one path segment."""
                        wot = this_string.replace('optional', '').replace(
                            'oneOrMore', '').replace('.data', '').replace(
                                'zeroOrMore', '').replace('.', '')
                        return wot
                    # NOTE(review): find_key may return None here, which would
                    # raise TypeError — assumes ``value`` is always found in ``inside``.
                    ret = [strip_meta(tag) for tag in find_key(inside, value) if tag not in values]
                    return ".".join(ret)
                stuff += "\n<div class=\"{0}\"><div style='font-weight:bold;'>{1}</div>".format(
                    make_input_name(rng_val),
                    ".".join(find_key(inside, rng_val)))
                def val_starts_with(base_string, strings):
                    """True if ``base_string`` starts with any of ``strings`` (falls through to None otherwise)."""
                    for the_string in strings:
                        if base_string.startswith(the_string):
                            return True
                # Short values render as <input>, long ones as <textarea>;
                # fixed/not-editable values are rendered read-only.
                if len(make_input(rng_val)) < 45:
                    if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):
                        stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\" readonly>".format(
                            make_input(rng_val), make_input_name(rng_val))
                    else:
                        stuff += "<input class='selectable' value=\"{}\" style='width:87%' name=\"{}\">".format(
                            "", make_input_name(rng_val))
                else:
                    if val_starts_with(rng_val[0], ['attribute:value:', 'not editable']):
                        stuff += "<textarea class='selectable' rows='8' cols='120' readonly name=\"{1}\">{0}</textarea>".format(
                            make_input(rng_val), make_input_name(rng_val))
                    else:
                        # NOTE(review): this editable <textarea> has no name
                        # attribute, so its content would not be submitted — confirm.
                        stuff += "<textarea class='selectable' rows='8' cols='120'>{0}</textarea>".format(
                            "")
                stuff += "</div>"
            stuff+="</div>"
        stuff += "</div>"
        return stuff
    return walk_dict(results)
def make_tree(dot_separated_keys):
    """Generates a dict of dicts from dot separated keys.
    Yet without associated values.
    For instance :
    {'a.b': 1, 'a.c': 2, 'b.d' : 1, 'b.e.a': 1, 'b.e.b': 2}
    would give you :
    {'a': {'c': {}, 'b': {}}, 'b': {'d': {}, 'e': {'b': {}, 'a': {}}}}
    """
    tree = {}
    for dotted in dot_separated_keys:
        # Walk/create one level per path segment, starting from the root.
        node = tree
        for segment in dotted.split('.'):
            node = node.setdefault(segment, {})
    return tree
def set_in_nested_dict(empty, full):
    """Write the values of a flat dot-keyed dict into a nested dict skeleton.

    For each ``'a.b.c': value`` entry of ``full``, walks ``empty`` down to the
    parent of the leaf.  If the leaf is still an empty container and the value
    is a string, the leaf is replaced by the value; otherwise the value is
    stored under the leaf's ``'value'`` key.

    :param empty: nested dict skeleton (e.g. from ``make_tree``), mutated in place
    :param full: flat dict with dot-separated keys
    :returns: ``True``
    """
    def _descend(tree, keys):
        # Follow ``keys`` down the nested dict and return the node reached.
        return reduce(lambda node, key: node[key], keys, tree)

    for dotted_key, value in full.items():
        parts = dotted_key.split('.')
        parent = _descend(empty, parts[:-1])
        if isinstance(parent, dict):
            leaf = parts[-1]
            if len(parent[leaf]) == 0 and isinstance(value, str):
                parent[leaf] = value
            else:
                parent[leaf]['value'] = value
    return True
def order_dicts(stuff, ordered=None, context=None):
    """Reorder the nested dict ``stuff`` into OrderedDicts following ``ordered``.

    ``ordered`` maps group names to the desired key order; ``context`` (used on
    recursive calls) narrows which entry of ``ordered`` applies.  Returns a new
    OrderedDict for the top level and recursively reorders nested dicts.

    NOTE(review): this function relies on heavy name shadowing (``k`` is reused
    by the sort lambda and inner helpers) and mutates the loop variable ``k``
    below — behavior is order-sensitive; handle with care.
    """
    new_d = OrderedDict()
    for k, v in stuff.items():
        if isinstance(v, dict):
            new_d[k] = OrderedDict()
            top_level_order = []
            right = ""
            # Pick the ordering list: directly by substring match on ``k``,
            # or via the matching ``context`` entry on recursive calls.
            if not context:
                top_level_order = [value for key, value in ordered.items() if k in key][0]
            else:
                right = [item for item in context if k in item][0]
                top_level_order = [value for key, value in ordered.items() if right in key][0]
            # Map each ordered key to its rank for sorting below.
            weights = {e: i for i, e in enumerate(top_level_order) if e}
            def cond(sep, x, k):
                return x+sep in k
            final_cond = None
            # NOTE(review): ``k`` comes from stuff.items(), so ``k not in
            # stuff.keys()`` is always False and the "" branch looks dead — confirm.
            if k not in stuff.keys():
                final_cond = partial(cond, "")
            else:
                final_cond = partial(cond, "_")
            # Sort the child keys by their weight; fall back to an exact
            # weights[x] lookup when no suffixed match exists.
            top_level_key_list = sorted(stuff[k],key=lambda x : [v for k,v in weights.items() if final_cond(x,k)][0] if\
                         len([ v for k,v in weights.items() if\
                             final_cond(x,k)]) else\
                         weights[x])
            for key in top_level_key_list:
                # NOTE(review): reassigning the loop variable ``k`` here redirects
                # all writes below to ``new_d[right]`` — intentional? confirm.
                if right and right in stuff.keys():
                    k = right
                def is_a_dict(an_inside_d, an_order, k):
                    # Recursively rebuild nested dicts in the order given by
                    # ``ordered[k]``; unknown keys are printed and dropped.
                    if isinstance(an_inside_d, dict):
                        order = ordered[k]
                        inside_d = OrderedDict()
                        for item in order:
                            if item in an_inside_d.keys():
                                inside_d[item]= is_a_dict(an_inside_d[item],order,item)
                            else:
                                print(item)
                        return inside_d
                    else:
                        return an_inside_d
                new_d[k][key] = is_a_dict(stuff[k][key],
                                          ordered[k],
                                          key)
            # Recurse into nested dicts, resolving a suffixed ordering key
            # (e.g. ``clef_...``) when one exists in ``ordered``.
            for clef in list(v.keys()):
                if isinstance(v[clef], dict):
                    with_suffix = [k for k in weights.keys() if clef+'_' in k]
                    clef2 = clef
                    if "_" not in clef and len(with_suffix) and with_suffix[0] in list(ordered.keys()) :
                        clef2 = with_suffix[0]
                    order_dicts(v[clef],
                                ordered=ordered,
                                context=ordered[clef2])
    return new_d
def dict_path(my_dict, path=None):
    """Flattens nested dicts into a single level dict.
    For instance :
    {'a': {'b': 1, 'c': 2}, 'b': {'d': 1, 'e': {'a': 1, 'b': 2}}}
    would give you :
    {'a.b': 1, 'a.c': 2, 'b.d' : 1, 'b.e.a': 1, 'b.e.b': 2}
    """
    prefix = "" if path is None else path
    for key, value in my_dict.items():
        # Dotted path of this entry; no leading dot at the top level.
        full_key = key if prefix == "" else "{}.{}".format(prefix, key)
        if isinstance(value, dict):
            yield from dict_path(value, full_key)
        else:
            yield full_key, value
from __future__ import unicode_literals, print_function
import sys
from os import path
import os
import glob
import itertools
import operator
import shutil
import pkg_resources
from invoke import ctask as task
import six
from six.moves import shlex_quote, reduce
import wheel.pep425tags
# Project root (the directory containing this tasks.py), relative to the CWD.
ROOT = path.relpath(path.dirname(__file__))
@task
def clean_build(ctx):
    """Delete the ./build/ directory left over from previous builds."""
    build_dir = path.join(ROOT, "build")
    shutil.rmtree(build_dir)
@task
def clean_dist(ctx):
    """Delete the ./dist/ directory containing built distributions."""
    dist_dir = path.join(ROOT, "dist")
    shutil.rmtree(dist_dir)
def _pytest_args():
    """Return the argument list passed to pytest.

    On Python 3 the package's doctests are run as well; they are PY3-only
    because of PY2/PY3 output-formatting differences, and serve as
    supplemental examples rather than the primary tests.
    """
    base = ["--pyargs", "rnginline"]
    if not six.PY3:
        return base
    return ["--doctest-modules"] + base
@task
def test(ctx, combine_coverage=False):
    """Run rnginline test suite"""
    # --parallel-mode keeps per-run coverage files so runs can be combined later.
    extra = ["--parallel-mode"] if combine_coverage is True else []
    command = ["coverage", "run"] + extra + ["-m", "pytest"] + _pytest_args()
    ctx.run(cmd(command))
    if not combine_coverage:
        _report_coverage(ctx)
def _report_coverage(ctx):
    # Print a terminal coverage summary for the most recent test run.
    ctx.run("coverage report")
@task
def coverage(ctx):
    """
    Combine coverage of Python 2 and Python 3 test runs
    This captures Python 2/3 specific code branches in coverage results.
    """
    print("Combining coverage of Python 2 and 3 test runs:")
    print("===============================================")
    # Erase stale data, run both interpreters via tox, then merge and render.
    for command in ("coverage erase",
                    "tox -e py27,py34 -- --combine-coverage",
                    "coverage combine",
                    "coverage html"):
        ctx.run(command)
    print()
    print("Combined coverage of Python 2 and 3 test runs:")
    print("==============================================")
    print()
    _report_coverage(ctx)
@task
def pep8(ctx):
    """Lint code for PEP 8 violations"""
    # Print the linter version first so CI logs record which rules applied.
    for command in ("flake8 --version",
                    "flake8 setup.py tasks.py rnginline"):
        ctx.run(command)
@task
def readme(ctx):
    """Lint the README for reStructuredText syntax issues"""
    lint_command = "restructuredtext-lint README.rst"
    ctx.run(lint_command)
@task
def docs_test(ctx, cache_dir=None, out_dir=None):
    """Run the doctests embedded in the Sphinx docs (Python 3 only)."""
    if not six.PY3:
        msg = """\
error: Tried to run doc's doctests with Python 2. They must be run with
Python 3 due to the doctest module not handling formatting differences
between 2 and 3."""
        raise RuntimeError(msg)
    # Run the doctest builder first, then a full HTML build, both strict.
    for builder in ("doctest", "html"):
        docs(ctx, builder=builder, cache_dir=cache_dir, out_dir=out_dir,
             warnings_are_errors=True)
@task
def docs(ctx, builder="html", cache_dir=None, out_dir=None,
         warnings_are_errors=False):
    """Build sphinx documentation"""
    opts = []
    if cache_dir is not None:
        opts.extend(["-d", cache_dir])
    if warnings_are_errors is True:
        opts.append("-W")
    if out_dir is None:
        out_dir = path.join(ROOT, "docs/_build/")
    ctx.run(cmd(["sphinx-build", "-b", builder] + opts +
                [path.join(ROOT, "docs/"), out_dir]))
@task(clean_build, clean_dist)
def build_dists(ctx):
    """Build distribution packages"""
    # Build both the source distribution and the wheel.
    for build_command in ("python setup.py sdist",
                          "python setup.py bdist_wheel"):
        ctx.run(build_command, pty=True)
@task
def test_dists(ctx):
    """Test the build distributions from ./dist/ in isolation"""
    # NOTE(review): tox-dist.ini presumably installs the built artifacts into
    # clean environments instead of the working tree — confirm against the ini.
    ctx.run("tox -c tox-dist.ini", pty=True)
@task
def test_dist(ctx, dist_type):
    """Test a built distribution"""
    dist_file = get_distribution(dist_type)
    # Force-install the freshly built artifact over any existing install.
    install_command = cmd("pip", "install", "--ignore-installed", dist_file)
    ctx.run(install_command, pty=True)
    ctx.run(cmd(["py.test"] + _pytest_args()), pty=True)
def get_distribution(type):
    """Return the path of the single built distribution of the given type.

    :param type: either ``"sdist"`` or ``"wheel"``
    :returns: path of the matching file under ./dist/
    :raises ValueError: if ``type`` is unknown, or ./dist/ does not contain
        exactly one matching file
    """
    # NOTE: the parameter shadows the builtin ``type``; the name is kept for
    # backward compatibility with existing callers.
    type_glob = {
        "sdist": "rnginline-*.tar.gz",
        "wheel": "rnginline-*.whl"
    }.get(type)
    if type_glob is None:
        raise ValueError("Unknown distribution type: {0}".format(type))
    pattern = path.join(ROOT, "dist", type_glob)
    dists = glob.glob(pattern)
    if len(dists) != 1:
        # Fixed wording: previously read "Expected one find one distribution".
        raise ValueError("Expected to find one distribution matching: {0!r} "
                         "but got: {1}".format(pattern, len(dists)))
    return dists[0]
@task
def cache_all_requirement_wheels(ctx):
    # Populate the wheel cache via tox (see tox-wheelcache.ini); presumably
    # one cache_requirement_wheels run per interpreter — confirm in the ini.
    ctx.run("tox -c tox-wheelcache.ini", pty=True)
def get_platform_tag():
    """Return the PEP 425 python tag (implementation abbreviation + version,
    e.g. ``cp27``) for the interpreter running this task."""
    implementation = wheel.pep425tags.get_abbr_impl()
    version = wheel.pep425tags.get_impl_ver()
    return implementation + version
@task
def cache_requirement_wheels(ctx):
    """Ensure ./wheelhouse/ contains a wheel for every entry of
    requirements/all.txt, building any that are missing."""
    wheelhouse = path.join(ROOT, "wheelhouse")
    all_reqs = path.join(ROOT, "requirements", "all.txt")
    with open(all_reqs) as f:
        reqs = list(pkg_resources.parse_requirements(f.read()))
    print("Checking if wheel cache is populated...")
    absent_reqs = []
    for req in reqs:
        print("checking {0} ... ".format(req), end="")
        sys.stdout.flush()
        # Probe the cache: a --no-index install restricted to the wheelhouse
        # succeeds only if the requirement is already cached there.
        is_cached_cmd = cmd(
            "pip", "install", "--download", "/tmp/", "--use-wheel",
            "--no-index", "--find-links", wheelhouse, str(req))
        result = ctx.run(is_cached_cmd, warn=True, hide="both")
        if result.ok:
            print("present")
        else:
            print("ABSENT")
            absent_reqs.append(req)
    if absent_reqs:
        print()
        print("Wheel cache is not complete, populating...")
        # Build wheels for all our dependencies, storing them in the wheelhouse
        # dir
        ctx.run(cmd([
            "pip", "wheel",
            # Make built wheels specific to interpreter running this.
            # Required because non-wheel packages like pytest are not
            # necessarily universal. e.g. pytest for python 2.6 requires
            # argparse, but 2.7, 3.3, 3.4 don't.
            "--build-option", "--python-tag=" + get_platform_tag(),
            "--wheel-dir", wheelhouse] +
            list(map(six.text_type, absent_reqs))))
        print()
        print("Done")
@task
def gen_requirements_all(ctx, write=False):
    """Regenerate requirements/all.txt from the other requirements files.

    Collects every requirement from requirements/*.txt (except all.txt
    itself), de-duplicates them, and either prints the result (default) or,
    with ``write=True``, rewrites requirements/all.txt.
    """
    req_dir = path.join(ROOT, "requirements")
    files = (path.join(req_dir, f)
             for f in os.listdir(req_dir)
             if f != "all.txt")
    all_requirements = reduce(operator.add, map(load_requirements, files), [])
    unique_requirements = merge_requirements(all_requirements)
    # Renamed from ``all``, which shadowed the builtin of the same name.
    combined = "\n".join(six.text_type(req) for req in unique_requirements)
    if write is False:
        print(combined)
    else:
        with open(path.join(req_dir, "all.txt"), "w") as f:
            f.write("# Auto generated by $ inv gen_requirements_all\n")
            f.write(combined)
            f.write("\n")
def load_requirements(file):
    """Parse a pip requirements file into a list of pkg_resources Requirement objects."""
    with open(file) as f:
        return list(pkg_resources.parse_requirements(f.read()))
def merge_dupes(reqs):
    """Collapse duplicate requirements for a single package into one.

    :param reqs: requirements that all refer to the same package
    :returns: the single unique requirement
    :raises ValueError: if ``reqs`` is empty, or contains conflicting
        versions/extras for the package
    """
    merged = set(reqs)
    # Explicit check instead of ``assert``, which is stripped under -O.
    if not merged:
        raise ValueError("merge_dupes() called with no requirements")
    if len(merged) > 1:
        raise ValueError(
            "Duplicate requirement for {} with differing version/extras: {}"
            .format(next(iter(merged)).key, merged))
    return next(iter(merged))
def merge_requirements(reqs):
    """De-duplicate requirements, merging entries that share a package key."""
    by_key = operator.attrgetter("key")
    # groupby requires its input to be sorted on the grouping key.
    ordered = sorted(reqs, key=by_key)
    return {merge_dupes(dupes)
            for _, dupes in itertools.groupby(ordered, key=by_key)}
def cmd(*args):
    r"""
    Create a shell command string from a list of arguments.
    >>> print(cmd("a", "b", "c"))
    a b c
    >>> print(cmd(["ls", "-l", "some dir"]))
    ls -l 'some dir'
    >>> print(cmd(["echo", "I'm a \"string\"."]))
    echo 'I'"'"'m a "string".'
    """
    # A single non-string argument is treated as an iterable of arguments.
    if len(args) == 1 and not isinstance(args[0], six.string_types):
        return cmd(*args[0])
    quoted = [shlex_quote(arg) for arg in args]
    return " ".join(quoted)
# rnmd - markdown execution runtime
RNMD is a markdown execution runtime which can be used to run code contained inside of a markdown file.
The vision behind it is to have your documentation and code together in one place and not maintain
2 versions of the same code (1 in .md and 1 script file).
Especially useful when automatizing things under linux as it is easy to forget what some scripts were for,
if they do not contain documentation and maintaining the script and a good looking documentation would be too much effort.
(Especially with the very stripped down syntax of many command line programs this can become a problem)
In that regard **rnmd** also has installation features through which to manage these scripts make them optionally executable/runnable
from anywhere on the system. (mainly for automatization)
It also adds features to easily transport your scripts and documentation to different machines.
Just make your markdown notebook a git repository and pull your commands to any machine.
Or just use **rnmd** to run code of markdown files from an online url. (Easily share your code)
Currently **supported languages** are:
- bash script
**TODOS:**
- Add specific block execution (--> makes it possible to run the samples in this readme)
- Support more languages (maybe add possibility to specify run command in markdown)
- Resolve module imports (To write whole programs using rnmd)
- Resolve paths (if they are, for instance, relative to the .md script)
- Improve argparse option handling
- Namespaces and modules (prevent name conflicts by grouping documents and their backups)
- Multi Notebook management
- Windows support (the proxies are currently bash shell scripts and therefore not portable -> switch to python3)
## Installation
Can be easily installed from pypi with pip3.
```bash
pip3 install rnmd
```
## Running from source
You can also alway clone the repo from [GitHub](https://github.com/MarkusPeitl/rnmd) and run rnmd with python.
```bash
python3 rnmd.py notebook/test.md
```
## Running code contained in a markdown file
Execute a markdown document using the rnmd markdown execution runtime.
The document location passed to rnmd can currently be:
1. A file path
2. An url containing a document
```bash
rnmd notebook/test.md
```
## Passing arguments to the runtime
```bash
rnmd notebook/test.md --args arg1 arg2 arg3
```
Note: If passing arguments to an installed proxy then the **--args** flag is not required.
## Using rnmd to make a proxy linking to the specified document
Proxies are intermediate bash scripts that link to the document to be run.
(They also contain a shebang so you do not need to specify "bash" to run the script)
By executing a proxy we are using rnmd to execute our linked document without having to write the command ourselves.
```bash
#Make Proxy
rnmd notebook/test.md --proxy proxy/test
#Run Proxy
proxy/test
```
## Setting up rnmd for installation of proxies
You can also use rnmd to install proxies to your system.
To use the install feature of rnmd you need to run the setup process once before.
During this process you have to specify a location (your **notebook**) where the proxies and document backups are installed to.
After this you are asked if you want to add this location to your path (using your shell configuration) making your installed proxies
executable from anywhere on your system by its name.
```bash
rnmd --setup
```
## Installing proxies
Install a proxy to your document on your system and make the command available from you path.
(Requires **rnmd --setup** to have been run)
Also moves a backup copy of your document into your notebook, which can be executed if the main linked document is not found.
```bash
#Make and install Proxy
rnmd notebook/test.md --install test-proxy-install
#Execute (if in path)
test-proxy-install
```
Note: Installing works for .sh scripts as well, so you can easily install them to your system.
## Proxies
Proxies are currently bash scripts with a shebang for easy execution of a linked document using rnmd.
However, they include other functions as well:
1. An included installer:
If **rnmd** is not yet installed the script asks the user if he wants to install it on the machine.
If yes was selected **rnmd** is installed using **pip3**
Note: python3 and pip3 are requirements of rnmd.
2. Running the backed up document instead, if the linked document could not be found (installed proxy only)
3. Refreshing the document backup, from the linked doc
4. Running the linked document using **rnmd**
## Making portable installs
If you want to transport your notebook to another machine you might want to perform a portable install of your documents instead.
By doing this the document you are installing is moved to your notebook and the location inside of your notebook is linked by the proxy instead.
The advantage of this is that you for instance can move you notebook around and to a different machine and the commands will all still work
as the documents stay inside of the notebook. (for example if you make your notebook a git repo)
```bash
#Make and install Proxy
rnmd notebook/test.md --portableinstall test-portable-proxy-install
#Execute (if in path)
test-portable-proxy-install
```
## List installed commands of your notebook
```bash
rnmd --list
rnmd -l
```
## Remove/uninstall a command of your notebook
```bash
rnmd --remove test-portable-proxy-install
```
## Print the code contained inside of a document
```bash
rnmd --extract notebook/test.md
```
## Compile markdown to target location
```bash
rnmd notebook/test.md --compile compiled/test
```
## Create backups
Create a backup of the specified document in the backup dir of your notebook directory.
```bash
rnmd --backup notebook/test.md
```
## Create backups at location
Create a backup of the specified document in the backup dir of your notebook directory.
"backupto" path can either be a file path or a directory into which to move the source document.
Also useful for downloading documents to the local machine.
```bash
rnmd notebook/test.md --backupto target/test.md
```
## Check if the specified document exists
```bash
rnmd --check notebook/test.md
```
## Licence notes
The choice for using LGPL 2.1 is strategic so if i may stop developing the runtime
it will still receive bugfixes/improvements from entities using this software in their programs.
As you could build whole programs based on the rn_md runtime (markdown -> script) interpreter
the GPL licence is not the way to go as it probably would make those programs GPL as well,
which in turn hurts adoption of this project as it would pretty much restrict its
usage to GPL programs only.
Because of these reasons the LGPL2.1 Licence was chosen.
### If you like the project consider dropping me a coffee
[](https://www.paypal.com/donate?hosted_button_id=BSFX8LCPHW2AE)
<br>
<br> | /rnmd-1.1.0.tar.gz/rnmd-1.1.0/README.md | 0.478773 | 0.813387 | README.md | pypi |
## Training continuous time Recurrent Neural Networks (RNNs) on various behavioral tasks
This project aims to establish a pipeline for seamlessly defining the behavioral task and training RNNs on it using
backpropagation (BPTT) on *PyTorch*.
It also implements a useful class for the post-training task-performance analysis, as well as a class for analysis of
the RNN dynamics: computing its dynamics fixed-points structure for a given input.
### Some examples:
<p align="center">
<img src="https://github.com/engellab/RNN_training_pipeline/blob/main/img/fixed%20points%203BitFlipFlop%20task.gif?raw=true"/>
</p>
<center>
*Fixed point structure revealed after training an RNN to perform a 3 bit flip-flop task*
</center>
__________________________________
<p align="center">
<img src="https://github.com/engellab/RNN_training_pipeline/blob/main/img/random_trials_MemoryAnti_task.png" width="500">
</p>
<center>
*Random trials after training the RNN on 2 bit flip-flop task*
</center>
__________________________________
<p align="center">
<img src="https://github.com/engellab/RNN_training_pipeline/blob/main/img/FixedPoints_MemoryAntiNumber.png" width="500">
</p>
<center>
*Fixed point structure for MemoryAntiNumber task:
The first line attractor (blue-red points, appearing for the input during the stimulus-presentation stage) lies in the
nullspace of the W_out. The second line-attractor (violet-tomato points, appearing for the input provided on the recall
stage) has some projection on the output axes*
</center>
__________________________________
<p align="center">
<img src="https://github.com/engellab/RNN_training_pipeline/blob/main/img/fixed%20points%20MemoryAnti%20task.gif?raw=true"/>
</p>
<center>
*Fixed point structure in the MemoryAntiAngle task:
same as for the line attractors in MemoryAntiNumber task, but instead of the line attractors, the networks forms ring
attractors.*
</center>
__________________________________
### Continuous-time RNN description
The dynamics for RNN are captured by the following equations:
<img src="https://latex.codecogs.com/svg.image?\begin{align*}\tau&space;\mathbf{\dot{x}}&space;&=&space;-\mathbf{x}&space;+&space;[W_{rec}\mathbf{x}&space;+&space;W_{inp}&space;(\mathbf{u}&space;+&space;\xi_{inp})&space;+&space;\mathbf{b}_{rec}&space;+&space;\xi_{rec}]_+&space;\\\text{out}&space;&=&space;W_{out}&space;\mathbf{x}&space;\end{align*}&space;" title="https://latex.codecogs.com/svg.image?\begin{align*}\tau \mathbf{\dot{x}} &= -\mathbf{x} + [W_{rec}\mathbf{x} + W_{inp} (\mathbf{u} + \xi_{inp}) + \mathbf{b}_{rec} + \xi_{rec}]_+ \\\text{out} &= W_{out} \mathbf{x} \end{align*} " />
Where "\tau" is the time constant, "x" is the state vector of the RNN, "u" is an input vector, "W rec" is the recurrent
connectivity of the RNN, "W inp" - matrix of input connectivities distributing input vector "u" to the neural nodes, "b
rec" is a bias in the recurrent connectivity, "\xi" is some gaussian random noise. The output of the network is provided
by the readout matrix "W out" applied to the neural nodes.
There are two classes implementing RNN dynamics:
- **RNN_pytorch** -- used for training the network on the task
- **RNN_numpy** -- used for performance analysis, fixed point analysis, and easy plotting.
### Task definition
Each task has its own class specifying the structure of (input, target output) of the behavior. It should contain two
main methods:
- `generate_input_target_stream(**kwargs)` -- generates a single (input, target output, condition) tuple with specified
parameters
- `get_batch(**kwargs)` -- generates a batch of inputs, targets and conditions. The batch dimension is the last.
The implemented example tasks are:
- Context-Dependent Decision Making
- Delayed Match to Sample
- 3 Bit Flip-Flop
- MemoryAntiNumber
- MemoryAntiAngle
Descriptions of these tasks are provided in the comments in the relevant task classes.
One can easily define their own task following the provided general template.
### Training
During the training, the connectivity matrices W_rec, W_inp, W_out are iteratively modified to minimize a loss function:
the lower the loss function, the better the network performs the task.
The training loop is implemented in the **Trainer** class, which accepts the task and the RNN_pytorch instance. Trainer
implements three main methods:
- `train_step(input_batch, target_batch)` -- returns the loss-value for a given batch, (linked to the computational
graph to compute the gradient w.r.t connectivity weights) as well as the vector of losses on each individual trial
- `eval_step(input_batch, target_batch)` -- returns the loss value for a given batch, detached from the gradient.
- `run_training(**kwargs)` -- implements an iterative update of connectivity parameters, minimizing a loss function
### Performance Analysis
The class **PerformanceAnalyzer** accepts the RNN_numpy instance and a Task instance and has two main methods:
- `get_validation_score(scoring_function, input_batch, target_batch, **kwargs)` -- runs the network with the specified
inputs and calculates the mean loss between the predicted and target outputs using the specified scoring function.
- `plot_trials(input_batch, target_batch, **kwargs)` -- generates a figure plotting multiple predicted outputs as a
response to specified inputs, as well as shows target outputs for comparison.
One can extend the base class by defining task-specific PerformanceAnalyzer
(see AnalyzerCDDM as an example)
### Fixed-point Analysis
The fixed-point analysis is implemented in the **DynamicSystemAnalyzer** class and accepts RNN_numpy instance.
The class contains three methods:
- `get_fixed_points(Input_vector, mode, **kwargs)` -- calculates stable and unstable fixed points of the RNN's dynamics
for a given input. It searches for exact fixed points if *mode = 'exact'* option, using `scipy.fsolve` methods applied
to the right-hand side of the dynamics equations. Alternatively, if *mode = 'approx'* it searches for 'slow points'
-- points where RHS of dynamics is approximately zero. In the latter case, the cut-off threshold for a point is
controlled by the parameter *'fun_tol'*.
- `plot_fixed_points(projection, P)` -- assumes that the fixed points have been calculated with the `get_fixed_points` method
for at most three input vectors. If the projection matrix `P` is not specified, it assembles the fixed points into an
array, performs PCA on them and projects the points onto either the first 2 (projection='2D') or first 3 (
projection='3D') PCs, returning the figure with the projected fixed points.
- `compute_point_analytics(point, Input_vector, **kwargs)` -- at a given point in the state-space, and given input to
the RNN, calculate statistics of the point:
value of the |RHS|^2, Jacobian, eigenvalues, and the principle left and right eigenvectors.
### Saving the data
When initialized, DataSaver creates a dedicated data_folder and stores its address as a 'data_folder' parameter. It has
two methods:
- `save_data(data, filename)` -- saves either a pickle or JSON file containing the data into the 'data_folder'
- `save_figure(figure, filename)` -- saves a figure as a png-file into the 'data_folder'
Integration with DataJoint is coming
| /rnn_coach-0.1.tar.gz/rnn_coach-0.1/README.md | 0.954879 | 0.933975 | README.md | pypi |
import sys
sys.path.insert(0, "../")
from copy import deepcopy
import torch
import numpy as np
from numpy import linalg
# Connectivity defining methods
def sparse(tensor, sparsity, mean=0, std=1, generator=None):
    r"""Fill a 2D tensor in place as a sparse matrix.

    All entries are first drawn from a normal distribution with the given
    ``mean`` and ``std`` (cf. "Deep learning via Hessian-free optimization",
    Martens 2010); then, in each column, a random subset of
    ``ceil(sparsity * rows)`` entries is zeroed out.

    Args:
        tensor: a 2D ``torch.Tensor``, modified in place
        sparsity: fraction of elements in each column to be set to zero
        mean: mean of the normal distribution for the non-zero values
        std: standard deviation of the normal distribution
        generator: optional ``torch.Generator`` for reproducibility

    Returns:
        the same tensor, for chaining
    """
    if tensor.ndimension() != 2:
        raise ValueError("Only tensors with 2 dimensions are supported")
    n_rows, n_cols = tensor.shape
    zeros_per_col = int(np.ceil(sparsity * n_rows))
    with torch.no_grad():
        # Draw all entries first, then knock out a random subset per column.
        # The order of generator calls matches the original implementation,
        # so seeded results are unchanged.
        tensor.normal_(mean, std, generator=generator)
        for col in range(n_cols):
            perm = torch.randperm(n_rows, generator=generator)
            tensor[perm[:zeros_per_col], col] = 0
    return tensor
def get_connectivity(device, N, num_inputs, num_outputs, radius=1.5, recurrent_density=1, input_density=1,
                     output_density=1, generator=None):
    '''
    Generate the W_inp, W_rec and W_out matrices of an RNN with the specified parameters.

    :param device: torch device to place the tensors on (CPU or GPU)
    :param N: number of neural nodes
    :param num_inputs: number of input channels (input dimension)
    :param num_outputs: number of output channels (output dimension)
    :param radius: spectral radius of the generated connectivity matrix; controls the
        maximal absolute value of the eigenvalues. Larger values yield more sustained
        and chaotic activity, smaller values make the network relax back to zero faster.
    :param recurrent_density: opposite of sparsity of the recurrent matrix;
        1.0 - fully connected recurrent matrix
    :param input_density: 1.0 - fully connected input matrix, 0 - maximally sparse matrix
    :param output_density: 1.0 - fully connected output matrix, 0 - maximally sparse matrix
    :param generator: torch random generator, for reproducibility
    :return: tuple (W_rec, W_inp, W_out, recurrent_mask, output_mask, input_mask),
        all float tensors on ``device``

    NOTE(review): the outputs depend on the exact order in which ``sparse``
    consumes the generator; do not reorder the calls below.
    '''
    # Balancing parameters
    mu = 0
    mu_pos = 1 / np.sqrt(N)
    var = 1 / N
    # NOTE(review): ``var`` (=1/N) is passed as the *std* argument of ``sparse``;
    # if a variance of 1/N was intended, the std should be 1/sqrt(N) — confirm.
    recurrent_sparsity = 1 - recurrent_density
    W_rec = sparse(torch.empty(N, N), recurrent_sparsity, mu, var, generator)
    # spectral radius adjustment
    # Zero the diagonal (no self-connections) before computing the spectrum.
    W_rec = W_rec - torch.diag(torch.diag(W_rec))
    w, v = linalg.eig(W_rec)
    spec_radius = np.max(np.absolute(w))
    W_rec = radius * W_rec / spec_radius
    W_inp = torch.zeros([N, num_inputs]).float()
    input_sparsity = 1 - input_density
    W_inp = sparse(W_inp, input_sparsity, mu_pos, var, generator)
    output_sparsity = 1 - output_density
    W_out = sparse(torch.empty(num_outputs, N), output_sparsity, mu_pos, var, generator)
    # Masks record which entries are trainable/non-zero; the recurrent mask
    # excludes the diagonal (self-connections stay zero).
    output_mask = (W_out != 0).to(device=device).float()
    input_mask = (W_inp != 0).to(device=device).float()
    recurrent_mask = torch.ones(N, N) - torch.eye(N)
    return W_rec.to(device=device).float(), \
           W_inp.to(device=device).float(), \
           W_out.to(device=device).float(), \
           recurrent_mask.to(device=device).float(), \
           output_mask.to(device=device).float(), \
           input_mask.to(device=device).float()
def get_connectivity_Dale(device, N, num_inputs, num_outputs, radius=1.5, recurrent_density=1, input_density=1,
                          output_density=1, generator=None):
    '''
    Generate the W_inp, W_rec and W_out matrices of an RNN, subject to Dale's law,
    with roughly a 20:80 ratio of inhibitory to excitatory neurons.
    Follows "Training Excitatory-Inhibitory Recurrent Neural Networks for Cognitive
    Tasks: A Simple and Flexible Framework" - Song et al. (2016).

    :param device: torch device to place the tensors on (CPU or GPU)
    :param N: number of neural nodes
    :param num_inputs: number of input channels (input dimension)
    :param num_outputs: number of output channels (output dimension)
    :param radius: spectral radius of the generated connectivity matrix; controls the
        maximal absolute value of the eigenvalues. Larger values yield more sustained
        and chaotic activity, smaller values make the network relax back to zero faster.
    :param recurrent_density: opposite of sparsity of the recurrent matrix;
        1.0 - fully connected recurrent matrix
    :param input_density: 1.0 - fully connected input matrix, 0 - maximally sparse matrix
    :param output_density: 1.0 - fully connected output matrix, 0 - maximally sparse matrix
    :param generator: torch random generator, for reproducibility
    :return: tuple (W_rec, W_inp, W_out, recurrent_mask, dale_mask, output_mask, input_mask)

    NOTE(review): the outputs depend on the exact order in which ``sparse``
    consumes the generator; do not reorder the calls below.
    '''
    # 80% excitatory, 20% inhibitory nodes.
    Ne = int(N * 0.8)
    Ni = int(N * 0.2)
    # Initialize W_rec
    W_rec = torch.empty([0, N])
    # Balancing parameters
    mu_E = 1 / np.sqrt(N)
    mu_I = 4 / np.sqrt(N)
    var = 1 / N
    # generating excitatory part of connectivity and an inhibitory part of connectivity:
    # Excitatory columns are forced positive (abs), inhibitory columns negative (-abs).
    rowE = torch.empty([Ne, 0])
    rowI = torch.empty([Ni, 0])
    recurrent_sparsity = 1 - recurrent_density
    rowE = torch.cat((rowE, torch.abs(sparse(torch.empty(Ne, Ne), recurrent_sparsity, mu_E, var, generator))), 1)
    rowE = torch.cat((rowE, -torch.abs(sparse(torch.empty(Ne, Ni), recurrent_sparsity, mu_I, var, generator))), 1)
    rowI = torch.cat((rowI, torch.abs(sparse(torch.empty(Ni, Ne), recurrent_sparsity, mu_E, var, generator))), 1)
    rowI = torch.cat((rowI, -torch.abs(sparse(torch.empty(Ni, Ni), recurrent_sparsity, mu_I, var, generator))), 1)
    W_rec = torch.cat((W_rec, rowE), 0)
    W_rec = torch.cat((W_rec, rowI), 0)
    # spectral radius adjustment
    # Zero the diagonal (no self-connections) before computing the spectrum.
    W_rec = W_rec - torch.diag(torch.diag(W_rec))
    w, v = linalg.eig(W_rec)
    spec_radius = np.max(np.absolute(w))
    W_rec = radius * W_rec / spec_radius
    W_inp = torch.zeros([N, num_inputs]).float()
    input_sparsity = 1 - input_density
    W_inp = torch.abs(sparse(W_inp, input_sparsity, mu_E, var, generator))
    W_out = torch.zeros([num_outputs, N])
    output_sparsity = 1 - output_density
    W_out = torch.abs(sparse(W_out, output_sparsity, mu_E, var, generator))
    # dale_mask stores the sign of each recurrent weight so training can
    # keep excitatory/inhibitory identities fixed.
    dale_mask = torch.sign(W_rec).to(device=device).float()
    output_mask = (W_out != 0).to(device=device).float()
    input_mask = (W_inp != 0).to(device=device).float()
    recurrent_mask = torch.ones(N, N) - torch.eye(N)
    return W_rec.to(device=device).float(), W_inp.to(device=device).float(), W_out.to(
        device=device).float(), recurrent_mask.to(device=device).float(), dale_mask, output_mask, input_mask
'''
Continuous-time RNN class implemented in pytorch to train with BPTT
'''
class RNN_torch(torch.nn.Module):
    '''
    Continuous-time RNN implemented in pytorch, trainable with BPTT.

    The dynamics are an Euler discretization of a leaky firing-rate network:
    y_{t+1} = (1 - alpha) * y_t + alpha * (phi(W_rec y_t + W_inp u_t) + noise),
    with alpha = dt / tau (see `forward`).
    '''
    def __init__(self,
                 N,
                 activation,
                 dt=1,
                 tau=10,
                 constrained=True,
                 connectivity_density_rec=1.0,
                 spectral_rad=1.2,
                 sigma_rec=.03,
                 sigma_inp=.03,
                 bias_rec=None,
                 y_init=None,
                 random_generator=None,
                 input_size=6,
                 output_size=2,
                 device=None):
        '''
        :param N: int, number of neural nodes in the RNN
        :param activation: torch function, activation function in the dynamics of the RNN
        :param dt: float, time resolution of RNN
        :param tau: float, internal time constant of the RNN-neural nodes
        :param constrained: whether the connectivity is constrained to comply with Dales law and elements of W_inp, W_out > 0
        :param connectivity_density_rec: float, defines the sparcity of the connectivity
        :param spectral_rad: float, spectral radius of the initial connectivity matrix W_rec
        :param sigma_rec: float, std of the gaussian noise in the recurrent dynamics
        :param sigma_inp: float, std of the gaussian noise in the input to the RNN
        :param bias_rec: array of N values, (inhibition/excitation of neural nodes from outside of the network)
        :param y_init: array of N values, initial value of the RNN dynamics
        :param random_generator: torch random generator, for reproducibility
        :param input_size: number of the input channels of the RNN
        :param output_size: number of the output channels of the RNN
        :param device: torch device string ('cpu', 'cuda', 'mps'); auto-detected when None
        '''
        super(RNN_torch, self).__init__()
        self.N = N
        self.activation = activation
        self.tau = tau
        self.dt = dt
        # Euler integration factor used in `forward`
        self.alpha = (dt / tau)
        self.sigma_rec = torch.tensor(sigma_rec)
        self.sigma_inp = torch.tensor(sigma_inp)
        self.input_size = input_size
        self.output_size = output_size
        self.spectral_rad = spectral_rad
        self.connectivity_density_rec = connectivity_density_rec
        self.constrained = constrained
        # sign mask enforcing Dale's law; only set when constrained=True below
        self.dale_mask = None
        if not (y_init is None):
            self.y_init = y_init
        else:
            self.y_init = torch.zeros(self.N)
        # self.device = torch.device('cpu')
        # Device resolution order: explicit argument > cuda > mps > cpu
        if (device is None):
            if torch.cuda.is_available():
                self.device = torch.device('cuda')
            else:
                self.device = torch.device('mps') if torch.backends.mps.is_available() else torch.device('cpu')
        else:
            self.device = torch.device(device)
        print(f"Using {self.device} for RNN!")
        self.random_generator = random_generator
        self.input_layer = (torch.nn.Linear(self.input_size, self.N, bias=False)).to(self.device)
        # NOTE(review): nn.Linear's `bias` argument is documented as a bool; passing
        # bias_rec (an array of values) here looks suspicious — confirm intended usage.
        self.recurrent_layer = torch.nn.Linear(self.N, self.N, bias=(False if (bias_rec is None) else bias_rec)).to(
            self.device)
        self.output_layer = torch.nn.Linear(self.N, self.output_size, bias=False).to(self.device)
        if self.constrained:
            # imposing a bunch of constraint on the connectivity:
            # positivity of W_inp, W_out,
            # W_rec has to be subject to Dale's law
            W_rec, W_inp, W_out, self.recurrent_mask, self.dale_mask, self.output_mask, self.input_mask = \
                get_connectivity_Dale(device, self.N, num_inputs=self.input_size, num_outputs=self.output_size,
                                      radius=self.spectral_rad, generator=self.random_generator,
                                      recurrent_density=self.connectivity_density_rec)
        else:
            W_rec, W_inp, W_out, self.recurrent_mask, self.output_mask, self.input_mask = \
                get_connectivity(device, self.N, num_inputs=self.input_size, num_outputs=self.output_size,
                                 radius=self.spectral_rad,
                                 generator=self.random_generator,
                                 recurrent_density=self.connectivity_density_rec)
        # overwrite the default Linear initializations with the generated connectivity
        self.output_layer.weight.data = W_out.to(self.device)
        self.input_layer.weight.data = W_inp.to(self.device)
        self.recurrent_layer.weight.data = W_rec.to(self.device)
        if bias_rec is None:
            self.recurrent_layer.bias = None
    def forward(self, u, w_noise=True):
        '''
        forward dynamics of the RNN (full trial)
        :param u: array of input vectors (self.input_size, T_steps, batch_size)
        :param w_noise: bool, pass forward with or without noise
        :return: the full history of the internal variables and the outputs
        '''
        T_steps = u.shape[1]
        batch_size = u.shape[-1]
        states = torch.zeros(self.N, 1, batch_size, device=self.device)
        # every trajectory in the batch starts from the same initial condition y_init
        states[:, 0, :] = deepcopy(self.y_init).reshape(-1, 1).repeat(1, batch_size)
        rec_noise = torch.zeros(self.N, T_steps, batch_size, device=self.device)
        inp_noise = torch.zeros(self.input_size, T_steps, batch_size)
        if w_noise:
            # sqrt(2/alpha) scaling keeps the effective noise variance independent of dt/tau
            rec_noise = torch.sqrt((2 / self.alpha) * self.sigma_rec ** 2) \
                        * torch.randn(*rec_noise.shape, generator=self.random_generator)
            inp_noise = torch.sqrt((2 / self.alpha) * self.sigma_inp ** 2) \
                        * torch.randn(*inp_noise.shape, generator=self.random_generator)
        # passing through layers require batch-first shape!
        # that's why we need to reshape the inputs and states!
        states = torch.swapaxes(states, 0, -1)
        u = torch.swapaxes(u, 0, -1).to(self.device)
        rec_noise = torch.swapaxes(rec_noise, 0, -1).to(self.device)
        inp_noise = torch.swapaxes(inp_noise, 0, -1).to(self.device)
        # Euler integration of the leaky-RNN dynamics, one step per time bin
        for i in range(T_steps - 1):
            state_new = (1 - self.alpha) * states[:, i, :] + \
                        self.alpha * (
                                self.activation(
                                    self.recurrent_layer(states[:, i, :]) +
                                    self.input_layer(u[:, i, :] + inp_noise[:, i, :])) +
                                rec_noise[:, i, :]
                        )
            states = torch.cat((states, state_new.unsqueeze_(1)), 1)
        outputs = torch.swapaxes(self.output_layer(states), 0, -1)
        states = torch.swapaxes(states, 0, -1)
        return states, outputs
    def get_params(self):
        '''
        Save crucial parameters of the RNN as numpy arrays
        :return: parameter dictionary containing connectivity parameters, initial conditions,
         number of nodes, dt and tau
        '''
        param_dict = {}
        W_out = deepcopy(self.output_layer.weight.data.cpu().detach().numpy())
        W_rec = deepcopy(self.recurrent_layer.weight.data.cpu().detach().numpy())
        W_inp = deepcopy(self.input_layer.weight.data.cpu().detach().numpy())
        y_init = deepcopy(self.y_init.detach().cpu().numpy())
        if not (self.recurrent_layer.bias is None):
            bias_rec = deepcopy(self.recurrent_layer.bias.data.cpu().detach().numpy())
        else:
            bias_rec = None
        param_dict["W_out"] = W_out
        param_dict["W_inp"] = W_inp
        param_dict["W_rec"] = W_rec
        param_dict["bias_rec"] = bias_rec
        param_dict["y_init"] = y_init
        param_dict["N"] = self.N
        param_dict["dt"] = self.dt
        param_dict["tau"] = self.tau
        return param_dict
    def set_params(self, params):
        '''
        Load connectivity, bias and initial condition from a dictionary of
        numpy arrays (the format produced by `get_params`).
        :param params: dict with keys "W_out", "W_inp", "W_rec", "bias_rec", "y_init"
        :return: None
        '''
        self.output_layer.weight.data = torch.from_numpy(params["W_out"]).to(self.device)
        self.input_layer.weight.data = torch.from_numpy(params["W_inp"]).to(self.device)
        self.recurrent_layer.weight.data = torch.from_numpy(params["W_rec"]).to(self.device)
        if not (self.recurrent_layer.bias is None):
            self.recurrent_layer.bias.data = torch.from_numpy(params["bias_rec"]).to(self.device)
        self.y_init = torch.from_numpy(params["y_init"]).to(self.device)
        return None
if __name__ == '__main__':
    # Smoke test: build a small constrained RNN with a ReLU activation and
    # dump its parameter dictionary.
    # (Removed dataset-residue text that was fused onto the last line and made
    # it a syntax error.)
    N = 100
    activation = lambda x: torch.maximum(x, torch.tensor(0))
    rnn_torch = RNN_torch(N=N, activation=activation, constrained=True)
    param_dict = rnn_torch.get_params()
    print(param_dict)
from copy import deepcopy
import numpy as np
import torch
def L2_ortho(rnn, X=None, y=None):
    """Soft orthogonality penalty on input columns and output rows.

    Stacks the columns of W_inp next to the (transposed) rows of W_out,
    normalizes every column to unit length, and returns the norm of the
    off-diagonal part of the resulting Gram matrix — zero exactly when all
    those vectors are pairwise orthogonal. X and y are unused; they keep
    the signature compatible with generic regularizer callers.
    """
    stacked = torch.cat((rnn.input_layer.weight, rnn.output_layer.weight.t()), dim=1)
    unit_cols = stacked / torch.norm(stacked, dim=0)
    gram = unit_cols.t() @ unit_cols
    off_diagonal = gram - torch.diag(torch.diag(gram))
    return torch.norm(off_diagonal, p=2)
def print_iteration_info(iter, train_loss, min_train_loss, val_loss, min_val_loss):
    """Print one training-loop status line.

    A loss value is wrapped in ANSI green when it is <= the best value seen
    so far. When val_loss is None only the train loss is reported.
    """
    GREEN = '\033[92m'
    RESET = '\033[0m'

    def _fmt(value, best):
        # highlight a new (or tied) best value in green
        if value <= best:
            return f"{GREEN}{np.round(value, 6)}{RESET}"
        return f"{np.round(value, 6)}"

    message = f"iteration {iter}, train loss: {_fmt(train_loss, min_train_loss)}"
    if val_loss is not None:
        message += f", validation loss: {_fmt(val_loss, min_val_loss)}"
    print(message)
class Trainer():
    '''
    Orchestrates BPTT training of an RNN on a given task: batch generation,
    gradient steps, constraint projection (positivity of W_inp/W_out and
    Dale's law on W_rec), validation, and best-parameter tracking.
    '''
    def __init__(self, RNN, Task, max_iter, tol, criterion, optimizer, lambda_orth, lambda_r):
        '''
        :param RNN: pytorch RNN (specific template class)
        :param Task: task (specific template class)
        :param max_iter: maximum number of iterations
        :param tol: float, such that if the cost function reaches tol the optimization terminates
        :param criterion: function to evaluate loss
        :param optimizer: pytorch optimizer (Adam, SGD, etc.)
        :param lambda_orth: float, regularization softly imposing a pair-wise orthogonality
         on columns of W_inp and rows of W_out
        :param lambda_r: float, regularization of the mean firing rates during the trial
        '''
        self.RNN = RNN
        self.Task = Task
        self.max_iter = max_iter
        self.tol = tol
        self.criterion = criterion
        self.optimizer = optimizer
        self.lambda_orth = lambda_orth
        self.lambda_r = lambda_r

    def train_step(self, input, target_output, mask):
        '''
        One gradient step on a single batch.
        :param input: tensor (n_inputs, T_steps, batch_size)
        :param target_output: tensor (n_outputs, T_steps, batch_size)
        :param mask: time indices over which the loss is evaluated
        :return: (scalar training loss, per-trial mean squared error vector)
        '''
        states, predicted_output = self.RNN(input)
        # task loss + orthogonality regularizer + firing-rate regularizer
        loss = self.criterion(target_output[:, mask, :], predicted_output[:, mask, :]) + \
               self.lambda_orth * L2_ortho(self.RNN) + \
               self.lambda_r * torch.mean(states ** 2)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        error_vect = torch.sum(((target_output[:, mask, :] - predicted_output[:, mask, :]) ** 2).squeeze(),
                               dim=1) / len(mask)
        return loss.item(), error_vect

    def eval_step(self, input, target_output, mask):
        '''
        Evaluate the loss on a batch without gradient updates and without noise.
        :return: float validation loss
        '''
        with torch.no_grad():
            # NOTE(review): eval() is never switched back to train() afterwards;
            # harmless for this noise-gated RNN but worth confirming.
            self.RNN.eval()
            states, predicted_output_val = self.RNN(input, w_noise=False)
            val_loss = self.criterion(target_output[:, mask, :], predicted_output_val[:, mask, :]) + \
                       self.lambda_orth * L2_ortho(self.RNN) + \
                       self.lambda_r * torch.mean(states ** 2)
            return float(val_loss.cpu().numpy())

    def run_training(self, train_mask, same_batch=False):
        '''
        Main training loop.
        :param train_mask: time indices over which the loss is evaluated
        :param same_batch: if True, train and validate on one fixed batch
        :return: (trained RNN, train-loss history, validation-loss history, best parameters)
        '''
        train_losses = []
        val_losses = []
        self.RNN.train()  # puts the RNN into training mode (sets update_grad = True)
        min_train_loss = np.inf
        min_val_loss = np.inf
        best_net_params = deepcopy(self.RNN.get_params())
        if same_batch:
            input_batch, target_batch, conditions_batch = self.Task.get_batch()
            input_batch = torch.from_numpy(input_batch.astype("float32")).to(self.RNN.device)
            target_batch = torch.from_numpy(target_batch.astype("float32")).to(self.RNN.device)
            # with a fixed batch, validation runs on the very same data
            input_val = deepcopy(input_batch)
            target_output_val = deepcopy(target_batch)
        for iteration in range(self.max_iter):
            if not same_batch:
                input_batch, target_batch, conditions_batch = self.Task.get_batch()
                input_batch = torch.from_numpy(input_batch.astype("float32")).to(self.RNN.device)
                target_batch = torch.from_numpy(target_batch.astype("float32")).to(self.RNN.device)
                input_val, target_output_val, conditions_val = self.Task.get_batch()
                input_val = torch.from_numpy(input_val.astype("float32")).to(self.RNN.device)
                target_output_val = torch.from_numpy(target_output_val.astype("float32")).to(self.RNN.device)
            train_loss, error_vect = self.train_step(input=input_batch, target_output=target_batch, mask=train_mask)
            if self.RNN.constrained:
                # project the weights back onto the constraint set:
                # positivity of entries of W_inp and W_out
                self.RNN.output_layer.weight.data = torch.maximum(self.RNN.output_layer.weight.data, torch.tensor(0))
                self.RNN.input_layer.weight.data = torch.maximum(self.RNN.input_layer.weight.data, torch.tensor(0))
                # Dale's law
                # NOTE(review): the second dale_mask factor is not moved to CPU —
                # on a CUDA device this mixed-device multiply may error; confirm.
                self.RNN.recurrent_layer.weight.data = (
                        torch.maximum(self.RNN.recurrent_layer.weight.data.cpu() * self.RNN.dale_mask.cpu(),
                                      torch.tensor(0)) * self.RNN.dale_mask).to(self.RNN.device)
            # validation
            val_loss = self.eval_step(input_val, target_output_val, train_mask)
            # keeping track of train and valid losses and printing
            print_iteration_info(iteration, train_loss, min_train_loss, val_loss, min_val_loss)
            train_losses.append(train_loss)
            val_losses.append(val_loss)
            if val_loss <= min_val_loss:
                min_val_loss = val_loss
                best_net_params = deepcopy(self.RNN.get_params())
            if train_loss <= min_train_loss:
                min_train_loss = train_loss
            if val_loss <= self.tol:
                self.RNN.set_params(best_net_params)
                return self.RNN, train_losses, val_losses, best_net_params
        # max_iter reached: restore the best parameters seen during training
        self.RNN.set_params(best_net_params)
        return self.RNN, train_losses, val_losses, best_net_params
import sys
from copy import deepcopy
import numpy as np
import scipy.optimize
from matplotlib import pyplot as plt
from scipy.optimize import fsolve, minimize
from tqdm.auto import tqdm
sys.path.insert(0, '../')
from src.utils import get_colormaps, in_the_list, sort_eigs, make_orientation_consistent
import warnings
warnings.filterwarnings("ignore")
from sklearn.decomposition import PCA
class DynamicSystemAnalyzer():
    '''
    Generic class for analysis of the RNN dynamics: finding fixed points and plotting them in 2D and 3D
    '''
    def __init__(self, RNN_numpy):
        # RNN_numpy is expected to expose rhs_noisless(x, Input), rhs_jac(x, Input)
        # and a recurrent weight matrix W_rec (a numpy-based RNN wrapper).
        self.RNN = RNN_numpy
        self.rhs = self.RNN.rhs_noisless
        self.rhs_jac = self.RNN.rhs_jac
        # fixed-point data per input, keyed by str(Input.tolist())
        self.fp_data = {}
        def objective(x, Input):
            # squared norm of the RHS; zero exactly at a fixed point
            return np.sum(self.rhs(x, Input) ** 2)
        def objective_grad(x, Input):
            # analytic gradient of |RHS|^2: 2 * RHS^T @ Jacobian
            return 2 * (self.rhs(x, Input).reshape(1, -1) @ self.rhs_jac(x, Input)).flatten()
        self.objective = objective
        self.objective_grad = objective_grad
    def get_fixed_points(self,
                         Input,
                         patience=100,
                         fun_tol=1e-12,
                         stop_length=100,
                         sigma_init_guess=10,
                         eig_cutoff=1e-10,
                         diff_cutoff=1e-7,
                         mode='exact'):
        '''
        calculates fixed points (stable, unstable and marginally stable)
        :param Input: np.array, input to the RNN at which the fixed points are calculated
        :param patience: the greater the patience parameter, the longer the fixed point are searched for. The search for
        the fixed points (FPs) terminates if patience is exceeded
        :param fun_tol: RHS norm tolerance parameter: the points with the values greate than fun_tol are discarded
        :param stop_length: the maximal number of fixed points. The search terminates if number of found FPs
        exceeds the stop_length parameter
        :param sigma_init_guess: the variance of the N-dimensional Gausssian distribution for generating an initial guess
        for a FP
        :param eig_cutoff: if the norm of the max eigenvalue of the Jacobian at a found FP is lesser than eig_cutoff,
        count this FP as "marginally stable"
        :param diff_cutoff: if the difference of the currently found FP with any of the previously found FPs
        (stored in the list) is lesser than diff_cutoff - discard this point as a duplicate
        :return: arrays of points: stable fixed points, unstable fixed points, marginally stable fixed points.
        with dimensions (num_points x N) in each array
        :param mode: 'exact' or 'approx'. 'exact' - computes exact fixed points, with scipy.optimize.fsolve method
        'approx' finds 'slow points' - points with small |RHS|^2, with fun_tol controlling the cut-off |RHS|^2.
        '''
        unstable_fps = [];
        stable_fps = [];
        marginally_stable_fps = [];
        all_points = []
        N = self.RNN.W_rec.shape[0]
        cntr = 0  # counter parameter, keeps track of how many times in a row an optimizer didn't find any new fp
        # proceed while (cntr <= patience) and unless one of the list start to overflow (because of a 2d attractor)
        while (cntr <= patience) and (len(all_points) < stop_length):
            # random restart: a fresh Gaussian initial guess on every attempt
            x0 = sigma_init_guess * np.random.randn(N)
            # finding the roots of RHS of the RNN
            if mode == 'exact':
                x_root = fsolve(func=self.rhs, x0=x0, fprime=self.rhs_jac, args=(Input,))
            elif mode == "approx":
                res = scipy.optimize.minimize(fun=self.objective, x0=x0, args=(Input,), method='Powell')
                x_root = res.x
            else:
                raise ValueError(f"Mode {mode} is not implemented!")
            fun_val = self.objective(x_root, Input)
            cntr += 1
            if fun_val <= fun_tol:
                # classify the candidate by the leading Jacobian eigenvalue
                J = self.rhs_jac(x_root, Input)
                L = np.linalg.eigvals(J)
                L_0 = np.max(np.real(L))
                if not in_the_list(x_root, all_points, diff_cutoff=diff_cutoff):
                    # a genuinely new point resets the patience counter
                    cntr = 0
                    all_points.append(x_root)
                    if (np.abs(L_0) <= eig_cutoff):  # marginally stable fixed point (belongs to 1D attractor)
                        marginally_stable_fps.append(x_root)
                    else:
                        stable_fps.append(x_root) if (L_0 < -eig_cutoff) else unstable_fps.append(x_root)
        # Saving the data in the internal dictionary accessible by the input vector turned into string:
        input_as_key = str(Input.tolist())
        self.fp_data[input_as_key] = {}
        point_types = ["stable_fps", "unstable_fps", "marginally_stable_fps"]
        # eval() here only dereferences the three fixed local list names above
        for i, type in enumerate(point_types):
            if len(eval(type)) != 0:
                self.fp_data[input_as_key][type] = np.vstack(eval(point_types[i]))
        return None
    def plot_fixed_points(self, projection='2D', P=None):
        '''
        a function plots all the pre-calculated fixed-points
        If projection matrix P is None it performs PCA on the points
        and then plots these points projected on the first PCs
        If P is supplied, projects the points using the matrix P
        if not, performs PCA on the recovered points and uses first PCs for projecting.
        :param projection: to plot the FPs either on a plane (2D) or in 3D
        :param P: projection matrix N x n_dim size
        :return: a figure of fixed points on the first
        '''
        n_dim = 2 if projection == '2D' else 3
        inputs_as_key = list(self.fp_data.keys())
        if len(inputs_as_key) == 0:
            raise ValueError("To plot fixed points, one has to calculate them using 'get_fixed_point function' first!")
        # gather all stored fixed points (all inputs, all stability types) into one array
        all_points = []
        for input_as_key in inputs_as_key:
            types = list(self.fp_data[input_as_key].keys())
            points_per_input = np.vstack([self.fp_data[input_as_key][type] for type in types])
            all_points.append(points_per_input)
        all_points = np.vstack(all_points)
        if all_points.shape[0] < n_dim:
            raise ValueError("The number of found fixed points is lesser than n_dim of projection!")
        if P is None:
            pca = PCA(n_components=n_dim)
            pca.fit(all_points)
            P = np.array(pca.components_).T
        # projecting fixed points onto n_dim-subspace
        data_to_plot = {}
        for input_as_key in inputs_as_key:
            data_to_plot[input_as_key] = {}
            types = list(self.fp_data[input_as_key].keys())
            for type in types:
                data_to_plot[input_as_key][type] = self.fp_data[input_as_key][type] @ P
        # one color triple (stable/unstable/marginal) per input condition
        color_sets = [["blue", "red", "black"], ["blueviolet", "tomato", "darkslategray"],
                      ["green", "darkorange", "midnightblue"]]
        # Plotting the fixed points
        if n_dim == 2:
            fig = plt.figure(figsize=(7, 7))
            fig.suptitle(r"Fixed points projected on 2D PCA plane", fontsize=16)
            for j, input_as_key in enumerate(inputs_as_key):
                markers = ["o", "x", "o"];
                colors = color_sets[j]
                types = list(data_to_plot[input_as_key].keys())
                for t, type in enumerate(types):
                    plt.scatter(data_to_plot[input_as_key][type][:, 0],
                                data_to_plot[input_as_key][type][:, 1],
                                marker=markers[t], s=100, color=colors[t],
                                edgecolors='k')
            plt.xlabel("PC 1", fontsize=16)
            plt.ylabel("PC 2", fontsize=16)
            plt.grid(True)
        elif n_dim == 3:
            fig = plt.figure(figsize=(7, 7))
            ax = fig.add_subplot(projection='3d')
            # make the panes transparent
            ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
            ax.set_xlabel("PC 1", fontsize=20)
            ax.set_ylabel("PC 2", fontsize=20)
            ax.set_zlabel("PC 3", fontsize=20)
            fig.suptitle(r"Fixed points projected on 3D PCA subspace", fontsize=16)
            for j, input_as_key in enumerate(inputs_as_key):
                markers = ["o", "x", "o"];
                colors = color_sets[j]
                types = list(data_to_plot[input_as_key].keys())
                for t, type in enumerate(types):
                    ax.scatter(data_to_plot[input_as_key][type][:, 0],
                               data_to_plot[input_as_key][type][:, 1],
                               data_to_plot[input_as_key][type][:, 2],
                               marker=markers[t], s=100, color=colors[t],
                               edgecolors='k')
            plt.grid(True)
        return fig
    def compute_point_analytics(self, point, Input):
        '''
        :param point: a numpy vector, specifying a point of the state-space of RNN
        :param Input: a numpy vector, specifying the input to the RNN
        :return: fun_val - value of the |RHS|^2, J - Jacobian, E - eigenvalues sorted according their real part,
        the first left eigenvector l and the first right eigenvector r
        '''
        # side effect: moves the wrapped RNN's state to the queried point
        self.RNN.y = deepcopy(point)
        fun_val = self.objective(point, Input)
        J = self.rhs_jac(point, Input)
        E, R = np.linalg.eig(J)
        E, R = sort_eigs(E, R)
        # left eigenvectors are the rows of the inverse of the right-eigenvector matrix
        L = np.linalg.inv(R)
        l = np.real(L[0, :])
        r = np.real(R[:, 0])
        return fun_val, J, E, l, r
class DynamicSystemAnalyzerCDDM(DynamicSystemAnalyzer):
    '''
    Class which is inherited from the DynamicSystemAnalyzer base class,
    dedicated to processing of the RNNs trained on CDDM task
    '''
    def __init__(self, RNN):
        DynamicSystemAnalyzer.__init__(self, RNN)
        # task-defined projection axes built from the readout and input weights
        self.choice_axis = self.RNN.W_out.flatten() if self.RNN.W_out.shape[0] == 1 \
            else (self.RNN.W_out[0, :] - self.RNN.W_out[1, :])
        self.context_axis = self.RNN.W_inp[:, 0] - self.RNN.W_inp[:, 1]
        # FIX: axis=0 keeps this a length-N vector (the sum of the four sensory
        # input columns); without it np.sum collapsed everything to a scalar,
        # making the "sensory" projection axis a constant vector.
        self.sensory_axis = np.sum([self.RNN.W_inp[:, i] for i in [2, 3, 4, 5]], axis=0)
        # line-attractor analytics per context, filled by calc_LineAttractor_analytics
        self.LA_data = {}
        self.LA_data["motion"] = {}
        self.LA_data["color"] = {}
    def get_LineAttractor_endpoints(self, context, nudge=0.05, T_steps=1000, relax_steps=10):
        '''
        :param context: "motion" or "color"
        :param nudge: an additional input, creating a bias either to the right choice, or to the left choice
        :param T_steps: run the RNN with the applied input for the T_steps duration
        :param relax_steps: after T_steps with input, run relax_steps more steps
        :return: left-most and right-most points of a presupposed line attractor
        '''
        ctxt_ind = 0 if context == 'motion' else 1
        sensory_inds = [2, 3] if context == 'motion' else [4, 5]
        default_input = np.array([0, 0, 0.5, 0.5, 0.5, 0.5])
        default_input[ctxt_ind] = 1.0
        input_right_decision = deepcopy(default_input)
        input_left_decision = deepcopy(default_input)
        input_right_decision[sensory_inds] += np.array([nudge, -nudge])
        input_left_decision[sensory_inds] -= np.array([nudge, -nudge])
        points = []
        # find left and right points
        for inp in [input_left_decision, input_right_decision]:
            self.RNN.y_init = deepcopy(self.RNN.y_init)
            self.RNN.y = deepcopy(self.RNN.y_init)
            self.RNN.run(input_timeseries=np.repeat(inp[:, np.newaxis], axis=1, repeats=T_steps))
            # NOTE(review): the original docstring said the input is turned OFF for the
            # relaxation phase, but the same inp is applied here — confirm intent.
            self.RNN.run(input_timeseries=np.repeat(inp[:, np.newaxis], axis=1, repeats=relax_steps))
            points.append(deepcopy(self.RNN.y))
        return points[0], points[1]
    def calc_LineAttractor_analytics(self,
                                     N_points=31,
                                     obj_max_iter=100,
                                     nudge=0.05,
                                     T_steps=1000,
                                     relax_steps=10):
        '''
        :param N_points: number of points on each line attractor
        :param obj_max_iter: maximum iteration in the |RHS|^2 minimization process
        the rest of the parameters are the same as in 'get_LineAttractor_endpoints', 'get_fixed_points'
        :return: a dictionary with "color" and "motion" contexts, each containing sub-dictionary with:
        'slow points', |RHS|^2 value, Jacobian, eigenvalues, the principal left and right eigenvectors over these points
        '''
        default_input = np.array([0, 0, 0.5, 0.5, 0.5, 0.5])
        for context in ["motion", "color"]:
            ctxt_ind = 0 if context == 'motion' else 1
            Input = deepcopy(default_input)
            Input[ctxt_ind] = 1
            # get the end points of the line attractor
            left_point, right_point = self.get_LineAttractor_endpoints(context,
                                                                       nudge=nudge,
                                                                       T_steps=T_steps,
                                                                       relax_steps=relax_steps)
            # define the starting direction for search
            increment = (1 / (N_points - 1)) * (right_point - left_point)
            direction = deepcopy(increment)
            # orient the direction to point toward the "right" choice
            direction *= np.sign(np.dot(self.choice_axis, direction))
            slow_points = [];
            fun_vals = [];
            eigs = [];
            jacs = [];
            selection_vects = [];
            principle_eigenvects = []
            x_init = deepcopy(left_point)
            print(f"Analyzing points on a line attractor in {context} context...")
            for i in tqdm(range(N_points)):
                # minimize ||RHS(x)|| such that the x stays within a space orthogonal to the line attractor
                res = minimize(self.objective, x0=x_init, args=(Input,), method='SLSQP',
                               jac=self.objective_grad, options={'disp': False, 'maxiter': obj_max_iter},
                               constraints={'type': 'eq', 'fun': lambda x: np.dot(x - x_init, increment)})
                x_root = deepcopy(res.x)
                # step along the attractor for the next search
                x_init = x_root + increment
                slow_pt = deepcopy(x_root)
                # compute analytics at the slow point:
                fun_val, J, E, l, r = self.compute_point_analytics(slow_pt, Input)
                # align the eigenvectors with the traversal direction
                k = np.sign(np.dot(r, direction))
                l *= k;
                r *= k
                slow_points.append(deepcopy(slow_pt))
                fun_vals.append(fun_val)
                jacs.append(deepcopy(J))
                eigs.append(deepcopy(E))
                selection_vects.append(deepcopy(l))
                principle_eigenvects.append(deepcopy(r))
            selection_vects = make_orientation_consistent(selection_vects)
            principle_eigenvects = make_orientation_consistent(principle_eigenvects)
            self.LA_data[context]["slow_points"] = (np.array(slow_points))
            self.LA_data[context]["fun_val"] = (np.array(fun_vals))
            self.LA_data[context]["jac"] = (np.array(jacs))
            self.LA_data[context]["eigs"] = (np.array(eigs))
            self.LA_data[context]["l"] = (np.array(selection_vects))
            self.LA_data[context]["r"] = (np.array(principle_eigenvects))
        return None
    def plot_LineAttractor_3D(self,
                              nudge=0.05,
                              steps_stim_on=500,
                              steps_context_only_on=250):
        '''
        Plots
        Figure with projected line attractors within each context on a (sensory, context, choice)-subspace
        also plots 6 trajectories: three in each context.
        light blue - color context, orange - motion context,
        solid lines - trajectories with the decision to the right, dashed lines - to the left.
        two magenta trajectores - trials within each context with 0 coherence
        :return: matplotlib 3D figure
        '''
        nDim = 3
        # projection matrix
        P_matrix = np.zeros((self.RNN.N, 3))
        P_matrix[:, 0] = self.choice_axis
        P_matrix[:, 1] = self.context_axis
        P_matrix[:, 2] = self.sensory_axis
        if not ("slow_points" in (list(self.LA_data["motion"].keys()))):
            raise ValueError("Need to run `calc_LineAttractor_analytics' first!")
        else:
            LA_data_dict = deepcopy(self.LA_data)
        trajectories = dict()
        trajectories["motion"] = {}
        trajectories["color"] = {}
        for ctxt in ["motion", "color"]:
            trajectories[ctxt] = {}
            for stim_status in ["relevant", "irrelevant"]:
                trajectories[ctxt][stim_status] = {}
                for period in ["context_only_on", "stim_on", "stim_off"]:
                    trajectories[ctxt][stim_status][period] = {}
        colors, cmp = get_colormaps()
        red, blue, bluish, green, orange, lblue, violet = colors
        colors_trajectories = dict()
        colors_trajectories["motion"] = dict()
        colors_trajectories["color"] = dict()
        colors_trajectories["motion"]["relevant"] = colors[5]
        colors_trajectories["motion"]["irrelevant"] = colors[3]
        colors_trajectories["color"]["relevant"] = colors[1]
        colors_trajectories["color"]["irrelevant"] = colors[3]
        # simulate trajectories: context-only period, then stimulus period
        # with a left / right / zero-coherence nudge
        for ctxt in ["motion", "color"]:
            val = 1 if ctxt == 'motion' else 0
            for stim_status in ["relevant", "irrelevant"]:
                self.RNN.clear_history()
                rel_inds = [2, 3] if ctxt == 'motion' else [4, 5]
                irrel_inds = [4, 5] if ctxt == 'motion' else [2, 3]
                nudge_inds = rel_inds if stim_status == 'relevant' else irrel_inds
                x0 = 0.00 * np.random.randn(self.RNN.N)
                input = np.array([val, 1 - val, 0.0, 0.0, 0.0, 0.0])
                input_timeseries = np.repeat(input[:, np.newaxis], axis=1, repeats=steps_context_only_on)
                self.RNN.y = deepcopy(x0)
                self.RNN.run(input_timeseries=input_timeseries, save_history=True)
                x_trajectory_context_only_on = self.RNN.get_history()
                trajectories[ctxt][stim_status]["context_only_on"] = x_trajectory_context_only_on
                self.RNN.clear_history()
                x0 = deepcopy(x_trajectory_context_only_on[-1, :])
                for direction in ['left', 'right', 'center']:
                    input = deepcopy(np.array([val, 1 - val, 0.5, 0.5, 0.5, 0.5]))
                    if direction == 'left':
                        input[nudge_inds] -= np.array([nudge, -nudge])
                    elif direction == 'right':
                        input[nudge_inds] += np.array([nudge, -nudge])
                    input_timeseries = np.repeat(input[:, np.newaxis], axis=1, repeats=steps_stim_on)
                    self.RNN.y = deepcopy(x0)
                    self.RNN.run(input_timeseries=input_timeseries, save_history=True)
                    trajectory = self.RNN.get_history()
                    trajectories[ctxt][stim_status]["stim_on"][direction] = deepcopy(trajectory)
                    self.RNN.clear_history()
        colors_trajectories = dict()
        colors_trajectories["motion"] = dict()
        colors_trajectories["color"] = dict()
        colors_trajectories["motion"]["relevant"] = orange
        colors_trajectories["motion"]["irrelevant"] = orange
        colors_trajectories["color"]["relevant"] = lblue
        colors_trajectories["color"]["irrelevant"] = lblue
        colors_LA = dict()
        colors_LA["motion"] = bluish
        colors_LA["color"] = green
        fig_3D = plt.figure(figsize=(7, 7))
        ax = fig_3D.add_subplot(projection='3d')
        # make the panes transparent
        ax.xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
        ax.set_xlabel("Choice", fontsize=20)
        ax.set_ylabel("Context", fontsize=20)
        ax.set_zlabel("Sensory", fontsize=20)
        # initial point trajectory
        ax.scatter([0, 0], [0, 0], [0, 0], color='r', marker='o', s=10, alpha=0.9)
        for ctxt in ["motion", "color"]:
            slow_points_projected = LA_data_dict[ctxt]["slow_points"] @ P_matrix
            ax.scatter(*(slow_points_projected[:, k] for k in range(nDim)), color=colors_LA[ctxt], marker='o', s=20,
                       alpha=0.5)
            ax.plot(*(slow_points_projected[:, k] for k in range(nDim)), color=colors_LA[ctxt])
            for stim_status in ["relevant"]:
                clr = colors_trajectories[ctxt][stim_status]
                trajectory_projected = trajectories[ctxt][stim_status]["context_only_on"] @ P_matrix
                ax.plot(*(trajectory_projected[:, t] for t in range(nDim)),
                        linestyle='-', linewidth=2, color=clr, alpha=0.8)
                linestyles = ['-', ':', '-']
                colors = [clr, clr, 'm']
                for k, key in enumerate(["right", "left", "center"]):
                    trajectory_projected = trajectories[ctxt][stim_status]["stim_on"][key] @ P_matrix
                    ax.plot(*(trajectory_projected[:, t] for t in range(nDim)),
                            linestyle=linestyles[k], linewidth=3, color=colors[k], alpha=0.8)
        fig_3D.subplots_adjust()
        ax.view_init(12, 228)
        fig_3D.subplots_adjust()
        plt.tight_layout()
        return fig_3D
    def plot_RHS_over_LA(self):
        '''
        Plot |RHS(x)|^2 along the computed line attractor, one curve per context.
        (Removed dataset-residue text that was fused onto the final return line
        and made it a syntax error.)
        :return: matplotlib figure
        '''
        if not ("slow_points" in (list(self.LA_data["motion"].keys()))):
            raise ValueError("Need to run `calc_LineAttractor_analytics' first!")
        else:
            LA_data_dict = deepcopy(self.LA_data)
        colors, cmp = get_colormaps()
        red, blue, bluish, green, orange, lblue, violet = colors
        fig_RHS = plt.figure(figsize=(12, 3))
        plt.suptitle(r"$\||RHS(x)\||^2$", fontsize=16)
        plt.axhline(0, color="gray", linewidth=2, alpha=0.2)
        x = np.linspace(0, 1, LA_data_dict['motion']["slow_points"].shape[0])
        plt.plot(x, np.array(LA_data_dict["motion"]["fun_val"]), color=bluish, linewidth=3, linestyle='-',
                 label="motion")
        plt.plot(x, np.array(LA_data_dict["color"]["fun_val"]), color=green, linewidth=3, linestyle='-', label="color")
        plt.legend(fontsize=14)
        plt.xlabel("distance along the LA", fontsize=16)
        plt.ylabel(r"$\||RHS(x)\||$", fontsize=16)
        plt.grid(True)
        return fig_RHS
from copy import deepcopy
import numpy as np
from matplotlib import pyplot as plt
class PerformanceAnalyzer():
    '''
    Generic class for analysis of the RNN performance on the given task
    '''
    def __init__(self, rnn_numpy, task=None):
        # rnn_numpy: numpy-based RNN wrapper exposing run_multiple_trajectories,
        # clear_history, get_output, W_out, y_init.
        # task: optional task object (not used by the base-class methods).
        self.RNN = rnn_numpy
        self.Task = task
    def get_validation_score(self, scoring_function,
                             input_batch, target_batch, mask,
                             sigma_rec=0.03, sigma_inp=0.03,
                             rng_numpy=None):
        '''
        Run the RNN on a whole batch and return the scoring function averaged
        over trials, evaluated only on the masked time steps.
        :param scoring_function: callable(prediction, target) -> float
        :param input_batch: array (n_inputs, n_steps, batch_size)
        :param target_batch: array (n_outputs, n_steps, batch_size)
        :param mask: time indices over which the score is evaluated
        :param sigma_rec: std of recurrent noise used during the run
        :param sigma_inp: std of input noise used during the run
        :param rng_numpy: numpy random generator, for reproducibility
        :return: float, batch-averaged score
        '''
        n_inputs = input_batch.shape[0]
        n_steps = input_batch.shape[1]
        batch_size = input_batch.shape[2]
        self.RNN.clear_history()
        # self.RNN.y = np.repeat(deepcopy(self.RNN.y)[:, np.newaxis], axis=-1, repeats=batch_size)
        trajectories, output_prediction = self.RNN.run_multiple_trajectories(input_timeseries=input_batch,
                                                                             sigma_rec=sigma_rec,
                                                                             sigma_inp=sigma_inp,
                                                                             generator_numpy=rng_numpy)
        avg_score = np.mean(
            [scoring_function(output_prediction[:, mask, i], target_batch[:, mask, i]) for i in range(batch_size)])
        return avg_score
    def plot_trials(self, input_batch, target_batch, mask, sigma_rec=0.03, sigma_inp=0.03, labels=None, rng_numpy=None):
        '''
        Plot predicted vs target outputs for every trial in the batch,
        one subplot per trial (assumes batch_size >= 2 so that plt.subplots
        returns an array of axes — TODO confirm for batch_size == 1).
        :param labels: optional list of per-output-channel names for the legend
        :return: matplotlib figure
        '''
        n_inputs = input_batch.shape[0]
        n_steps = input_batch.shape[1]
        batch_size = input_batch.shape[2]
        fig_output, axes = plt.subplots(batch_size, 1, figsize=(7, 8))
        self.RNN.clear_history()
        self.RNN.y = deepcopy(self.RNN.y_init)
        self.RNN.run_multiple_trajectories(input_timeseries=input_batch,
                                           sigma_rec=sigma_rec,
                                           sigma_inp=sigma_inp,
                                           generator_numpy=rng_numpy)
        predicted_output = self.RNN.get_output()
        # fixed palette: one color per output channel (supports up to 7 channels)
        colors = ["r", "b", "g", "c", "m", "y", 'k']
        n_outputs = self.RNN.W_out.shape[0]
        for k in range(batch_size):
            for i in range(n_outputs):
                tag = labels[i] if not (labels is None) else ''
                axes[k].plot(predicted_output[i, :, k], color=colors[i], label=f'predicted {tag}')
                axes[k].plot(mask, target_batch[i, mask, k], color=colors[i], linestyle='--', label=f'target {tag}')
                axes[k].spines.right.set_visible(False)
                axes[k].spines.top.set_visible(False)
        axes[0].legend(fontsize=12, frameon=False, bbox_to_anchor=(1.0, 1.0))
        axes[batch_size // 2].set_ylabel("Output", fontsize=12)
        axes[-1].set_xlabel("time step, ms", fontsize=12)
        fig_output.tight_layout()
        plt.subplots_adjust(hspace=0.15, wspace=0.15)
        return fig_output
class PerformanceAnalyzerCDDM(PerformanceAnalyzer):
    """Performance analysis for RNNs trained on the context-dependent
    decision-making (CDDM) task: psychometric surfaces (% rightward choices)
    and MSE surfaces for the motion and color contexts."""

    def __init__(self, rnn_numpy):
        PerformanceAnalyzer.__init__(self, rnn_numpy)

    def calc_psychometric_data(self, task, mask, num_levels=7, num_repeats=7, sigma_rec=0.03, sigma_inp=0.03):
        """Run a coherence-grid batch through the RNN and tabulate psychometric data.

        :param task: CDDM task; get_batch() must yield the 2 x num_levels x num_levels
            condition layout this method's reshape relies on.
        :param mask: integer indices of the timesteps scored by the MSE.
        :param num_levels: number of coherence levels per stimulus axis.
        :param num_repeats: noisy repetitions averaged per condition.
        :returns: dict with "coherence_lvls" plus, per context ("motion"/"color"),
            "right_choice_percentage" and "MSE" matrices of shape (num_levels, num_levels).
        """
        coherence_lvls = np.linspace(-1, 1, num_levels)
        psychometric_data = {"coherence_lvls": coherence_lvls,
                             "motion": {}, "color": {}}

        input_batch, target_batch, conditions = task.get_batch()
        # Repeat each condition num_repeats times to average over noise realizations.
        input_batch = np.repeat(input_batch, axis=-1, repeats=num_repeats)
        target_batch = np.repeat(target_batch, axis=-1, repeats=num_repeats)
        self.RNN.clear_history()
        self.RNN.y = deepcopy(self.RNN.y_init)
        self.RNN.run(input_timeseries=input_batch,
                     sigma_rec=sigma_rec,
                     sigma_inp=sigma_inp,
                     save_history=True)
        output = self.RNN.get_output()

        # Decision readout at the final timestep: sign of the output difference
        # (two-output networks) or sign of the single output.
        if output.shape[0] == 2:
            choices = np.sign(output[0, -1, :] - output[1, -1, :])
        elif output.shape[0] == 1:
            choices = np.sign(output[0, -1, :])
        else:
            # Previously this fell through and crashed later with UnboundLocalError.
            raise ValueError(f"Expected 1 or 2 RNN outputs, got {output.shape[0]}")

        errors = np.sum(np.sum((target_batch[:, mask, :] - output[:, mask, :]) ** 2, axis=0), axis=0) / mask.shape[0]
        choices_to_right = (choices + 1) / 2

        # This reshaping pattern relies on the batch-structure from the CDDM task.
        # If you mess up with a batch generation function it may affect the psychometric function
        mean_choices_to_right = np.mean(choices_to_right.reshape(2, num_levels, num_levels, num_repeats), axis=-1)
        mean_error = np.mean(errors.reshape(2, num_levels, num_levels, num_repeats), axis=-1)
        psychometric_data["motion"]["right_choice_percentage"] = mean_choices_to_right[0, ...].T
        psychometric_data["color"]["right_choice_percentage"] = mean_choices_to_right[1, ...]
        psychometric_data["motion"]["MSE"] = mean_error[0, ...].T
        psychometric_data["color"]["MSE"] = mean_error[1, ...]
        self.psychometric_data = deepcopy(psychometric_data)
        return psychometric_data

    def plot_psychometric_data(self):
        """Render the matrices from calc_psychometric_data as four heatmaps
        (% right and MSE for each of the two contexts)."""
        coherence_lvls = self.psychometric_data["coherence_lvls"]
        # Per-context (right-choice-percentage, MSE) pairs; the color context is
        # row-flipped so the coherence axis increases upwards. This dict lookup
        # replaces the original eval() on constructed variable names.
        panels = {
            "Motion": (self.psychometric_data["motion"]["right_choice_percentage"],
                       self.psychometric_data["motion"]["MSE"]),
            "Color": (self.psychometric_data["color"]["right_choice_percentage"][::-1, :],
                      self.psychometric_data["color"]["MSE"][::-1, :]),
        }
        num_lvls = panels["Color"][0].shape[0]
        fig, axes = plt.subplots(2, 2, figsize=(10, 8))
        fig.suptitle("Psychometric data", fontsize=14)
        for i, (tag, (rght_prcntg, mse)) in enumerate(panels.items()):
            axes[0, i].title.set_text(f"{tag}, % right")
            im1 = axes[0, i].imshow(rght_prcntg, cmap="coolwarm", interpolation="bicubic")
            plt.colorbar(im1, ax=axes[0, i], orientation='vertical')
            axes[1, i].title.set_text(f"{tag}, MSE surface")
            im2 = axes[1, i].imshow(mse, cmap="coolwarm", interpolation="bicubic")
            plt.colorbar(im2, ax=axes[1, i], orientation='vertical')
            for j in range(2):
                axes[j, i].set_xticks(np.arange(num_lvls), labels=np.round(coherence_lvls, 2), rotation=50)
                axes[j, i].set_yticks(np.arange(num_lvls), labels=np.round(coherence_lvls, 2)[::-1])
        axes[0, 0].set_xticklabels([])
        axes[0, 1].set_xticklabels([])
        axes[0, 0].set_ylabel("Coherence of color", fontsize=16)
        axes[1, 0].set_ylabel("Coherence of color", fontsize=16)
        axes[1, 0].set_xlabel("Coherence of motion", fontsize=16)
        axes[1, 1].set_xlabel("Coherence of motion", fontsize=16)
        fig.tight_layout()
        plt.subplots_adjust(wspace=0.125, hspace=0.15)
return fig | /rnn_coach-0.1.tar.gz/rnn_coach-0.1/src/PerformanceAnalyzer.py | 0.809991 | 0.423995 | PerformanceAnalyzer.py | pypi |
import json
import os
import sys
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
from src.DynamicSystemAnalyzer import DynamicSystemAnalyzer
from src.PerformanceAnalyzer import PerformanceAnalyzer
from src.RNN_numpy import RNN_numpy
from src.utils import get_project_root, numpify, orthonormalize
from src.Trainer import Trainer
from src.RNN_torch import RNN_torch
from src.Task import *
from matplotlib import pyplot as plt
import torch
import time
from sklearn.decomposition import PCA
# ---- experiment configuration --------------------------------------------
disp = True
activation = "tanh"  # used only to compose the config filename; rebound to a callable below
taskname = "MemoryAntiAngle"
train_config_file = f"train_config_{taskname}_{activation}.json"
# `with` closes the config file; the original bare open() leaked the handle.
with open(os.path.join(get_project_root(), "data", "configs", train_config_file), mode="r") as _cfg_file:
    config_dict = json.load(_cfg_file)

# defining RNN:
N = config_dict["N"]
activation_name = config_dict["activation"]
if activation_name == 'relu':
    activation = lambda x: torch.maximum(x, torch.tensor(0))
elif activation_name == 'tanh':
    activation = torch.tanh
elif activation_name == 'sigmoid':
    activation = lambda x: 1 / (1 + torch.exp(-x))
elif activation_name == 'softplus':
    activation = lambda x: torch.log(1 + torch.exp(5 * x))  # sharpened softplus (beta = 5)
dt = config_dict["dt"]
tau = config_dict["tau"]
constrained = config_dict["constrained"]
connectivity_density_rec = config_dict["connectivity_density_rec"]
spectral_rad = config_dict["sr"]
sigma_inp = config_dict["sigma_inp"]
sigma_rec = config_dict["sigma_rec"]
seed = config_dict["seed"]
rng = torch.Generator()
if seed is not None:
    rng.manual_seed(seed)
input_size = config_dict["num_inputs"]
output_size = config_dict["num_outputs"]
# Task:
n_steps = config_dict["n_steps"]
task_params = config_dict["task_params"]
# Trainer:
lambda_orth = config_dict["lambda_orth"]
lambda_r = config_dict["lambda_r"]
mask = np.array(config_dict["mask"])
max_iter = config_dict["max_iter"]
tol = config_dict["tol"]
lr = config_dict["lr"]
weight_decay = config_dict["weight_decay"]
same_batch = config_dict["same_batch"]
# General:
tag = config_dict["tag"]
timestr = time.strftime("%Y%m%d-%H%M%S")
data_folder = os.path.join(config_dict["data_folder"], timestr)
# ---- instantiate network, task, trainer -----------------------------------
rnn_torch = RNN_torch(N=N, dt=dt, tau=tau, input_size=input_size, output_size=output_size,
                      activation=activation, constrained=constrained,
                      sigma_inp=sigma_inp, sigma_rec=sigma_rec,
                      connectivity_density_rec=connectivity_density_rec,
                      spectral_rad=spectral_rad,
                      random_generator=rng)
task = TaskMemoryAntiAngle(n_steps=n_steps, n_inputs=input_size, n_outputs=output_size, task_params=task_params)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(rnn_torch.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
trainer = Trainer(RNN=rnn_torch, Task=task,
                  max_iter=max_iter, tol=tol,
                  optimizer=optimizer, criterion=criterion,
                  lambda_orth=lambda_orth, lambda_r=lambda_r)
# Saving is intentionally disabled in this script:
# datasaver = DataSaver(data_folder)
datasaver = None

# On a SLURM cluster, reuse the job id as the task seed so parallel jobs differ.
try:
    SLURM_JOB_ID = int(os.environ["SLURM_JOB_ID"])
    task_params["seed"] = SLURM_JOB_ID
    seed = SLURM_JOB_ID
    # NOTE(review): `rng` was already seeded earlier; rebinding `seed` here does
    # not reseed the torch generator — confirm whether that is intended.
except (KeyError, ValueError):
    # Was a bare `except:`; only "env var absent" or "not an int" are expected.
    SLURM_JOB_ID = None
# ---- training -------------------------------------------------------------
rnn_trained, train_losses, val_losses, net_params = trainer.run_training(train_mask=mask, same_batch=same_batch)

# Loss curves on a log scale.
fig_trainloss = plt.figure(figsize=(10, 3))
plt.plot(train_losses, color='r', label='train loss (log scale)')
plt.plot(val_losses, color='b', label='valid loss (log scale)')
plt.yscale("log")
plt.grid(True)
plt.legend(fontsize=16)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trainloss, "train&valid_loss")

# net_params = pickle.load(open(os.path.join(get_project_root(), "data", "trained_RNNs", "MemoryAnti", "20230119-222602", "params_MemoryAnti_0.00639.pkl"), "rb+"))
# ---- validation: rebuild the trained network in numpy and score it --------
RNN_valid = RNN_numpy(N=net_params["N"],
                      dt=net_params["dt"],
                      tau=net_params["tau"],
                      activation=numpify(activation),
                      W_inp=net_params["W_inp"],
                      W_rec=net_params["W_rec"],
                      W_out=net_params["W_out"],
                      bias_rec=net_params["bias_rec"],
                      y_init=net_params["y_init"])
analyzer = PerformanceAnalyzer(RNN_valid)
score_function = lambda x, y: np.mean((x - y) ** 2)
input_batch_valid, target_batch_valid, conditions_valid = task.get_batch()
score = analyzer.get_validation_score(score_function, input_batch_valid, target_batch_valid,
                                      mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
print(f"MSE validation: {np.round(score, 5)}")
if not (datasaver is None): datasaver.save_data(config_dict, "config.json")
if not (datasaver is None): datasaver.save_data(net_params, f"params_{taskname}_{np.round(score, 5)}.pkl")

# ---- qualitative check: a few random validation trials --------------------
print("Plotting random trials")
inds = np.random.choice(np.arange(input_batch_valid.shape[-1]), 12)
inputs = input_batch_valid[..., inds]
targets = target_batch_valid[..., inds]
fig_trials = analyzer.plot_trials(inputs, targets, mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trials, "random_trials")

# ---- fixed/slow point analysis --------------------------------------------
dsa = DynamicSystemAnalyzer(RNN_valid)
params = {"fun_tol": 0.05,
          "diff_cutoff": 1e-4,
          "sigma_init_guess": 15,
          "patience": 100,
          "stop_length": 100,
          "mode": "approx"}
dsa.get_fixed_points(Input=np.array([0, 0, 0]), **params)
dsa.get_fixed_points(Input=np.array([0, 0, 1]), **params)
# Stack all detected points for the zero-input condition; `fp_type` renames the
# original loop variable `type`, which shadowed the builtin.
all_points = np.vstack([dsa.fp_data[str([0, 0, 0])][fp_type] for fp_type in list(dsa.fp_data[str([0, 0, 0])].keys())])
# Projection basis: first output direction plus top-2 PCA directions of the points.
pca = PCA(n_components=2)
pca.fit(all_points)
P = np.zeros((RNN_valid.N, 3))
P[:, 0] = RNN_valid.W_out[0, :]
P[:, 1:] = pca.components_.T
P = orthonormalize(P)
fig_fp = dsa.plot_fixed_points(projection='3D', P=P)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_fp, "slow_points_projection_output") | /rnn_coach-0.1.tar.gz/rnn_coach-0.1/src/training_RNNs/training_MemoryAntiAngle.py | 0.526586 | 0.2296 | training_MemoryAntiAngle.py | pypi |
import os
import sys
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
import json
from src.DataSaver import DataSaver
from src.DynamicSystemAnalyzer import DynamicSystemAnalyzer
from src.PerformanceAnalyzer import PerformanceAnalyzer
from src.RNN_numpy import RNN_numpy
from src.utils import get_project_root, numpify
from src.Trainer import Trainer
from src.RNN_torch import RNN_torch
from src.Task import *
from matplotlib import pyplot as plt
import torch
import time
# ---- experiment configuration --------------------------------------------
disp = True
activation = "tanh"  # used only to compose the config filename; rebound to a callable below
taskname = "NBitFlipFlop"
train_config_file = f"train_config_{taskname}_{activation}.json"
# `with` closes the config file; the original bare open() leaked the handle.
with open(os.path.join(get_project_root(), "data", "configs", train_config_file), mode="r") as _cfg_file:
    config_dict = json.load(_cfg_file)

# defining RNN:
N = config_dict["N"]
activation_name = config_dict["activation"]
if activation_name == 'relu':
    activation = lambda x: torch.maximum(x, torch.tensor(0))
elif activation_name == 'tanh':
    activation = torch.tanh
elif activation_name == 'sigmoid':
    activation = lambda x: 1 / (1 + torch.exp(-x))
elif activation_name == 'softplus':
    activation = lambda x: torch.log(1 + torch.exp(5 * x))  # sharpened softplus (beta = 5)
dt = config_dict["dt"]
tau = config_dict["tau"]
constrained = config_dict["constrained"]
connectivity_density_rec = config_dict["connectivity_density_rec"]
spectral_rad = config_dict["sr"]
sigma_inp = config_dict["sigma_inp"]
sigma_rec = config_dict["sigma_rec"]
seed = config_dict["seed"]
rng = torch.Generator()
if seed is not None:
    rng.manual_seed(seed)
input_size = config_dict["num_inputs"]
output_size = config_dict["num_outputs"]
# Task:
n_steps = config_dict["n_steps"]
task_params = config_dict["task_params"]
# Trainer:
lambda_orth = config_dict["lambda_orth"]
lambda_r = config_dict["lambda_r"]
mask = np.array(config_dict["mask"])
max_iter = config_dict["max_iter"]
tol = config_dict["tol"]
lr = config_dict["lr"]
weight_decay = config_dict["weight_decay"]
same_batch = config_dict["same_batch"]
# General:
tag = config_dict["tag"]
timestr = time.strftime("%Y%m%d-%H%M%S")
data_folder = os.path.join(config_dict["data_folder"], timestr)
# ---- instantiate network, task, trainer -----------------------------------
rnn_torch = RNN_torch(N=N, dt=dt, tau=tau, input_size=input_size, output_size=output_size,
                      activation=activation, constrained=constrained,
                      sigma_inp=sigma_inp, sigma_rec=sigma_rec,
                      connectivity_density_rec=connectivity_density_rec,
                      spectral_rad=spectral_rad,
                      random_generator=rng)
task = TaskNBitFlipFlop(n_steps=n_steps, n_inputs=input_size, n_outputs=output_size, task_params=task_params)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(rnn_torch.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
trainer = Trainer(RNN=rnn_torch, Task=task,
                  max_iter=max_iter, tol=tol,
                  optimizer=optimizer, criterion=criterion,
                  lambda_orth=lambda_orth, lambda_r=lambda_r)
datasaver = DataSaver(data_folder)

# On a SLURM cluster, reuse the job id as the task seed so parallel jobs differ.
try:
    SLURM_JOB_ID = int(os.environ["SLURM_JOB_ID"])
    task_params["seed"] = SLURM_JOB_ID
    seed = SLURM_JOB_ID
    # NOTE(review): `rng` was already seeded earlier; rebinding `seed` here does
    # not reseed the torch generator — confirm whether that is intended.
except (KeyError, ValueError):
    # Was a bare `except:`; only "env var absent" or "not an int" are expected.
    SLURM_JOB_ID = None
# Train; returns the trained torch RNN, the loss traces, and the parameter
# snapshot with the best validation loss.
rnn_trained, train_losses, val_losses, best_net_params = trainer.run_training(train_mask=mask, same_batch=same_batch)
# Loss curves on a log scale.
fig_trainloss = plt.figure(figsize=(10, 3))
plt.plot(train_losses, color='r', label='train loss (log scale)')
plt.plot(val_losses, color='b', label='valid loss (log scale)')
plt.yscale("log")
plt.grid(True)
plt.legend(fontsize=16)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trainloss, "train&valid_loss")
# validate
# Rebuild the best parameters as a numpy RNN and score it on a fresh batch.
RNN_valid = RNN_numpy(N=best_net_params["N"],
                      dt=best_net_params["dt"],
                      tau=best_net_params["tau"],
                      activation=numpify(activation),
                      W_inp=best_net_params["W_inp"],
                      W_rec=best_net_params["W_rec"],
                      W_out=best_net_params["W_out"],
                      bias_rec=best_net_params["bias_rec"],
                      y_init=best_net_params["y_init"])
analyzer = PerformanceAnalyzer(RNN_valid)
score_function = lambda x, y: np.mean((x - y) ** 2)
input_batch_valid, target_batch_valid, conditions_valid = task.get_batch()
score = analyzer.get_validation_score(score_function, input_batch_valid, target_batch_valid,
                                      mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
print(f"MSE validation: {np.round(score, 5)}")
if not (datasaver is None): datasaver.save_data(best_net_params, f"params_{taskname}_{np.round(score, 5)}.pkl")
if not (datasaver is None): datasaver.save_data(config_dict, "config.json")
# Qualitative check on 12 random validation trials.
print(f"Plotting random trials")
inds = np.random.choice(np.arange(input_batch_valid.shape[-1]), 12)
inputs = input_batch_valid[..., inds]
targets = target_batch_valid[..., inds]
fig_trials = analyzer.plot_trials(inputs, targets, mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trials, "random_trials")
# Fixed-point analysis of the zero-input dynamics, projected to 2D.
dsa = DynamicSystemAnalyzer(RNN_valid)
dsa.get_fixed_points(Input=np.zeros(input_size), sigma_init_guess=10, patience=50, stop_length=50)
fig_fp = dsa.plot_fixed_points(projection='2D')
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_fp, "fp_projection") | /rnn_coach-0.1.tar.gz/rnn_coach-0.1/src/training_RNNs/training_NBitFlipFlop.py | 0.487551 | 0.219662 | training_NBitFlipFlop.py | pypi |
import os
import sys
sys.path.insert(0, '../')
sys.path.insert(0, '../../')
import json
from src.DataSaver import DataSaver
from src.DynamicSystemAnalyzer import DynamicSystemAnalyzer
from src.PerformanceAnalyzer import PerformanceAnalyzer
from src.RNN_numpy import RNN_numpy
from src.utils import get_project_root, numpify
from src.Trainer import Trainer
from src.RNN_torch import RNN_torch
from src.Task import *
from matplotlib import pyplot as plt
import torch
import time
# ---- experiment configuration --------------------------------------------
disp = True
activation = "relu"  # used only to compose the config filename; rebound to a callable below
taskname = "DMTS"
train_config_file = f"train_config_{taskname}_{activation}.json"
# NOTE(review): this script reads configs from "LA_data" while the sibling
# training scripts use "data" — confirm the folder is intentional.
# `with` closes the config file; the original bare open() leaked the handle.
with open(os.path.join(get_project_root(), "LA_data", "configs", train_config_file), mode="r") as _cfg_file:
    config_dict = json.load(_cfg_file)

# defining RNN:
N = config_dict["N"]
activation_name = config_dict["activation"]
if activation_name == 'relu':
    activation = lambda x: torch.maximum(x, torch.tensor(0))
elif activation_name == 'tanh':
    activation = torch.tanh
elif activation_name == 'sigmoid':
    activation = lambda x: 1 / (1 + torch.exp(-x))
elif activation_name == 'softplus':
    activation = lambda x: torch.log(1 + torch.exp(5 * x))  # sharpened softplus (beta = 5)
dt = config_dict["dt"]
tau = config_dict["tau"]
constrained = config_dict["constrained"]
connectivity_density_rec = config_dict["connectivity_density_rec"]
spectral_rad = config_dict["sr"]
sigma_inp = config_dict["sigma_inp"]
sigma_rec = config_dict["sigma_rec"]
seed = config_dict["seed"]
rng = torch.Generator()
if seed is not None:
    rng.manual_seed(seed)
input_size = config_dict["num_inputs"]
output_size = config_dict["num_outputs"]
# Task:
n_steps = config_dict["n_steps"]
task_params = config_dict["task_params"]
# Trainer:
lambda_orth = config_dict["lambda_orth"]
lambda_r = config_dict["lambda_r"]
mask = np.array(config_dict["mask"])
max_iter = config_dict["max_iter"]
tol = config_dict["tol"]
lr = config_dict["lr"]
weight_decay = config_dict["weight_decay"]
same_batch = config_dict["same_batch"]
# General:
tag = config_dict["tag"]
timestr = time.strftime("%Y%m%d-%H%M%S")
data_folder = os.path.join(config_dict["data_folder"], timestr)
# ---- instantiate network, task, trainer -----------------------------------
rnn_torch = RNN_torch(N=N, dt=dt, tau=tau, input_size=input_size, output_size=output_size,
                      activation=activation, constrained=constrained,
                      sigma_inp=sigma_inp, sigma_rec=sigma_rec,
                      connectivity_density_rec=connectivity_density_rec,
                      spectral_rad=spectral_rad,
                      random_generator=rng)
task = TaskDMTS(n_steps=n_steps, n_inputs=input_size, n_outputs=output_size, task_params=task_params)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(rnn_torch.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
trainer = Trainer(RNN=rnn_torch, Task=task,
                  max_iter=max_iter, tol=tol,
                  optimizer=optimizer, criterion=criterion,
                  lambda_orth=lambda_orth, lambda_r=lambda_r)
datasaver = DataSaver(data_folder)

# On a SLURM cluster, reuse the job id as the task seed so parallel jobs differ.
try:
    SLURM_JOB_ID = int(os.environ["SLURM_JOB_ID"])
    task_params["seed"] = SLURM_JOB_ID
    seed = SLURM_JOB_ID
    # NOTE(review): `rng` was already seeded earlier; rebinding `seed` here does
    # not reseed the torch generator — confirm whether that is intended.
except (KeyError, ValueError):
    # Was a bare `except:`; only "env var absent" or "not an int" are expected.
    SLURM_JOB_ID = None
# Train; returns the trained torch RNN, the loss traces, and the parameter
# snapshot with the best validation loss.
rnn_trained, train_losses, val_losses, best_net_params = trainer.run_training(train_mask=mask, same_batch=same_batch)
# Loss curves on a log scale.
fig_trainloss = plt.figure(figsize=(10, 3))
plt.plot(train_losses, color='r', label='train loss (log scale)')
plt.plot(val_losses, color='b', label='valid loss (log scale)')
plt.yscale("log")
plt.grid(True)
plt.legend(fontsize=16)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trainloss, "train&valid_loss")
# validate
# Rebuild the best parameters as a numpy RNN and score it on a fresh batch.
RNN_valid = RNN_numpy(N=best_net_params["N"],
                      dt=best_net_params["dt"],
                      tau=best_net_params["tau"],
                      activation=numpify(activation),
                      W_inp=best_net_params["W_inp"],
                      W_rec=best_net_params["W_rec"],
                      W_out=best_net_params["W_out"],
                      bias_rec=best_net_params["bias_rec"],
                      y_init=best_net_params["y_init"])
analyzer = PerformanceAnalyzer(RNN_valid)
score_function = lambda x, y: np.mean((x - y) ** 2)
input_batch_valid, target_batch_valid, conditions_valid = task.get_batch()
score = analyzer.get_validation_score(score_function, input_batch_valid, target_batch_valid,
                                      mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
print(f"MSE validation: {np.round(score, 5)}")
if not (datasaver is None): datasaver.save_data(best_net_params, f"params_{taskname}_{np.round(score, 5)}.pkl")
if not (datasaver is None): datasaver.save_data(config_dict, "config.json")
# Qualitative check on 12 random validation trials.
print(f"Plotting random trials")
inds = np.random.choice(np.arange(input_batch_valid.shape[-1]), 12)
inputs = input_batch_valid[..., inds]
targets = target_batch_valid[..., inds]
fig_trials = analyzer.plot_trials(inputs, targets, mask, sigma_rec=sigma_rec, sigma_inp=sigma_inp)
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_trials, "random_trials")
# Fixed-point analysis of the zero-input dynamics, projected to 2D.
dsa = DynamicSystemAnalyzer(RNN_valid)
dsa.get_fixed_points(Input=np.zeros(input_size))
fig_fp = dsa.plot_fixed_points(projection='2D')
if disp:
    plt.show()
if not (datasaver is None): datasaver.save_figure(fig_fp, "fp_projection") | /rnn_coach-0.1.tar.gz/rnn_coach-0.1/src/training_RNNs/training_DMTS.py | 0.490724 | 0.212743 | training_DMTS.py | pypi |
# DataJoint connection settings for the RNN results database.
# SECURITY NOTE(review): database credentials are hard-coded in source control;
# they should be moved to environment variables or a local dj config file.
import datajoint as dj
dj.config['database.host'] = 'localhost'
dj.config['database.user'] = 'ptolmachev'
dj.config['database.password'] = 'pawa'
# Alternative (remote) connection, kept for reference:
# dj.config['database.host'] = 'datajoint-tengel.pni.princeton.edu'
# dj.config['database.user'] = 'pt1290'
# dj.config['database.password'] = 'a9Ab?spCKz$Zh@24h'
dj.config['display.limit'] = 500  # max rows shown when previewing tables
dj.config["enable_python_native_blobs"] = True  # allow numpy/python objects in blob columns
dj.conn(reset=True)  # (re)establish the database connection now
schema = dj.schema('pt1290_RNNs')  # schema holding the tables defined below
@schema
class TaskDJ(dj.Manual):
    """Manually populated table of task specifications, keyed by a hash of the
    task definition; stores the task name, dimensions, parameters, and mask."""
    definition = """
    task_hash: char(16)
    ---
    task_name: char(16)
    n_steps: smallint unsigned
    n_inputs: tinyint unsigned
    n_outputs: tinyint unsigned
    task_params : blob
    mask : blob
    """
@schema
class TrainerDJ(dj.Manual):
    """Manually populated table of optimizer/trainer hyperparameter sets,
    keyed by a hash of the settings."""
    definition = """
    trainer_hash: char(16)
    ---
    max_iter: int
    tol: float
    lr: float
    lambda_orth: float
    lambda_r: float
    same_batch : tinyint
    shuffle : tinyint
    """
@schema
class RNNDJ(dj.Manual):
    """Trained RNN records: one row per trained network, referencing the task
    and trainer settings it was produced with, and storing its weights,
    validation score, and optional analysis blobs."""
    definition = """
    -> TaskDJ
    -> TrainerDJ
    rnn_hash: char(16) # unique model id
    ---
    mse_score: float
    task_name: char(16)
    timestamp: char(18)
    n: int # number of nodes in the RNN
    activation_name: enum('relu', 'tanh', 'sigmoid', 'softplus') # name of the activation function used in the dynamics
    constrained: tinyint # boolean variable, either True or False, using biologically plausible connectivity or not
    dt: float # Euler integration timestep
    tau: float # Dynamical time-scale
    sr: float # spectral radius of the recurrent conenctivity
    connectivity_density_rec: Decimal(1,0) # opposite of sparsity of the connectivity. 1 - fully connected network
    sigma_rec: float
    sigma_inp: float
    w_inp : blob
    w_rec : blob
    w_out: blob
    b_rec: blob
    fp_data = NULL: mediumblob
    psycho_data = NULL: mediumblob
    la_data = NULL: mediumblob
""" | /rnn_coach-0.1.tar.gz/rnn_coach-0.1/src/datajoint/datajoint_config.py | 0.510496 | 0.211885 | datajoint_config.py | pypi |
import math
import time
import RNS
from RNS.Cryptography import Fernet
class Callbacks:
    """Container for the user-registered callback hooks of a Destination:
    link establishment, packet reception, and proof requests. All hooks
    start out unset."""
    def __init__(self):
        self.link_established = self.packet = self.proof_requested = None
class Destination:
"""
A class used to describe endpoints in a Reticulum Network. Destination
instances are used both to create outgoing and incoming endpoints. The
destination type will decide if encryption, and what type, is used in
communication with the endpoint. A destination can also announce its
presence on the network, which will also distribute necessary keys for
encrypted communication with it.
    :param identity: An instance of :ref:`RNS.Identity<api-identity>`. Can hold only public keys for an outgoing destination, or private keys for an incoming one.
:param direction: ``RNS.Destination.IN`` or ``RNS.Destination.OUT``.
:param type: ``RNS.Destination.SINGLE``, ``RNS.Destination.GROUP`` or ``RNS.Destination.PLAIN``.
:param app_name: A string specifying the app name.
:param \*aspects: Any non-zero number of string arguments.
"""
# Constants
SINGLE = 0x00
GROUP = 0x01
PLAIN = 0x02
LINK = 0x03
types = [SINGLE, GROUP, PLAIN, LINK]
PROVE_NONE = 0x21
PROVE_APP = 0x22
PROVE_ALL = 0x23
proof_strategies = [PROVE_NONE, PROVE_APP, PROVE_ALL]
ALLOW_NONE = 0x00
ALLOW_ALL = 0x01
ALLOW_LIST = 0x02
request_policies = [ALLOW_NONE, ALLOW_ALL, ALLOW_LIST]
IN = 0x11;
OUT = 0x12;
directions = [IN, OUT]
PR_TAG_WINDOW = 30
@staticmethod
def expand_name(identity, app_name, *aspects):
"""
:returns: A string containing the full human-readable name of the destination, for an app_name and a number of aspects.
"""
# Check input values and build name string
if "." in app_name: raise ValueError("Dots can't be used in app names")
name = app_name
for aspect in aspects:
if "." in aspect: raise ValueError("Dots can't be used in aspects")
name += "." + aspect
if identity != None:
name += "." + identity.hexhash
return name
@staticmethod
def hash(identity, app_name, *aspects):
"""
:returns: A destination name in adressable hash form, for an app_name and a number of aspects.
"""
name_hash = RNS.Identity.full_hash(Destination.expand_name(None, app_name, *aspects).encode("utf-8"))[:(RNS.Identity.NAME_HASH_LENGTH//8)]
addr_hash_material = name_hash
if identity != None:
if isinstance(identity, RNS.Identity):
addr_hash_material += identity.hash
elif isinstance(identity, bytes) and len(identity) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
addr_hash_material += identity
else:
raise TypeError("Invalid material supplied for destination hash calculation")
return RNS.Identity.full_hash(addr_hash_material)[:RNS.Reticulum.TRUNCATED_HASHLENGTH//8]
@staticmethod
def app_and_aspects_from_name(full_name):
"""
:returns: A tuple containing the app name and a list of aspects, for a full-name string.
"""
components = full_name.split(".")
return (components[0], components[1:])
@staticmethod
def hash_from_name_and_identity(full_name, identity):
"""
:returns: A destination name in adressable hash form, for a full name string and Identity instance.
"""
app_name, aspects = Destination.app_and_aspects_from_name(full_name)
return Destination.hash(identity, app_name, *aspects)
    def __init__(self, identity, direction, type, app_name, *aspects):
        # Validate arguments before creating any state.
        if "." in app_name: raise ValueError("Dots can't be used in app names")
        if not type in Destination.types: raise ValueError("Unknown destination type")
        if not direction in Destination.directions: raise ValueError("Unknown destination direction")
        self.accept_link_requests = True
        self.callbacks = Callbacks()
        self.request_handlers = {}  # path_hash -> [path, response_generator, allow, allowed_list]
        self.type = type
        self.direction = direction
        self.proof_strategy = Destination.PROVE_NONE
        self.mtu = 0
        self.path_responses = {}    # tag -> [timestamp, announce_data], cache for path requests
        self.links = []             # links established to this destination
        # An IN destination (except PLAIN) must have an identity; create one on
        # the fly and append its hexhash so the full name is unique per identity.
        if identity == None and direction == Destination.IN and self.type != Destination.PLAIN:
            identity = RNS.Identity()
            aspects = aspects+(identity.hexhash,)
        if identity != None and self.type == Destination.PLAIN:
            raise TypeError("Selected destination type PLAIN cannot hold an identity")
        self.identity = identity
        self.name = Destination.expand_name(identity, app_name, *aspects)
        # Generate the destination address hash
        self.hash = Destination.hash(self.identity, app_name, *aspects)
        self.name_hash = RNS.Identity.full_hash(self.expand_name(None, app_name, *aspects).encode("utf-8"))[:(RNS.Identity.NAME_HASH_LENGTH//8)]
        self.hexhash = self.hash.hex()
        self.default_app_data = None
        self.callback = None       # NOTE(review): not referenced in the visible code; possibly legacy — confirm
        self.proofcallback = None  # NOTE(review): not referenced in the visible code; possibly legacy — confirm
        # Make the new destination known to the transport layer.
        RNS.Transport.register_destination(self)
def __str__(self):
"""
:returns: A human-readable representation of the destination including addressable hash and full name.
"""
return "<"+self.name+"/"+self.hexhash+">"
    def announce(self, app_data=None, path_response=False, attached_interface=None, tag=None, send=True):
        """
        Creates an announce packet for this destination and broadcasts it on all
        relevant interfaces. Application specific data can be added to the announce.

        :param app_data: *bytes* containing the app_data.
        :param path_response: Internal flag used by :ref:`RNS.Transport<api-transport>`. Ignore.
        :param attached_interface: Internal use; interface the announce is emitted on.
        :param tag: Internal use; tag identifying the path request being answered.
        :param send: If ``False``, the built announce packet is returned instead of sent.
        """
        if self.type != Destination.SINGLE:
            raise TypeError("Only SINGLE destination types can be announced")
        # Evict cached path responses older than PR_TAG_WINDOW seconds.
        now = time.time()
        stale_responses = []
        for entry_tag in self.path_responses:
            entry = self.path_responses[entry_tag]
            if now > entry[0]+Destination.PR_TAG_WINDOW:
                stale_responses.append(entry_tag)
        for entry_tag in stale_responses:
            self.path_responses.pop(entry_tag)
        if (path_response == True and tag != None) and tag in self.path_responses:
            # This code is currently not used, since Transport will block duplicate
            # path requests based on tags. When multi-path support is implemented in
            # Transport, this will allow Transport to detect redundant paths to the
            # same destination, and select the best one based on chosen criteria,
            # since it will be able to detect that a single emitted announce was
            # received via multiple paths. The difference in reception time will
            # potentially also be useful in determining characteristics of the
            # multiple available paths, and to choose the best one.
            RNS.log("Using cached announce data for answering path request with tag "+RNS.prettyhexrep(tag), RNS.LOG_EXTREME)
            announce_data = self.path_responses[tag][1]
        else:
            destination_hash = self.hash  # NOTE(review): local appears unused — confirm before removing
            # 5 random bytes plus a 5-byte big-endian timestamp make each announce unique.
            random_hash = RNS.Identity.get_random_hash()[0:5]+int(time.time()).to_bytes(5, "big")
            # Fall back to default_app_data, which may be static bytes or a
            # zero-argument callable returning bytes.
            if app_data == None and self.default_app_data != None:
                if isinstance(self.default_app_data, bytes):
                    app_data = self.default_app_data
                elif callable(self.default_app_data):
                    returned_app_data = self.default_app_data()
                    if isinstance(returned_app_data, bytes):
                        app_data = returned_app_data
            # Sign over destination hash + public key + name hash + random hash (+ app_data).
            signed_data = self.hash+self.identity.get_public_key()+self.name_hash+random_hash
            if app_data != None:
                signed_data += app_data
            signature = self.identity.sign(signed_data)
            announce_data = self.identity.get_public_key()+self.name_hash+random_hash+signature
            if app_data != None:
                announce_data += app_data
            # NOTE(review): this caches under `tag` even when tag is None, creating
            # a None-keyed cache entry — confirm this is intended.
            self.path_responses[tag] = [time.time(), announce_data]
        if path_response:
            announce_context = RNS.Packet.PATH_RESPONSE
        else:
            announce_context = RNS.Packet.NONE
        announce_packet = RNS.Packet(self, announce_data, RNS.Packet.ANNOUNCE, context = announce_context, attached_interface = attached_interface)
        if send:
            announce_packet.send()
        else:
            return announce_packet
def accepts_links(self, accepts = None):
"""
Set or query whether the destination accepts incoming link requests.
:param accepts: If ``True`` or ``False``, this method sets whether the destination accepts incoming link requests. If not provided or ``None``, the method returns whether the destination currently accepts link requests.
:returns: ``True`` or ``False`` depending on whether the destination accepts incoming link requests, if the *accepts* parameter is not provided or ``None``.
"""
if accepts == None:
return self.accept_link_requests
if accepts:
self.accept_link_requests = True
else:
self.accept_link_requests = False
def set_link_established_callback(self, callback):
"""
Registers a function to be called when a link has been established to
this destination.
:param callback: A function or method with the signature *callback(link)* to be called when a new link is established with this destination.
"""
self.callbacks.link_established = callback
def set_packet_callback(self, callback):
"""
Registers a function to be called when a packet has been received by
this destination.
:param callback: A function or method with the signature *callback(data, packet)* to be called when this destination receives a packet.
"""
self.callbacks.packet = callback
def set_proof_requested_callback(self, callback):
"""
Registers a function to be called when a proof has been requested for
a packet sent to this destination. Allows control over when and if
proofs should be returned for received packets.
:param callback: A function or method to with the signature *callback(packet)* be called when a packet that requests a proof is received. The callback must return one of True or False. If the callback returns True, a proof will be sent. If it returns False, a proof will not be sent.
"""
self.callbacks.proof_requested = callback
def set_proof_strategy(self, proof_strategy):
"""
Sets the destinations proof strategy.
:param proof_strategy: One of ``RNS.Destination.PROVE_NONE``, ``RNS.Destination.PROVE_ALL`` or ``RNS.Destination.PROVE_APP``. If ``RNS.Destination.PROVE_APP`` is set, the `proof_requested_callback` will be called to determine whether a proof should be sent or not.
"""
if not proof_strategy in Destination.proof_strategies:
raise TypeError("Unsupported proof strategy")
else:
self.proof_strategy = proof_strategy
def register_request_handler(self, path, response_generator = None, allow = ALLOW_NONE, allowed_list = None):
"""
Registers a request handler.
:param path: The path for the request handler to be registered.
:param response_generator: A function or method with the signature *response_generator(path, data, request_id, link_id, remote_identity, requested_at)* to be called. Whatever this funcion returns will be sent as a response to the requester. If the function returns ``None``, no response will be sent.
:param allow: One of ``RNS.Destination.ALLOW_NONE``, ``RNS.Destination.ALLOW_ALL`` or ``RNS.Destination.ALLOW_LIST``. If ``RNS.Destination.ALLOW_LIST`` is set, the request handler will only respond to requests for identified peers in the supplied list.
:param allowed_list: A list of *bytes-like* :ref:`RNS.Identity<api-identity>` hashes.
:raises: ``ValueError`` if any of the supplied arguments are invalid.
"""
if path == None or path == "":
raise ValueError("Invalid path specified")
elif not callable(response_generator):
raise ValueError("Invalid response generator specified")
elif not allow in Destination.request_policies:
raise ValueError("Invalid request policy")
else:
path_hash = RNS.Identity.truncated_hash(path.encode("utf-8"))
request_handler = [path, response_generator, allow, allowed_list]
self.request_handlers[path_hash] = request_handler
def deregister_request_handler(self, path):
"""
Deregisters a request handler.
:param path: The path for the request handler to be deregistered.
:returns: True if the handler was deregistered, otherwise False.
"""
path_hash = RNS.Identity.truncated_hash(path.encode("utf-8"))
if path_hash in self.request_handlers:
self.request_handlers.pop(path_hash)
return True
else:
return False
def receive(self, packet):
if packet.packet_type == RNS.Packet.LINKREQUEST:
plaintext = packet.data
self.incoming_link_request(plaintext, packet)
else:
plaintext = self.decrypt(packet.data)
if plaintext != None:
if packet.packet_type == RNS.Packet.DATA:
if self.callbacks.packet != None:
try:
self.callbacks.packet(plaintext, packet)
except Exception as e:
RNS.log("Error while executing receive callback from "+str(self)+". The contained exception was: "+str(e), RNS.LOG_ERROR)
def incoming_link_request(self, data, packet):
if self.accept_link_requests:
link = RNS.Link.validate_request(self, data, packet)
if link != None:
self.links.append(link)
def create_keys(self):
"""
For a ``RNS.Destination.GROUP`` type destination, creates a new symmetric key.
:raises: ``TypeError`` if called on an incompatible type of destination.
"""
if self.type == Destination.PLAIN:
raise TypeError("A plain destination does not hold any keys")
if self.type == Destination.SINGLE:
raise TypeError("A single destination holds keys through an Identity instance")
if self.type == Destination.GROUP:
self.prv_bytes = Fernet.generate_key()
self.prv = Fernet(self.prv_bytes)
def get_private_key(self):
"""
For a ``RNS.Destination.GROUP`` type destination, returns the symmetric private key.
:raises: ``TypeError`` if called on an incompatible type of destination.
"""
if self.type == Destination.PLAIN:
raise TypeError("A plain destination does not hold any keys")
elif self.type == Destination.SINGLE:
raise TypeError("A single destination holds keys through an Identity instance")
else:
return self.prv_bytes
def load_private_key(self, key):
"""
For a ``RNS.Destination.GROUP`` type destination, loads a symmetric private key.
:param key: A *bytes-like* containing the symmetric key.
:raises: ``TypeError`` if called on an incompatible type of destination.
"""
if self.type == Destination.PLAIN:
raise TypeError("A plain destination does not hold any keys")
if self.type == Destination.SINGLE:
raise TypeError("A single destination holds keys through an Identity instance")
if self.type == Destination.GROUP:
self.prv_bytes = key
self.prv = Fernet(self.prv_bytes)
def load_public_key(self, key):
if self.type != Destination.SINGLE:
raise TypeError("Only the \"single\" destination type can hold a public key")
else:
raise TypeError("A single destination holds keys through an Identity instance")
def encrypt(self, plaintext):
"""
Encrypts information for ``RNS.Destination.SINGLE`` or ``RNS.Destination.GROUP`` type destination.
:param plaintext: A *bytes-like* containing the plaintext to be encrypted.
:raises: ``ValueError`` if destination does not hold a necessary key for encryption.
"""
if self.type == Destination.PLAIN:
return plaintext
if self.type == Destination.SINGLE and self.identity != None:
return self.identity.encrypt(plaintext)
if self.type == Destination.GROUP:
if hasattr(self, "prv") and self.prv != None:
try:
return self.prv.encrypt(plaintext)
except Exception as e:
RNS.log("The GROUP destination could not encrypt data", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
else:
raise ValueError("No private key held by GROUP destination. Did you create or load one?")
def decrypt(self, ciphertext):
"""
Decrypts information for ``RNS.Destination.SINGLE`` or ``RNS.Destination.GROUP`` type destination.
:param ciphertext: *Bytes* containing the ciphertext to be decrypted.
:raises: ``ValueError`` if destination does not hold a necessary key for decryption.
"""
if self.type == Destination.PLAIN:
return ciphertext
if self.type == Destination.SINGLE and self.identity != None:
return self.identity.decrypt(ciphertext)
if self.type == Destination.GROUP:
if hasattr(self, "prv") and self.prv != None:
try:
return self.prv.decrypt(ciphertext)
except Exception as e:
RNS.log("The GROUP destination could not decrypt data", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
else:
raise ValueError("No private key held by GROUP destination. Did you create or load one?")
def sign(self, message):
"""
Signs information for ``RNS.Destination.SINGLE`` type destination.
:param message: *Bytes* containing the message to be signed.
:returns: A *bytes-like* containing the message signature, or *None* if the destination could not sign the message.
"""
if self.type == Destination.SINGLE and self.identity != None:
return self.identity.sign(message)
else:
return None
    def set_default_app_data(self, app_data=None):
        """
        Sets the default app_data for the destination. If set, the default
        app_data will be included in every announce sent by the destination,
        unless other app_data is specified in the *announce* method.

        :param app_data: A *bytes-like* containing the default app_data, or a *callable* returning a *bytes-like* containing the app_data.
        """
        # Stored verbatim; a supplied callable is presumably evaluated by
        # the announce logic, which is not visible here — TODO confirm.
        self.default_app_data = app_data
def clear_default_app_data(self):
"""
Clears default app_data previously set for the destination.
"""
self.set_default_app_data(app_data=None) | /rns-0.5.7-py3-none-any.whl/RNS/Destination.py | 0.673192 | 0.277905 | Destination.py | pypi |
from __future__ import annotations
import collections
import enum
import threading
import time
from types import TracebackType
from typing import Type, Callable, TypeVar, Generic, NewType
import abc
import contextlib
import struct
import RNS
from abc import ABC, abstractmethod
TPacket = TypeVar("TPacket")
class SystemMessageTypes(enum.IntEnum):
    # Message types >= 0xf000 are reserved for the Channel system itself
    # (see Channel._register_message_type). SMT_STREAM_DATA is the type
    # used by the Buffer module's StreamDataMessage.
    SMT_STREAM_DATA = 0xff00
class ChannelOutletBase(ABC, Generic[TPacket]):
    """
    An abstract transport layer interface used by Channel.

    DEPRECATED: This was created for testing; eventually
    Channel will use Link or a LinkBase interface
    directly.
    """
    # NOTE: the abstract method bodies previously did "raise
    # NotImplemented()", which raises a TypeError because NotImplemented
    # is a non-callable singleton. NotImplementedError is the correct
    # exception for unimplemented abstract members.
    @abstractmethod
    def send(self, raw: bytes) -> TPacket:
        """Send raw bytes over the outlet and return the transport packet."""
        raise NotImplementedError()

    @abstractmethod
    def resend(self, packet: TPacket) -> TPacket:
        """Retransmit a previously sent packet."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def mdu(self):
        """Maximum data unit the outlet can carry in one packet."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def rtt(self):
        """Current round-trip time estimate of the underlying transport."""
        raise NotImplementedError()

    @property
    @abstractmethod
    def is_usable(self):
        """Whether the outlet can currently be used for sending."""
        raise NotImplementedError()

    @abstractmethod
    def get_packet_state(self, packet: TPacket) -> MessageState:
        """Return the delivery state of a previously sent packet."""
        raise NotImplementedError()

    @abstractmethod
    def timed_out(self):
        """Notify the outlet that delivery has timed out."""
        raise NotImplementedError()

    @abstractmethod
    def __str__(self):
        raise NotImplementedError()

    @abstractmethod
    def set_packet_timeout_callback(self, packet: TPacket, callback: Callable[[TPacket], None] | None,
                                    timeout: float | None = None):
        """Register (or clear) a timeout callback for a packet."""
        raise NotImplementedError()

    @abstractmethod
    def set_packet_delivered_callback(self, packet: TPacket, callback: Callable[[TPacket], None] | None):
        """Register (or clear) a delivery callback for a packet."""
        raise NotImplementedError()

    @abstractmethod
    def get_packet_id(self, packet: TPacket) -> any:
        """Return a stable identifier for a packet."""
        raise NotImplementedError()
class CEType(enum.IntEnum):
    """
    ChannelException type codes
    """
    ME_NO_MSG_TYPE = 0       # message class lacks a MSGTYPE
    ME_INVALID_MSG_TYPE = 1  # MSGTYPE reserved, missing, or class invalid
    ME_NOT_REGISTERED = 2    # no factory registered for a received MSGTYPE
    ME_LINK_NOT_READY = 3    # send attempted while link/window unavailable
    ME_ALREADY_SENT = 4
    ME_TOO_BIG = 5           # packed message exceeds the outlet MDU
class ChannelException(Exception):
    """
    An exception thrown by Channel, with a type code.

    :param ce_type: a ``CEType`` code describing the failure
    :param args: additional context, forwarded to ``Exception``
    """
    def __init__(self, ce_type: CEType, *args):
        # Unpack the varargs so Exception.args holds them directly;
        # passing the tuple itself (as before) nested it, making
        # str(exception) render as "((...,),)".
        super().__init__(*args)
        self.type = ce_type
class MessageState(enum.IntEnum):
    """
    Set of possible states for a Message
    """
    MSGSTATE_NEW = 0        # created, not yet handed to the outlet
    MSGSTATE_SENT = 1       # transmitted, delivery not yet confirmed
    MSGSTATE_DELIVERED = 2  # confirmed delivered by the outlet
    MSGSTATE_FAILED = 3     # delivery failed or no receipt available
class MessageBase(abc.ABC):
    """
    Base type for any messages sent or received on a Channel.

    Subclasses must define the two abstract methods as well as
    the ``MSGTYPE`` class variable.
    """
    # MSGTYPE must be unique within all classes sent over a
    # channel. Additionally, MSGTYPE > 0xf000 are reserved.
    MSGTYPE = None
    """
    Defines a unique identifier for a message class.

    * Must be unique within all classes registered with a ``Channel``
    * Must be less than ``0xf000``. Values greater than or equal to ``0xf000`` are reserved.
    """
    @abstractmethod
    def pack(self) -> bytes:
        """
        Create and return the binary representation of the message

        :return: binary representation of message
        """
        # NotImplementedError is the correct exception here; the previous
        # "raise NotImplemented()" raised a TypeError instead, because
        # NotImplemented is a non-callable singleton.
        raise NotImplementedError()

    @abstractmethod
    def unpack(self, raw: bytes):
        """
        Populate message from binary representation

        :param raw: binary representation
        """
        raise NotImplementedError()
MessageCallbackType = NewType("MessageCallbackType", Callable[[MessageBase], bool])
class Envelope:
    """
    Internal wrapper used to transport messages over a channel and
    track its state within the channel framework.

    The wire format is a 6-byte header of three big-endian uint16s
    (message type, sequence, payload length) followed by the packed
    message payload.
    """
    def __init__(self, outlet: ChannelOutletBase, message: MessageBase = None, raw: bytes = None, sequence: int = None):
        self.ts = time.time()
        self.id = id(self)
        self.message = message
        self.raw = raw
        self.packet: TPacket = None
        self.sequence = sequence
        self.outlet = outlet
        self.tries = 0
        self.unpacked = False
        self.packed = False
        self.tracked = False

    def unpack(self, message_factories: dict[int, Type]) -> MessageBase:
        # Split the header from the payload and look up the registered
        # constructor for the message type.
        msgtype, self.sequence, length = struct.unpack(">HHH", self.raw[:6])
        payload = self.raw[6:]
        factory = message_factories.get(msgtype, None)
        if factory is None:
            raise ChannelException(CEType.ME_NOT_REGISTERED, f"Unable to find constructor for Channel MSGTYPE {hex(msgtype)}")
        decoded = factory()
        decoded.unpack(payload)
        self.unpacked = True
        self.message = decoded
        return decoded

    def pack(self) -> bytes:
        if self.message.__class__.MSGTYPE is None:
            raise ChannelException(CEType.ME_NO_MSG_TYPE, f"{self.message.__class__} lacks MSGTYPE")
        payload = self.message.pack()
        self.raw = struct.pack(">HHH", self.message.MSGTYPE, self.sequence, len(payload)) + payload
        self.packed = True
        return self.raw
class Channel(contextlib.AbstractContextManager):
    """
    Provides reliable delivery of messages over
    a link.

    ``Channel`` differs from ``Request`` and
    ``Resource`` in some important ways:

    **Continuous**
        Messages can be sent or received as long as
        the ``Link`` is open.

    **Bi-directional**
        Messages can be sent in either direction on
        the ``Link``; neither end is the client or
        server.

    **Size-constrained**
        Messages must be encoded into a single packet.

    ``Channel`` is similar to ``Packet``, except that it
    provides reliable delivery (automatic retries) as well
    as a structure for exchanging several types of
    messages over the ``Link``.

    ``Channel`` is not instantiated directly, but rather
    obtained from a ``Link`` with ``get_channel()``.
    """
    # The initial window size at channel setup
    WINDOW = 2
    # Absolute minimum window size
    WINDOW_MIN = 1
    # The maximum window size for transfers on slow links
    WINDOW_MAX_SLOW = 5
    # The maximum window size for transfers on mid-speed links
    WINDOW_MAX_MEDIUM = 16
    # The maximum window size for transfers on fast links
    WINDOW_MAX_FAST = 48
    # For calculating maps and guard segments, this
    # must be set to the global maximum window.
    WINDOW_MAX = WINDOW_MAX_FAST
    # If the fast rate is sustained for this many request
    # rounds, the fast link window size will be allowed.
    FAST_RATE_THRESHOLD = 10
    # If the RTT rate is higher than this value,
    # the max window size for fast links will be used.
    RTT_FAST = 0.25
    RTT_MEDIUM = 0.75
    RTT_SLOW = 1.45
    # The minimum allowed flexibility of the window size.
    # The difference between window_max and window_min
    # will never be smaller than this value.
    WINDOW_FLEXIBILITY = 4
    # Sequence numbers are 16-bit unsigned and wrap at SEQ_MODULUS.
    SEQ_MAX = 0xFFFF
    SEQ_MODULUS = SEQ_MAX+1
    def __init__(self, outlet: ChannelOutletBase):
        """
        Create a channel over the given outlet.

        :param outlet: transport abstraction the channel sends and receives through.
        """
        self._outlet = outlet
        # Guards all ring and window state; packet callbacks may fire
        # from other threads.
        self._lock = threading.RLock()
        self._tx_ring: collections.deque[Envelope] = collections.deque()
        self._rx_ring: collections.deque[Envelope] = collections.deque()
        self._message_callbacks: list[MessageCallbackType] = []
        self._next_sequence = 0
        self._next_rx_sequence = 0
        self._message_factories: dict[int, Type[MessageBase]] = {}
        self._max_tries = 5
        self.fast_rate_rounds = 0
        self.medium_rate_rounds = 0
        # Very slow links get a fixed window of 1 from the start.
        if self._outlet.rtt > Channel.RTT_SLOW:
            self.window = 1
            self.window_max = 1
            self.window_min = 1
            self.window_flexibility = 1
        else:
            self.window = Channel.WINDOW
            self.window_max = Channel.WINDOW_MAX_SLOW
            self.window_min = Channel.WINDOW_MIN
            self.window_flexibility = Channel.WINDOW_FLEXIBILITY
    def __enter__(self) -> Channel:
        return self
    def __exit__(self, __exc_type: Type[BaseException] | None, __exc_value: BaseException | None,
                 __traceback: TracebackType | None) -> bool | None:
        # Leaving the context tears the channel down; exceptions are
        # not suppressed.
        self._shutdown()
        return False
    def register_message_type(self, message_class: Type[MessageBase]):
        """
        Register a message class for reception over a ``Channel``.

        Message classes must extend ``MessageBase``.

        :param message_class: Class to register
        """
        self._register_message_type(message_class, is_system_type=False)
    def _register_message_type(self, message_class: Type[MessageBase], *, is_system_type: bool = False):
        # Internal variant that may register reserved (>= 0xf000)
        # system message types.
        with self._lock:
            if not issubclass(message_class, MessageBase):
                raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
                                       f"{message_class} is not a subclass of {MessageBase}.")
            if message_class.MSGTYPE is None:
                raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
                                       f"{message_class} has invalid MSGTYPE class attribute.")
            if message_class.MSGTYPE >= 0xf000 and not is_system_type:
                raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
                                       f"{message_class} has system-reserved message type.")
            # The class must be constructible with no arguments, since
            # Envelope.unpack() instantiates it that way on receive.
            try:
                message_class()
            except Exception as ex:
                raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
                                       f"{message_class} raised an exception when constructed with no arguments: {ex}")
            self._message_factories[message_class.MSGTYPE] = message_class
    def add_message_handler(self, callback: MessageCallbackType):
        """
        Add a handler for incoming messages. A handler
        has the following signature:

        ``(message: MessageBase) -> bool``

        Handlers are processed in the order they are
        added. If any handler returns True, processing
        of the message stops; handlers after the
        returning handler will not be called.

        :param callback: Function to call
        """
        with self._lock:
            if callback not in self._message_callbacks:
                self._message_callbacks.append(callback)
    def remove_message_handler(self, callback: MessageCallbackType):
        """
        Remove a handler added with ``add_message_handler``.

        :param callback: handler to remove
        """
        with self._lock:
            if callback in self._message_callbacks:
                self._message_callbacks.remove(callback)
    def _shutdown(self):
        with self._lock:
            self._message_callbacks.clear()
            self._clear_rings()
    def _clear_rings(self):
        with self._lock:
            # Detach outlet callbacks before dropping envelopes, so no
            # stale timeout/delivery callbacks fire after shutdown.
            for envelope in self._tx_ring:
                if envelope.packet is not None:
                    self._outlet.set_packet_timeout_callback(envelope.packet, None)
                    self._outlet.set_packet_delivered_callback(envelope.packet, None)
            self._tx_ring.clear()
            self._rx_ring.clear()
    def _emplace_envelope(self, envelope: Envelope, ring: collections.deque[Envelope]) -> bool:
        # Insert an envelope into a ring, keeping it ordered by sequence
        # number. Returns False for duplicates.
        with self._lock:
            i = 0
            window_overflow = (self._next_rx_sequence+Channel.WINDOW_MAX) % Channel.SEQ_MODULUS
            for existing in ring:
                if envelope.sequence == existing.sequence:
                    RNS.log(f"Envelope: Emplacement of duplicate envelope with sequence "+str(envelope.sequence), RNS.LOG_EXTREME)
                    return False
                # NOTE(review): the window_overflow comparison presumably
                # keeps wrapped-around sequences ordered after the current
                # window — confirm against the sequencing design.
                if envelope.sequence < existing.sequence and not envelope.sequence < window_overflow:
                    ring.insert(i, envelope)
                    RNS.log("Inserted seq "+str(envelope.sequence)+" at "+str(i), RNS.LOG_DEBUG)
                    envelope.tracked = True
                    return True
                i += 1
            envelope.tracked = True
            ring.append(envelope)
            return True
    def _run_callbacks(self, message: MessageBase):
        # Iterate over a snapshot so handlers may add/remove handlers
        # while callbacks run; the first handler returning True wins.
        cbs = self._message_callbacks.copy()
        for cb in cbs:
            try:
                if cb(message):
                    return
            except Exception as e:
                RNS.log("Channel "+str(self)+" experienced an error while running a message callback. The contained exception was: "+str(e), RNS.LOG_ERROR)
    def _receive(self, raw: bytes):
        # Entry point for raw channel data arriving from the outlet.
        try:
            envelope = Envelope(outlet=self._outlet, raw=raw)
            with self._lock:
                message = envelope.unpack(self._message_factories)
                # Reject sequences behind the expected receive sequence,
                # unless they fall inside the wrapped-around window.
                if envelope.sequence < self._next_rx_sequence:
                    window_overflow = (self._next_rx_sequence+Channel.WINDOW_MAX) % Channel.SEQ_MODULUS
                    if window_overflow < self._next_rx_sequence:
                        if envelope.sequence > window_overflow:
                            RNS.log("Invalid packet sequence ("+str(envelope.sequence)+") received on channel "+str(self), RNS.LOG_EXTREME)
                            return
                    else:
                        RNS.log("Invalid packet sequence ("+str(envelope.sequence)+") received on channel "+str(self), RNS.LOG_EXTREME)
                        return
                is_new = self._emplace_envelope(envelope, self._rx_ring)
            if not is_new:
                RNS.log("Duplicate message received on channel "+str(self), RNS.LOG_EXTREME)
                return
            else:
                # Deliver, in order, every envelope that is contiguous
                # with the expected receive sequence.
                with self._lock:
                    contigous = []
                    for e in self._rx_ring:
                        if e.sequence == self._next_rx_sequence:
                            contigous.append(e)
                            self._next_rx_sequence = (self._next_rx_sequence + 1) % Channel.SEQ_MODULUS
                    for e in contigous:
                        if not e.unpacked:
                            m = e.unpack(self._message_factories)
                        else:
                            m = e.message
                        self._rx_ring.remove(e)
                        self._run_callbacks(m)
        except Exception as e:
            RNS.log("An error ocurred while receiving data on "+str(self)+". The contained exception was: "+str(e), RNS.LOG_ERROR)
    def is_ready_to_send(self) -> bool:
        """
        Check if ``Channel`` is ready to send.

        :return: True if ready
        """
        if not self._outlet.is_usable:
            return False
        with self._lock:
            # Sending is blocked while the number of not-yet-delivered
            # envelopes fills the current window.
            outstanding = 0
            for envelope in self._tx_ring:
                if envelope.outlet == self._outlet:
                    if not envelope.packet or not self._outlet.get_packet_state(envelope.packet) == MessageState.MSGSTATE_DELIVERED:
                        outstanding += 1
            if outstanding >= self.window:
                return False
        return True
    def _packet_tx_op(self, packet: TPacket, op: Callable[[TPacket], bool]):
        # Find the TX envelope matching the packet and apply op to it.
        # When op returns True the envelope is considered finished and
        # removed, and the congestion window may be grown.
        with self._lock:
            envelope = next(filter(lambda e: self._outlet.get_packet_id(e.packet) == self._outlet.get_packet_id(packet),
                                   self._tx_ring), None)
            if envelope and op(envelope):
                envelope.tracked = False
                if envelope in self._tx_ring:
                    self._tx_ring.remove(envelope)
                    # Successful delivery: open the window, and track
                    # sustained link speed to raise the window ceiling.
                    if self.window < self.window_max:
                        self.window += 1
                        if (self.window - self.window_min) > (self.window_flexibility-1):
                            self.window_min += 1
                        # TODO: Remove at some point
                        # RNS.log("Increased "+str(self)+" window to "+str(self.window), RNS.LOG_EXTREME)
                    if self._outlet.rtt != 0:
                        if self._outlet.rtt > Channel.RTT_FAST:
                            self.fast_rate_rounds = 0
                            if self._outlet.rtt > Channel.RTT_MEDIUM:
                                self.medium_rate_rounds = 0
                            else:
                                self.medium_rate_rounds += 1
                                if self.window_max < Channel.WINDOW_MAX_MEDIUM and self.medium_rate_rounds == Channel.FAST_RATE_THRESHOLD:
                                    self.window_max = Channel.WINDOW_MAX_MEDIUM
                                    # TODO: Remove at some point
                                    # RNS.log("Increased "+str(self)+" max window to "+str(self.window_max), RNS.LOG_EXTREME)
                        else:
                            self.fast_rate_rounds += 1
                            if self.window_max < Channel.WINDOW_MAX_FAST and self.fast_rate_rounds == Channel.FAST_RATE_THRESHOLD:
                                self.window_max = Channel.WINDOW_MAX_FAST
                                # TODO: Remove at some point
                                # RNS.log("Increased "+str(self)+" max window to "+str(self.window_max), RNS.LOG_EXTREME)
                else:
                    RNS.log("Envelope not found in TX ring for "+str(self), RNS.LOG_EXTREME)
        if not envelope:
            RNS.log("Spurious message received on "+str(self), RNS.LOG_EXTREME)
    def _packet_delivered(self, packet: TPacket):
        self._packet_tx_op(packet, lambda env: True)
    def _get_packet_timeout_time(self, tries: int) -> float:
        # Exponential backoff: 5x the (floored) RTT, doubling per try.
        return pow(2, tries - 1) * max(self._outlet.rtt, 0.01) * 5
    def _packet_timeout(self, packet: TPacket):
        def retry_envelope(envelope: Envelope) -> bool:
            # Returns True (finish/remove envelope) only when retries
            # are exhausted and the link is being torn down.
            if envelope.tries >= self._max_tries:
                RNS.log("Retry count exceeded on "+str(self)+", tearing down Link.", RNS.LOG_ERROR)
                self._shutdown()  # start on separate thread?
                self._outlet.timed_out()
                return True
            envelope.tries += 1
            self._outlet.resend(envelope.packet)
            self._outlet.set_packet_delivered_callback(envelope.packet, self._packet_delivered)
            self._outlet.set_packet_timeout_callback(envelope.packet, self._packet_timeout, self._get_packet_timeout_time(envelope.tries))
            # A timeout is a congestion signal: shrink the window.
            if self.window > self.window_min:
                self.window -= 1
                if self.window_max > self.window_min:
                    self.window_max -= 1
                    if (self.window_max - self.window) > (self.window_flexibility-1):
                        self.window_max -= 1
                # TODO: Remove at some point
                # RNS.log("Decreased "+str(self)+" window to "+str(self.window), RNS.LOG_EXTREME)
            return False
        # The packet may have been delivered between the timeout firing
        # and this handler running; only retry if still undelivered.
        if self._outlet.get_packet_state(packet) != MessageState.MSGSTATE_DELIVERED:
            self._packet_tx_op(packet, retry_envelope)
    def send(self, message: MessageBase) -> Envelope:
        """
        Send a message. If a message send is attempted and
        ``Channel`` is not ready, an exception is thrown.

        :param message: an instance of a ``MessageBase`` subclass
        """
        envelope: Envelope | None = None
        with self._lock:
            if not self.is_ready_to_send():
                raise ChannelException(CEType.ME_LINK_NOT_READY, f"Link is not ready")
            envelope = Envelope(self._outlet, message=message, sequence=self._next_sequence)
            self._next_sequence = (self._next_sequence + 1) % Channel.SEQ_MODULUS
            self._emplace_envelope(envelope, self._tx_ring)
        if envelope is None:
            raise BlockingIOError()
        # Pack outside the lock, then hand the raw bytes to the outlet
        # and register delivery/timeout tracking for retries.
        envelope.pack()
        if len(envelope.raw) > self._outlet.mdu:
            raise ChannelException(CEType.ME_TOO_BIG, f"Packed message too big for packet: {len(envelope.raw)} > {self._outlet.mdu}")
        envelope.packet = self._outlet.send(envelope.raw)
        envelope.tries += 1
        self._outlet.set_packet_delivered_callback(envelope.packet, self._packet_delivered)
        self._outlet.set_packet_timeout_callback(envelope.packet, self._packet_timeout, self._get_packet_timeout_time(envelope.tries))
        return envelope
    @property
    def MDU(self):
        """
        Maximum Data Unit: the number of bytes available
        for a message to consume in a single send. This
        value is adjusted from the ``Link`` MDU to accommodate
        message header information.

        :return: number of bytes available
        """
        return self._outlet.mdu - 6  # sizeof(msgtype) + sizeof(length) + sizeof(sequence)
class LinkChannelOutlet(ChannelOutletBase):
    """
    An implementation of ChannelOutletBase for RNS.Link.
    Allows Channel to send packets over an RNS Link with
    Packets.

    :param link: RNS Link to wrap
    """
    def __init__(self, link: RNS.Link):
        self.link = link

    def send(self, raw: bytes) -> RNS.Packet:
        packet = RNS.Packet(self.link, raw, context=RNS.Packet.CHANNEL)
        # Only transmit immediately when the link is up; the packet
        # object is returned either way so the channel can track it.
        if self.link.status == RNS.Link.ACTIVE:
            packet.send()
        return packet

    def resend(self, packet: RNS.Packet) -> RNS.Packet:
        receipt = packet.resend()
        if not receipt:
            RNS.log("Failed to resend packet", RNS.LOG_ERROR)
        return packet

    @property
    def mdu(self):
        return self.link.MDU

    @property
    def rtt(self):
        return self.link.rtt

    @property
    def is_usable(self):
        return True  # had issues looking at Link.status

    def get_packet_state(self, packet: TPacket) -> MessageState:
        # A packet without a receipt can never be confirmed delivered,
        # so it is treated as failed. (Was "packet.receipt == None";
        # identity comparison is the correct idiom for None.)
        if packet.receipt is None:
            return MessageState.MSGSTATE_FAILED
        status = packet.receipt.get_status()
        if status == RNS.PacketReceipt.SENT:
            return MessageState.MSGSTATE_SENT
        elif status == RNS.PacketReceipt.DELIVERED:
            return MessageState.MSGSTATE_DELIVERED
        elif status == RNS.PacketReceipt.FAILED:
            return MessageState.MSGSTATE_FAILED
        else:
            raise Exception(f"Unexpected receipt state: {status}")

    def timed_out(self):
        self.link.teardown()

    def __str__(self):
        return f"{self.__class__.__name__}({self.link})"

    def set_packet_timeout_callback(self, packet: RNS.Packet, callback: Callable[[RNS.Packet], None] | None,
                                    timeout: float | None = None):
        if timeout and packet.receipt:
            packet.receipt.set_timeout(timeout)

        # Adapt the channel's packet-based callback to the receipt-based
        # callback interface of RNS.PacketReceipt.
        def inner(receipt: RNS.PacketReceipt):
            callback(packet)

        if packet and packet.receipt:
            packet.receipt.set_timeout_callback(inner if callback else None)

    def set_packet_delivered_callback(self, packet: RNS.Packet, callback: Callable[[RNS.Packet], None] | None):
        def inner(receipt: RNS.PacketReceipt):
            callback(packet)

        if packet and packet.receipt:
            packet.receipt.set_delivery_callback(inner if callback else None)

    def get_packet_id(self, packet: RNS.Packet) -> any:
        # Duck-typed so test doubles without get_hash() also work.
        if packet and hasattr(packet, "get_hash") and callable(packet.get_hash):
            return packet.get_hash()
        else:
            return None
from __future__ import annotations
import bz2
import sys
import time
import threading
from threading import RLock
import struct
from RNS.Channel import Channel, MessageBase, SystemMessageTypes
import RNS
from io import RawIOBase, BufferedRWPair, BufferedReader, BufferedWriter
from typing import Callable
from contextlib import AbstractContextManager
class StreamDataMessage(MessageBase):
    MSGTYPE = SystemMessageTypes.SMT_STREAM_DATA
    """
    Message type for ``Channel``. ``StreamDataMessage``
    uses a system-reserved message type.
    """
    STREAM_ID_MAX = 0x3fff  # 16383
    """
    The stream id is limited to 2 bytes - 2 bit
    """
    MAX_DATA_LEN = RNS.Link.MDU - 2 - 6  # 2 for stream data message header, 6 for channel envelope
    """
    When the Buffer package is imported, this value is
    calculcated based on the value of OVERHEAD
    """
    def __init__(self, stream_id: int = None, data: bytes = None, eof: bool = False):
        """
        This class is used to encapsulate binary stream
        data to be sent over a ``Channel``.

        :param stream_id: id of stream relative to receiver
        :param data: binary data
        :param eof: set to True if signalling End of File
        """
        super().__init__()
        if stream_id is not None and stream_id > self.STREAM_ID_MAX:
            raise ValueError("stream_id must be 0-16383")
        self.stream_id = stream_id
        self.compressed = False
        self.data = data or bytes()
        self.eof = eof

    def pack(self) -> bytes:
        if self.stream_id is None:
            raise ValueError("stream_id")
        # Opportunistic compression: keep the bz2 form only when it is
        # actually smaller than the raw payload.
        candidate = bz2.compress(self.data)
        if len(self.data) - len(candidate) > 0:
            self.data = candidate
            self.compressed = True
        # Header layout: bit 15 = eof flag, bit 14 = compressed flag,
        # bits 0-13 = stream id.
        header = self.stream_id & 0x3fff
        if self.eof:
            header |= 0x8000
        if self.compressed:
            header |= 0x4000
        payload = self.data if self.data else bytes()
        return bytes(struct.pack(">H", header) + payload)

    def unpack(self, raw):
        header = struct.unpack(">H", raw[:2])[0]
        self.eof = (header & 0x8000) > 0
        self.compressed = (header & 0x4000) > 0
        self.stream_id = header & 0x3fff
        self.data = raw[2:]
        if self.compressed:
            self.data = bz2.decompress(self.data)
class RawChannelReader(RawIOBase, AbstractContextManager):
    """
    An implementation of RawIOBase that receives
    binary stream data sent over a ``Channel``.

    This class generally need not be instantiated directly.
    Use :func:`RNS.Buffer.create_reader`,
    :func:`RNS.Buffer.create_writer`, and
    :func:`RNS.Buffer.create_bidirectional_buffer` functions
    to create buffered streams with optional callbacks.

    For additional information on the API of this
    object, see the Python documentation for
    ``RawIOBase``.
    """
    def __init__(self, stream_id: int, channel: Channel):
        """
        Create a raw channel reader.

        :param stream_id: local stream id to receive at
        :param channel: ``Channel`` object to receive from
        """
        self._stream_id = stream_id
        self._channel = channel
        self._lock = RLock()
        self._buffer = bytearray()
        self._eof = False
        # Stream data arrives as system-type messages on the channel;
        # register the type and a handler that collects this stream's
        # data into the local buffer.
        self._channel._register_message_type(StreamDataMessage, is_system_type=True)
        self._channel.add_message_handler(self._handle_message)
        self._listeners: list[Callable[[int], None]] = []
    def add_ready_callback(self, cb: Callable[[int], None]):
        """
        Add a function to be called when new data is available.
        The function should have the signature ``(ready_bytes: int) -> None``

        :param cb: function to call
        """
        with self._lock:
            self._listeners.append(cb)
    def remove_ready_callback(self, cb: Callable[[int], None]):
        """
        Remove a function added with :func:`RNS.RawChannelReader.add_ready_callback()`

        :param cb: function to remove
        """
        with self._lock:
            self._listeners.remove(cb)
    def _handle_message(self, message: MessageBase):
        # Channel message handler: consumes StreamDataMessages for this
        # reader's stream id (returning True stops further handlers);
        # everything else is left for other handlers (returns False).
        if isinstance(message, StreamDataMessage):
            if message.stream_id == self._stream_id:
                with self._lock:
                    if message.data is not None:
                        self._buffer.extend(message.data)
                    if message.eof:
                        self._eof = True
                    # Notify listeners on daemon threads so a slow
                    # callback cannot stall the channel receive path.
                    for listener in self._listeners:
                        try:
                            threading.Thread(target=listener, name="Message Callback", args=[len(self._buffer)], daemon=True).start()
                        except Exception as ex:
                            RNS.log("Error calling RawChannelReader(" + str(self._stream_id) + ") callback: " + str(ex))
                return True
        return False
    def _read(self, __size: int) -> bytes | None:
        # Returns None while no data has arrived and EOF has not been
        # signalled; after EOF an empty result marks end-of-stream.
        with self._lock:
            result = self._buffer[:__size]
            self._buffer = self._buffer[__size:]
            return result if len(result) > 0 or self._eof else None
    def readinto(self, __buffer: bytearray) -> int | None:
        # RawIOBase contract: None means "no data available right now",
        # 0 means EOF.
        ready = self._read(len(__buffer))
        if ready is not None:
            __buffer[:len(ready)] = ready
        return len(ready) if ready is not None else None
    def writable(self) -> bool:
        return False
    def seekable(self) -> bool:
        return False
    def readable(self) -> bool:
        return True
    def close(self):
        # Detach from the channel so no further messages or callbacks
        # reference this reader.
        with self._lock:
            self._channel.remove_message_handler(self._handle_message)
            self._listeners.clear()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False
class RawChannelWriter(RawIOBase, AbstractContextManager):
    """
    An implementation of RawIOBase that receives
    binary stream data sent over a channel.

    This class generally need not be instantiated directly.
    Use :func:`RNS.Buffer.create_reader`,
    :func:`RNS.Buffer.create_writer`, and
    :func:`RNS.Buffer.create_bidirectional_buffer` functions
    to create buffered streams with optional callbacks.

    For additional information on the API of this
    object, see the Python documentation for
    ``RawIOBase``.
    """
    def __init__(self, stream_id: int, channel: Channel):
        """
        Create a raw channel writer.

        :param stream_id: remote stream id to sent do
        :param channel: ``Channel`` object to send on
        """
        self._stream_id = stream_id
        self._channel = channel
        self._eof = False
    def write(self, __b: bytes) -> int | None:
        # At most MAX_DATA_LEN bytes are sent per call; the buffered
        # layer re-invokes write() with the remainder based on the
        # returned count. A not-ready channel yields a short write of 0.
        try:
            chunk = bytes(__b[:StreamDataMessage.MAX_DATA_LEN])
            message = StreamDataMessage(self._stream_id, chunk, self._eof)
            self._channel.send(message)
            return len(chunk)
        except RNS.Channel.ChannelException as cex:
            if cex.type != RNS.Channel.CEType.ME_LINK_NOT_READY:
                raise
        return 0
    def close(self):
        # Wait (bounded) for the channel's TX ring to drain before
        # signalling EOF, roughly one RTT per queued envelope.
        try:
            link_rtt = self._channel._outlet.link.rtt
            timeout = time.time() + (link_rtt * len(self._channel._tx_ring) * 1)
        except Exception as e:
            # The outlet may not expose a link (e.g. test outlets);
            # fall back to a fixed 15 second drain timeout.
            timeout = time.time() + 15
        while time.time() < timeout and not self._channel.is_ready_to_send():
            time.sleep(0.05)
        # An empty write with the eof flag set tells the remote reader
        # that the stream has ended.
        self._eof = True
        self.write(bytes())
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False
    def seekable(self) -> bool:
        return False
    def readable(self) -> bool:
        return False
    def writable(self) -> bool:
        return True
class Buffer:
    """
    Static functions for creating buffered streams that send
    and receive over a ``Channel``.

    These functions use ``BufferedReader``, ``BufferedWriter``,
    and ``BufferedRWPair`` to add buffering to
    ``RawChannelReader`` and ``RawChannelWriter``.
    """
    @staticmethod
    def create_reader(stream_id: int, channel: Channel,
                      ready_callback: Callable[[int], None] | None = None) -> BufferedReader:
        """
        Create a buffered reader that reads binary data sent
        over a ``Channel``, with an optional callback when
        new data is available.

        Callback signature: ``(ready_bytes: int) -> None``

        For more information on the reader-specific functions
        of this object, see the Python documentation for
        ``BufferedReader``

        :param stream_id: the local stream id to receive from
        :param channel: the channel to receive on
        :param ready_callback: function to call when new data is available
        :return: a BufferedReader object
        """
        raw_reader = RawChannelReader(stream_id, channel)
        if ready_callback:
            raw_reader.add_ready_callback(ready_callback)
        return BufferedReader(raw_reader)

    @staticmethod
    def create_writer(stream_id: int, channel: Channel) -> BufferedWriter:
        """
        Create a buffered writer that writes binary data over
        a ``Channel``.

        For more information on the writer-specific functions
        of this object, see the Python documentation for
        ``BufferedWriter``

        :param stream_id: the remote stream id to send to
        :param channel: the channel to send on
        :return: a BufferedWriter object
        """
        return BufferedWriter(RawChannelWriter(stream_id, channel))

    @staticmethod
    def create_bidirectional_buffer(receive_stream_id: int, send_stream_id: int, channel: Channel,
                                    ready_callback: Callable[[int], None] | None = None) -> BufferedRWPair:
        """
        Create a buffered reader/writer pair that reads and
        writes binary data over a ``Channel``, with an
        optional callback when new data is available.

        Callback signature: ``(ready_bytes: int) -> None``

        For more information on the reader-specific functions
        of this object, see the Python documentation for
        ``BufferedRWPair``

        :param receive_stream_id: the local stream id to receive at
        :param send_stream_id: the remote stream id to send to
        :param channel: the channel to send and receive on
        :param ready_callback: function to call when new data is available
        :return: a BufferedRWPair object
        """
        raw_reader = RawChannelReader(receive_stream_id, channel)
        if ready_callback:
            raw_reader.add_ready_callback(ready_callback)
        raw_writer = RawChannelWriter(send_stream_id, channel)
        return BufferedRWPair(raw_reader, raw_writer)
import math
import os
import RNS
import time
import atexit
import hashlib
from .vendor import umsgpack as umsgpack
from RNS.Cryptography import X25519PrivateKey, X25519PublicKey, Ed25519PrivateKey, Ed25519PublicKey
from RNS.Cryptography import Fernet
class Identity:
    """
    This class is used to manage identities in Reticulum. It provides methods
    for encryption, decryption, signatures and verification, and is the basis
    for all encrypted communication over Reticulum networks.

    :param create_keys: Specifies whether new encryption and signing keys should be generated.
    """

    CURVE = "Curve25519"
    """
    The curve used for Elliptic Curve DH key exchanges
    """

    KEYSIZE = 256*2
    """
    X25519 key size in bits. A complete key is the concatenation of a 256 bit encryption key, and a 256 bit signing key.
    """

    # Non-configurable constants
    FERNET_OVERHEAD = RNS.Cryptography.Fernet.FERNET_OVERHEAD
    AES128_BLOCKSIZE = 16 # In bytes
    HASHLENGTH = 256 # In bits
    SIGLENGTH = KEYSIZE # In bits

    NAME_HASH_LENGTH = 80
    TRUNCATED_HASHLENGTH = RNS.Reticulum.TRUNCATED_HASHLENGTH
    """
    Constant specifying the truncated hash length (in bits) used by Reticulum
    for addressable hashes and other purposes. Non-configurable.
    """

    # Storage. Maps destination_hash -> [heard_time, packet_hash, public_key, app_data]
    known_destinations = {}

    @staticmethod
    def remember(packet_hash, destination_hash, public_key, app_data = None):
        """
        Store the public key and latest app_data heard for a destination.

        :param packet_hash: Hash of the packet the key was learned from.
        :param destination_hash: Destination hash as *bytes*.
        :param public_key: Full public key as *bytes*.
        :param app_data: Optional app_data as *bytes*.
        :raises: *TypeError* if the public key size is invalid.
        """
        if len(public_key) != Identity.KEYSIZE//8:
            raise TypeError("Can't remember "+RNS.prettyhexrep(destination_hash)+", the public key size of "+str(len(public_key))+" is not valid.", RNS.LOG_ERROR)
        else:
            Identity.known_destinations[destination_hash] = [time.time(), packet_hash, public_key, app_data]

    @staticmethod
    def recall(destination_hash):
        """
        Recall identity for a destination hash.

        :param destination_hash: Destination hash as *bytes*.
        :returns: An :ref:`RNS.Identity<api-identity>` instance that can be used to create an outgoing :ref:`RNS.Destination<api-destination>`, or *None* if the destination is unknown.
        """
        if destination_hash in Identity.known_destinations:
            identity_data = Identity.known_destinations[destination_hash]
            identity = Identity(create_keys=False)
            identity.load_public_key(identity_data[2])
            identity.app_data = identity_data[3]
            return identity
        else:
            # Fall back to destinations registered on this instance, which
            # hold their own identities but carry no heard app_data.
            for registered_destination in RNS.Transport.destinations:
                if destination_hash == registered_destination.hash:
                    identity = Identity(create_keys=False)
                    identity.load_public_key(registered_destination.identity.get_public_key())
                    identity.app_data = None
                    return identity

            return None

    @staticmethod
    def recall_app_data(destination_hash):
        """
        Recall last heard app_data for a destination hash.

        :param destination_hash: Destination hash as *bytes*.
        :returns: *Bytes* containing app_data, or *None* if the destination is unknown.
        """
        if destination_hash in Identity.known_destinations:
            return Identity.known_destinations[destination_hash][3]
        else:
            return None

    @staticmethod
    def save_known_destinations():
        # TODO: Improve the storage method so we don't have to
        # deserialize and serialize the entire table on every
        # save, but the only changes. It might be possible to
        # simply overwrite on exit now that every local client
        # disconnect triggers a data persist.
        try:
            # If a save is already in progress, wait for it to complete
            # (bounded by wait_timeout) before starting this one.
            if hasattr(Identity, "saving_known_destinations"):
                wait_interval = 0.2
                wait_timeout = 5
                wait_start = time.time()
                while Identity.saving_known_destinations:
                    time.sleep(wait_interval)
                    if time.time() > wait_start+wait_timeout:
                        RNS.log("Could not save known destinations to storage, waiting for previous save operation timed out.", RNS.LOG_ERROR)
                        return False

            Identity.saving_known_destinations = True
            save_start = time.time()

            # Merge entries already on disk that are missing from memory,
            # so concurrent instances do not overwrite each other's data.
            storage_known_destinations = {}
            if os.path.isfile(RNS.Reticulum.storagepath+"/known_destinations"):
                try:
                    with open(RNS.Reticulum.storagepath+"/known_destinations", "rb") as file:
                        storage_known_destinations = umsgpack.load(file)
                except Exception:
                    # Best effort only; an unreadable file is recreated below.
                    pass

            for destination_hash in storage_known_destinations:
                if not destination_hash in Identity.known_destinations:
                    Identity.known_destinations[destination_hash] = storage_known_destinations[destination_hash]

            RNS.log("Saving "+str(len(Identity.known_destinations))+" known destinations to storage...", RNS.LOG_DEBUG)

            with open(RNS.Reticulum.storagepath+"/known_destinations", "wb") as file:
                umsgpack.dump(Identity.known_destinations, file)

            save_time = time.time() - save_start
            if save_time < 1:
                time_str = str(round(save_time*1000,2))+"ms"
            else:
                time_str = str(round(save_time,2))+"s"

            RNS.log("Saved known destinations to storage in "+time_str, RNS.LOG_DEBUG)

        except Exception as e:
            RNS.log("Error while saving known destinations to disk, the contained exception was: "+str(e), RNS.LOG_ERROR)

        Identity.saving_known_destinations = False

    @staticmethod
    def load_known_destinations():
        if os.path.isfile(RNS.Reticulum.storagepath+"/known_destinations"):
            try:
                with open(RNS.Reticulum.storagepath+"/known_destinations", "rb") as file:
                    loaded_known_destinations = umsgpack.load(file)

                # Only keep entries whose keys have the expected truncated
                # hash length, discarding malformed entries.
                Identity.known_destinations = {}
                for known_destination in loaded_known_destinations:
                    if len(known_destination) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
                        Identity.known_destinations[known_destination] = loaded_known_destinations[known_destination]

                RNS.log("Loaded "+str(len(Identity.known_destinations))+" known destination from storage", RNS.LOG_VERBOSE)
            except Exception:
                RNS.log("Error loading known destinations from disk, file will be recreated on exit", RNS.LOG_ERROR)
        else:
            RNS.log("Destinations file does not exist, no known destinations loaded", RNS.LOG_VERBOSE)

    @staticmethod
    def full_hash(data):
        """
        Get a SHA-256 hash of passed data.

        :param data: Data to be hashed as *bytes*.
        :returns: SHA-256 hash as *bytes*
        """
        return RNS.Cryptography.sha256(data)

    @staticmethod
    def truncated_hash(data):
        """
        Get a truncated SHA-256 hash of passed data.

        :param data: Data to be hashed as *bytes*.
        :returns: Truncated SHA-256 hash as *bytes*
        """
        return Identity.full_hash(data)[:(Identity.TRUNCATED_HASHLENGTH//8)]

    @staticmethod
    def get_random_hash():
        """
        Get a random SHA-256 hash.

        :returns: Truncated SHA-256 hash of random data as *bytes*
        """
        return Identity.truncated_hash(os.urandom(Identity.TRUNCATED_HASHLENGTH//8))

    @staticmethod
    def validate_announce(packet):
        """
        Validate the signature and destination hash of an incoming announce
        packet, and remember the announced identity if it is valid.

        :param packet: The announce packet to validate.
        :returns: True if the announce is valid, otherwise False.
        """
        try:
            if packet.packet_type == RNS.Packet.ANNOUNCE:
                destination_hash = packet.destination_hash
                # Announce data layout: public key, name hash, 10 byte
                # random hash, signature, then optional app_data.
                public_key = packet.data[:Identity.KEYSIZE//8]
                name_hash = packet.data[Identity.KEYSIZE//8:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8]
                random_hash = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10]
                signature = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8]
                app_data = b""
                if len(packet.data) > Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:
                    app_data = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:]

                signed_data = destination_hash+public_key+name_hash+random_hash+app_data

                # The signature covers an empty app_data field, but None is
                # remembered when no app_data was actually included.
                if not len(packet.data) > Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:
                    app_data = None

                announced_identity = Identity(create_keys=False)
                announced_identity.load_public_key(public_key)

                if announced_identity.pub is not None and announced_identity.validate(signature, signed_data):
                    hash_material = name_hash+announced_identity.hash
                    expected_hash = RNS.Identity.full_hash(hash_material)[:RNS.Reticulum.TRUNCATED_HASHLENGTH//8]

                    if destination_hash == expected_hash:
                        # Check if we already have a public key for this destination
                        # and make sure the public key is not different.
                        if destination_hash in Identity.known_destinations:
                            if public_key != Identity.known_destinations[destination_hash][2]:
                                # In reality, this should never occur, but in the odd case
                                # that someone manages a hash collision, we reject the announce.
                                RNS.log("Received announce with valid signature and destination hash, but announced public key does not match already known public key.", RNS.LOG_CRITICAL)
                                RNS.log("This may indicate an attempt to modify network paths, or a random hash collision. The announce was rejected.", RNS.LOG_CRITICAL)
                                return False

                        RNS.Identity.remember(packet.get_hash(), destination_hash, public_key, app_data)
                        del announced_identity

                        if packet.rssi is not None or packet.snr is not None:
                            signal_str = " ["
                            if packet.rssi is not None:
                                signal_str += "RSSI "+str(packet.rssi)+"dBm"
                                if packet.snr is not None:
                                    signal_str += ", "
                            if packet.snr is not None:
                                signal_str += "SNR "+str(packet.snr)+"dB"
                            signal_str += "]"
                        else:
                            signal_str = ""

                        if hasattr(packet, "transport_id") and packet.transport_id is not None:
                            RNS.log("Valid announce for "+RNS.prettyhexrep(destination_hash)+" "+str(packet.hops)+" hops away, received via "+RNS.prettyhexrep(packet.transport_id)+" on "+str(packet.receiving_interface)+signal_str, RNS.LOG_EXTREME)
                        else:
                            RNS.log("Valid announce for "+RNS.prettyhexrep(destination_hash)+" "+str(packet.hops)+" hops away, received on "+str(packet.receiving_interface)+signal_str, RNS.LOG_EXTREME)

                        return True
                    else:
                        RNS.log("Received invalid announce for "+RNS.prettyhexrep(destination_hash)+": Destination mismatch.", RNS.LOG_DEBUG)
                        return False
                else:
                    RNS.log("Received invalid announce for "+RNS.prettyhexrep(destination_hash)+": Invalid signature.", RNS.LOG_DEBUG)
                    del announced_identity
                    return False

        except Exception as e:
            RNS.log("Error occurred while validating announce. The contained exception was: "+str(e), RNS.LOG_ERROR)
            return False

    @staticmethod
    def persist_data():
        # Clients connected to a shared instance must not persist; the
        # shared instance owns the storage files.
        if not RNS.Transport.owner.is_connected_to_shared_instance:
            Identity.save_known_destinations()

    @staticmethod
    def exit_handler():
        Identity.persist_data()

    @staticmethod
    def from_bytes(prv_bytes):
        """
        Create a new :ref:`RNS.Identity<api-identity>` instance from *bytes* of private key.
        Can be used to load previously created and saved identities into Reticulum.

        :param prv_bytes: The *bytes* of private a saved private key. **HAZARD!** Never use this to generate a new key by feeding random data in prv_bytes.
        :returns: A :ref:`RNS.Identity<api-identity>` instance, or *None* if the *bytes* data was invalid.
        """
        identity = Identity(create_keys=False)
        if identity.load_private_key(prv_bytes):
            return identity
        else:
            return None

    @staticmethod
    def from_file(path):
        """
        Create a new :ref:`RNS.Identity<api-identity>` instance from a file.
        Can be used to load previously created and saved identities into Reticulum.

        :param path: The full path to the saved :ref:`RNS.Identity<api-identity>` data
        :returns: A :ref:`RNS.Identity<api-identity>` instance, or *None* if the loaded data was invalid.
        """
        identity = Identity(create_keys=False)
        if identity.load(path):
            return identity
        else:
            return None

    def to_file(self, path):
        """
        Saves the identity to a file. This will write the private key to disk,
        and anyone with access to this file will be able to decrypt all
        communication for the identity. Be very careful with this method.

        :param path: The full path specifying where to save the identity.
        :returns: True if the file was saved, otherwise False.
        """
        try:
            with open(path, "wb") as key_file:
                key_file.write(self.get_private_key())
            return True
        except Exception as e:
            RNS.log("Error while saving identity to "+str(path), RNS.LOG_ERROR)
            RNS.log("The contained exception was: "+str(e))
            return False

    def __init__(self,create_keys=True):
        # Initialize keys to none
        self.prv = None
        self.prv_bytes = None
        self.sig_prv = None
        self.sig_prv_bytes = None
        self.pub = None
        self.pub_bytes = None
        self.sig_pub = None
        self.sig_pub_bytes = None
        self.hash = None
        self.hexhash = None
        # Last heard app_data, set by Identity.recall()
        self.app_data = None

        if create_keys:
            self.create_keys()

    def create_keys(self):
        """Generate new encryption and signing keypairs for this instance."""
        self.prv = X25519PrivateKey.generate()
        self.prv_bytes = self.prv.private_bytes()

        self.sig_prv = Ed25519PrivateKey.generate()
        self.sig_prv_bytes = self.sig_prv.private_bytes()

        self.pub = self.prv.public_key()
        self.pub_bytes = self.pub.public_bytes()

        self.sig_pub = self.sig_prv.public_key()
        self.sig_pub_bytes = self.sig_pub.public_bytes()

        self.update_hashes()

        RNS.log("Identity keys created for "+RNS.prettyhexrep(self.hash), RNS.LOG_VERBOSE)

    def get_private_key(self):
        """
        :returns: The private key as *bytes*
        """
        return self.prv_bytes+self.sig_prv_bytes

    def get_public_key(self):
        """
        :returns: The public key as *bytes*
        """
        return self.pub_bytes+self.sig_pub_bytes

    def load_private_key(self, prv_bytes):
        """
        Load a private key into the instance.

        :param prv_bytes: The private key as *bytes*.
        :returns: True if the key was loaded, otherwise False.
        """
        try:
            # The first half is the X25519 encryption key, the second
            # half is the Ed25519 signing key.
            self.prv_bytes = prv_bytes[:Identity.KEYSIZE//8//2]
            self.prv = X25519PrivateKey.from_private_bytes(self.prv_bytes)
            self.sig_prv_bytes = prv_bytes[Identity.KEYSIZE//8//2:]
            self.sig_prv = Ed25519PrivateKey.from_private_bytes(self.sig_prv_bytes)

            self.pub = self.prv.public_key()
            self.pub_bytes = self.pub.public_bytes()

            self.sig_pub = self.sig_prv.public_key()
            self.sig_pub_bytes = self.sig_pub.public_bytes()

            self.update_hashes()

            return True

        except Exception as e:
            RNS.log("Failed to load identity key", RNS.LOG_ERROR)
            RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
            return False

    def load_public_key(self, pub_bytes):
        """
        Load a public key into the instance.

        :param pub_bytes: The public key as *bytes*.
        :returns: True if the key was loaded, otherwise False.
        """
        try:
            # The first half is the X25519 encryption key, the second
            # half is the Ed25519 verification key.
            self.pub_bytes = pub_bytes[:Identity.KEYSIZE//8//2]
            self.sig_pub_bytes = pub_bytes[Identity.KEYSIZE//8//2:]

            self.pub = X25519PublicKey.from_public_bytes(self.pub_bytes)
            self.sig_pub = Ed25519PublicKey.from_public_bytes(self.sig_pub_bytes)

            self.update_hashes()

            return True

        except Exception as e:
            RNS.log("Error while loading public key, the contained exception was: "+str(e), RNS.LOG_ERROR)
            return False

    def update_hashes(self):
        # Derive the addressable hash of this identity from its public key.
        self.hash = Identity.truncated_hash(self.get_public_key())
        self.hexhash = self.hash.hex()

    def load(self, path):
        """
        Load a private key from a file into the instance.

        :param path: The full path to the saved key data.
        :returns: True if the key was loaded, otherwise False.
        """
        try:
            with open(path, "rb") as key_file:
                prv_bytes = key_file.read()
            return self.load_private_key(prv_bytes)
        except Exception as e:
            RNS.log("Error while loading identity from "+str(path), RNS.LOG_ERROR)
            RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
            return False

    def get_salt(self):
        # The identity hash is used as HKDF salt for key derivation.
        return self.hash

    def get_context(self):
        return None

    def encrypt(self, plaintext):
        """
        Encrypts information for the identity.

        :param plaintext: The plaintext to be encrypted as *bytes*.
        :returns: Ciphertext token as *bytes*.
        :raises: *KeyError* if the instance does not hold a public key.
        """
        if self.pub is not None:
            # Ephemeral-static Diffie-Hellman: a fresh ephemeral key is
            # generated per message, and its public part is prepended to
            # the token so the recipient can derive the same shared key.
            ephemeral_key = X25519PrivateKey.generate()
            ephemeral_pub_bytes = ephemeral_key.public_key().public_bytes()

            shared_key = ephemeral_key.exchange(self.pub)
            derived_key = RNS.Cryptography.hkdf(
                length=32,
                derive_from=shared_key,
                salt=self.get_salt(),
                context=self.get_context(),
            )

            fernet = Fernet(derived_key)
            ciphertext = fernet.encrypt(plaintext)
            token = ephemeral_pub_bytes+ciphertext

            return token
        else:
            raise KeyError("Encryption failed because identity does not hold a public key")

    def decrypt(self, ciphertext_token):
        """
        Decrypts information for the identity.

        :param ciphertext_token: The ciphertext to be decrypted as *bytes*.
        :returns: Plaintext as *bytes*, or *None* if decryption fails.
        :raises: *KeyError* if the instance does not hold a private key.
        """
        if self.prv is not None:
            if len(ciphertext_token) > Identity.KEYSIZE//8//2:
                plaintext = None
                try:
                    # The sender's ephemeral public key is prepended to
                    # the token; use it to derive the shared key.
                    peer_pub_bytes = ciphertext_token[:Identity.KEYSIZE//8//2]
                    peer_pub = X25519PublicKey.from_public_bytes(peer_pub_bytes)

                    shared_key = self.prv.exchange(peer_pub)
                    derived_key = RNS.Cryptography.hkdf(
                        length=32,
                        derive_from=shared_key,
                        salt=self.get_salt(),
                        context=self.get_context(),
                    )

                    fernet = Fernet(derived_key)
                    ciphertext = ciphertext_token[Identity.KEYSIZE//8//2:]
                    plaintext = fernet.decrypt(ciphertext)

                except Exception as e:
                    RNS.log("Decryption by "+RNS.prettyhexrep(self.hash)+" failed: "+str(e), RNS.LOG_DEBUG)

                return plaintext
            else:
                RNS.log("Decryption failed because the token size was invalid.", RNS.LOG_DEBUG)
                return None
        else:
            raise KeyError("Decryption failed because identity does not hold a private key")

    def sign(self, message):
        """
        Signs information by the identity.

        :param message: The message to be signed as *bytes*.
        :returns: Signature as *bytes*.
        :raises: *KeyError* if the instance does not hold a private key.
        """
        if self.sig_prv is not None:
            try:
                return self.sig_prv.sign(message)
            except Exception as e:
                RNS.log("The identity "+str(self)+" could not sign the requested message. The contained exception was: "+str(e), RNS.LOG_ERROR)
                raise e
        else:
            raise KeyError("Signing failed because identity does not hold a private key")

    def validate(self, signature, message):
        """
        Validates the signature of a signed message.

        :param signature: The signature to be validated as *bytes*.
        :param message: The message to be validated as *bytes*.
        :returns: True if the signature is valid, otherwise False.
        :raises: *KeyError* if the instance does not hold a public key.
        """
        if self.pub is not None:
            try:
                self.sig_pub.verify(signature, message)
                return True
            except Exception:
                return False
        else:
            raise KeyError("Signature validation failed because identity does not hold a public key")

    def prove(self, packet, destination=None):
        """
        Sign a packet's hash and send the resulting proof.

        :param packet: The packet to prove.
        :param destination: Optional destination for the proof; if *None*, the packet's own proof destination is used.
        """
        signature = self.sign(packet.packet_hash)
        # Implicit proofs omit the packet hash, since the receiver can
        # infer it; explicit proofs include it.
        if RNS.Reticulum.should_use_implicit_proof():
            proof_data = signature
        else:
            proof_data = packet.packet_hash + signature

        if destination is None:
            destination = packet.generate_proof_destination()

        proof = RNS.Packet(destination, proof_data, RNS.Packet.PROOF, attached_interface = packet.receiving_interface)
        proof.send()

    def __str__(self):
        return RNS.prettyhexrep(self.hash)
import warnings as _warnings
import hashlib as _hashlib
# Translation tables applying the RFC 2104 outer (0x5C) and inner (0x36)
# key pads in a single C-level pass via bytes.translate().
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    # Default block size (in bytes) assumed when the digest object does
    # not expose one; the effective value is stored in self.block_size.
    blocksize = 64 # 512-bit HMAC; can be changed in subclasses.

    __slots__ = (
        "_hmac", "_inner", "_outer", "block_size", "digest_size"
    )

    def __init__(self, key, msg=None, digestmod=_hashlib.sha256):
        """Create a new HMAC object.

        key: bytes or buffer, key for the keyed hash object.
        msg: bytes or buffer, Initial input for the hash or None.
        digestmod: A hash name suitable for hashlib.new(). *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A module supporting PEP 247.

        Required as of 3.8, despite its position after the optional
        msg argument. Passing it as a keyword argument is
        recommended, though not required for legacy API reasons.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if not digestmod:
            raise TypeError("Missing required parameter 'digestmod'.")

        self._hmac_init(key, msg, digestmod)

    def _hmac_init(self, key, msg, digestmod):
        # Normalize digestmod (constructor, hash name, or PEP 247 module)
        # into a callable producing fresh digest objects.
        if callable(digestmod):
            digest_cons = digestmod
        elif isinstance(digestmod, str):
            digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            digest_cons = lambda d=b'': digestmod.new(d)

        # _hmac is always None in this pure-Python implementation; the
        # attribute is kept so the object layout matches CPython's hmac.
        self._hmac = None
        self._outer = digest_cons()
        self._inner = digest_cons()
        self.digest_size = self._inner.digest_size

        if hasattr(self._inner, 'block_size'):
            blocksize = self._inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # Keys longer than one block are first hashed down (RFC 2104).
        if len(key) > blocksize:
            key = digest_cons(key).digest()

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # Zero-pad the key and prime the inner/outer digests with the
        # ipad/opad-masked key.
        key = key.ljust(blocksize, b'\0')
        self._outer.update(key.translate(trans_5C))
        self._inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        if self._hmac:
            return self._hmac.name
        else:
            return f"hmac-{self._inner.name}"

    def update(self, msg):
        """Feed data from msg into this hashing object."""
        inst = self._hmac or self._inner
        inst.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # NOTE(review): block_size is not copied here, so accessing it on
        # a copy raises AttributeError (same as CPython's hmac) — confirm
        # whether any caller relies on it.
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_size = self.digest_size
        if self._hmac:
            other._hmac = self._hmac.copy()
            other._inner = other._outer = None
        else:
            other._hmac = None
            other._inner = self._inner.copy()
            other._outer = self._outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        if self._hmac:
            return self._hmac
        else:
            # Finalize on copies so the object stays updatable.
            h = self._outer.copy()
            h.update(self._inner.digest())
            return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns the hmac value as bytes. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg=None, digestmod=_hashlib.sha256):
    """Create and return a new HMAC hashing object.

    key: bytes or buffer, The starting key for the hash.
    msg: bytes or buffer, Initial input for the hash, or None.
    digestmod: A hash name suitable for hashlib.new(). *OR*
               A hashlib constructor returning a new hash object. *OR*
               A module supporting PEP 247.

    Required as of 3.8, despite its position after the optional
    msg argument. Passing it as a keyword argument is
    recommended, though not required for legacy API reasons.

    Feed further bytes in with the returned object's update() method,
    and read the current value at any time with digest() or hexdigest().
    """
    return HMAC(key, msg, digestmod)
def digest(key, msg, digest):
    """Fast inline implementation of HMAC.

    key: bytes or buffer, The key for the keyed hash object.
    msg: bytes or buffer, Input message.
    digest: A hash name suitable for hashlib.new() for best performance. *OR*
            A hashlib constructor returning a new hash object. *OR*
            A module supporting PEP 247.
    """
    # Normalize digest into a callable producing fresh digest objects.
    if callable(digest):
        digest_cons = digest
    elif isinstance(digest, str):
        digest_cons = lambda d=b'': _hashlib.new(digest, d)
    else:
        digest_cons = lambda d=b'': digest.new(d)

    inner = digest_cons()
    outer = digest_cons()
    blocksize = getattr(inner, 'block_size', 64)

    # Keys longer than one block are first hashed down (RFC 2104).
    if len(key) > blocksize:
        key = digest_cons(key).digest()

    # Zero-pad the key and apply the ipad/opad masks.
    key = key + b'\x00' * (blocksize - len(key))
    inner.update(key.translate(trans_36))
    outer.update(key.translate(trans_5C))
    inner.update(msg)
    outer.update(inner.digest())
    return outer.digest()
# WARNING! Only the X25519PrivateKey.exchange() method attempts to hide execution time.
# In the context of Reticulum, this is sufficient, but it may not be in other systems. If
# this code is to be used to provide cryptographic security in an environment where the
# start and end times of the execution can be guessed, inferred or measured then it is
# critical that steps are taken to hide the execution time, for instance by adding a
# delay so that encrypted packets are not sent until a fixed time after the _start_ of
# execution.
import os
import time
P = 2 ** 255 - 19  # Prime of the Curve25519 base field
_A = 486662        # Montgomery curve coefficient A for Curve25519
def _point_add(point_n, point_m, point_diff):
    """Given the projection of two points and their difference, return their sum"""
    # Montgomery differential addition in projective (X, Z) coordinates;
    # point_diff is the projection of (n - m). The << 2 multiplies by 4.
    (xn, zn) = point_n
    (xm, zm) = point_m
    (x_diff, z_diff) = point_diff
    x = (z_diff << 2) * (xm * xn - zm * zn) ** 2
    z = (x_diff << 2) * (xm * zn - zm * xn) ** 2
    return x % P, z % P
def _point_double(point_n):
    """Double a point provided in projective coordinates"""
    # Montgomery doubling in projective (X, Z) coordinates.
    (xn, zn) = point_n
    xn2 = xn ** 2
    zn2 = zn ** 2
    x = (xn2 - zn2) ** 2
    xzn = xn * zn
    z = 4 * xzn * (xn2 + _A * xzn + zn2)
    return x % P, z % P
def _const_time_swap(a, b, swap):
"""Swap two values in constant time"""
index = int(swap) * 2
temp = (a, b, b, a)
return temp[index:index+2]
def _raw_curve25519(base, n):
    """Raise the point base to the power n"""
    # Montgomery ladder: one doubling and one differential addition per
    # scalar bit, with conditional swaps so the sequence of point
    # operations does not depend on the bit values.
    zero = (1, 0)
    one = (base, 1)
    mP, m1P = zero, one

    for i in reversed(range(256)):
        bit = bool(n & (1 << i))
        mP, m1P = _const_time_swap(mP, m1P, bit)
        mP, m1P = _point_double(mP), _point_add(mP, m1P, one)
        mP, m1P = _const_time_swap(mP, m1P, bit)

    x, z = mP
    # Convert back to an affine x coordinate; the modular inverse of z
    # is computed via Fermat's little theorem (z**(P-2) mod P).
    inv_z = pow(z, P - 2, P)
    return (x * inv_z) % P
def _unpack_number(s):
"""Unpack 32 bytes to a 256 bit value"""
if len(s) != 32:
raise ValueError('Curve25519 values must be 32 bytes')
return int.from_bytes(s, "little")
def _pack_number(n):
"""Pack a value into 32 bytes"""
return n.to_bytes(32, "little")
def _fix_secret(n):
"""Mask a value to be an acceptable exponent"""
n &= ~7
n &= ~(128 << 8 * 31)
n |= 64 << 8 * 31
return n
def curve25519(base_point_raw, secret_raw):
    """Scalar-multiply an arbitrary base point by a clamped secret."""
    scalar = _fix_secret(_unpack_number(secret_raw))
    point = _unpack_number(base_point_raw)
    return _pack_number(_raw_curve25519(point, scalar))
def curve25519_base(secret_raw):
    """Scalar-multiply the generator point (x = 9) by a clamped secret."""
    scalar = _fix_secret(_unpack_number(secret_raw))
    return _pack_number(_raw_curve25519(9, scalar))
class X25519PublicKey:
    """A Curve25519 public key, held as the integer x coordinate."""

    def __init__(self, x):
        self.x = x

    @classmethod
    def from_public_bytes(cls, data):
        """Load a public key from its 32 byte little-endian encoding."""
        return cls(_unpack_number(data))

    def public_bytes(self):
        """Serialize the public key to its 32 byte encoding."""
        return _pack_number(self.x)
class X25519PrivateKey:
    """A Curve25519 private key, with execution-time hiding on exchange().

    T_MAX tracks the slowest exchange observed within a DELAY_WINDOW-second
    window; faster exchanges are padded up to that duration, clamped to
    [MIN_EXEC_TIME, MAX_EXEC_TIME]. See the module-level warning about the
    limits of this approach.
    """
    MIN_EXEC_TIME = 0.002
    MAX_EXEC_TIME = 0.5
    DELAY_WINDOW = 10

    T_CLEAR = None
    T_MAX = 0

    def __init__(self, a):
        # a is the already-clamped private scalar as an int.
        self.a = a

    @classmethod
    def generate(cls):
        """Generate a new random private key."""
        return cls.from_private_bytes(os.urandom(32))

    @classmethod
    def from_private_bytes(cls, data):
        """Load a private key from 32 bytes, clamping the scalar."""
        return cls(_fix_secret(_unpack_number(data)))

    def private_bytes(self):
        """Return the (clamped) private scalar as 32 bytes."""
        return _pack_number(self.a)

    def public_key(self):
        """Derive the matching public key."""
        return X25519PublicKey.from_public_bytes(_pack_number(_raw_curve25519(9, self.a)))

    def exchange(self, peer_public_key):
        """Perform an ECDH exchange and return the 32 byte shared secret."""
        if isinstance(peer_public_key, bytes):
            peer_public_key = X25519PublicKey.from_public_bytes(peer_public_key)

        start = time.time()
        shared = _pack_number(_raw_curve25519(peer_public_key.x, self.a))
        end = time.time()
        duration = end-start

        if X25519PrivateKey.T_CLEAR is None:
            X25519PrivateKey.T_CLEAR = end + X25519PrivateKey.DELAY_WINDOW

        if end > X25519PrivateKey.T_CLEAR:
            # The observation window has expired; start a fresh one.
            X25519PrivateKey.T_CLEAR = end + X25519PrivateKey.DELAY_WINDOW
            X25519PrivateKey.T_MAX = 0

        if duration < X25519PrivateKey.T_MAX or duration < X25519PrivateKey.MIN_EXEC_TIME:
            # Pad execution time up to the slowest recent exchange,
            # clamped between MIN_EXEC_TIME and MAX_EXEC_TIME.
            target = start+X25519PrivateKey.T_MAX

            if target > start+X25519PrivateKey.MAX_EXEC_TIME:
                target = start+X25519PrivateKey.MAX_EXEC_TIME

            if target < start+X25519PrivateKey.MIN_EXEC_TIME:
                target = start+X25519PrivateKey.MIN_EXEC_TIME

            try:
                time.sleep(target-time.time())
            except Exception:
                # A negative sleep target simply means no padding is needed.
                pass

        elif duration > X25519PrivateKey.T_MAX:
            X25519PrivateKey.T_MAX = duration

        return shared
import os
import time
from RNS.Cryptography import HMAC
from RNS.Cryptography import PKCS7
from RNS.Cryptography.AES import AES_128_CBC
class Fernet():
    """
    This class provides a slightly modified implementation of the Fernet spec
    found at: https://github.com/fernet/spec/blob/master/Spec.md

    According to the spec, a Fernet token includes a one byte VERSION and
    eight byte TIMESTAMP field at the start of each token. These fields are
    not relevant to Reticulum. They are therefore stripped from this
    implementation, since they incur overhead and leak initiator metadata.

    Token layout: 16 byte IV + ciphertext + 32 byte HMAC-SHA256.
    """
    FERNET_OVERHEAD = 48 # Bytes

    @staticmethod
    def generate_key():
        """Return a new random 32 byte Fernet key."""
        return os.urandom(32)

    def __init__(self, key = None):
        if key is None:
            raise ValueError("Fernet key cannot be None")

        if len(key) != 32:
            raise ValueError("Fernet key must be 32 bytes, not "+str(len(key)))

        # First half of the key authenticates, second half encrypts.
        self._signing_key = key[:16]
        self._encryption_key = key[16:]

    def verify_hmac(self, token):
        """Return True if the trailing 32 byte HMAC of ``token`` is valid.

        :raises: *ValueError* if the token is too short to carry an HMAC.
        """
        if len(token) <= 32:
            raise ValueError("Cannot verify HMAC on token of only "+str(len(token))+" bytes")
        else:
            from hmac import compare_digest
            received_hmac = token[-32:]
            expected_hmac = HMAC.new(self._signing_key, token[:-32]).digest()
            # Tokens arrive over the network; compare_digest avoids leaking
            # the expected HMAC through comparison timing.
            return compare_digest(received_hmac, expected_hmac)

    def encrypt(self, data = None):
        """Encrypt ``data`` and return an authenticated token as *bytes*.

        :raises: *TypeError* if the plaintext is not bytes.
        """
        if not isinstance(data, bytes):
            raise TypeError("Fernet token plaintext input must be bytes")

        iv = os.urandom(16)

        ciphertext = AES_128_CBC.encrypt(
            plaintext = PKCS7.pad(data),
            key = self._encryption_key,
            iv = iv,
        )

        signed_parts = iv+ciphertext

        return signed_parts + HMAC.new(self._signing_key, signed_parts).digest()

    def decrypt(self, token = None):
        """Verify and decrypt ``token``, returning the plaintext as *bytes*.

        :raises: *TypeError* if the token is not bytes, *ValueError* if the
            HMAC is invalid or decryption fails.
        """
        if not isinstance(token, bytes):
            raise TypeError("Fernet token must be bytes")

        if not self.verify_hmac(token):
            raise ValueError("Fernet token HMAC was invalid")

        iv = token[:16]
        ciphertext = token[16:-32]

        try:
            return PKCS7.unpad(
                AES_128_CBC.decrypt(
                    ciphertext,
                    self._encryption_key,
                    iv,
                )
            )
        except Exception as e:
            raise ValueError("Could not decrypt Fernet token") from e
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey, X25519PublicKey
# These proxy classes exist to create a uniform API accross
# cryptography primitive providers.
class X25519PrivateKeyProxy:
    """Uniform-API wrapper around ``cryptography``'s X25519 private key."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def generate(cls):
        return cls(X25519PrivateKey.generate())

    @classmethod
    def from_private_bytes(cls, data):
        return cls(X25519PrivateKey.from_private_bytes(data))

    def private_bytes(self):
        raw_encoding = serialization.Encoding.Raw
        raw_format = serialization.PrivateFormat.Raw
        no_encryption = serialization.NoEncryption()
        return self.real.private_bytes(
            encoding=raw_encoding,
            format=raw_format,
            encryption_algorithm=no_encryption,
        )

    def public_key(self):
        return X25519PublicKeyProxy(self.real.public_key())

    def exchange(self, peer_public_key):
        # Unwrap the peer proxy before handing off to the real key.
        return self.real.exchange(peer_public_key.real)
class X25519PublicKeyProxy:
    """Uniform-API wrapper around ``cryptography``'s X25519 public key."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def from_public_bytes(cls, data):
        return cls(X25519PublicKey.from_public_bytes(data))

    def public_bytes(self):
        raw_encoding = serialization.Encoding.Raw
        raw_format = serialization.PublicFormat.Raw
        return self.real.public_bytes(encoding=raw_encoding, format=raw_format)
class Ed25519PrivateKeyProxy:
    """Uniform-API wrapper around ``cryptography``'s Ed25519 private key."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def generate(cls):
        return cls(Ed25519PrivateKey.generate())

    @classmethod
    def from_private_bytes(cls, data):
        return cls(Ed25519PrivateKey.from_private_bytes(data))

    def private_bytes(self):
        raw_encoding = serialization.Encoding.Raw
        raw_format = serialization.PrivateFormat.Raw
        no_encryption = serialization.NoEncryption()
        return self.real.private_bytes(
            encoding=raw_encoding,
            format=raw_format,
            encryption_algorithm=no_encryption,
        )

    def public_key(self):
        return Ed25519PublicKeyProxy(self.real.public_key())

    def sign(self, message):
        return self.real.sign(message)
class Ed25519PublicKeyProxy:
    """Uniform-API wrapper around ``cryptography``'s Ed25519 public key."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def from_public_bytes(cls, data):
        return cls(Ed25519PublicKey.from_public_bytes(data))

    def public_bytes(self):
        return self.real.public_bytes(
            encoding=serialization.Encoding.Raw,
            format=serialization.PublicFormat.Raw
        )

    def verify(self, signature, message):
        # Returns None on success; raises on an invalid signature,
        # delegating entirely to the wrapped key.
        self.real.verify(signature, message)
from base64 import b64decode, b64encode, b32encode
from hashlib import sha256
import struct
import re
I2P_B64_CHARS = "-~"  # I2P's base64 variant replaces "+/" with "-~"
def i2p_b64encode(x):
    """Encode bytes as an I2P-flavoured base64 string.

    I2P's alphabet substitutes "-~" for the standard "+/".
    """
    return b64encode(x, altchars=b"-~").decode()
def i2p_b64decode(x):
    """Decode an I2P-flavoured base64 string, validating its characters.

    I2P's alphabet substitutes "-~" for the standard "+/".
    """
    return b64decode(x, altchars="-~", validate=True)
SAM_BUFSIZE = 4096                      # Socket read size for SAM replies
DEFAULT_ADDRESS = ("127.0.0.1", 7656)   # Default SAM bridge address
DEFAULT_MIN_VER = "3.1"
DEFAULT_MAX_VER = "3.1"
TRANSIENT_DESTINATION = "TRANSIENT"
# Dots escaped so only a literal ".b32.i2p" suffix matches, instead of
# any character in those positions.
VALID_BASE32_ADDRESS = re.compile(r"^([a-zA-Z0-9]{52})\.b32\.i2p$")
VALID_BASE64_ADDRESS = re.compile(r"^([a-zA-Z0-9-~=]{516,528})$")
class Message(object):
    """Parsed representation of a single SAM protocol line.

    Exposes the command (``cmd``), the action, and a dict of
    ``KEY=VALUE`` options; options without a value map to ``True``.
    """
    def __init__(self, s):
        self.opts = {}
        # Accept both bytes and str input; bytes are decoded and stripped.
        if type(s) != str:
            raw = s.decode().strip()
        else:
            raw = s
        self._reply_string = raw
        self.cmd, self.action, opts = raw.split(" ", 2)
        for token in opts.split(" "):
            if "=" in token:
                key, value = token.split("=", 1)
            else:
                key, value = token, True
            self.opts[key] = value
    def __getitem__(self, key):
        return self.opts[key]
    @property
    def ok(self):
        """True when the message carries ``RESULT=OK``."""
        return self["RESULT"] == "OK"
    def __repr__(self):
        return self._reply_string
# SAM request messages
def hello(min_version, max_version):
    """Build a HELLO VERSION handshake request line."""
    line = "HELLO VERSION MIN=%s MAX=%s\n" % (min_version, max_version)
    return line.encode()
def session_create(style, session_id, destination, options=""):
    """Build a SESSION CREATE request line."""
    line = "SESSION CREATE STYLE=%s ID=%s DESTINATION=%s %s\n" % (
        style, session_id, destination, options)
    return line.encode()
def stream_connect(session_id, destination, silent="false"):
    """Build a STREAM CONNECT request line."""
    line = "STREAM CONNECT ID=%s DESTINATION=%s SILENT=%s\n" % (
        session_id, destination, silent)
    return line.encode()
def stream_accept(session_id, silent="false"):
    """Build a STREAM ACCEPT request line."""
    return ("STREAM ACCEPT ID=%s SILENT=%s\n" % (session_id, silent)).encode()
def stream_forward(session_id, port, options=""):
    """Build a STREAM FORWARD request line."""
    line = "STREAM FORWARD ID=%s PORT=%s %s\n" % (session_id, port, options)
    return line.encode()
def naming_lookup(name):
    """Build a NAMING LOOKUP request line."""
    return ("NAMING LOOKUP NAME=%s\n" % name).encode()
def dest_generate(signature_type):
    """Build a DEST GENERATE request line for the given signature type."""
    return ("DEST GENERATE SIGNATURE_TYPE=%s\n" % signature_type).encode()
class Destination(object):
    """I2P destination
    https://geti2p.net/spec/common-structures#destination

    Holds both the binary and base64 form of a destination, and
    optionally the private key it was derived from.

    :param data: (optional) Base64 encoded data or binary data
    :param path: (optional) A path to a file with binary data
    :param has_private_key: (optional) Does data have a private key?
    """
    # SAM signature type identifiers
    ECDSA_SHA256_P256 = 1
    ECDSA_SHA384_P384 = 2
    ECDSA_SHA512_P521 = 3
    EdDSA_SHA512_Ed25519 = 7
    default_sig_type = EdDSA_SHA512_Ed25519
    # Layout sizes in bytes: 256-byte public key + 128-byte signing key,
    # followed by a certificate of at least 3 bytes (see spec link above).
    _pubkey_size = 256
    _signkey_size = 128
    _min_cert_size = 3
    def __init__(self, data=None, path=None, has_private_key=False):
        #: Binary destination
        self.data = bytes()
        #: Base64 encoded destination
        self.base64 = ""
        #: :class:`RNS.vendor.i2plib.PrivateKey` instance or None
        self.private_key = None
        if path:
            with open(path, "rb") as f: data = f.read()
        if data and has_private_key:
            self.private_key = PrivateKey(data)
            # Extract the public destination prefix from the private key
            # blob: 384 key bytes plus a certificate whose two-byte payload
            # length sits at offsets 385:386 (big-endian).
            cert_len = struct.unpack("!H", self.private_key.data[385:387])[0]
            data = self.private_key.data[:387+cert_len]
        if not data:
            raise Exception("Can't create a destination with no data")
        # Accept either raw bytes or an I2P base64 string; keep both forms.
        self.data = data if type(data) == bytes else i2p_b64decode(data)
        self.base64 = data if type(data) == str else i2p_b64encode(data)
    def __repr__(self):
        return "<Destination: {}>".format(self.base32)
    @property
    def base32(self):
        """Base32 destination hash of this destination"""
        # The .b32.i2p address is the truncated, lowercased base32
        # encoding of the SHA-256 hash of the binary destination.
        desthash = sha256(self.data).digest()
        return b32encode(desthash).decode()[:52].lower()
class PrivateKey(object):
    """I2P private key
    https://geti2p.net/spec/common-structures#keysandcert

    Keeps both the binary and base64 representation of the key.

    :param data: Base64 encoded data or binary data
    """
    def __init__(self, data):
        #: Binary private key (decoded if a base64 string was supplied)
        self.data = data if isinstance(data, bytes) else i2p_b64decode(data)
        #: Base64 encoded private key (encoded if binary data was supplied)
        self.base64 = data if isinstance(data, str) else i2p_b64encode(data)
import asyncio
from . import sam
from . import exceptions
from . import utils
from .log import logger
def parse_reply(data):
    """Parse a raw reply line from the SAM bridge into a :class:`sam.Message`.

    :param data: raw reply line as bytes
    :raises ConnectionAbortedError: on an empty or unparsable reply
    :return: a :class:`sam.Message` instance
    """
    if not data:
        raise ConnectionAbortedError("Empty response: SAM API went offline")
    try:
        msg = sam.Message(data.decode().strip())
        logger.debug("SAM reply: "+str(msg))
    except Exception:
        # Narrowed from a bare `except:` so that KeyboardInterrupt and
        # SystemExit are not swallowed and re-labelled as protocol errors.
        raise ConnectionAbortedError("Invalid SAM response")
    return msg
async def get_sam_socket(sam_address=sam.DEFAULT_ADDRESS, loop=None):
    """A couroutine used to create a new SAM socket.

    Performs the HELLO VERSION handshake on a fresh connection.

    :param sam_address: (optional) SAM API address
    :param loop: (optional) event loop instance
    :return: A (reader, writer) pair
    """
    reader, writer = await asyncio.open_connection(*sam_address)
    # Handshake with the fixed protocol version this client implements.
    writer.write(sam.hello("3.1", "3.1"))
    reply = parse_reply(await reader.readline())
    if not reply.ok:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return (reader, writer)
async def dest_lookup(domain, sam_address=sam.DEFAULT_ADDRESS,
              loop=None):
    """A coroutine used to lookup a full I2P destination by .i2p domain or
    .b32.i2p address.

    :param domain: Address to be resolved, can be a .i2p domain or a .b32.i2p
                   address.
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: An instance of :class:`Destination`
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.naming_lookup(domain))
    reply = parse_reply(await reader.readline())
    # The lookup socket is single-use; close it regardless of the outcome.
    writer.close()
    if not reply.ok:
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return sam.Destination(reply["VALUE"])
async def new_destination(sam_address=sam.DEFAULT_ADDRESS, loop=None,
                  sig_type=sam.Destination.default_sig_type):
    """A coroutine used to generate a new destination with a private key of a
    chosen signature type.

    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param sig_type: (optional) Signature type
    :raises: a SAM exception when the router rejects the request
    :return: An instance of :class:`Destination`
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.dest_generate(sig_type))
    reply = parse_reply(await reader.readline())
    writer.close()
    # A successful DEST REPLY carries PUB/PRIV and no RESULT field; an
    # error reply carries RESULT. Previously an error surfaced as an
    # opaque KeyError on "PRIV" instead of the proper SAM exception.
    result = reply.opts.get("RESULT")
    if result is not None and result != "OK":
        raise exceptions.SAM_EXCEPTIONS[result]()
    return sam.Destination(reply["PRIV"], has_private_key=True)
async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS,
                  loop=None, style="STREAM",
                  signature_type=sam.Destination.default_sig_type,
                  destination=None, options=None):
    """A coroutine used to create a new SAM session.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW
    :param signature_type: (optional) If the destination is TRANSIENT, this
                        signature type is used
    :param destination: (optional) Destination to use in this session. Can be
                        a base64 encoded string, :class:`Destination`
                        instance or None. TRANSIENT destination is used when it
                        is None.
    :param options: (optional) A dict object with i2cp options, or None
    :return: A (reader, writer) pair
    """
    logger.debug("Creating session {}".format(session_name))
    # Avoid the shared mutable-default-argument pitfall: normalize None
    # to a fresh dict instead of using {} as the default.
    if options is None:
        options = {}
    if destination:
        # Accept either a ready Destination instance or base64 key material.
        if not isinstance(destination, sam.Destination):
            destination = sam.Destination(destination, has_private_key=True)
        dest_string = destination.private_key.base64
    else:
        dest_string = sam.TRANSIENT_DESTINATION
    options_string = " ".join(["{}={}".format(k, v) for k, v in options.items()])
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.session_create(
        style, session_name, dest_string, options_string))
    reply = parse_reply(await reader.readline())
    if reply.ok:
        if not destination:
            # The router generated a TRANSIENT destination for us.
            destination = sam.Destination(
                reply["DESTINATION"], has_private_key=True)
        logger.debug(destination.base32)
        logger.debug("Session created {}".format(session_name))
        return (reader, writer)
    else:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
async def stream_connect(session_name, destination,
                 sam_address=sam.DEFAULT_ADDRESS, loop=None):
    """A coroutine used to connect to a remote I2P destination.

    :param session_name: Session nick name
    :param destination: I2P destination to connect to
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: A (reader, writer) pair
    """
    logger.debug("Connecting stream {}".format(session_name))
    if isinstance(destination, str):
        if destination.endswith(".i2p"):
            # Human-readable names must be resolved first.
            destination = await dest_lookup(destination, sam_address, loop)
        else:
            # Otherwise assume a full base64 destination string.
            destination = sam.Destination(destination)
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.stream_connect(session_name, destination.base64,
                                    silent="false"))
    reply = parse_reply(await reader.readline())
    if not reply.ok:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    logger.debug("Stream connected {}".format(session_name))
    return (reader, writer)
async def stream_accept(session_name, sam_address=sam.DEFAULT_ADDRESS,
                loop=None):
    """A coroutine used to accept a connection from the I2P network.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: A (reader, writer) pair
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.stream_accept(session_name, silent="false"))
    reply = parse_reply(await reader.readline())
    if not reply.ok:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return (reader, writer)
### Context managers
class Session:
    """Async SAM session context manager.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW
    :param signature_type: (optional) If the destination is TRANSIENT, this
                        signature type is used
    :param destination: (optional) Destination to use in this session. Can be
                        a base64 encoded string, :class:`Destination`
                        instance or None. TRANSIENT destination is used when it
                        is None.
    :param options: (optional) A dict object with i2cp options, or None
    :return: :class:`Session` object
    """
    def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS,
                 loop=None, style="STREAM",
                 signature_type=sam.Destination.default_sig_type,
                 destination=None, options=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop
        self.style = style
        self.signature_type = signature_type
        self.destination = destination
        # Avoid the shared mutable-default-dict pitfall: every instance
        # gets its own options dict.
        self.options = {} if options is None else options
    async def __aenter__(self):
        self.reader, self.writer = await create_session(self.session_name,
            sam_address=self.sam_address, loop=self.loop, style=self.style,
            signature_type=self.signature_type,
            destination=self.destination, options=self.options)
        return self
    async def __aexit__(self, exc_type, exc, tb):
        # TODO: propagate/handle exceptions raised inside the with-block
        self.writer.close()
class StreamConnection:
    """Async stream connection context manager.

    :param session_name: Session nick name
    :param destination: I2P destination to connect to
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: :class:`StreamConnection` object
    """
    def __init__(self, session_name, destination,
                 sam_address=sam.DEFAULT_ADDRESS, loop=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop
        self.destination = destination
    async def __aenter__(self):
        reader, writer = await stream_connect(self.session_name,
            self.destination, sam_address=self.sam_address, loop=self.loop)
        self.reader, self.writer = reader, writer
        # Convenience aliases so the context object can be read from and
        # written to directly.
        self.read = reader.read
        self.write = writer.write
        return self
    async def __aexit__(self, exc_type, exc, tb):
        # TODO: propagate/handle exceptions raised inside the with-block
        self.writer.close()
class StreamAcceptor:
    """Async stream acceptor context manager.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: :class:`StreamAcceptor` object
    """
    def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS,
                 loop=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop
    async def __aenter__(self):
        reader, writer = await stream_accept(self.session_name,
            sam_address=self.sam_address, loop=self.loop)
        self.reader, self.writer = reader, writer
        # Convenience aliases so the context object can be read from and
        # written to directly.
        self.read = reader.read
        self.write = writer.write
        return self
    async def __aexit__(self, exc_type, exc, tb):
        # TODO: propagate/handle exceptions raised inside the with-block
        self.writer.close()
Reticulum Network Stack β <img align="right" src="https://static.pepy.tech/personalized-badge/rns?period=total&units=international_system&left_color=grey&right_color=blue&left_text=Installs"/>
==========
<p align="center"><img width="200" src="https://raw.githubusercontent.com/markqvist/Reticulum/master/docs/source/graphics/rns_logo_512.png"></p>
Reticulum is the cryptography-based networking stack for building local and wide-area
networks with readily available hardware. It can operate even with very high latency
and extremely low bandwidth. Reticulum allows you to build wide-area networks
with off-the-shelf tools, and offers end-to-end encryption and connectivity,
initiator anonymity, autoconfiguring cryptographically backed multi-hop
transport, efficient addressing, unforgeable delivery acknowledgements and
more.
The vision of Reticulum is to allow anyone to be their own network operator,
and to make it cheap and easy to cover vast areas with a myriad of independent,
inter-connectable and autonomous networks. Reticulum **is not** *one* network.
It is **a tool** for building *thousands of networks*. Networks without
kill-switches, surveillance, censorship and control. Networks that can freely
interoperate, associate and disassociate with each other, and require no
central oversight. Networks for human beings. *Networks for the people*.
Reticulum is a complete networking stack, and does not rely on IP or higher
layers, but it is possible to use IP as the underlying carrier for Reticulum.
It is therefore trivial to tunnel Reticulum over the Internet or private IP
networks.
Having no dependencies on traditional networking stacks frees up overhead that
has been used to implement a networking stack built directly on cryptographic
principles, allowing resilience and stable functionality, even in open and
trustless networks.
No kernel modules or drivers are required. Reticulum runs completely in
userland, and can run on practically any system that runs Python 3.
## Read The Manual
The full documentation for Reticulum is available at [markqvist.github.io/Reticulum/manual/](https://markqvist.github.io/Reticulum/manual/).
You can also [download the Reticulum manual as a PDF](https://github.com/markqvist/Reticulum/raw/master/docs/Reticulum%20Manual.pdf)
For more info, see [reticulum.network](https://reticulum.network/)
## Notable Features
- Coordination-less globally unique addressing and identification
- Fully self-configuring multi-hop routing
- Initiator anonymity, communicate without revealing your identity
- Asymmetric X25519 encryption and Ed25519 signatures as a basis for all communication
- Forward Secrecy with ephemeral Elliptic Curve Diffie-Hellman keys on Curve25519
- Reticulum uses the [Fernet](https://github.com/fernet/spec/blob/master/Spec.md) specification for on-the-wire / over-the-air encryption
- Keys are ephemeral and derived from an ECDH key exchange on Curve25519
- AES-128 in CBC mode with PKCS7 padding
- HMAC using SHA256 for authentication
- IVs are generated through os.urandom()
- Unforgeable packet delivery confirmations
- A variety of supported interface types
- An intuitive and easy-to-use API
- Reliable and efficient transfer of arbitrary amounts of data
- Reticulum can handle a few bytes of data or files of many gigabytes
- Sequencing, transfer coordination and checksumming are automatic
- The API is very easy to use, and provides transfer progress
- Lightweight, flexible and expandable Request/Response mechanism
- Efficient link establishment
- Total bandwidth cost of setting up an encrypted link is 3 packets totaling 297 bytes
- Low cost of keeping links open at only 0.44 bits per second
## Roadmap
While Reticulum is already a fully featured and functional networking stack, many improvements and additions are actively being worked on, and planned for the future.
To learn more about the direction and future of Reticulum, please see the [Development Roadmap](./Roadmap.md).
## Examples of Reticulum Applications
If you want to quickly get an idea of what Reticulum can do, take a look at the
following resources.
- You can use the [rnsh](https://github.com/acehoss/rnsh) program to establish remote shell sessions over Reticulum.
- For an off-grid, encrypted and resilient mesh communications platform, see [Nomad Network](https://github.com/markqvist/NomadNet)
- The Android, Linux and macOS app [Sideband](https://github.com/markqvist/Sideband) has a graphical interface and focuses on ease of use.
- [LXMF](https://github.com/markqvist/lxmf) is a distributed, delay and disruption tolerant message transfer protocol built on Reticulum
## Where can Reticulum be used?
Over practically any medium that can support at least a half-duplex channel
with 500 bits per second throughput, and an MTU of 500 bytes. Data radios,
modems, LoRa radios, serial lines, AX.25 TNCs, amateur radio digital modes,
WiFi and Ethernet devices, free-space optical links, and similar systems are
all examples of the types of physical devices Reticulum can use.
An open-source LoRa-based interface called
[RNode](https://markqvist.github.io/Reticulum/manual/hardware.html#rnode) has
been designed specifically for use with Reticulum. It is possible to build
yourself, or it can be purchased as a complete transceiver that just needs a
USB connection to the host.
Reticulum can also be encapsulated over existing IP networks, so there's
nothing stopping you from using it over wired Ethernet, your local WiFi network
or the Internet, where it'll work just as well. In fact, one of the strengths
of Reticulum is how easily it allows you to connect different mediums into a
self-configuring, resilient and encrypted mesh, using any available mixture of
available infrastructure.
As an example, it's possible to set up a Raspberry Pi connected to both a LoRa
radio, a packet radio TNC and a WiFi network. Once the interfaces are
configured, Reticulum will take care of the rest, and any device on the WiFi
network can communicate with nodes on the LoRa and packet radio sides of the
network, and vice versa.
## How do I get started?
The best way to get started with the Reticulum Network Stack depends on what
you want to do. For full details and examples, have a look at the
[Getting Started Fast](https://markqvist.github.io/Reticulum/manual/gettingstartedfast.html)
section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/).
To simply install Reticulum and related utilities on your system, the easiest way is via pip:
```bash
pip install rns
```
You can then start any program that uses Reticulum, or start Reticulum as a
system service with [the rnsd utility](https://markqvist.github.io/Reticulum/manual/using.html#the-rnsd-utility).
When first started, Reticulum will create a default configuration file,
providing basic connectivity to other Reticulum peers that might be locally
reachable. The default config file contains a few examples, and references for
creating a more complex configuration.
If you have an old version of `pip` on your system, you may need to upgrade it first with `pip install pip --upgrade`. If you do not already have `pip` installed, you can install it using the package manager of your system with `sudo apt install python3-pip` or similar.
For more detailed examples on how to expand communication over many mediums such
as packet radio or LoRa, serial ports, or over fast IP links and the Internet using
the UDP and TCP interfaces, take a look at the [Supported Interfaces](https://markqvist.github.io/Reticulum/manual/interfaces.html)
section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/).
## Included Utilities
Reticulum includes a range of useful utilities for managing your networks,
viewing status and information, and other tasks. You can read more about these
programs in the [Included Utility Programs](https://markqvist.github.io/Reticulum/manual/using.html#included-utility-programs)
section of the [Reticulum Manual](https://markqvist.github.io/Reticulum/manual/).
- The system daemon `rnsd` for running Reticulum as an always-available service
- An interface status utility called `rnstatus`, that displays information about interfaces
- The path lookup and management tool `rnpath` letting you view and modify path tables
- A diagnostics tool called `rnprobe` for checking connectivity to destinations
- A simple file transfer program called `rncp` making it easy to copy files to remote systems
- The remote command execution program `rnx` lets you run commands and
programs and retrieve output from remote systems
All tools, including `rnx` and `rncp`, work reliably and well even over very
low-bandwidth links like LoRa or Packet Radio.
## Supported interface types and devices
Reticulum implements a range of generalised interface types that covers most of
the communications hardware that Reticulum can run over. If your hardware is
not supported, it's relatively simple to implement an interface class. I will
gratefully accept pull requests for custom interfaces if they are generally
useful.
Currently, the following interfaces are supported:
- Any Ethernet device
- LoRa using [RNode](https://unsigned.io/rnode/)
- Packet Radio TNCs (with or without AX.25)
- KISS-compatible hardware and software modems
- Any device with a serial port
- TCP over IP networks
- UDP over IP networks
- External programs via stdio or pipes
- Custom hardware via stdio or pipes
## Performance
Reticulum targets a *very* wide usable performance envelope, but prioritises
functionality and performance on low-bandwidth mediums. The goal is to
provide a dynamic performance envelope from 250 bits per second, to 1 gigabit
per second on normal hardware.
Currently, the usable performance envelope is approximately 500 bits per second
to 20 megabits per second, with physical mediums faster than that not being
saturated. Performance beyond the current level is intended for future
upgrades, but not highly prioritised at this point in time.
## Current Status
Reticulum should currently be considered beta software. All core protocol
features are implemented and functioning, but additions will probably occur as
real-world use is explored. There will be bugs. The API and wire-format can be
considered relatively stable at the moment, but could change if warranted.
## Dependencies
The installation of the default `rns` package requires the dependencies listed
below. Almost all systems and distributions have readily available packages for
these dependencies, and when the `rns` package is installed with `pip`, they
will be downloaded and installed as well.
- [PyCA/cryptography](https://github.com/pyca/cryptography)
- [pyserial](https://github.com/pyserial/pyserial)
On more unusual systems, and in some rare cases, it might not be possible to
install or even compile one or more of the above modules. In such situations,
you can use the `rnspure` package instead, which require no external
dependencies for installation. Please note that the contents of the `rns` and
`rnspure` packages are *identical*. The only difference is that the `rnspure`
package lists no dependencies required for installation.
No matter how Reticulum is installed and started, it will load external
dependencies only if they are *needed* and *available*. If for example you want
to use Reticulum on a system that cannot support
[pyserial](https://github.com/pyserial/pyserial), it is perfectly possible to
do so using the `rnspure` package, but Reticulum will not be able to use
serial-based interfaces. All other available modules will still be loaded when
needed.
**Please Note!** If you use the `rnspure` package to run Reticulum on systems
that do not support [PyCA/cryptography](https://github.com/pyca/cryptography),
it is important that you read and understand the [Cryptographic
Primitives](#cryptographic-primitives) section of this document.
## Public Testnet
If you just want to get started experimenting without building any physical
networks, you are welcome to join the Unsigned.io RNS Testnet. The testnet is
just that, an informal network for testing and experimenting. It will be up
most of the time, and anyone can join, but it also means that there's no
guarantees for service availability.
The testnet runs the very latest version of Reticulum (often even a short while
before it is publicly released). Sometimes experimental versions of Reticulum
might be deployed to nodes on the testnet, which means strange behaviour might
occur. If none of that scares you, you can join the testnet via either TCP or
I2P. Just add one of the following interfaces to your Reticulum configuration
file:
```
# TCP/IP interface to the RNS Dublin Hub
[[RNS Testnet Dublin]]
type = TCPClientInterface
enabled = yes
target_host = dublin.connect.reticulum.network
target_port = 4965
# TCP/IP interface to the BetweenTheBorders Hub (community-provided)
[[RNS Testnet BetweenTheBorders]]
type = TCPClientInterface
enabled = yes
target_host = betweentheborders.com
target_port = 4242
# Interface to Testnet I2P Hub
[[RNS Testnet I2P Hub]]
type = I2PInterface
enabled = yes
peers = pmlm3l5rpympihoy2o5ago43kluei2jjjzsalcuiuylbve3mwi2a.b32.i2p
```
The testnet also contains a number of [Nomad Network](https://github.com/markqvist/nomadnet) nodes, and LXMF propagation nodes.
## Support Reticulum
You can help support the continued development of open, free and private communications systems by donating via one of the following channels:
- Monero:
```
84FpY1QbxHcgdseePYNmhTHcrgMX4nFfBYtz2GKYToqHVVhJp8Eaw1Z1EedRnKD19b3B8NiLCGVxzKV17UMmmeEsCrPyA5w
```
- Ethereum
```
0x81F7B979fEa6134bA9FD5c701b3501A2e61E897a
```
- Bitcoin
```
3CPmacGm34qYvR6XWLVEJmi2aNe3PZqUuq
```
- Ko-Fi: https://ko-fi.com/markqvist
Are certain features in the development roadmap important to you or your
organisation? Make them a reality quickly by sponsoring their implementation.
## Cryptographic Primitives
Reticulum uses a simple suite of efficient, strong and modern cryptographic
primitives, with widely available implementations that can be used both on
general-purpose CPUs and on microcontrollers. The necessary primitives are:
- Ed25519 for signatures
- X25519 for ECDH key exchanges
- HKDF for key derivation
- Modified Fernet for encrypted tokens
- AES-128 in CBC mode
- HMAC for message authentication
- No Fernet version and timestamp fields
- SHA-256
- SHA-512
In the default installation configuration, the `X25519`, `Ed25519` and
`AES-128-CBC` primitives are provided by [OpenSSL](https://www.openssl.org/)
(via the [PyCA/cryptography](https://github.com/pyca/cryptography) package).
The hashing functions `SHA-256` and `SHA-512` are provided by the standard
Python [hashlib](https://docs.python.org/3/library/hashlib.html). The `HKDF`,
`HMAC`, `Fernet` primitives, and the `PKCS7` padding function are always
provided by the following internal implementations:
- [HKDF.py](RNS/Cryptography/HKDF.py)
- [HMAC.py](RNS/Cryptography/HMAC.py)
- [Fernet.py](RNS/Cryptography/Fernet.py)
- [PKCS7.py](RNS/Cryptography/PKCS7.py)
Reticulum also includes a complete implementation of all necessary primitives
in pure Python. If OpenSSL & PyCA are not available on the system when
Reticulum is started, Reticulum will instead use the internal pure-python
primitives. A trivial consequence of this is performance, with the OpenSSL
backend being *much* faster. The most important consequence however, is the
potential loss of security by using primitives that has not seen the same
amount of scrutiny, testing and review as those from OpenSSL.
If you want to use the internal pure-python primitives, it is **highly
advisable** that you have a good understanding of the risks that this pose, and
make an informed decision on whether those risks are acceptable to you.
Reticulum is relatively young software, and should be considered as such. While
it has been built with cryptography best-practices very foremost in mind, it
_has not_ been externally security audited, and there could very well be
privacy or security breaking bugs. If you want to help out, or help sponsor an
audit, please do get in touch.
## Acknowledgements & Credits
Reticulum can only exist because of the mountain of Open Source work it was
built on top of, the contributions of everyone involved, and everyone that has
supported the project through the years. To everyone who has helped, thank you
so much.
A number of other modules and projects are either part of, or used by
Reticulum. Sincere thanks to the authors and contributors of the following
projects:
- [PyCA/cryptography](https://github.com/pyca/cryptography), *BSD License*
- [Pure-25519](https://github.com/warner/python-pure25519) by [Brian Warner](https://github.com/warner), *MIT License*
- [Pysha2](https://github.com/thomdixon/pysha2) by [Thom Dixon](https://github.com/thomdixon), *MIT License*
- [Python-AES](https://github.com/orgurar/python-aes) by [Or Gur Arie](https://github.com/orgurar), *MIT License*
- [Curve25519.py](https://gist.github.com/nickovs/cc3c22d15f239a2640c185035c06f8a3#file-curve25519-py) by [Nicko van Someren](https://gist.github.com/nickovs), *Public Domain*
- [I2Plib](https://github.com/l-n-s/i2plib) by [Viktor Villainov](https://github.com/l-n-s)
- [PySerial](https://github.com/pyserial/pyserial) by Chris Liechti, *BSD License*
- [Configobj](https://github.com/DiffSK/configobj) by Michael Foord, Nicola Larosa, Rob Dennis & Eli Courtwright, *BSD License*
- [Six](https://github.com/benjaminp/six) by [Benjamin Peterson](https://github.com/benjaminp), *MIT License*
- [ifaddr](https://github.com/pydron/ifaddr) by [Pydron](https://github.com/pydron), *MIT License*
- [Umsgpack.py](https://github.com/vsergeev/u-msgpack-python) by [Ivan A. Sergeev](https://github.com/vsergeev)
- [Python](https://www.python.org)
| /rnspure-0.5.7.tar.gz/rnspure-0.5.7/README.md | 0.520253 | 0.90813 | README.md | pypi |
import math
import time
import RNS
from RNS.Cryptography import Fernet
class Callbacks:
    """Container for user-registered callbacks on a Destination."""
    def __init__(self):
        # All callbacks start unset and are assigned later through the
        # Destination API.
        self.link_established = None
        self.packet = None
        self.proof_requested = None
class Destination:
"""
A class used to describe endpoints in a Reticulum Network. Destination
instances are used both to create outgoing and incoming endpoints. The
destination type will decide if encryption, and what type, is used in
communication with the endpoint. A destination can also announce its
presence on the network, which will also distribute necessary keys for
encrypted communication with it.
:param identity: An instance of :ref:`RNS.Identity<api-identity>`. Can hold only public keys for an outgoing destination, or holding private keys for an ingoing.
:param direction: ``RNS.Destination.IN`` or ``RNS.Destination.OUT``.
:param type: ``RNS.Destination.SINGLE``, ``RNS.Destination.GROUP`` or ``RNS.Destination.PLAIN``.
:param app_name: A string specifying the app name.
:param \*aspects: Any non-zero number of string arguments.
"""
# Constants
SINGLE = 0x00
GROUP = 0x01
PLAIN = 0x02
LINK = 0x03
types = [SINGLE, GROUP, PLAIN, LINK]
PROVE_NONE = 0x21
PROVE_APP = 0x22
PROVE_ALL = 0x23
proof_strategies = [PROVE_NONE, PROVE_APP, PROVE_ALL]
ALLOW_NONE = 0x00
ALLOW_ALL = 0x01
ALLOW_LIST = 0x02
request_policies = [ALLOW_NONE, ALLOW_ALL, ALLOW_LIST]
IN = 0x11;
OUT = 0x12;
directions = [IN, OUT]
PR_TAG_WINDOW = 30
@staticmethod
def expand_name(identity, app_name, *aspects):
"""
:returns: A string containing the full human-readable name of the destination, for an app_name and a number of aspects.
"""
# Check input values and build name string
if "." in app_name: raise ValueError("Dots can't be used in app names")
name = app_name
for aspect in aspects:
if "." in aspect: raise ValueError("Dots can't be used in aspects")
name += "." + aspect
if identity != None:
name += "." + identity.hexhash
return name
    @staticmethod
    def hash(identity, app_name, *aspects):
        """
        :returns: A destination name in addressable hash form, for an app_name and a number of aspects.
        """
        # The name hash covers only the expanded name, without the identity.
        name_hash = RNS.Identity.full_hash(Destination.expand_name(None, app_name, *aspects).encode("utf-8"))[:(RNS.Identity.NAME_HASH_LENGTH//8)]
        addr_hash_material = name_hash
        if identity != None:
            # The identity component may be an Identity instance, or an
            # already-computed truncated identity hash as bytes.
            if isinstance(identity, RNS.Identity):
                addr_hash_material += identity.hash
            elif isinstance(identity, bytes) and len(identity) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
                addr_hash_material += identity
            else:
                raise TypeError("Invalid material supplied for destination hash calculation")
        # The final address is the truncated hash of name-hash + identity-hash.
        return RNS.Identity.full_hash(addr_hash_material)[:RNS.Reticulum.TRUNCATED_HASHLENGTH//8]
@staticmethod
def app_and_aspects_from_name(full_name):
"""
:returns: A tuple containing the app name and a list of aspects, for a full-name string.
"""
components = full_name.split(".")
return (components[0], components[1:])
@staticmethod
def hash_from_name_and_identity(full_name, identity):
"""
:returns: A destination name in addressable hash form, for a full name string and Identity instance.
"""
# Convenience wrapper: split the dotted name, then hash it together with the identity.
app_name, aspects = Destination.app_and_aspects_from_name(full_name)
return Destination.hash(identity, app_name, *aspects)
def __init__(self, identity, direction, type, app_name, *aspects):
# Check input values and build name string
if "." in app_name: raise ValueError("Dots can't be used in app names")
if not type in Destination.types: raise ValueError("Unknown destination type")
if not direction in Destination.directions: raise ValueError("Unknown destination direction")
self.accept_link_requests = True
self.callbacks = Callbacks()
self.request_handlers = {}
self.type = type
self.direction = direction
self.proof_strategy = Destination.PROVE_NONE
self.mtu = 0
self.path_responses = {}
self.links = []
# An incoming non-PLAIN destination created without an identity gets a freshly
# generated one, and the identity's hex hash is appended as the final aspect.
if identity == None and direction == Destination.IN and self.type != Destination.PLAIN:
identity = RNS.Identity()
aspects = aspects+(identity.hexhash,)
if identity != None and self.type == Destination.PLAIN:
raise TypeError("Selected destination type PLAIN cannot hold an identity")
self.identity = identity
self.name = Destination.expand_name(identity, app_name, *aspects)
# Generate the destination address hash
self.hash = Destination.hash(self.identity, app_name, *aspects)
# The name hash deliberately excludes the identity (expand_name called with None).
self.name_hash = RNS.Identity.full_hash(self.expand_name(None, app_name, *aspects).encode("utf-8"))[:(RNS.Identity.NAME_HASH_LENGTH//8)]
self.hexhash = self.hash.hex()
self.default_app_data = None
self.callback = None
self.proofcallback = None
# Register with Transport last, once hash and name are final.
RNS.Transport.register_destination(self)
def __str__(self):
"""
:returns: A human-readable representation of the destination including addressable hash and full name.
"""
return "<"+self.name+"/"+self.hexhash+">"
def announce(self, app_data=None, path_response=False, attached_interface=None, tag=None, send=True):
"""
Creates an announce packet for this destination and broadcasts it on all
relevant interfaces. Application specific data can be added to the announce.
:param app_data: *bytes* containing the app_data.
:param path_response: Internal flag used by :ref:`RNS.Transport<api-transport>`. Ignore.
:returns: The created announce packet when *send* is False, otherwise None.
"""
if self.type != Destination.SINGLE:
raise TypeError("Only SINGLE destination types can be announced")
# Evict cached path responses older than PR_TAG_WINDOW seconds.
now = time.time()
stale_responses = []
for entry_tag in self.path_responses:
entry = self.path_responses[entry_tag]
if now > entry[0]+Destination.PR_TAG_WINDOW:
stale_responses.append(entry_tag)
for entry_tag in stale_responses:
self.path_responses.pop(entry_tag)
if (path_response == True and tag != None) and tag in self.path_responses:
# This code is currently not used, since Transport will block duplicate
# path requests based on tags. When multi-path support is implemented in
# Transport, this will allow Transport to detect redundant paths to the
# same destination, and select the best one based on chosen criteria,
# since it will be able to detect that a single emitted announce was
# received via multiple paths. The difference in reception time will
# potentially also be useful in determining characteristics of the
# multiple available paths, and to choose the best one.
RNS.log("Using cached announce data for answering path request with tag "+RNS.prettyhexrep(tag), RNS.LOG_EXTREME)
announce_data = self.path_responses[tag][1]
else:
destination_hash = self.hash
random_hash = RNS.Identity.get_random_hash()[0:5]+int(time.time()).to_bytes(5, "big")
# default_app_data may be static bytes or a callable producing bytes.
if app_data == None and self.default_app_data != None:
if isinstance(self.default_app_data, bytes):
app_data = self.default_app_data
elif callable(self.default_app_data):
returned_app_data = self.default_app_data()
if isinstance(returned_app_data, bytes):
app_data = returned_app_data
# Sign hash + public key + name hash + random hash (+ app_data if present).
signed_data = self.hash+self.identity.get_public_key()+self.name_hash+random_hash
if app_data != None:
signed_data += app_data
signature = self.identity.sign(signed_data)
announce_data = self.identity.get_public_key()+self.name_hash+random_hash+signature
if app_data != None:
announce_data += app_data
# Cache the announce payload under this tag for later path responses.
self.path_responses[tag] = [time.time(), announce_data]
if path_response:
announce_context = RNS.Packet.PATH_RESPONSE
else:
announce_context = RNS.Packet.NONE
announce_packet = RNS.Packet(self, announce_data, RNS.Packet.ANNOUNCE, context = announce_context, attached_interface = attached_interface)
if send:
announce_packet.send()
else:
return announce_packet
def accepts_links(self, accepts = None):
    """
    Set or query whether the destination accepts incoming link requests.
    :param accepts: If ``True`` or ``False``, this method sets whether the destination accepts incoming link requests. If not provided or ``None``, the method returns whether the destination currently accepts link requests.
    :returns: ``True`` or ``False`` depending on whether the destination accepts incoming link requests, if the *accepts* parameter is not provided or ``None``.
    """
    # Query mode when no argument is given.
    if accepts is None:
        return self.accept_link_requests
    # Coerce any truthy/falsy value to a strict boolean before storing.
    self.accept_link_requests = True if accepts else False
def set_link_established_callback(self, callback):
"""
Registers a function to be called when a link has been established to
this destination.
:param callback: A function or method with the signature *callback(link)* to be called when a new link is established with this destination.
"""
# Stored on the shared Callbacks container; replaces any previous handler.
self.callbacks.link_established = callback
def set_packet_callback(self, callback):
"""
Registers a function to be called when a packet has been received by
this destination.
:param callback: A function or method with the signature *callback(data, packet)* to be called when this destination receives a packet.
"""
# Stored on the shared Callbacks container; replaces any previous handler.
self.callbacks.packet = callback
def set_proof_requested_callback(self, callback):
"""
Registers a function to be called when a proof has been requested for
a packet sent to this destination. Allows control over when and if
proofs should be returned for received packets.
:param callback: A function or method to with the signature *callback(packet)* be called when a packet that requests a proof is received. The callback must return one of True or False. If the callback returns True, a proof will be sent. If it returns False, a proof will not be sent.
"""
# Only consulted when proof_strategy is PROVE_APP.
self.callbacks.proof_requested = callback
def set_proof_strategy(self, proof_strategy):
    """
    Sets the destinations proof strategy.
    :param proof_strategy: One of ``RNS.Destination.PROVE_NONE``, ``RNS.Destination.PROVE_ALL`` or ``RNS.Destination.PROVE_APP``. If ``RNS.Destination.PROVE_APP`` is set, the `proof_requested_callback` will be called to determine whether a proof should be sent or not.
    """
    # Guard clause: reject anything outside the declared strategy set.
    if proof_strategy not in Destination.proof_strategies:
        raise TypeError("Unsupported proof strategy")
    self.proof_strategy = proof_strategy
def register_request_handler(self, path, response_generator = None, allow = ALLOW_NONE, allowed_list = None):
    """
    Registers a request handler.
    :param path: The path for the request handler to be registered.
    :param response_generator: A function or method with the signature *response_generator(path, data, request_id, link_id, remote_identity, requested_at)* to be called. Whatever this funcion returns will be sent as a response to the requester. If the function returns ``None``, no response will be sent.
    :param allow: One of ``RNS.Destination.ALLOW_NONE``, ``RNS.Destination.ALLOW_ALL`` or ``RNS.Destination.ALLOW_LIST``. If ``RNS.Destination.ALLOW_LIST`` is set, the request handler will only respond to requests for identified peers in the supplied list.
    :param allowed_list: A list of *bytes-like* :ref:`RNS.Identity<api-identity>` hashes.
    :raises: ``ValueError`` if any of the supplied arguments are invalid.
    """
    # Guard clauses instead of a nested if/elif/else chain.
    if path == None or path == "":
        raise ValueError("Invalid path specified")
    if not callable(response_generator):
        raise ValueError("Invalid response generator specified")
    if allow not in Destination.request_policies:
        raise ValueError("Invalid request policy")
    # Handlers are keyed by the truncated hash of the request path.
    path_hash = RNS.Identity.truncated_hash(path.encode("utf-8"))
    self.request_handlers[path_hash] = [path, response_generator, allow, allowed_list]
def deregister_request_handler(self, path):
    """
    Deregisters a request handler.
    :param path: The path for the request handler to be deregistered.
    :returns: True if the handler was deregistered, otherwise False.
    """
    path_hash = RNS.Identity.truncated_hash(path.encode("utf-8"))
    # pop() with a sentinel tells us in one operation whether the handler existed.
    return self.request_handlers.pop(path_hash, None) is not None
def receive(self, packet):
    # Link requests carry plaintext payloads and are handled separately.
    if packet.packet_type == RNS.Packet.LINKREQUEST:
        self.incoming_link_request(packet.data, packet)
        return
    # Everything else must decrypt successfully before being dispatched.
    plaintext = self.decrypt(packet.data)
    if plaintext is None:
        return
    if packet.packet_type != RNS.Packet.DATA:
        return
    if self.callbacks.packet is None:
        return
    try:
        self.callbacks.packet(plaintext, packet)
    except Exception as e:
        RNS.log("Error while executing receive callback from "+str(self)+". The contained exception was: "+str(e), RNS.LOG_ERROR)
def incoming_link_request(self, data, packet):
    # Ignore link requests entirely when they are disabled for this destination.
    if not self.accept_link_requests:
        return
    link = RNS.Link.validate_request(self, data, packet)
    if link is not None:
        self.links.append(link)
def create_keys(self):
    """
    For a ``RNS.Destination.GROUP`` type destination, creates a new symmetric key.
    :raises: ``TypeError`` if called on an incompatible type of destination.
    """
    # Only GROUP destinations hold key material directly on the destination.
    if self.type == Destination.PLAIN:
        raise TypeError("A plain destination does not hold any keys")
    elif self.type == Destination.SINGLE:
        raise TypeError("A single destination holds keys through an Identity instance")
    elif self.type == Destination.GROUP:
        self.prv_bytes = Fernet.generate_key()
        self.prv = Fernet(self.prv_bytes)
def get_private_key(self):
    """
    For a ``RNS.Destination.GROUP`` type destination, returns the symmetric private key.
    :raises: ``TypeError`` if called on an incompatible type of destination.
    """
    # Guard clauses: only GROUP destinations expose key bytes here.
    if self.type == Destination.PLAIN:
        raise TypeError("A plain destination does not hold any keys")
    if self.type == Destination.SINGLE:
        raise TypeError("A single destination holds keys through an Identity instance")
    return self.prv_bytes
def load_private_key(self, key):
    """
    For a ``RNS.Destination.GROUP`` type destination, loads a symmetric private key.
    :param key: A *bytes-like* containing the symmetric key.
    :raises: ``TypeError`` if called on an incompatible type of destination.
    """
    # Only GROUP destinations accept external symmetric key material.
    if self.type == Destination.PLAIN:
        raise TypeError("A plain destination does not hold any keys")
    elif self.type == Destination.SINGLE:
        raise TypeError("A single destination holds keys through an Identity instance")
    elif self.type == Destination.GROUP:
        self.prv_bytes = key
        self.prv = Fernet(self.prv_bytes)
def load_public_key(self, key):
# NOTE(review): both branches raise, so this method always fails — SINGLE
# destinations keep keys inside their Identity, and no other type can hold
# a public key at all. The *key* argument is never used.
if self.type != Destination.SINGLE:
raise TypeError("Only the \"single\" destination type can hold a public key")
else:
raise TypeError("A single destination holds keys through an Identity instance")
def encrypt(self, plaintext):
"""
Encrypts information for ``RNS.Destination.SINGLE`` or ``RNS.Destination.GROUP`` type destination.
:param plaintext: A *bytes-like* containing the plaintext to be encrypted.
:returns: Ciphertext *bytes*, the unmodified plaintext for PLAIN destinations, or None if no branch applies or GROUP encryption fails.
:raises: ``ValueError`` if destination does not hold a necessary key for encryption.
"""
# PLAIN destinations carry data unencrypted.
if self.type == Destination.PLAIN:
return plaintext
if self.type == Destination.SINGLE and self.identity != None:
return self.identity.encrypt(plaintext)
if self.type == Destination.GROUP:
if hasattr(self, "prv") and self.prv != None:
try:
return self.prv.encrypt(plaintext)
except Exception as e:
# NOTE(review): failure is logged and None is returned implicitly.
RNS.log("The GROUP destination could not encrypt data", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
else:
raise ValueError("No private key held by GROUP destination. Did you create or load one?")
def decrypt(self, ciphertext):
"""
Decrypts information for ``RNS.Destination.SINGLE`` or ``RNS.Destination.GROUP`` type destination.
:param ciphertext: *Bytes* containing the ciphertext to be decrypted.
:returns: Plaintext *bytes*, the unmodified input for PLAIN destinations, or None if no branch applies or GROUP decryption fails.
:raises: ``ValueError`` if destination does not hold a necessary key for decryption.
"""
# PLAIN destinations carry data unencrypted.
if self.type == Destination.PLAIN:
return ciphertext
if self.type == Destination.SINGLE and self.identity != None:
return self.identity.decrypt(ciphertext)
if self.type == Destination.GROUP:
if hasattr(self, "prv") and self.prv != None:
try:
return self.prv.decrypt(ciphertext)
except Exception as e:
# NOTE(review): failure is logged and None is returned implicitly.
RNS.log("The GROUP destination could not decrypt data", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
else:
raise ValueError("No private key held by GROUP destination. Did you create or load one?")
def sign(self, message):
    """
    Signs information for ``RNS.Destination.SINGLE`` type destination.
    :param message: *Bytes* containing the message to be signed.
    :returns: A *bytes-like* containing the message signature, or *None* if the destination could not sign the message.
    """
    # Only keyed SINGLE destinations can produce signatures.
    if self.type != Destination.SINGLE or self.identity is None:
        return None
    return self.identity.sign(message)
def set_default_app_data(self, app_data=None):
"""
Sets the default app_data for the destination. If set, the default
app_data will be included in every announce sent by the destination,
unless other app_data is specified in the *announce* method.
:param app_data: A *bytes-like* containing the default app_data, or a *callable* returning a *bytes-like* containing the app_data.
"""
self.default_app_data = app_data
def clear_default_app_data(self):
    """
    Clears default app_data previously set for the destination.
    """
    # Fix: stray non-Python residue was fused onto this line, breaking the
    # statement. Delegates to set_default_app_data with None to unset.
    self.set_default_app_data(app_data=None)
from __future__ import annotations

import abc
import collections
import contextlib
import enum
import struct
import threading
import time
from abc import ABC, abstractmethod
from types import TracebackType
from typing import Any, Callable, Generic, NewType, Type, TypeVar

import RNS
# Generic placeholder for the packet type handled by a ChannelOutletBase.
TPacket = TypeVar("TPacket")
# System-reserved Channel message types (values >= 0xf000 are reserved;
# user-defined MSGTYPEs must stay below 0xf000).
class SystemMessageTypes(enum.IntEnum):
SMT_STREAM_DATA = 0xff00
class ChannelOutletBase(ABC, Generic[TPacket]):
    """
    An abstract transport layer interface used by Channel.
    DEPRECATED: This was created for testing; eventually
    Channel will use Link or a LinkBase interface
    directly.
    """
    # Fix: the original raised `NotImplemented()`, but NotImplemented is a
    # non-callable singleton — raising it produces a confusing TypeError.
    # NotImplementedError is the correct exception for abstract members.
    @abstractmethod
    def send(self, raw: bytes) -> TPacket:
        """Transmit *raw* and return the transport-level packet object."""
        raise NotImplementedError()
    @abstractmethod
    def resend(self, packet: TPacket) -> TPacket:
        """Retransmit a previously sent packet."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def mdu(self):
        """Maximum data unit (bytes) the outlet can carry per packet."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def rtt(self):
        """Current round-trip time estimate of the underlying transport."""
        raise NotImplementedError()
    @property
    @abstractmethod
    def is_usable(self):
        """Whether the outlet can currently be used for sending."""
        raise NotImplementedError()
    @abstractmethod
    def get_packet_state(self, packet: TPacket) -> MessageState:
        """Return the delivery state of *packet* as a MessageState."""
        raise NotImplementedError()
    @abstractmethod
    def timed_out(self):
        """Notify the outlet that the channel has given up on delivery."""
        raise NotImplementedError()
    @abstractmethod
    def __str__(self):
        raise NotImplementedError()
    @abstractmethod
    def set_packet_timeout_callback(self, packet: TPacket, callback: Callable[[TPacket], None] | None,
                                    timeout: float | None = None):
        """Register (or clear, with None) a timeout callback for *packet*."""
        raise NotImplementedError()
    @abstractmethod
    def set_packet_delivered_callback(self, packet: TPacket, callback: Callable[[TPacket], None] | None):
        """Register (or clear, with None) a delivery callback for *packet*."""
        raise NotImplementedError()
    @abstractmethod
    def get_packet_id(self, packet: TPacket) -> Any:
        """Return an identifier usable to match retransmissions of *packet*."""
        raise NotImplementedError()
class CEType(enum.IntEnum):
"""
ChannelException type codes
"""
ME_NO_MSG_TYPE = 0        # message class lacks a MSGTYPE
ME_INVALID_MSG_TYPE = 1   # MSGTYPE invalid or in the reserved range
ME_NOT_REGISTERED = 2     # no factory registered for a received MSGTYPE
ME_LINK_NOT_READY = 3     # channel/link not ready to send
ME_ALREADY_SENT = 4       # message already sent
ME_TOO_BIG = 5            # packed message exceeds the outlet MDU
class ChannelException(Exception):
    """
    An exception thrown by Channel, with a type code.
    """
    def __init__(self, ce_type: CEType, *args):
        # Fix: forward the arguments unpacked so Exception.args holds them
        # directly; the original passed the tuple as a single argument,
        # yielding a nested tuple in e.args and an awkward str(e).
        super().__init__(*args)
        self.type = ce_type
class MessageState(enum.IntEnum):
"""
Set of possible states for a Message
"""
MSGSTATE_NEW = 0        # created, not yet sent
MSGSTATE_SENT = 1       # transmitted, delivery not yet confirmed
MSGSTATE_DELIVERED = 2  # delivery confirmed by the outlet
MSGSTATE_FAILED = 3     # delivery failed or receipt missing
class MessageBase(abc.ABC):
    """
    Base type for any messages sent or received on a Channel.
    Subclasses must define the two abstract methods as well as
    the ``MSGTYPE`` class variable.
    """
    # MSGTYPE must be unique within all classes sent over a
    # channel. Additionally, MSGTYPE > 0xf000 are reserved.
    MSGTYPE = None
    """
    Defines a unique identifier for a message class.
    * Must be unique within all classes registered with a ``Channel``
    * Must be less than ``0xf000``. Values greater than or equal to ``0xf000`` are reserved.
    """
    # Fix: raise NotImplementedError instead of calling the non-callable
    # NotImplemented singleton (which raised TypeError instead).
    @abstractmethod
    def pack(self) -> bytes:
        """
        Create and return the binary representation of the message
        :return: binary representation of message
        """
        raise NotImplementedError()
    @abstractmethod
    def unpack(self, raw: bytes):
        """
        Populate message from binary representation
        :param raw: binary representation
        """
        raise NotImplementedError()
# Signature of Channel message handlers: return True to stop further
# handler processing of a received message.
MessageCallbackType = NewType("MessageCallbackType", Callable[[MessageBase], bool])
class Envelope:
    """
    Internal wrapper used to transport messages over a channel and
    track its state within the channel framework.
    """
    def __init__(self, outlet: ChannelOutletBase, message: MessageBase = None, raw: bytes = None, sequence: int = None):
        self.ts = time.time()
        self.id = id(self)
        self.message = message
        self.raw = raw
        self.packet: TPacket = None
        self.sequence = sequence
        self.outlet = outlet
        self.tries = 0
        self.unpacked = False
        self.packed = False
        self.tracked = False
    def unpack(self, message_factories: dict[int, Type]) -> MessageBase:
        # Wire header: msgtype, sequence, payload length — three big-endian u16s.
        msgtype, self.sequence, length = struct.unpack(">HHH", self.raw[:6])
        payload = self.raw[6:]
        factory = message_factories.get(msgtype, None)
        if factory is None:
            raise ChannelException(CEType.ME_NOT_REGISTERED, f"Unable to find constructor for Channel MSGTYPE {hex(msgtype)}")
        decoded = factory()
        decoded.unpack(payload)
        self.unpacked = True
        self.message = decoded
        return decoded
    def pack(self) -> bytes:
        # A message class without a MSGTYPE cannot be framed.
        if self.message.__class__.MSGTYPE is None:
            raise ChannelException(CEType.ME_NO_MSG_TYPE, f"{self.message.__class__} lacks MSGTYPE")
        payload = self.message.pack()
        self.raw = struct.pack(">HHH", self.message.MSGTYPE, self.sequence, len(payload)) + payload
        self.packed = True
        return self.raw
class Channel(contextlib.AbstractContextManager):
"""
Provides reliable delivery of messages over
a link.
``Channel`` differs from ``Request`` and
``Resource`` in some important ways:
**Continuous**
Messages can be sent or received as long as
the ``Link`` is open.
**Bi-directional**
Messages can be sent in either direction on
the ``Link``; neither end is the client or
server.
**Size-constrained**
Messages must be encoded into a single packet.
``Channel`` is similar to ``Packet``, except that it
provides reliable delivery (automatic retries) as well
as a structure for exchanging several types of
messages over the ``Link``.
``Channel`` is not instantiated directly, but rather
obtained from a ``Link`` with ``get_channel()``.
"""
# The initial window size at channel setup
WINDOW = 2
# Absolute minimum window size
WINDOW_MIN = 1
# The maximum window size for transfers on slow links
WINDOW_MAX_SLOW = 5
# The maximum window size for transfers on mid-speed links
WINDOW_MAX_MEDIUM = 16
# The maximum window size for transfers on fast links
WINDOW_MAX_FAST = 48
# For calculating maps and guard segments, this
# must be set to the global maximum window.
WINDOW_MAX = WINDOW_MAX_FAST
# If the fast rate is sustained for this many request
# rounds, the fast link window size will be allowed.
FAST_RATE_THRESHOLD = 10
# If the RTT rate is higher than this value,
# the max window size for fast links will be used.
RTT_FAST = 0.25
RTT_MEDIUM = 0.75
RTT_SLOW = 1.45
# The minimum allowed flexibility of the window size.
# The difference between window_max and window_min
# will never be smaller than this value.
WINDOW_FLEXIBILITY = 4
SEQ_MAX = 0xFFFF
SEQ_MODULUS = SEQ_MAX+1
def __init__(self, outlet: ChannelOutletBase):
"""
@param outlet:
"""
self._outlet = outlet
self._lock = threading.RLock()
self._tx_ring: collections.deque[Envelope] = collections.deque()
self._rx_ring: collections.deque[Envelope] = collections.deque()
self._message_callbacks: [MessageCallbackType] = []
self._next_sequence = 0
self._next_rx_sequence = 0
self._message_factories: dict[int, Type[MessageBase]] = {}
self._max_tries = 5
self.fast_rate_rounds = 0
self.medium_rate_rounds = 0
# Very slow links start with a fixed window of 1 and no flexibility.
if self._outlet.rtt > Channel.RTT_SLOW:
self.window = 1
self.window_max = 1
self.window_min = 1
self.window_flexibility = 1
else:
self.window = Channel.WINDOW
self.window_max = Channel.WINDOW_MAX_SLOW
self.window_min = Channel.WINDOW_MIN
self.window_flexibility = Channel.WINDOW_FLEXIBILITY
def __enter__(self) -> Channel:
return self
def __exit__(self, __exc_type: Type[BaseException] | None, __exc_value: BaseException | None,
__traceback: TracebackType | None) -> bool | None:
self._shutdown()
return False
def register_message_type(self, message_class: Type[MessageBase]):
"""
Register a message class for reception over a ``Channel``.
Message classes must extend ``MessageBase``.
:param message_class: Class to register
"""
self._register_message_type(message_class, is_system_type=False)
def _register_message_type(self, message_class: Type[MessageBase], *, is_system_type: bool = False):
with self._lock:
if not issubclass(message_class, MessageBase):
raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
f"{message_class} is not a subclass of {MessageBase}.")
if message_class.MSGTYPE is None:
raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
f"{message_class} has invalid MSGTYPE class attribute.")
if message_class.MSGTYPE >= 0xf000 and not is_system_type:
raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
f"{message_class} has system-reserved message type.")
# Message classes must be constructible with no arguments so that
# unpack() can instantiate them from the wire format.
try:
message_class()
except Exception as ex:
raise ChannelException(CEType.ME_INVALID_MSG_TYPE,
f"{message_class} raised an exception when constructed with no arguments: {ex}")
self._message_factories[message_class.MSGTYPE] = message_class
def add_message_handler(self, callback: MessageCallbackType):
"""
Add a handler for incoming messages. A handler
has the following signature:
``(message: MessageBase) -> bool``
Handlers are processed in the order they are
added. If any handler returns True, processing
of the message stops; handlers after the
returning handler will not be called.
:param callback: Function to call
"""
with self._lock:
if callback not in self._message_callbacks:
self._message_callbacks.append(callback)
def remove_message_handler(self, callback: MessageCallbackType):
"""
Remove a handler added with ``add_message_handler``.
:param callback: handler to remove
"""
with self._lock:
if callback in self._message_callbacks:
self._message_callbacks.remove(callback)
def _shutdown(self):
with self._lock:
self._message_callbacks.clear()
self._clear_rings()
def _clear_rings(self):
with self._lock:
# Detach outlet callbacks before dropping tracked envelopes.
for envelope in self._tx_ring:
if envelope.packet is not None:
self._outlet.set_packet_timeout_callback(envelope.packet, None)
self._outlet.set_packet_delivered_callback(envelope.packet, None)
self._tx_ring.clear()
self._rx_ring.clear()
# Insert an envelope into a ring in sequence order within the receive
# window; returns False when the sequence is already present (duplicate).
def _emplace_envelope(self, envelope: Envelope, ring: collections.deque[Envelope]) -> bool:
with self._lock:
i = 0
window_overflow = (self._next_rx_sequence+Channel.WINDOW_MAX) % Channel.SEQ_MODULUS
for existing in ring:
if envelope.sequence == existing.sequence:
RNS.log(f"Envelope: Emplacement of duplicate envelope with sequence "+str(envelope.sequence), RNS.LOG_EXTREME)
return False
if envelope.sequence < existing.sequence and not envelope.sequence < window_overflow:
ring.insert(i, envelope)
RNS.log("Inserted seq "+str(envelope.sequence)+" at "+str(i), RNS.LOG_DEBUG)
envelope.tracked = True
return True
i += 1
envelope.tracked = True
ring.append(envelope)
return True
def _run_callbacks(self, message: MessageBase):
# Copy so handlers may add/remove handlers while we iterate.
cbs = self._message_callbacks.copy()
for cb in cbs:
try:
if cb(message):
return
except Exception as e:
RNS.log("Channel "+str(self)+" experienced an error while running a message callback. The contained exception was: "+str(e), RNS.LOG_ERROR)
def _receive(self, raw: bytes):
try:
envelope = Envelope(outlet=self._outlet, raw=raw)
with self._lock:
message = envelope.unpack(self._message_factories)
# Sequences below the expected next value are stale retransmissions,
# unless they fall inside the wrapped portion of the receive window.
if envelope.sequence < self._next_rx_sequence:
window_overflow = (self._next_rx_sequence+Channel.WINDOW_MAX) % Channel.SEQ_MODULUS
if window_overflow < self._next_rx_sequence:
if envelope.sequence > window_overflow:
RNS.log("Invalid packet sequence ("+str(envelope.sequence)+") received on channel "+str(self), RNS.LOG_EXTREME)
return
else:
RNS.log("Invalid packet sequence ("+str(envelope.sequence)+") received on channel "+str(self), RNS.LOG_EXTREME)
return
is_new = self._emplace_envelope(envelope, self._rx_ring)
if not is_new:
RNS.log("Duplicate message received on channel "+str(self), RNS.LOG_EXTREME)
return
else:
with self._lock:
# Deliver the run of consecutive messages starting at the
# expected sequence, advancing it for each one delivered.
contigous = []
for e in self._rx_ring:
if e.sequence == self._next_rx_sequence:
contigous.append(e)
self._next_rx_sequence = (self._next_rx_sequence + 1) % Channel.SEQ_MODULUS
for e in contigous:
if not e.unpacked:
m = e.unpack(self._message_factories)
else:
m = e.message
self._rx_ring.remove(e)
self._run_callbacks(m)
except Exception as e:
RNS.log("An error ocurred while receiving data on "+str(self)+". The contained exception was: "+str(e), RNS.LOG_ERROR)
def is_ready_to_send(self) -> bool:
"""
Check if ``Channel`` is ready to send.
:return: True if ready
"""
if not self._outlet.is_usable:
return False
with self._lock:
# Count undelivered envelopes against the current window.
outstanding = 0
for envelope in self._tx_ring:
if envelope.outlet == self._outlet:
if not envelope.packet or not self._outlet.get_packet_state(envelope.packet) == MessageState.MSGSTATE_DELIVERED:
outstanding += 1
if outstanding >= self.window:
return False
return True
# Locate the envelope matching *packet*; if op() reports it complete,
# stop tracking it and grow the send window (and rate counters).
def _packet_tx_op(self, packet: TPacket, op: Callable[[TPacket], bool]):
with self._lock:
envelope = next(filter(lambda e: self._outlet.get_packet_id(e.packet) == self._outlet.get_packet_id(packet),
self._tx_ring), None)
if envelope and op(envelope):
envelope.tracked = False
if envelope in self._tx_ring:
self._tx_ring.remove(envelope)
if self.window < self.window_max:
self.window += 1
if (self.window - self.window_min) > (self.window_flexibility-1):
self.window_min += 1
# TODO: Remove at some point
# RNS.log("Increased "+str(self)+" window to "+str(self.window), RNS.LOG_EXTREME)
if self._outlet.rtt != 0:
if self._outlet.rtt > Channel.RTT_FAST:
self.fast_rate_rounds = 0
if self._outlet.rtt > Channel.RTT_MEDIUM:
self.medium_rate_rounds = 0
else:
self.medium_rate_rounds += 1
if self.window_max < Channel.WINDOW_MAX_MEDIUM and self.medium_rate_rounds == Channel.FAST_RATE_THRESHOLD:
self.window_max = Channel.WINDOW_MAX_MEDIUM
# TODO: Remove at some point
# RNS.log("Increased "+str(self)+" max window to "+str(self.window_max), RNS.LOG_EXTREME)
else:
self.fast_rate_rounds += 1
if self.window_max < Channel.WINDOW_MAX_FAST and self.fast_rate_rounds == Channel.FAST_RATE_THRESHOLD:
self.window_max = Channel.WINDOW_MAX_FAST
# TODO: Remove at some point
# RNS.log("Increased "+str(self)+" max window to "+str(self.window_max), RNS.LOG_EXTREME)
else:
RNS.log("Envelope not found in TX ring for "+str(self), RNS.LOG_EXTREME)
if not envelope:
RNS.log("Spurious message received on "+str(self), RNS.LOG_EXTREME)
def _packet_delivered(self, packet: TPacket):
self._packet_tx_op(packet, lambda env: True)
# Exponential backoff: 5 * RTT * 2^(tries-1), with an RTT floor of 10 ms.
def _get_packet_timeout_time(self, tries: int) -> float:
return pow(2, tries - 1) * max(self._outlet.rtt, 0.01) * 5
def _packet_timeout(self, packet: TPacket):
def retry_envelope(envelope: Envelope) -> bool:
# Give up and tear down the link once the retry budget is spent;
# otherwise retransmit and shrink the window.
if envelope.tries >= self._max_tries:
RNS.log("Retry count exceeded on "+str(self)+", tearing down Link.", RNS.LOG_ERROR)
self._shutdown() # start on separate thread?
self._outlet.timed_out()
return True
envelope.tries += 1
self._outlet.resend(envelope.packet)
self._outlet.set_packet_delivered_callback(envelope.packet, self._packet_delivered)
self._outlet.set_packet_timeout_callback(envelope.packet, self._packet_timeout, self._get_packet_timeout_time(envelope.tries))
if self.window > self.window_min:
self.window -= 1
if self.window_max > self.window_min:
self.window_max -= 1
if (self.window_max - self.window) > (self.window_flexibility-1):
self.window_max -= 1
# TODO: Remove at some point
# RNS.log("Decreased "+str(self)+" window to "+str(self.window), RNS.LOG_EXTREME)
return False
if self._outlet.get_packet_state(packet) != MessageState.MSGSTATE_DELIVERED:
self._packet_tx_op(packet, retry_envelope)
def send(self, message: MessageBase) -> Envelope:
"""
Send a message. If a message send is attempted and
``Channel`` is not ready, an exception is thrown.
:param message: an instance of a ``MessageBase`` subclass
"""
envelope: Envelope | None = None
with self._lock:
if not self.is_ready_to_send():
raise ChannelException(CEType.ME_LINK_NOT_READY, f"Link is not ready")
envelope = Envelope(self._outlet, message=message, sequence=self._next_sequence)
self._next_sequence = (self._next_sequence + 1) % Channel.SEQ_MODULUS
self._emplace_envelope(envelope, self._tx_ring)
if envelope is None:
raise BlockingIOError()
envelope.pack()
if len(envelope.raw) > self._outlet.mdu:
raise ChannelException(CEType.ME_TOO_BIG, f"Packed message too big for packet: {len(envelope.raw)} > {self._outlet.mdu}")
envelope.packet = self._outlet.send(envelope.raw)
envelope.tries += 1
self._outlet.set_packet_delivered_callback(envelope.packet, self._packet_delivered)
self._outlet.set_packet_timeout_callback(envelope.packet, self._packet_timeout, self._get_packet_timeout_time(envelope.tries))
return envelope
@property
def MDU(self):
"""
Maximum Data Unit: the number of bytes available
for a message to consume in a single send. This
value is adjusted from the ``Link`` MDU to accommodate
message header information.
:return: number of bytes available
"""
return self._outlet.mdu - 6 # sizeof(msgtype) + sizeof(length) + sizeof(sequence)
class LinkChannelOutlet(ChannelOutletBase):
    """
    An implementation of ChannelOutletBase for RNS.Link.
    Allows Channel to send packets over an RNS Link with
    Packets.
    :param link: RNS Link to wrap
    """
    def __init__(self, link: RNS.Link):
        self.link = link
    def send(self, raw: bytes) -> RNS.Packet:
        """Wrap *raw* in a CHANNEL-context packet and send it if the link is active."""
        packet = RNS.Packet(self.link, raw, context=RNS.Packet.CHANNEL)
        # The packet object is returned even when not sent, so the channel
        # can track and resend it later.
        if self.link.status == RNS.Link.ACTIVE:
            packet.send()
        return packet
    def resend(self, packet: RNS.Packet) -> RNS.Packet:
        """Retransmit *packet*, logging on failure; returns the packet either way."""
        receipt = packet.resend()
        if not receipt:
            RNS.log("Failed to resend packet", RNS.LOG_ERROR)
        return packet
    @property
    def mdu(self):
        return self.link.MDU
    @property
    def rtt(self):
        return self.link.rtt
    @property
    def is_usable(self):
        return True  # had issues looking at Link.status
    def get_packet_state(self, packet: TPacket) -> MessageState:
        """Map the packet receipt status onto a MessageState value."""
        # A missing receipt means the packet was never actually sent.
        if packet.receipt is None:
            return MessageState.MSGSTATE_FAILED
        status = packet.receipt.get_status()
        if status == RNS.PacketReceipt.SENT:
            return MessageState.MSGSTATE_SENT
        if status == RNS.PacketReceipt.DELIVERED:
            return MessageState.MSGSTATE_DELIVERED
        if status == RNS.PacketReceipt.FAILED:
            return MessageState.MSGSTATE_FAILED
        else:
            raise Exception(f"Unexpected receipt state: {status}")
    def timed_out(self):
        """Tear down the link when the channel exhausts its retries."""
        self.link.teardown()
    def __str__(self):
        return f"{self.__class__.__name__}({self.link})"
    def set_packet_timeout_callback(self, packet: RNS.Packet, callback: Callable[[RNS.Packet], None] | None,
                                    timeout: float | None = None):
        if timeout and packet.receipt:
            packet.receipt.set_timeout(timeout)
        # Adapt the receipt-based callback to the packet-based channel callback.
        def inner(receipt: RNS.PacketReceipt):
            callback(packet)
        if packet and packet.receipt:
            packet.receipt.set_timeout_callback(inner if callback else None)
    def set_packet_delivered_callback(self, packet: RNS.Packet, callback: Callable[[RNS.Packet], None] | None):
        # Adapt the receipt-based callback to the packet-based channel callback.
        def inner(receipt: RNS.PacketReceipt):
            callback(packet)
        if packet and packet.receipt:
            packet.receipt.set_delivery_callback(inner if callback else None)
    def get_packet_id(self, packet: RNS.Packet) -> Any:
        # Fix: stray non-Python residue was fused onto the final return,
        # breaking the statement; restored to a plain `return None`.
        if packet and hasattr(packet, "get_hash") and callable(packet.get_hash):
            return packet.get_hash()
        else:
            return None
from __future__ import annotations
import bz2
import sys
import time
import threading
from threading import RLock
import struct
from RNS.Channel import Channel, MessageBase, SystemMessageTypes
import RNS
from io import RawIOBase, BufferedRWPair, BufferedReader, BufferedWriter
from typing import Callable
from contextlib import AbstractContextManager
class StreamDataMessage(MessageBase):
    MSGTYPE = SystemMessageTypes.SMT_STREAM_DATA
    """
    Message type for ``Channel``. ``StreamDataMessage``
    uses a system-reserved message type.
    """

    STREAM_ID_MAX = 0x3fff  # 16383
    """
    The stream id is limited to 2 bytes - 2 bit
    """

    MAX_DATA_LEN = RNS.Link.MDU - 2 - 6  # 2 for stream data message header, 6 for channel envelope
    """
    When the Buffer package is imported, this value is
    calculcated based on the value of OVERHEAD
    """

    def __init__(self, stream_id: int = None, data: bytes = None, eof: bool = False):
        """
        This class is used to encapsulate binary stream
        data to be sent over a ``Channel``.

        :param stream_id: id of stream relative to receiver
        :param data: binary data
        :param eof: set to True if signalling End of File
        """
        super().__init__()
        if stream_id is not None and stream_id > self.STREAM_ID_MAX:
            raise ValueError("stream_id must be 0-16383")
        self.stream_id = stream_id
        # Set by pack() only when compression actually shrinks the payload.
        self.compressed = False
        self.data = data or bytes()
        self.eof = eof

    def pack(self) -> bytes:
        """Serialize to wire format: a 2-byte big-endian header followed by the
        (possibly bz2-compressed) payload. Header bits: 15 = EOF flag,
        14 = compressed flag, 0-13 = stream id."""
        if self.stream_id is None:
            raise ValueError("stream_id")

        # Keep the compressed payload only when it is actually smaller.
        compressed_data = bz2.compress(self.data)
        saved = len(self.data)-len(compressed_data)
        if saved > 0:
            self.data = compressed_data
            self.compressed = True

        header_val = (0x3fff & self.stream_id) | (0x8000 if self.eof else 0x0000) | (0x4000 if self.compressed > 0 else 0x0000)
        return bytes(struct.pack(">H", header_val) + (self.data if self.data else bytes()))

    def unpack(self, raw):
        """Deserialize from the wire format produced by pack(), decompressing
        the payload when the compressed flag is set."""
        self.stream_id = struct.unpack(">H", raw[:2])[0]
        self.eof = (0x8000 & self.stream_id) > 0
        self.compressed = (0x4000 & self.stream_id) > 0
        self.stream_id = self.stream_id & 0x3fff
        self.data = raw[2:]
        if self.compressed:
            self.data = bz2.decompress(self.data)
class RawChannelReader(RawIOBase, AbstractContextManager):
    """
    An implementation of RawIOBase that receives
    binary stream data sent over a ``Channel``.

    This class generally need not be instantiated directly.
    Use :func:`RNS.Buffer.create_reader`,
    :func:`RNS.Buffer.create_writer`, and
    :func:`RNS.Buffer.create_bidirectional_buffer` functions
    to create buffered streams with optional callbacks.

    For additional information on the API of this
    object, see the Python documentation for
    ``RawIOBase``.
    """

    def __init__(self, stream_id: int, channel: Channel):
        """
        Create a raw channel reader.

        :param stream_id: local stream id to receive at
        :param channel: ``Channel`` object to receive from
        """
        self._stream_id = stream_id
        self._channel = channel
        # Guards _buffer, _eof and _listeners; the message handler runs on
        # channel threads while reads happen on the consumer's thread.
        self._lock = RLock()
        self._buffer = bytearray()
        self._eof = False
        self._channel._register_message_type(StreamDataMessage, is_system_type=True)
        self._channel.add_message_handler(self._handle_message)
        self._listeners: [Callable[[int], None]] = []

    def add_ready_callback(self, cb: Callable[[int], None]):
        """
        Add a function to be called when new data is available.
        The function should have the signature ``(ready_bytes: int) -> None``

        :param cb: function to call
        """
        with self._lock:
            self._listeners.append(cb)

    def remove_ready_callback(self, cb: Callable[[int], None]):
        """
        Remove a function added with :func:`RNS.RawChannelReader.add_ready_callback()`

        :param cb: function to remove
        """
        with self._lock:
            self._listeners.remove(cb)

    def _handle_message(self, message: MessageBase):
        # Channel message handler; returning True tells the channel the
        # message was consumed by this reader.
        if isinstance(message, StreamDataMessage):
            if message.stream_id == self._stream_id:
                with self._lock:
                    if message.data is not None:
                        self._buffer.extend(message.data)
                    if message.eof:
                        self._eof = True
                    for listener in self._listeners:
                        try:
                            # Each listener is notified on its own daemon thread so
                            # a slow callback cannot stall the channel.
                            threading.Thread(target=listener, name="Message Callback", args=[len(self._buffer)], daemon=True).start()
                        except Exception as ex:
                            RNS.log("Error calling RawChannelReader(" + str(self._stream_id) + ") callback: " + str(ex))
                    return True
        return False

    def _read(self, __size: int) -> bytes | None:
        # Pop up to __size bytes. None means "no data yet" (stream still open);
        # an empty result with _eof set signals end-of-stream to callers.
        with self._lock:
            result = self._buffer[:__size]
            self._buffer = self._buffer[__size:]
            return result if len(result) > 0 or self._eof else None

    def readinto(self, __buffer: bytearray) -> int | None:
        # RawIOBase hook: fill __buffer from the internal byte buffer.
        ready = self._read(len(__buffer))
        if ready is not None:
            __buffer[:len(ready)] = ready
        return len(ready) if ready is not None else None

    def writable(self) -> bool:
        return False

    def seekable(self) -> bool:
        return False

    def readable(self) -> bool:
        return True

    def close(self):
        # Detach from the channel and drop all listeners.
        with self._lock:
            self._channel.remove_message_handler(self._handle_message)
            self._listeners.clear()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False
class RawChannelWriter(RawIOBase, AbstractContextManager):
    """
    An implementation of RawIOBase that receives
    binary stream data sent over a channel.

    This class generally need not be instantiated directly.
    Use :func:`RNS.Buffer.create_reader`,
    :func:`RNS.Buffer.create_writer`, and
    :func:`RNS.Buffer.create_bidirectional_buffer` functions
    to create buffered streams with optional callbacks.

    For additional information on the API of this
    object, see the Python documentation for
    ``RawIOBase``.
    """

    def __init__(self, stream_id: int, channel: Channel):
        """
        Create a raw channel writer.

        :param stream_id: remote stream id to sent do
        :param channel: ``Channel`` object to send on
        """
        self._stream_id = stream_id
        self._channel = channel
        self._eof = False

    def write(self, __b: bytes) -> int | None:
        # Sends at most MAX_DATA_LEN bytes per call; the buffered layer calls
        # write() repeatedly until everything has been flushed.
        try:
            chunk = bytes(__b[:StreamDataMessage.MAX_DATA_LEN])
            message = StreamDataMessage(self._stream_id, chunk, self._eof)
            self._channel.send(message)
            return len(chunk)
        except RNS.Channel.ChannelException as cex:
            # A link that is merely not ready yet is not fatal; report zero
            # bytes written so the caller retries later.
            if cex.type != RNS.Channel.CEType.ME_LINK_NOT_READY:
                raise
        return 0

    def close(self):
        # Give in-flight messages a chance to drain (roughly one RTT per
        # queued message, 15 s fallback), then emit an empty EOF message.
        try:
            link_rtt = self._channel._outlet.link.rtt
            timeout = time.time() + (link_rtt * len(self._channel._tx_ring) * 1)
        except Exception as e:
            timeout = time.time() + 15
        while time.time() < timeout and not self._channel.is_ready_to_send():
            time.sleep(0.05)
        self._eof = True
        self.write(bytes())

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        return False

    def seekable(self) -> bool:
        return False

    def readable(self) -> bool:
        return False

    def writable(self) -> bool:
        return True
class Buffer:
    """
    Static factory functions for buffered streams that send and receive
    over a ``Channel``.

    Buffering is provided by wrapping ``RawChannelReader`` and
    ``RawChannelWriter`` in ``BufferedReader``, ``BufferedWriter``,
    and ``BufferedRWPair``.
    """

    @staticmethod
    def create_reader(stream_id: int, channel: Channel,
                      ready_callback: Callable[[int], None] | None = None) -> BufferedReader:
        """
        Create a buffered reader for binary data sent over a ``Channel``,
        optionally notifying a callback when new data arrives.

        Callback signature: ``(ready_bytes: int) -> None``

        See the Python documentation for ``BufferedReader`` for the
        reader-specific API.

        :param stream_id: the local stream id to receive from
        :param channel: the channel to receive on
        :param ready_callback: function to call when new data is available
        :return: a BufferedReader object
        """
        raw_reader = RawChannelReader(stream_id, channel)
        if ready_callback:
            raw_reader.add_ready_callback(ready_callback)
        return BufferedReader(raw_reader)

    @staticmethod
    def create_writer(stream_id: int, channel: Channel) -> BufferedWriter:
        """
        Create a buffered writer for binary data sent over a ``Channel``.

        See the Python documentation for ``BufferedWriter`` for the
        writer-specific API.

        :param stream_id: the remote stream id to send to
        :param channel: the channel to send on
        :return: a BufferedWriter object
        """
        raw_writer = RawChannelWriter(stream_id, channel)
        return BufferedWriter(raw_writer)

    @staticmethod
    def create_bidirectional_buffer(receive_stream_id: int, send_stream_id: int, channel: Channel,
                                    ready_callback: Callable[[int], None] | None = None) -> BufferedRWPair:
        """
        Create a buffered reader/writer pair over a ``Channel``, optionally
        notifying a callback when new data arrives.

        Callback signature: ``(ready_bytes: int) -> None``

        See the Python documentation for ``BufferedRWPair`` for details.

        :param receive_stream_id: the local stream id to receive at
        :param send_stream_id: the remote stream id to send to
        :param channel: the channel to send and receive on
        :param ready_callback: function to call when new data is available
        :return: a BufferedRWPair object
        """
        raw_reader = RawChannelReader(receive_stream_id, channel)
        if ready_callback:
            raw_reader.add_ready_callback(ready_callback)
        raw_writer = RawChannelWriter(send_stream_id, channel)
        return BufferedRWPair(raw_reader, raw_writer)
import math
import os
import RNS
import time
import atexit
import hashlib
from .vendor import umsgpack as umsgpack
from RNS.Cryptography import X25519PrivateKey, X25519PublicKey, Ed25519PrivateKey, Ed25519PublicKey
from RNS.Cryptography import Fernet
class Identity:
"""
This class is used to manage identities in Reticulum. It provides methods
for encryption, decryption, signatures and verification, and is the basis
for all encrypted communication over Reticulum networks.
:param create_keys: Specifies whether new encryption and signing keys should be generated.
"""
CURVE = "Curve25519"
"""
The curve used for Elliptic Curve DH key exchanges
"""
KEYSIZE = 256*2
"""
X25519 key size in bits. A complete key is the concatenation of a 256 bit encryption key, and a 256 bit signing key.
"""
# Non-configurable constants
FERNET_OVERHEAD = RNS.Cryptography.Fernet.FERNET_OVERHEAD
AES128_BLOCKSIZE = 16 # In bytes
HASHLENGTH = 256 # In bits
SIGLENGTH = KEYSIZE # In bits
NAME_HASH_LENGTH = 80
TRUNCATED_HASHLENGTH = RNS.Reticulum.TRUNCATED_HASHLENGTH
"""
Constant specifying the truncated hash length (in bits) used by Reticulum
for addressable hashes and other purposes. Non-configurable.
"""
# Storage
known_destinations = {}
@staticmethod
def remember(packet_hash, destination_hash, public_key, app_data = None):
if len(public_key) != Identity.KEYSIZE//8:
raise TypeError("Can't remember "+RNS.prettyhexrep(destination_hash)+", the public key size of "+str(len(public_key))+" is not valid.", RNS.LOG_ERROR)
else:
Identity.known_destinations[destination_hash] = [time.time(), packet_hash, public_key, app_data]
@staticmethod
def recall(destination_hash):
"""
Recall identity for a destination hash.
:param destination_hash: Destination hash as *bytes*.
:returns: An :ref:`RNS.Identity<api-identity>` instance that can be used to create an outgoing :ref:`RNS.Destination<api-destination>`, or *None* if the destination is unknown.
"""
if destination_hash in Identity.known_destinations:
identity_data = Identity.known_destinations[destination_hash]
identity = Identity(create_keys=False)
identity.load_public_key(identity_data[2])
identity.app_data = identity_data[3]
return identity
else:
for registered_destination in RNS.Transport.destinations:
if destination_hash == registered_destination.hash:
identity = Identity(create_keys=False)
identity.load_public_key(registered_destination.identity.get_public_key())
identity.app_data = None
return identity
return None
@staticmethod
def recall_app_data(destination_hash):
"""
Recall last heard app_data for a destination hash.
:param destination_hash: Destination hash as *bytes*.
:returns: *Bytes* containing app_data, or *None* if the destination is unknown.
"""
if destination_hash in Identity.known_destinations:
app_data = Identity.known_destinations[destination_hash][3]
return app_data
else:
return None
    @staticmethod
    def save_known_destinations():
        """Persist the in-memory known-destinations table to storage.

        Waits for any in-progress save (up to 5 s), merges entries already on
        disk that are missing from memory, then serializes the whole table.

        :returns: False if waiting for a previous save timed out, otherwise None.
        """
        # TODO: Improve the storage method so we don't have to
        # deserialize and serialize the entire table on every
        # save, but the only changes. It might be possible to
        # simply overwrite on exit now that every local client
        # disconnect triggers a data persist.

        try:
            # saving_known_destinations doubles as a "save in progress" flag;
            # it only exists after the first save attempt.
            if hasattr(Identity, "saving_known_destinations"):
                wait_interval = 0.2
                wait_timeout = 5
                wait_start = time.time()
                while Identity.saving_known_destinations:
                    time.sleep(wait_interval)
                    if time.time() > wait_start+wait_timeout:
                        RNS.log("Could not save known destinations to storage, waiting for previous save operation timed out.", RNS.LOG_ERROR)
                        return False

            Identity.saving_known_destinations = True
            save_start = time.time()

            storage_known_destinations = {}
            if os.path.isfile(RNS.Reticulum.storagepath+"/known_destinations"):
                try:
                    file = open(RNS.Reticulum.storagepath+"/known_destinations","rb")
                    storage_known_destinations = umsgpack.load(file)
                    file.close()
                except:
                    pass

            # Merge entries already on disk that are not in memory, so data
            # written by another process is not lost on overwrite.
            for destination_hash in storage_known_destinations:
                if not destination_hash in Identity.known_destinations:
                    Identity.known_destinations[destination_hash] = storage_known_destinations[destination_hash]

            RNS.log("Saving "+str(len(Identity.known_destinations))+" known destinations to storage...", RNS.LOG_DEBUG)
            file = open(RNS.Reticulum.storagepath+"/known_destinations","wb")
            umsgpack.dump(Identity.known_destinations, file)
            file.close()
            save_time = time.time() - save_start
            if save_time < 1:
                time_str = str(round(save_time*1000,2))+"ms"
            else:
                time_str = str(round(save_time,2))+"s"
            RNS.log("Saved known destinations to storage in "+time_str, RNS.LOG_DEBUG)

        except Exception as e:
            RNS.log("Error while saving known destinations to disk, the contained exception was: "+str(e), RNS.LOG_ERROR)

        Identity.saving_known_destinations = False
    @staticmethod
    def load_known_destinations():
        """Load the known-destinations table from storage, replacing the
        in-memory table. Entries whose keys are not valid truncated-hash
        length are dropped."""
        if os.path.isfile(RNS.Reticulum.storagepath+"/known_destinations"):
            try:
                file = open(RNS.Reticulum.storagepath+"/known_destinations","rb")
                loaded_known_destinations = umsgpack.load(file)
                file.close()

                Identity.known_destinations = {}
                for known_destination in loaded_known_destinations:
                    # Filter out malformed keys from older or corrupted files.
                    if len(known_destination) == RNS.Reticulum.TRUNCATED_HASHLENGTH//8:
                        Identity.known_destinations[known_destination] = loaded_known_destinations[known_destination]

                RNS.log("Loaded "+str(len(Identity.known_destinations))+" known destination from storage", RNS.LOG_VERBOSE)

            except:
                RNS.log("Error loading known destinations from disk, file will be recreated on exit", RNS.LOG_ERROR)
        else:
            RNS.log("Destinations file does not exist, no known destinations loaded", RNS.LOG_VERBOSE)
    @staticmethod
    def full_hash(data):
        """
        Get a SHA-256 hash of passed data.

        :param data: Data to be hashed as *bytes*.
        :returns: SHA-256 hash as *bytes*
        """
        # Delegates to the RNS cryptography layer's SHA-256 implementation.
        return RNS.Cryptography.sha256(data)
    @staticmethod
    def truncated_hash(data):
        """
        Get a truncated SHA-256 hash of passed data.

        :param data: Data to be hashed as *bytes*.
        :returns: Truncated SHA-256 hash as *bytes* (TRUNCATED_HASHLENGTH bits).
        """
        return Identity.full_hash(data)[:(Identity.TRUNCATED_HASHLENGTH//8)]
    @staticmethod
    def get_random_hash():
        """
        Get a random SHA-256 hash.

        :returns: Truncated SHA-256 hash of random data as *bytes*
        """
        return Identity.truncated_hash(os.urandom(Identity.TRUNCATED_HASHLENGTH//8))
    @staticmethod
    def validate_announce(packet):
        """Validate an incoming announce packet.

        Checks the Ed25519 signature over the announced data, verifies that
        the destination hash is bound to the announced key and name hash,
        rejects key replacement for already-known destinations, and caches
        the identity on success.

        :param packet: the received announce packet
        :returns: True if the announce validated, otherwise False.
        """
        try:
            if packet.packet_type == RNS.Packet.ANNOUNCE:
                destination_hash = packet.destination_hash
                # Announce data layout:
                #   public key | name hash | 10-byte random hash | signature | optional app_data
                public_key = packet.data[:Identity.KEYSIZE//8]
                name_hash = packet.data[Identity.KEYSIZE//8:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8]
                random_hash = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10]
                signature = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10:Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8]
                app_data = b""
                if len(packet.data) > Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:
                    app_data = packet.data[Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:]

                signed_data = destination_hash+public_key+name_hash+random_hash+app_data

                # The signature covers absent app_data as b"", but the cached
                # value should be None when no app_data was included.
                if not len(packet.data) > Identity.KEYSIZE//8+Identity.NAME_HASH_LENGTH//8+10+Identity.SIGLENGTH//8:
                    app_data = None

                announced_identity = Identity(create_keys=False)
                announced_identity.load_public_key(public_key)

                if announced_identity.pub != None and announced_identity.validate(signature, signed_data):
                    # The destination hash must equal the truncated hash of
                    # (name hash || identity hash), binding name and key.
                    hash_material = name_hash+announced_identity.hash
                    expected_hash = RNS.Identity.full_hash(hash_material)[:RNS.Reticulum.TRUNCATED_HASHLENGTH//8]

                    if destination_hash == expected_hash:
                        # Check if we already have a public key for this destination
                        # and make sure the public key is not different.
                        if destination_hash in Identity.known_destinations:
                            if public_key != Identity.known_destinations[destination_hash][2]:
                                # In reality, this should never occur, but in the odd case
                                # that someone manages a hash collision, we reject the announce.
                                RNS.log("Received announce with valid signature and destination hash, but announced public key does not match already known public key.", RNS.LOG_CRITICAL)
                                RNS.log("This may indicate an attempt to modify network paths, or a random hash collision. The announce was rejected.", RNS.LOG_CRITICAL)
                                return False

                        RNS.Identity.remember(packet.get_hash(), destination_hash, public_key, app_data)
                        del announced_identity

                        # Build an optional RSSI/SNR suffix for the log line.
                        if packet.rssi != None or packet.snr != None:
                            signal_str = " ["
                            if packet.rssi != None:
                                signal_str += "RSSI "+str(packet.rssi)+"dBm"
                                if packet.snr != None:
                                    signal_str += ", "
                            if packet.snr != None:
                                signal_str += "SNR "+str(packet.snr)+"dB"
                            signal_str += "]"
                        else:
                            signal_str = ""

                        if hasattr(packet, "transport_id") and packet.transport_id != None:
                            RNS.log("Valid announce for "+RNS.prettyhexrep(destination_hash)+" "+str(packet.hops)+" hops away, received via "+RNS.prettyhexrep(packet.transport_id)+" on "+str(packet.receiving_interface)+signal_str, RNS.LOG_EXTREME)
                        else:
                            RNS.log("Valid announce for "+RNS.prettyhexrep(destination_hash)+" "+str(packet.hops)+" hops away, received on "+str(packet.receiving_interface)+signal_str, RNS.LOG_EXTREME)

                        return True

                    else:
                        RNS.log("Received invalid announce for "+RNS.prettyhexrep(destination_hash)+": Destination mismatch.", RNS.LOG_DEBUG)
                        return False

                else:
                    RNS.log("Received invalid announce for "+RNS.prettyhexrep(destination_hash)+": Invalid signature.", RNS.LOG_DEBUG)
                    del announced_identity
                    return False

        except Exception as e:
            RNS.log("Error occurred while validating announce. The contained exception was: "+str(e), RNS.LOG_ERROR)
            return False
    @staticmethod
    def persist_data():
        """Persist identity data unless this process is a client of a shared
        instance (in which case the shared instance owns persistence)."""
        if not RNS.Transport.owner.is_connected_to_shared_instance:
            Identity.save_known_destinations()
    @staticmethod
    def exit_handler():
        """Shutdown hook: flush identity data to storage."""
        Identity.persist_data()
@staticmethod
def from_bytes(prv_bytes):
"""
Create a new :ref:`RNS.Identity<api-identity>` instance from *bytes* of private key.
Can be used to load previously created and saved identities into Reticulum.
:param prv_bytes: The *bytes* of private a saved private key. **HAZARD!** Never use this to generate a new key by feeding random data in prv_bytes.
:returns: A :ref:`RNS.Identity<api-identity>` instance, or *None* if the *bytes* data was invalid.
"""
identity = Identity(create_keys=False)
if identity.load_private_key(prv_bytes):
return identity
else:
return None
@staticmethod
def from_file(path):
"""
Create a new :ref:`RNS.Identity<api-identity>` instance from a file.
Can be used to load previously created and saved identities into Reticulum.
:param path: The full path to the saved :ref:`RNS.Identity<api-identity>` data
:returns: A :ref:`RNS.Identity<api-identity>` instance, or *None* if the loaded data was invalid.
"""
identity = Identity(create_keys=False)
if identity.load(path):
return identity
else:
return None
def to_file(self, path):
"""
Saves the identity to a file. This will write the private key to disk,
and anyone with access to this file will be able to decrypt all
communication for the identity. Be very careful with this method.
:param path: The full path specifying where to save the identity.
:returns: True if the file was saved, otherwise False.
"""
try:
with open(path, "wb") as key_file:
key_file.write(self.get_private_key())
return True
return False
except Exception as e:
RNS.log("Error while saving identity to "+str(path), RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e))
    def __init__(self,create_keys=True):
        """
        :param create_keys: Specifies whether new encryption and signing keys should be generated.
        """
        # Initialize keys to none
        self.prv = None            # X25519 private key object
        self.prv_bytes = None      # X25519 private key as bytes
        self.sig_prv = None        # Ed25519 signing key object
        self.sig_prv_bytes = None  # Ed25519 signing key as bytes
        self.pub = None            # X25519 public key object
        self.pub_bytes = None      # X25519 public key as bytes
        self.sig_pub = None        # Ed25519 verifying key object
        self.sig_pub_bytes = None  # Ed25519 verifying key as bytes
        self.hash = None           # truncated hash of the full public key
        self.hexhash = None        # hex string form of self.hash
        if create_keys:
            self.create_keys()
    def create_keys(self):
        """Generate fresh X25519 (encryption) and Ed25519 (signing) keypairs
        and update the identity's hashes."""
        self.prv = X25519PrivateKey.generate()
        self.prv_bytes = self.prv.private_bytes()

        self.sig_prv = Ed25519PrivateKey.generate()
        self.sig_prv_bytes = self.sig_prv.private_bytes()

        self.pub = self.prv.public_key()
        self.pub_bytes = self.pub.public_bytes()

        self.sig_pub = self.sig_prv.public_key()
        self.sig_pub_bytes = self.sig_pub.public_bytes()

        self.update_hashes()

        RNS.log("Identity keys created for "+RNS.prettyhexrep(self.hash), RNS.LOG_VERBOSE)
    def get_private_key(self):
        """
        :returns: The private key as *bytes* (X25519 key followed by Ed25519 key)
        """
        return self.prv_bytes+self.sig_prv_bytes
    def get_public_key(self):
        """
        :returns: The public key as *bytes* (X25519 key followed by Ed25519 key)
        """
        return self.pub_bytes+self.sig_pub_bytes
def load_private_key(self, prv_bytes):
"""
Load a private key into the instance.
:param prv_bytes: The private key as *bytes*.
:returns: True if the key was loaded, otherwise False.
"""
try:
self.prv_bytes = prv_bytes[:Identity.KEYSIZE//8//2]
self.prv = X25519PrivateKey.from_private_bytes(self.prv_bytes)
self.sig_prv_bytes = prv_bytes[Identity.KEYSIZE//8//2:]
self.sig_prv = Ed25519PrivateKey.from_private_bytes(self.sig_prv_bytes)
self.pub = self.prv.public_key()
self.pub_bytes = self.pub.public_bytes()
self.sig_pub = self.sig_prv.public_key()
self.sig_pub_bytes = self.sig_pub.public_bytes()
self.update_hashes()
return True
except Exception as e:
raise e
RNS.log("Failed to load identity key", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
return False
def load_public_key(self, pub_bytes):
"""
Load a public key into the instance.
:param pub_bytes: The public key as *bytes*.
:returns: True if the key was loaded, otherwise False.
"""
try:
self.pub_bytes = pub_bytes[:Identity.KEYSIZE//8//2]
self.sig_pub_bytes = pub_bytes[Identity.KEYSIZE//8//2:]
self.pub = X25519PublicKey.from_public_bytes(self.pub_bytes)
self.sig_pub = Ed25519PublicKey.from_public_bytes(self.sig_pub_bytes)
self.update_hashes()
except Exception as e:
RNS.log("Error while loading public key, the contained exception was: "+str(e), RNS.LOG_ERROR)
    def update_hashes(self):
        """Recompute the addressable hash and its hex form from the public key."""
        self.hash = Identity.truncated_hash(self.get_public_key())
        self.hexhash = self.hash.hex()
def load(self, path):
try:
with open(path, "rb") as key_file:
prv_bytes = key_file.read()
return self.load_private_key(prv_bytes)
return False
except Exception as e:
RNS.log("Error while loading identity from "+str(path), RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
    def get_salt(self):
        """Return the HKDF salt used for this identity: its truncated hash."""
        return self.hash
    def get_context(self):
        """Return the HKDF context value; always None for identities."""
        return None
    def encrypt(self, plaintext):
        """
        Encrypts information for the identity.

        :param plaintext: The plaintext to be encrypted as *bytes*.
        :returns: Ciphertext token as *bytes*.
        :raises: *KeyError* if the instance does not hold a public key.
        """
        if self.pub != None:
            # Ephemeral-static ECDH: a fresh keypair per message, so the
            # receiver can derive the shared secret from the attached key.
            ephemeral_key = X25519PrivateKey.generate()
            ephemeral_pub_bytes = ephemeral_key.public_key().public_bytes()

            shared_key = ephemeral_key.exchange(self.pub)
            derived_key = RNS.Cryptography.hkdf(
                length=32,
                derive_from=shared_key,
                salt=self.get_salt(),
                context=self.get_context(),
            )

            fernet = Fernet(derived_key)
            ciphertext = fernet.encrypt(plaintext)
            # Token layout: 32-byte ephemeral public key || Fernet ciphertext.
            token = ephemeral_pub_bytes+ciphertext

            return token
        else:
            raise KeyError("Encryption failed because identity does not hold a public key")
def decrypt(self, ciphertext_token):
"""
Decrypts information for the identity.
:param ciphertext: The ciphertext to be decrypted as *bytes*.
:returns: Plaintext as *bytes*, or *None* if decryption fails.
:raises: *KeyError* if the instance does not hold a private key.
"""
if self.prv != None:
if len(ciphertext_token) > Identity.KEYSIZE//8//2:
plaintext = None
try:
peer_pub_bytes = ciphertext_token[:Identity.KEYSIZE//8//2]
peer_pub = X25519PublicKey.from_public_bytes(peer_pub_bytes)
shared_key = self.prv.exchange(peer_pub)
derived_key = RNS.Cryptography.hkdf(
length=32,
derive_from=shared_key,
salt=self.get_salt(),
context=self.get_context(),
)
fernet = Fernet(derived_key)
ciphertext = ciphertext_token[Identity.KEYSIZE//8//2:]
plaintext = fernet.decrypt(ciphertext)
except Exception as e:
RNS.log("Decryption by "+RNS.prettyhexrep(self.hash)+" failed: "+str(e), RNS.LOG_DEBUG)
return plaintext;
else:
RNS.log("Decryption failed because the token size was invalid.", RNS.LOG_DEBUG)
return None
else:
raise KeyError("Decryption failed because identity does not hold a private key")
    def sign(self, message):
        """
        Signs information by the identity.

        :param message: The message to be signed as *bytes*.
        :returns: Signature as *bytes*.
        :raises: *KeyError* if the instance does not hold a private key.
        """
        if self.sig_prv != None:
            try:
                return self.sig_prv.sign(message)
            except Exception as e:
                RNS.log("The identity "+str(self)+" could not sign the requested message. The contained exception was: "+str(e), RNS.LOG_ERROR)
                raise e
        else:
            raise KeyError("Signing failed because identity does not hold a private key")
def validate(self, signature, message):
"""
Validates the signature of a signed message.
:param signature: The signature to be validated as *bytes*.
:param message: The message to be validated as *bytes*.
:returns: True if the signature is valid, otherwise False.
:raises: *KeyError* if the instance does not hold a public key.
"""
if self.pub != None:
try:
self.sig_pub.verify(signature, message)
return True
except Exception as e:
return False
else:
raise KeyError("Signature validation failed because identity does not hold a public key")
    def prove(self, packet, destination=None):
        """Sign a received packet's hash and send the resulting proof back.

        :param packet: the packet to prove
        :param destination: optional proof destination; defaults to the packet's generated proof destination
        """
        signature = self.sign(packet.packet_hash)
        # Implicit proofs carry only the signature; explicit proofs prepend
        # the packet hash.
        if RNS.Reticulum.should_use_implicit_proof():
            proof_data = signature
        else:
            proof_data = packet.packet_hash + signature

        if destination == None:
            destination = packet.generate_proof_destination()

        proof = RNS.Packet(destination, proof_data, RNS.Packet.PROOF, attached_interface = packet.receiving_interface)
        proof.send()
def __str__(self):
return RNS.prettyhexrep(self.hash) | /rnspure-0.5.7.tar.gz/rnspure-0.5.7/RNS/Identity.py | 0.514644 | 0.20832 | Identity.py | pypi |
import warnings as _warnings
import hashlib as _hashlib
# Translation tables XOR-ing every possible byte value with the HMAC
# opad (0x5C) and ipad (0x36) constants from RFC 2104.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))

# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.

    __slots__ = (
        "_hmac", "_inner", "_outer", "block_size", "digest_size"
    )

    def __init__(self, key, msg=None, digestmod=_hashlib.sha256):
        """Create a new HMAC object.

        key: bytes or buffer, key for the keyed hash object.
        msg: bytes or buffer, Initial input for the hash or None.
        digestmod: A hash name suitable for hashlib.new(). *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A module supporting PEP 247.

                   Required as of 3.8, despite its position after the optional
                   msg argument. Passing it as a keyword argument is
                   recommended, though not required for legacy API reasons.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if not digestmod:
            raise TypeError("Missing required parameter 'digestmod'.")

        self._hmac_init(key, msg, digestmod)

    def _hmac_init(self, key, msg, digestmod):
        # Normalize digestmod (constructor / hash name / PEP 247 module) into
        # a callable producing new hash objects.
        if callable(digestmod):
            digest_cons = digestmod
        elif isinstance(digestmod, str):
            digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            digest_cons = lambda d=b'': digestmod.new(d)

        self._hmac = None
        self._outer = digest_cons()
        self._inner = digest_cons()
        self.digest_size = self._inner.digest_size

        if hasattr(self._inner, 'block_size'):
            blocksize = self._inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # Keys longer than one block are first hashed down, per RFC 2104.
        if len(key) > blocksize:
            key = digest_cons(key).digest()

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # Zero-pad the key to a full block, then prime the inner/outer hashes
        # with key XOR ipad (0x36) and key XOR opad (0x5C) respectively.
        key = key.ljust(blocksize, b'\0')
        self._outer.update(key.translate(trans_5C))
        self._inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        """Canonical name of this HMAC, e.g. ``hmac-sha256``."""
        if self._hmac:
            return self._hmac.name
        else:
            return f"hmac-{self._inner.name}"

    def update(self, msg):
        """Feed data from msg into this hashing object."""
        inst = self._hmac or self._inner
        inst.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_size = self.digest_size
        if self._hmac:
            other._hmac = self._hmac.copy()
            other._inner = other._outer = None
        else:
            other._hmac = None
            other._inner = self._inner.copy()
            other._outer = self._outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        if self._hmac:
            return self._hmac
        else:
            # Finalize on copies so the running state stays updatable.
            h = self._outer.copy()
            h.update(self._inner.digest())
            return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns the hmac value as bytes. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg=None, digestmod=_hashlib.sha256):
    """Create a new hashing object and return it.

    key: bytes or buffer, The starting key for the hash.
    msg: bytes or buffer, Initial input for the hash, or None.
    digestmod: A hash name suitable for hashlib.new(). *OR*
               A hashlib constructor returning a new hash object. *OR*
               A module supporting PEP 247.

               Required as of 3.8, despite its position after the optional
               msg argument. Passing it as a keyword argument is
               recommended, though not required for legacy API reasons.

    You can now feed arbitrary bytes into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    or hexdigest() methods.
    """
    return HMAC(key, msg, digestmod)
def digest(key, msg, digest):
    """Fast inline implementation of HMAC.

    key: bytes or buffer, The key for the keyed hash object.
    msg: bytes or buffer, Input message.
    digest: A hash name suitable for hashlib.new() for best performance. *OR*
            A hashlib constructor returning a new hash object. *OR*
            A module supporting PEP 247.
    """
    # Normalize digest (constructor / hash name / PEP 247 module) into a
    # callable producing new hash objects.
    if callable(digest):
        make_hash = digest
    elif isinstance(digest, str):
        make_hash = lambda d=b'': _hashlib.new(digest, d)
    else:
        make_hash = lambda d=b'': digest.new(d)

    inner_hash = make_hash()
    outer_hash = make_hash()
    blocksize = getattr(inner_hash, 'block_size', 64)

    # Long keys are hashed down, short keys zero-padded, per RFC 2104.
    if len(key) > blocksize:
        key = make_hash(key).digest()
    key = key + b'\x00' * (blocksize - len(key))

    inner_hash.update(key.translate(trans_36))
    inner_hash.update(msg)
    outer_hash.update(key.translate(trans_5C))
    outer_hash.update(inner_hash.digest())
    return outer_hash.digest()
# WARNING! Only the X25519PrivateKey.exchange() method attempts to hide execution time.
# In the context of Reticulum, this is sufficient, but it may not be in other systems. If
# this code is to be used to provide cryptographic security in an environment where the
# start and end times of the execution can be guessed, inferred or measured then it is
# critical that steps are taken to hide the execution time, for instance by adding a
# delay so that encrypted packets are not sent until a fixed time after the _start_ of
# execution.
import os
import time
P = 2 ** 255 - 19  # Field prime of Curve25519: arithmetic is done in GF(2^255 - 19).
_A = 486662        # Montgomery curve coefficient A in y^2 = x^3 + A*x^2 + x.
def _point_add(point_n, point_m, point_diff):
    """Differential addition: given P, Q and P-Q in projective (x, z)
    coordinates, return P+Q."""
    (xn, zn) = point_n
    (xm, zm) = point_m
    (x_diff, z_diff) = point_diff
    # Montgomery differential-addition formulas.
    cross_sub = xm * xn - zm * zn
    cross_mix = xm * zn - zm * xn
    sum_x = (z_diff << 2) * cross_sub ** 2
    sum_z = (x_diff << 2) * cross_mix ** 2
    return sum_x % P, sum_z % P
def _point_double(point_n):
    """Double a point given in projective (x, z) coordinates."""
    (xn, zn) = point_n
    xx = xn * xn
    zz = zn * zn
    xz = xn * zn
    # Montgomery doubling formulas.
    doubled_x = (xx - zz) ** 2
    doubled_z = 4 * xz * (xx + _A * xz + zz)
    return doubled_x % P, doubled_z % P
def _const_time_swap(a, b, swap):
    """Swap two values in constant time"""
    # Branch-free selection: indexing into a fixed 4-tuple avoids a
    # data-dependent if/else, so execution time does not leak the swap bit.
    index = int(swap) * 2
    temp = (a, b, b, a)
    return temp[index:index+2]
def _raw_curve25519(base, n):
    """Raise the point base to the power n"""
    # Montgomery ladder: one doubling and one differential addition per bit,
    # bracketed by constant-time swaps, so the sequence of operations is the
    # same regardless of the bits of n.
    zero = (1, 0)      # neutral element in projective (x, z) form
    one = (base, 1)    # the input point, z normalised to 1
    mP, m1P = zero, one
    for i in reversed(range(256)):
        bit = bool(n & (1 << i))
        mP, m1P = _const_time_swap(mP, m1P, bit)
        mP, m1P = _point_double(mP), _point_add(mP, m1P, one)
        mP, m1P = _const_time_swap(mP, m1P, bit)
    x, z = mP
    # Modular inverse of z via Fermat's little theorem (P is prime).
    inv_z = pow(z, P - 2, P)
    return (x * inv_z) % P
def _unpack_number(s):
"""Unpack 32 bytes to a 256 bit value"""
if len(s) != 32:
raise ValueError('Curve25519 values must be 32 bytes')
return int.from_bytes(s, "little")
def _pack_number(n):
"""Pack a value into 32 bytes"""
return n.to_bytes(32, "little")
def _fix_secret(n):
"""Mask a value to be an acceptable exponent"""
n &= ~7
n &= ~(128 << 8 * 31)
n |= 64 << 8 * 31
return n
def curve25519(base_point_raw, secret_raw):
    """Scalar-multiply an arbitrary point by a (clamped) secret scalar."""
    point = _unpack_number(base_point_raw)
    scalar = _fix_secret(_unpack_number(secret_raw))
    return _pack_number(_raw_curve25519(point, scalar))
def curve25519_base(secret_raw):
    """Scalar-multiply the standard generator (x = 9) by a secret scalar."""
    return _pack_number(
        _raw_curve25519(9, _fix_secret(_unpack_number(secret_raw))))
class X25519PublicKey:
    """Thin wrapper around the integer x-coordinate of a Curve25519 public key."""

    def __init__(self, x):
        self.x = x

    @classmethod
    def from_public_bytes(cls, data):
        """Build a key from its 32-byte little-endian encoding."""
        return cls(_unpack_number(data))

    def public_bytes(self):
        """Return the 32-byte little-endian encoding of this key."""
        return _pack_number(self.x)
class X25519PrivateKey:
    """Curve25519 private key with execution-time masking in exchange()."""

    MIN_EXEC_TIME = 0.002  # never return faster than this (seconds)
    MAX_EXEC_TIME = 0.5    # upper bound on the artificial delay target
    DELAY_WINDOW = 10      # seconds before the observed maximum is reset
    T_CLEAR = None         # wall-clock time at which T_MAX expires
    T_MAX = 0              # slowest exchange observed in the current window

    def __init__(self, a):
        self.a = a

    @classmethod
    def generate(cls):
        """Create a new private key from 32 random bytes."""
        return cls.from_private_bytes(os.urandom(32))

    @classmethod
    def from_private_bytes(cls, data):
        """Load (and clamp) a private key from its raw 32-byte encoding."""
        return cls(_fix_secret(_unpack_number(data)))

    def private_bytes(self):
        """Return the raw 32-byte encoding of the (clamped) secret."""
        return _pack_number(self.a)

    def public_key(self):
        """Derive the matching public key (generator raised to the secret)."""
        return X25519PublicKey.from_public_bytes(_pack_number(_raw_curve25519(9, self.a)))

    def exchange(self, peer_public_key):
        """Compute the shared secret, padding execution time to hide timing.

        The call never returns faster than the slowest exchange observed in
        the last DELAY_WINDOW seconds (clamped between MIN_EXEC_TIME and
        MAX_EXEC_TIME), masking data-dependent timing of the scalar
        multiplication. See the module header for the threat-model caveats.
        """
        if isinstance(peer_public_key, bytes):
            peer_public_key = X25519PublicKey.from_public_bytes(peer_public_key)

        start = time.time()
        shared = _pack_number(_raw_curve25519(peer_public_key.x, self.a))
        end = time.time()
        duration = end - start

        if X25519PrivateKey.T_CLEAR is None:
            X25519PrivateKey.T_CLEAR = end + X25519PrivateKey.DELAY_WINDOW

        if end > X25519PrivateKey.T_CLEAR:
            # Measurement window expired: start tracking a fresh maximum.
            X25519PrivateKey.T_CLEAR = end + X25519PrivateKey.DELAY_WINDOW
            X25519PrivateKey.T_MAX = 0

        if duration < X25519PrivateKey.T_MAX or duration < X25519PrivateKey.MIN_EXEC_TIME:
            # Pad execution up to the slowest observed time, within bounds.
            target = start + X25519PrivateKey.T_MAX
            if target > start + X25519PrivateKey.MAX_EXEC_TIME:
                target = start + X25519PrivateKey.MAX_EXEC_TIME
            if target < start + X25519PrivateKey.MIN_EXEC_TIME:
                target = start + X25519PrivateKey.MIN_EXEC_TIME
            try:
                time.sleep(target - time.time())
            except Exception:
                # Target already passed (negative sleep) — nothing to pad.
                pass
        elif duration > X25519PrivateKey.T_MAX:
            X25519PrivateKey.T_MAX = duration

        return shared
import os
import time
from RNS.Cryptography import HMAC
from RNS.Cryptography import PKCS7
from RNS.Cryptography.AES import AES_128_CBC
class Fernet():
    """
    This class provides a slightly modified implementation of the Fernet spec
    found at: https://github.com/fernet/spec/blob/master/Spec.md

    According to the spec, a Fernet token includes a one byte VERSION and
    eight byte TIMESTAMP field at the start of each token. These fields are
    not relevant to Reticulum. They are therefore stripped from this
    implementation, since they incur overhead and leak initiator metadata.
    """
    FERNET_OVERHEAD = 48  # Bytes: 16-byte IV + 32-byte HMAC-SHA256 tag

    @staticmethod
    def generate_key():
        """Return a fresh random 32-byte key (16 signing + 16 encryption)."""
        return os.urandom(32)

    def __init__(self, key = None):
        if key is None:
            raise ValueError("Fernet key cannot be None")

        if len(key) != 32:
            raise ValueError("Fernet key must be 32 bytes, not "+str(len(key)))

        # First half authenticates (HMAC-SHA256), second half encrypts
        # (AES-128-CBC).
        self._signing_key = key[:16]
        self._encryption_key = key[16:]

    def verify_hmac(self, token):
        """Return True when the trailing 32-byte HMAC matches the token body."""
        if len(token) <= 32:
            raise ValueError("Cannot verify HMAC on token of only "+str(len(token))+" bytes")

        received_hmac = token[-32:]
        expected_hmac = HMAC.new(self._signing_key, token[:-32]).digest()
        return received_hmac == expected_hmac

    def encrypt(self, data = None):
        """Return iv + ciphertext + hmac for the given plaintext bytes."""
        if not isinstance(data, bytes):
            raise TypeError("Fernet token plaintext input must be bytes")

        iv = os.urandom(16)
        ciphertext = AES_128_CBC.encrypt(
            plaintext = PKCS7.pad(data),
            key       = self._encryption_key,
            iv        = iv,
        )

        signed_parts = iv+ciphertext
        return signed_parts + HMAC.new(self._signing_key, signed_parts).digest()

    def decrypt(self, token = None):
        """Verify and decrypt a token produced by encrypt()."""
        if not isinstance(token, bytes):
            raise TypeError("Fernet token must be bytes")

        if not self.verify_hmac(token):
            raise ValueError("Fernet token HMAC was invalid")

        iv = token[:16]
        ciphertext = token[16:-32]

        try:
            return PKCS7.unpad(
                AES_128_CBC.decrypt(
                    ciphertext,
                    self._encryption_key,
                    iv,
                )
            )
        except Exception as e:
            raise ValueError("Could not decrypt Fernet token") from e
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric.ed25519 import Ed25519PrivateKey, Ed25519PublicKey
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey, X25519PublicKey
# These proxy classes exist to create a uniform API accross
# cryptography primitive providers.
class X25519PrivateKeyProxy:
    """Uniform-API wrapper around cryptography's X25519PrivateKey."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def generate(cls):
        """Create a proxy holding a freshly generated private key."""
        return cls(X25519PrivateKey.generate())

    @classmethod
    def from_private_bytes(cls, data):
        """Wrap a private key loaded from its raw 32-byte form."""
        return cls(X25519PrivateKey.from_private_bytes(data))

    def private_bytes(self):
        """Return the raw, unencrypted private key bytes."""
        return self.real.private_bytes(
            encoding=serialization.Encoding.Raw,
            format=serialization.PrivateFormat.Raw,
            encryption_algorithm=serialization.NoEncryption(),
        )

    def public_key(self):
        """Return the matching public key, wrapped in a proxy."""
        return X25519PublicKeyProxy(self.real.public_key())

    def exchange(self, peer_public_key):
        """Perform a Diffie-Hellman exchange against another proxy's key."""
        return self.real.exchange(peer_public_key.real)
class X25519PublicKeyProxy:
    """Uniform-API wrapper around cryptography's X25519PublicKey."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def from_public_bytes(cls, data):
        """Wrap a public key loaded from its raw 32-byte form."""
        return cls(X25519PublicKey.from_public_bytes(data))

    def public_bytes(self):
        """Return the raw public key bytes."""
        return self.real.public_bytes(
            encoding=serialization.Encoding.Raw,
            format=serialization.PublicFormat.Raw
        )
class Ed25519PrivateKeyProxy:
    """Uniform-API wrapper around cryptography's Ed25519PrivateKey."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def generate(cls):
        """Create a proxy holding a freshly generated signing key."""
        return cls(Ed25519PrivateKey.generate())

    @classmethod
    def from_private_bytes(cls, data):
        """Wrap a signing key loaded from its raw 32-byte form."""
        return cls(Ed25519PrivateKey.from_private_bytes(data))

    def private_bytes(self):
        """Return the raw, unencrypted signing key bytes."""
        return self.real.private_bytes(
            encoding=serialization.Encoding.Raw,
            format=serialization.PrivateFormat.Raw,
            encryption_algorithm=serialization.NoEncryption()
        )

    def public_key(self):
        """Return the matching verification key, wrapped in a proxy."""
        return Ed25519PublicKeyProxy(self.real.public_key())

    def sign(self, message):
        """Sign a message and return the signature bytes."""
        return self.real.sign(message)
class Ed25519PublicKeyProxy:
    """Uniform-API wrapper around cryptography's Ed25519PublicKey."""

    def __init__(self, real):
        self.real = real

    @classmethod
    def from_public_bytes(cls, data):
        """Wrap a verification key loaded from its raw 32-byte form."""
        return cls(Ed25519PublicKey.from_public_bytes(data))

    def public_bytes(self):
        """Return the raw verification key bytes."""
        return self.real.public_bytes(
            encoding=serialization.Encoding.Raw,
            format=serialization.PublicFormat.Raw
        )

    def verify(self, signature, message):
        """Verify a signature; raises on mismatch (per the wrapped API)."""
        self.real.verify(signature, message)
from base64 import b64decode, b64encode, b32encode
from hashlib import sha256
import struct
import re
I2P_B64_CHARS = "-~"  # I2P substitutes these for '+' and '/' in base64


def i2p_b64encode(x):
    """Encode binary data as an I2P-flavoured base64 string."""
    encoded = b64encode(x, altchars=I2P_B64_CHARS.encode())
    return encoded.decode()


def i2p_b64decode(x):
    """Decode an I2P-flavoured base64 string back to binary data."""
    return b64decode(x, altchars=I2P_B64_CHARS, validate=True)
SAM_BUFSIZE = 4096                     # max bytes read per SAM reply chunk
DEFAULT_ADDRESS = ("127.0.0.1", 7656)  # default SAM API host/port
DEFAULT_MIN_VER = "3.1"
DEFAULT_MAX_VER = "3.1"
TRANSIENT_DESTINATION = "TRANSIENT"

# 52 base32 characters followed by the literal ".b32.i2p" suffix. The dots
# must be escaped: unescaped, "." matched any character, so strings such as
# "<52 chars>xb32xi2p" were wrongly accepted.
VALID_BASE32_ADDRESS = re.compile(r"^([a-zA-Z0-9]{52})\.b32\.i2p$")
VALID_BASE64_ADDRESS = re.compile(r"^([a-zA-Z0-9-~=]{516,528})$")
class Message(object):
    """Parsed representation of a single SAM protocol reply line."""

    def __init__(self, s):
        self.opts = {}
        if type(s) != str:
            self._reply_string = s.decode().strip()
        else:
            self._reply_string = s
        # A reply looks like: "<CMD> <ACTION> KEY=VALUE FLAG ...".
        self.cmd, self.action, raw_opts = self._reply_string.split(" ", 2)
        for token in raw_opts.split(" "):
            if "=" in token:
                key, value = token.split("=", 1)
            else:
                # Bare flags map to True.
                key, value = token, True
            self.opts[key] = value

    def __getitem__(self, key):
        return self.opts[key]

    @property
    def ok(self):
        """True when the reply carries RESULT=OK."""
        return self["RESULT"] == "OK"

    def __repr__(self):
        return self._reply_string
# SAM request messages
def hello(min_version, max_version):
    """Build the SAM handshake request line."""
    return f"HELLO VERSION MIN={min_version} MAX={max_version}\n".encode()
def session_create(style, session_id, destination, options=""):
    """Build a SESSION CREATE request line."""
    return (f"SESSION CREATE STYLE={style} ID={session_id} "
            f"DESTINATION={destination} {options}\n").encode()
def stream_connect(session_id, destination, silent="false"):
    """Build a STREAM CONNECT request line."""
    return (f"STREAM CONNECT ID={session_id} DESTINATION={destination} "
            f"SILENT={silent}\n").encode()
def stream_accept(session_id, silent="false"):
    """Build a STREAM ACCEPT request line."""
    return f"STREAM ACCEPT ID={session_id} SILENT={silent}\n".encode()
def stream_forward(session_id, port, options=""):
    """Build a STREAM FORWARD request line."""
    return f"STREAM FORWARD ID={session_id} PORT={port} {options}\n".encode()
def naming_lookup(name):
    """Build a NAMING LOOKUP request line."""
    return f"NAMING LOOKUP NAME={name}\n".encode()
def dest_generate(signature_type):
    """Build a DEST GENERATE request line."""
    return f"DEST GENERATE SIGNATURE_TYPE={signature_type}\n".encode()
class Destination(object):
    """I2P destination
    https://geti2p.net/spec/common-structures#destination

    :param data: (optional) Base64 encoded data or binary data
    :param path: (optional) A path to a file with binary data
    :param has_private_key: (optional) Does data have a private key?
    """

    # Signature type identifiers from the I2P key certificate spec.
    ECDSA_SHA256_P256 = 1
    ECDSA_SHA384_P384 = 2
    ECDSA_SHA512_P521 = 3
    EdDSA_SHA512_Ed25519 = 7

    default_sig_type = EdDSA_SHA512_Ed25519

    _pubkey_size = 256
    _signkey_size = 128
    _min_cert_size = 3

    def __init__(self, data=None, path=None, has_private_key=False):
        #: Binary destination
        self.data = bytes()
        #: Base64 encoded destination
        self.base64 = ""
        #: :class:`RNS.vendor.i2plib.PrivateKey` instance or None
        self.private_key = None

        if path:
            with open(path, "rb") as f:
                data = f.read()

        if data and has_private_key:
            self.private_key = PrivateKey(data)
            # The certificate length is a big-endian uint16 at bytes 385:387
            # of the keyfile; the public destination is the leading
            # 387 + cert_len bytes.
            cert_len = struct.unpack("!H", self.private_key.data[385:387])[0]
            data = self.private_key.data[:387 + cert_len]

        if not data:
            raise Exception("Can't create a destination with no data")

        if type(data) == bytes:
            self.data, self.base64 = data, i2p_b64encode(data)
        else:
            self.data, self.base64 = i2p_b64decode(data), data

    def __repr__(self):
        return "<Destination: {}>".format(self.base32)

    @property
    def base32(self):
        """Base32 destination hash of this destination"""
        return b32encode(sha256(self.data).digest()).decode()[:52].lower()
class PrivateKey(object):
    """I2P private key
    https://geti2p.net/spec/common-structures#keysandcert

    :param data: Base64 encoded data or binary data
    """

    def __init__(self, data):
        if type(data) == bytes:
            #: Binary private key
            self.data = data
            #: Base64 encoded private key
            self.base64 = i2p_b64encode(data)
        else:
            self.data = i2p_b64decode(data)
            self.base64 = data
import asyncio
from . import sam
from . import exceptions
from . import utils
from .log import logger
def parse_reply(data):
    """Parse raw SAM reply bytes into a :class:`sam.Message`.

    :param data: raw bytes read from the SAM socket
    :raises ConnectionAbortedError: when the reply is empty or unparsable
    :return: the parsed :class:`sam.Message`
    """
    if not data:
        raise ConnectionAbortedError("Empty response: SAM API went offline")
    try:
        msg = sam.Message(data.decode().strip())
        logger.debug("SAM reply: "+str(msg))
    except Exception as e:
        # Catch Exception, not a bare except: a bare except would also
        # swallow KeyboardInterrupt/SystemExit. Chain the original cause.
        raise ConnectionAbortedError("Invalid SAM response") from e
    return msg
async def get_sam_socket(sam_address=sam.DEFAULT_ADDRESS, loop=None):
    """A coroutine used to create a new SAM socket.

    :param sam_address: (optional) SAM API address
    :param loop: (optional) event loop instance
    :return: A (reader, writer) pair
    :raises: a SAM exception when the handshake is refused
    """
    reader, writer = await asyncio.open_connection(*sam_address)
    # Use the module-level version constants instead of hardcoding "3.1",
    # keeping the handshake consistent with sam.DEFAULT_MIN_VER/MAX_VER.
    writer.write(sam.hello(sam.DEFAULT_MIN_VER, sam.DEFAULT_MAX_VER))
    reply = parse_reply(await reader.readline())
    if reply.ok:
        return (reader, writer)
    else:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
async def dest_lookup(domain, sam_address=sam.DEFAULT_ADDRESS,
                      loop=None):
    """A coroutine used to lookup a full I2P destination by .i2p domain or
    .b32.i2p address.

    :param domain: Address to be resolved, can be a .i2p domain or a .b32.i2p
                   address.
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: An instance of :class:`Destination`
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.naming_lookup(domain))
    reply = parse_reply(await reader.readline())
    writer.close()
    if not reply.ok:
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return sam.Destination(reply["VALUE"])
async def new_destination(sam_address=sam.DEFAULT_ADDRESS, loop=None,
                          sig_type=sam.Destination.default_sig_type):
    """A coroutine used to generate a new destination with a private key of a
    chosen signature type.

    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param sig_type: (optional) Signature type
    :return: An instance of :class:`Destination`
    :raises: a SAM exception when generation fails
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.dest_generate(sig_type))
    reply = parse_reply(await reader.readline())
    writer.close()
    # Unlike its siblings, this helper previously assumed success; a failed
    # reply surfaced as an opaque KeyError on "PRIV" instead of a SAM error.
    if not reply.ok:
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return sam.Destination(reply["PRIV"], has_private_key=True)
async def create_session(session_name, sam_address=sam.DEFAULT_ADDRESS,
                         loop=None, style="STREAM",
                         signature_type=sam.Destination.default_sig_type,
                         destination=None, options=None):
    """A coroutine used to create a new SAM session.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW
    :param signature_type: (optional) If the destination is TRANSIENT, this
                        signature type is used
    :param destination: (optional) Destination to use in this session. Can be
                        a base64 encoded string, :class:`Destination`
                        instance or None. TRANSIENT destination is used when it
                        is None.
    :param options: (optional) A dict object with i2cp options
    :return: A (reader, writer) pair
    """
    logger.debug("Creating session {}".format(session_name))
    # A None default avoids the shared mutable-default-argument pitfall;
    # passing a dict explicitly continues to work unchanged.
    if options is None:
        options = {}
    if destination:
        if not isinstance(destination, sam.Destination):
            destination = sam.Destination(destination, has_private_key=True)
        dest_string = destination.private_key.base64
    else:
        dest_string = sam.TRANSIENT_DESTINATION

    options_string = " ".join(["{}={}".format(k, v) for k, v in options.items()])

    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.session_create(
        style, session_name, dest_string, options_string))

    reply = parse_reply(await reader.readline())
    if reply.ok:
        if not destination:
            # SAM generated a transient destination for us; keep it.
            destination = sam.Destination(
                reply["DESTINATION"], has_private_key=True)
        logger.debug(destination.base32)
        logger.debug("Session created {}".format(session_name))
        return (reader, writer)
    else:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
async def stream_connect(session_name, destination,
                         sam_address=sam.DEFAULT_ADDRESS, loop=None):
    """A coroutine used to connect to a remote I2P destination.

    :param session_name: Session nick name
    :param destination: I2P destination to connect to
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: A (reader, writer) pair
    """
    logger.debug("Connecting stream {}".format(session_name))
    if isinstance(destination, str):
        if destination.endswith(".i2p"):
            # A ".i2p" name must be resolved to a full destination first.
            destination = await dest_lookup(destination, sam_address, loop)
        else:
            # Anything else is assumed to be a full base64 destination.
            destination = sam.Destination(destination)
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.stream_connect(session_name, destination.base64,
                                    silent="false"))
    reply = parse_reply(await reader.readline())
    if not reply.ok:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    logger.debug("Stream connected {}".format(session_name))
    return (reader, writer)
async def stream_accept(session_name, sam_address=sam.DEFAULT_ADDRESS,
                        loop=None):
    """A coroutine used to accept a connection from the I2P network.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: A (reader, writer) pair
    """
    reader, writer = await get_sam_socket(sam_address, loop)
    writer.write(sam.stream_accept(session_name, silent="false"))
    reply = parse_reply(await reader.readline())
    if not reply.ok:
        writer.close()
        raise exceptions.SAM_EXCEPTIONS[reply["RESULT"]]()
    return (reader, writer)
### Context managers
class Session:
    """Async SAM session context manager.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :param style: (optional) Session style, can be STREAM, DATAGRAM, RAW
    :param signature_type: (optional) If the destination is TRANSIENT, this
                        signature type is used
    :param destination: (optional) Destination to use in this session. Can be
                        a base64 encoded string, :class:`Destination`
                        instance or None. TRANSIENT destination is used when it
                        is None.
    :param options: (optional) A dict object with i2cp options
    :return: :class:`Session` object
    """

    def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS,
                 loop=None, style="STREAM",
                 signature_type=sam.Destination.default_sig_type,
                 destination=None, options=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop
        self.style = style
        self.signature_type = signature_type
        self.destination = destination
        # A None default avoids the shared mutable-default-argument pitfall.
        self.options = {} if options is None else options

    async def __aenter__(self):
        self.reader, self.writer = await create_session(self.session_name,
            sam_address=self.sam_address, loop=self.loop, style=self.style,
            signature_type=self.signature_type,
            destination=self.destination, options=self.options)
        return self

    async def __aexit__(self, exc_type, exc, tb):
        ### TODO handle exceptions
        self.writer.close()
class StreamConnection:
    """Async stream connection context manager.

    :param session_name: Session nick name
    :param destination: I2P destination to connect to
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: :class:`StreamConnection` object
    """

    def __init__(self, session_name, destination,
                 sam_address=sam.DEFAULT_ADDRESS, loop=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop
        self.destination = destination

    async def __aenter__(self):
        self.reader, self.writer = await stream_connect(
            self.session_name, self.destination,
            sam_address=self.sam_address, loop=self.loop)
        # Convenience aliases so callers can use connection.read/.write.
        self.read = self.reader.read
        self.write = self.writer.write
        return self

    async def __aexit__(self, exc_type, exc, tb):
        ### TODO handle exceptions
        self.writer.close()
class StreamAcceptor:
    """Async stream acceptor context manager.

    :param session_name: Session nick name
    :param sam_address: (optional) SAM API address
    :param loop: (optional) Event loop instance
    :return: :class:`StreamAcceptor` object
    """

    def __init__(self, session_name, sam_address=sam.DEFAULT_ADDRESS,
                 loop=None):
        self.session_name = session_name
        self.sam_address = sam_address
        self.loop = loop

    async def __aenter__(self):
        self.reader, self.writer = await stream_accept(
            self.session_name, sam_address=self.sam_address, loop=self.loop)
        # Convenience aliases so callers can use acceptor.read/.write.
        self.read = self.reader.read
        self.write = self.writer.write
        return self

    async def __aexit__(self, exc_type, exc, tb):
        ### TODO handle exceptions
        self.writer.close()
# RNT - Reddit Network Toolkit
A simple tool for generating and extracting network objects from Reddit data sets.
## Author
**Jacob Rohde**
Email: [jarohde1\@gmail.com](mailto:jarohde1@gmail.com) \| Twitter: [\@jacobrohde](https://twitter.com/JacobRohde) \| GitHub: [\@jarohde](https://github.com/jarohde)
## Features
- Extracts a simple data set of Reddit submissions and their associated comments via keyword or subreddit search terms.
- Provides single and batch subreddit- and thread-level network statistics.
- Generates edge and node lists, and creates network objects (via NetworkX) from Reddit data sets. Networks:
- can be directed or undirected;
- contain subreddit node attributes;
- contain subreddit and weight edge attributes;
- allow for optional text classification attributes.
- Can visualize simple graphs via Matplotlib.
- The "url_functions" sub-package provides additional features for extracting URL information from Reddit data.
## General
**Current version:**
0.1.6 (released 03/31/2023)
*Note: The minor changes in this release reflect Pushshift's modified endpoints after the recent server migration. There are still some Pushshift bugs and timeouts that may affect how the* `GetRedditData()` *feature in this package works. I will be sure to address future bugs ASAP. Version 0.1.6 also added a sub-package for extracting URL information from Reddit data*.
**Import RNT library:**
import rnt
**Classes and functions**
- `GetRedditData()`
- `GetRedditNetwork()`
- `subreddit_statistics()`
- `reddit_thread_statistics()`
- `single_network_plot()`
## Usage
### GetRedditData()
rnt.GetRedditData(search_term,
search_term_is_subreddit,
size,
start_date,
end_date)
**Overview:** A class object for extracting a Reddit data set.
**Arguments/attributes:**
`search_term`: The only required argument. Takes a string as a single search term or list of strings for multiple search terms (e.g., `search_term='news'` or `search_term=['news', 'cnn']`). If extracting a subreddit data set (see '`search_term_is_subreddit`' below), only provide a string of a single subreddit name (e.g., 'AskReddit').
`search_term_is_subreddit`: Optional Boolean (True or False) argument to signify whether `GetRedditData` extracts a subreddit data set; default set to False.
`size`: Optional integer argument to signify how many Reddit submissions and their associated comments to extract; default set to 500 submissions. `GetRedditData` should only be used to extract limited or exploratory data sets. I recommend using the Pushshift Reddit repository for extracting large data sets.
`start_date`/`end_date`: Optional date arguments for `GetRedditData`; default end date set to current date and default start date set to one week prior. Format should be string objects organized like 'YYYY, MM, DD' (e.g., `start_date='2022, 5, 27'` for May 27, 2022).
`GetRedditData.df`: Object attribute; extracts the Reddit data set as a pandas DataFrame object.
`GetRedditData.write_data()`: Object method that writes the pandas DataFrame object to file. The method can take `file_type` and `file_name` as optional arguments. `file_type` indicates what file format to use when writing the data set and accepts a string argument of either 'json' or 'csv'; default set to 'json'. `file_name` takes a string to indicate what the file name should be saved as; default set to the search term provided.
`GetRedditData.extract_urls()`: Object method to extract and append a list of URLs and URL domains in the Reddit data set.
### GetRedditNetwork()
rnt.GetRedditNetwork(reddit_dataset,
edge_type,
text_attribute)
**Overview:** A class object for generating edge and node lists, and a NetworkX graph object from a Reddit data set.
**Arguments/attributes:**
`reddit_dataset`: The only required argument. Takes a Reddit data set or a `GetRedditData` object.
`edge_type`: Optional string argument of either 'directed' or 'undirected' to signify network edge type; default set to directed.
`text_attribute`: Optional string, list, or dictionary argument to characterize an edge attribute based on one or more text categories. Result will return True or False for a network edge if the Reddit submission initiating the edge contains the provided keyword(s). Providing the argument with a string or list data type will generate a single text attribute column in the edge list and NetworkX graph object. Providing the argument with a dictionary data type will generate multiple text attribute columns. Dictionary text attribute example:
text_attribute={'apples': ['fuji', 'red delicious', 'granny smith'],
'oranges': ['valencia', 'mandarin', 'tangerine'],
'berries': ['blueberry', 'raspberry', 'blackberry']}
`GetRedditNetwork.edge_list`: Returns a pandas DataFrame of the network edge list with columns for the poster, commenter, the subreddit the edge occurred in, and an optional text attribute column.
`GetRedditNetwork.node_list`: Returns a pandas DataFrame of the network node list with columns for each unique node, the node's in-degree and out-degree values, and a list of subreddits the node participated in within the network.
`GetRedditNetwork.adjacency`: Returns a dictionary of network adjacency matrices. Both weighted and unweighted matrices are returned by default. The dictionary will also return weighted adjacency matrices for each optional edge-based text attribute that users identified when creating the class.
`GetRedditNetwork.graph`: Returns a NetworkX graph object.
`GetRedditNetwork.write_data()`: Object method that writes `edge_list` and `node_list` data sets to file. The method takes `file_type`, `file_name`, and `adjacency` as optional arguments. `file_type` indicates what file format to use when writing the data sets and accepts a string argument of either 'json' or 'csv'; default set to 'json'. `file_name` takes a string to indicate what to append at the end of the edge and node list file names (e.g., `file_name='apple'` will save the files as 'edge_list_apple.json' and 'node_list_apple.json'). `adjacency` accepts a boolean and indicates whether to save the data sets as adjacency matrices instead of edge and node lists.
### subreddit_statistics()
rnt.subreddit_statistics(reddit_dataset, subreddit_list)
**Overview:** A function for extracting basic statistics for single or batch subreddit networks. The function currently returns a single pandas DataFrame with example subreddit network statistics including number of nodes, edges, and network density, among others.
**Arguments:**
`reddit_dataset`: The only required argument. Takes a Reddit data set or a `GetRedditData` object.
`subreddit_list`: An optional list argument to indicate the specific subreddits to compute analyses for; default set to all unique subreddits in a data set that Reddit submissions were published in.
### reddit_thread_statistics()
rnt.reddit_thread_statistics(reddit_dataset, reddit_thread_list)
**Overview:** A function for extracting basic statistics for single or batch Reddit threads (initiated by Reddit submissions). The function currently returns a single pandas DataFrame with example statistics including the number of unique commenters to the thread, and the earliest/latest response times to the thread, among others.
**Arguments:**
`reddit_dataset`: The only required argument. Takes a Reddit data set or a `GetRedditData` object.
`reddit_thread_list`: An optional list argument to provide the specific Reddit thread IDs (i.e., Reddit submission IDs) to analyze; default set to all unique threads in a Reddit data set.
### single_network_plot()
rnt.single_network_plot(network, **kwargs)
**Overview:** A simple function for plotting networks via NetworkX and Matplotlib (additional install required). Please note this function is currently a work in progress and is meant to be basic tool to plot a single graph. See NetworkX documentation for more advanced plotting needs.
**Arguments:**
`network`: The only required argument. Takes a `GetRedditNetwork` or NetworkX graph object.
`title`: Optional string argument to add a title to the plot.
`pos`: Optional string argument to set the NetworkX plotting algorithm. For ease of use, the argument currently accepts one of the following layout types as a string:
- 'spring_layout' (default)
- 'kamada_kawai_layout'
- 'circular_layout'
- 'random_layout'
`**kwargs`: The function also accepts several other NetworkX keyword arguments for plotting (please see NetworkX documentation for more info on these arguments). Currently accepted arguments include:
- 'arrows' (bool)
- 'arrowsize' (int)
- 'edge_color' (str or list/array)
- 'font_size' (int)
- 'node_color' (str or list/array)
- 'node_size' (str or list/array)
- 'verticalalignment' (str)
- 'width' (int/float or list/array)
- 'with_labels' (bool)
## Requirements
- Python 3.XX
- numpy - a Python library for handling arrays and matrices
- pandas - a Python library for data management
- NetworkX - a Python library for network analysis
- PMAW - a multithread tool for extracting Reddit data via the [Pushshift API](https://pushshift.io/api-parameters/)
- Matplotlib (only if using the `single_network_plot()` function) - a Python library for plotting
## Support
For support, email [jarohde1\@gmail.com](mailto:jarohde1@gmail.com).
## License
[MIT](https://choosealicense.com/licenses/mit/)
| /rnt-0.1.6.tar.gz/rnt-0.1.6/README.md | 0.788583 | 0.809276 | README.md | pypi |
from pyquery import PyQuery as pq
from urllib.parse import urljoin
# Root of the RNZ website; all category/article paths are joined onto this.
base_url = 'https://www.rnz.co.nz/'
# (site-relative path, human-readable section name) for each news section.
categories = [
    ('news/national', 'New Zealand'),
    ('news/world', 'World'),
    ('news/political', 'Politics'),
    ('international/pacific-news', 'Pacific News'),
    ('news/te-manu-korihi', 'Te Ao Māori'),
    ('news/sport', 'Sport'),
    ('news/business', 'Business'),
    ('news/country', 'Country'),
    ('news/ldr', 'Local Democracy Reporting'),
    ('news/on-the-inside', 'Comment & Analysis'),
    ('news/in-depth', 'In Depth'),
]
class RnzNewsArticle:
    """A single RNZ article; the full body is fetched lazily on first access."""

    def __init__(self, category, path, title, summary):
        self._category = category
        self._path = path
        self._title = title
        self._summary = summary
        # Populated on demand by _fetch().
        self._body = None
        self._html = None
        self._time = None

    @property
    def category(self):
        """The category object this article was listed under."""
        return self._category

    @property
    def path(self):
        """Site-relative URL path of the article."""
        return self._path

    @property
    def address(self):
        """Absolute URL of the article."""
        return urljoin(base_url, self.path)

    @property
    def title(self):
        """Headline shown in the category listing."""
        return self._title

    @property
    def summary(self):
        """Short summary shown in the category listing."""
        return self._summary

    def _fetch(self):
        """Download the article page and cache its body, html and timestamp."""
        page = pq(url=self.address)
        self._body = page('.article__body').text()
        self._html = page('.article__body').html()
        self._time = page('.article__header .updated').text()

    @property
    def content(self):
        """Plain-text article body (fetched on first access)."""
        if self._body is None:
            self._fetch()
        return self._body

    @property
    def html(self):
        """HTML article body (fetched on first access)."""
        if self._html is None:
            self._fetch()
        return self._html

    @property
    def time(self):
        """Last-updated timestamp text (fetched on first access)."""
        if self._time is None:
            self._fetch()
        return self._time
class RnzNewsCategory:
    """One RNZ news section, iterable page-by-page over its listings."""

    def __init__(self, path, description):
        self._path = path
        self._description = description
        # Pagination state used by the iterator protocol.
        self._last_page = 0
        self._has_next = True

    @property
    def path(self):
        """Site-relative path of this category."""
        return self._path

    @property
    def address(self):
        """Absolute URL of the category listing."""
        return urljoin(base_url, self.path)

    @property
    def description(self):
        """Human-readable category name."""
        return self._description

    def __getitem__(self, page):
        """Fetch one listing page; return (articles, has_next_page)."""
        doc = pq('{}?page={}'.format(self.address, page))
        articles = []
        for digest in doc('.o-digest__detail').items():
            link = digest('a.faux-link')
            articles.append(RnzNewsArticle(
                self, link.attr('href'), link.text(),
                digest('.o-digest__summary').text()))
        # A rel=next link signals that another page exists.
        return articles, doc('a[rel=next]').length > 0

    def __iter__(self):
        return self

    def __next__(self):
        """Yield (articles, page_number) until no next page is advertised."""
        if not self._has_next:
            raise StopIteration
        self._last_page += 1
        articles, self._has_next = self[self._last_page]
        return articles, self._last_page
class RnzNews:
    """Entry point: list all RNZ news categories or look one up by path."""

    def categories(self):
        '''Retrieve all categories.

        Returns: a list of RnzNewsCategory.
        '''
        return [
            RnzNewsCategory(path, description)
            for path, description in categories
        ]

    def __getitem__(self, path):
        """Return the RnzNewsCategory whose path matches.

        Raises KeyError when no category has that path — the conventional
        signal for a failed __getitem__ lookup (still caught by callers
        handling the previous generic Exception).
        """
        for known_path, description in categories:
            if known_path == path:
                return RnzNewsCategory(path, description)
        raise KeyError('No such category: {}'.format(path))
import zipfile
import logging
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, IterableDataset
from collections import OrderedDict
from urllib.request import urlretrieve
from tqdm import tqdm
from .diacritics_dataset import DiacriticsDataset
from .diacritics_model import Diacritics
from .diacritcs_train import predict
from . import diacritics_dataset
from .diacritics_utils import LOG_NAME
logger = logging.getLogger(LOG_NAME)
_model: Diacritics = None
_dataset: DiacriticsDataset = None
CACHED_DIR = ".model"
MODEL_FILE = "diacritice.pt"
MODEL_URL = (
"https://github.com/AndyTheFactory/andythefactory.github.io/raw/main/diacritice.zip"
)
def get_cached_model():
    """
    Loads the cached model. If not, it tries to download the model from github
    :return: local path to model (Path to CACHED_DIR/MODEL_FILE)
    """
    filename = Path(CACHED_DIR) / MODEL_FILE
    if filename.exists():
        return filename
    else:
        def reporthook(t):
            # Adapter turning urlretrieve's (blocks, block_size, total_size)
            # callback into tqdm progress-bar updates; last_b tracks the
            # previous block count so each call reports only the delta.
            last_b = [0]
            def inner(b=1, bsize=1, tsize=None):
                if tsize is not None:
                    t.total = tsize
                t.update((b - last_b[0]) * bsize)
                last_b[0] = b
            return inner
        Path(CACHED_DIR).mkdir(exist_ok=True)
        dest_file = Path(CACHED_DIR) / Path(MODEL_URL).name
        with tqdm(unit="B", unit_scale=True, miniters=1, desc=str(filename)) as t:
            try:
                urlretrieve(MODEL_URL, dest_file, reporthook=reporthook(t))
            except KeyboardInterrupt as e:  # remove the partial zip file
                dest_file.unlink(missing_ok=True)
                raise e
        if dest_file.suffix == ".zip":
            # NOTE(review): the archive is assumed to contain MODEL_FILE so
            # that the path returned below exists after extraction — this is
            # not verified here; confirm against the published archive.
            with zipfile.ZipFile(dest_file, "r") as zf:
                zf.extractall(CACHED_DIR)
        return filename
def load_model(filename) -> (Diacritics, DiacriticsDataset):
    """
    Loads a trained :class:Diacritics model from cached file.
    Also, used hyperparams are loaded from the file
    :param filename: local path to stored model
    :return: loaded :class:Diacritics object and the used vocabulary (must be the same as in training)
    """
    import sys
    # NOTE(review): presumably lets old checkpoints that reference the module
    # by its bare name unpickle correctly — confirm against saved checkpoints.
    sys.modules["diacritics_dataset"] = diacritics_dataset
    checkpoint = torch.load(filename, map_location="cpu")
    hyper = checkpoint["hyperparams"]
    # Rebuild the model with exactly the hyperparameters it was trained with.
    model = Diacritics(
        **{
            name: hyper[name]
            for name in (
                "nr_classes",
                "word_embedding_size",
                "character_embedding_size",
                "char_vocabulary_size",
                "char_padding_index",
                "character_window",
                "sentence_window",
                "characters_lstm_size",
                "sentence_lstm_size",
            )
        }
    )
    model.load_state_dict(checkpoint["model_state"])
    # Older checkpoints may lack the F1 metric; default it to 0 for logging.
    checkpoint.setdefault("valid_f1", 0)
    logger.info(
        f"Loaded checkpoint: Epoch: {checkpoint['epoch']}, valid_acc: {checkpoint['valid_acc']}, valid_f1: {checkpoint['valid_f1']}"
    )
    return model, checkpoint["vocabulary"]
def initmodel(filename=None):
    """Load the diacritics model and its dataset helper into the module
    globals ``_model`` and ``_dataset``.

    :param filename: path to a stored checkpoint; downloads/uses the cached
        model when None.
    """
    global _model, _dataset
    if filename is None:
        filename = get_cached_model()
    _model, vocab = load_model(filename)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Fix: compare the device *type*. ``torch.device(...) == "cpu"`` is a
    # device-vs-string comparison that is False on several torch versions,
    # so the CPU warning was never emitted.
    if device.type == "cpu":
        logger.warning("GPU not available, using CPU")
    else:
        logger.info("GPU available")
    _model = _model.to(device)
    # Empty text: the dataset object is only used as an encoding helper here.
    _dataset = DiacriticsDataset(
        "", _model.character_window, _model.sentence_window, diacritics_vocab=vocab
    )
def restore_diacritics(text, batch_size=128):
    """
    Transforms a Romanian text without diacritics (“ă”, “â”, “î”, “ș”, and “ț”) into a
    proper spelled text with romanian character set. The casing of the text is preserved.
    :param text: input text (without diacritics)
    :param batch_size: Adjust the batch size according to the available memory.
    :return: text with diacrtics replaced
    """
    if _model is None:
        initmodel()
    # Overlapping sentence windows (stride=10) yield several predictions per
    # character; they are pooled by averaging below.
    input_tensors, character_indices = _dataset.gen_batch(text, stride=10)

    class DS(IterableDataset):
        # Thin wrapper so the pre-built tensors can be batched by a DataLoader.
        def __init__(self, input_tensors):
            self.input_tensors = input_tensors

        def __iter__(self):
            return iter(self.input_tensors)

    input_data = DataLoader(DS(input_tensors), batch_size=batch_size)
    predictions = predict(_model, input_data)
    # Group the per-window probability vectors by absolute character index.
    prediction_tuples = sorted(zip(character_indices, predictions), key=lambda x: x[0])
    grouped = OrderedDict()
    for pos, probs in prediction_tuples:
        grouped.setdefault(pos, []).append(probs)
    text = list(text)
    for idx, probs in grouped.items():
        # Average probabilities over all windows covering this position.
        label = np.argmax(np.average(probs, axis=0))
        # Fix: renamed from ``chr``, which shadowed the builtin of that name.
        new_char = DiacriticsDataset.get_char_from_label(text[idx].lower(), label)
        if text[idx].lower() == text[idx]:
            if new_char == "â" and (
                idx == 0
                or idx == len(text) - 1
                or not text[idx - 1].isalpha()
                or not text[idx + 1].isalpha()
            ):
                # â is not admitted at the beginning or end
                continue
            if new_char == "î" and (
                idx > 0
                and idx < len(text) - 1
                and text[idx - 1].isalpha()
                and text[idx + 1].isalpha()
            ):
                # î is not admitted inside a word
                continue
            text[idx] = new_char
        else:
            # NOTE(review): the â/î position rules above are skipped for
            # upper-case characters — confirm this is intended.
            text[idx] = new_char.upper()
    return "".join(text)
import torch
import torch.nn as nn
class Diacritics(nn.Module):
    """
    The pytorch model :class:Diacritics for diacritics restoration
    :param nr_classes: for Romanian language we defined 3 classes
        0 - No diacritics,
        1 - One of the following transformations
                "a" -> "ă"
                "i" -> "î"
                "s" -> "ș"
                "t" -> "ț"
        2 - The alternative transformation for "a" -> "â"
    :param word_embedding_size: Word Embedding vector size
    :param character_embedding_size: Character Embedding vector size
    :param char_vocabulary_size: How many tokens in the word embedding vocabulary
    :param char_padding_index: How many characters in the character embedding vocabulary (can be more than 255 - UTF-8)
    :param character_window: The analysed character context window size in the text (during training)
    :param sentence_window: The analysed word context window size in the text (during training)
    :param characters_lstm_size: Hidden LSTM size for the character embedding pathway
    :param sentence_lstm_size: Hidden LSTM size for the sentence embedding pathway
    :param dropout: Dropout probability for the classification layer (relevant for train only)
    """
    def __init__(
        self,
        nr_classes=3,
        word_embedding_size=300,
        character_embedding_size=20,
        char_vocabulary_size=771,
        char_padding_index=0,
        character_window=13,
        sentence_window=31,
        characters_lstm_size=64,
        sentence_lstm_size=256,
        dropout=0.2,
    ):
        super(Diacritics, self).__init__()
        self.nr_classes = nr_classes
        self.character_embedding_size = character_embedding_size
        self.char_vocabulary_size = char_vocabulary_size
        self.word_embedding_size = word_embedding_size
        self.char_padding_index = char_padding_index
        self.characters_lstm_size = characters_lstm_size
        self.sentence_lstm_size = sentence_lstm_size
        self.character_window = character_window
        self.sentence_window = sentence_window
        self.dropout = dropout
        # Character pathway: embed character codes, then a 1-layer BiLSTM.
        self.embedding = nn.Embedding(
            self.char_vocabulary_size,
            self.character_embedding_size,
            self.char_padding_index,
        )
        self.character_lstm_layer = nn.LSTM(
            input_size=self.character_embedding_size,
            hidden_size=self.characters_lstm_size,
            num_layers=1,
            batch_first=True,
            bidirectional=True,
        )
        # Sentence pathway: BiLSTM over pre-computed word embeddings.
        self.sentence_bi_lstm_layer = nn.LSTM(
            input_size=self.word_embedding_size,
            hidden_size=self.sentence_lstm_size,
            num_layers=1,
            batch_first=True,
            bidirectional=True,
        )
        # Classifier input: last step of each BiLSTM (2x hidden each, being
        # bidirectional) concatenated with the centre word's own embedding.
        concat_size = (
            2 * self.characters_lstm_size
            + 2 * self.sentence_lstm_size
            + self.word_embedding_size
        )
        self.drop = nn.Dropout(p=self.dropout)
        self.dense = nn.Linear(concat_size, self.nr_classes)
    def forward(self, char_input, word_embedding, sentence_embedding):
        """
        :param char_input: shape=(self.window_character * 2 + 1,)
        :param word_embedding: shape=(word_embedding_size,)
        :param sentence_embedding: shape=(self.window_sentence * 2 + 1, self.word_embedding_size,)
        :return: unnormalised class logits; softmax is left to the loss/caller
        """
        char_emb = self.embedding(char_input)
        device = next(self.parameters()).device
        # Initial hidden/cell states for the character BiLSTM.
        # NOTE(review): these are re-drawn with Xavier init on *every* forward
        # call, which makes even eval-mode outputs nondeterministic — confirm
        # this is intended (zero init is the usual choice).
        h = torch.zeros(
            (
                2 * self.character_lstm_layer.num_layers,
                char_emb.size(0),
                self.character_lstm_layer.hidden_size,
            )
        )
        c = torch.zeros(
            (
                2 * self.character_lstm_layer.num_layers,
                char_emb.size(0),
                self.character_lstm_layer.hidden_size,
            )
        )
        torch.nn.init.xavier_normal_(h)
        torch.nn.init.xavier_normal_(c)
        char_hidden, _ = self.character_lstm_layer(
            char_emb, (h.to(device), c.to(device))
        )
        # Same randomized initial states for the sentence BiLSTM.
        h = torch.zeros(
            (
                2 * self.sentence_bi_lstm_layer.num_layers,
                sentence_embedding.size(0),
                self.sentence_bi_lstm_layer.hidden_size,
            )
        )
        c = torch.zeros(
            (
                2 * self.sentence_bi_lstm_layer.num_layers,
                sentence_embedding.size(0),
                self.sentence_bi_lstm_layer.hidden_size,
            )
        )
        torch.nn.init.xavier_normal_(h)
        torch.nn.init.xavier_normal_(c)
        sentence_hidden, _ = self.sentence_bi_lstm_layer(
            sentence_embedding, (h.to(device), c.to(device))
        )
        # Concatenate the final time step of both pathways with the word vector.
        concatenated = torch.cat(
            (char_hidden[:, -1, :], word_embedding, sentence_hidden[:, -1, :]), dim=-1
        )
        concatenated = self.drop(concatenated)
        out = self.dense(concatenated)
        # out = F.softmax(out, dim=-1)
        return out
    def save(self, filename, vocabulary, epoch=None, valid_acc=0.0, valid_f1=0.0):
        """Persist weights, hyperparameters, vocabulary and metrics so that
        load_model() can rebuild the model without external configuration."""
        to_save = {
            "epoch": epoch,
            "model_state": self.state_dict(),
            "vocabulary": vocabulary,
            "hyperparams": {
                "nr_classes": self.nr_classes,
                "character_embedding_size": self.character_embedding_size,
                "char_vocabulary_size": self.char_vocabulary_size,
                "word_embedding_size": self.word_embedding_size,
                "char_padding_index": self.char_padding_index,
                "characters_lstm_size": self.characters_lstm_size,
                "sentence_lstm_size": self.sentence_lstm_size,
                "character_window": self.character_window,
                "sentence_window": self.sentence_window,
            },
            "valid_acc": valid_acc,
            "valid_f1": valid_f1,
        }
        torch.save(to_save, filename)
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from sklearn.metrics import f1_score, accuracy_score, classification_report
from torch.utils.data import DataLoader
from tqdm import tqdm
def train(
    model,
    loss_func,
    train_dataloader: DataLoader,
    valid_dataloader: DataLoader,
    epochs=10,
    checkpoint_file=None,
):
    """Train *model* with Adam (lr=0.001) over *train_dataloader*.

    Periodically evaluates on *valid_dataloader* (when given), checkpoints
    the best model by validation accuracy, and stops the current epoch early
    after ``patience`` evaluations without improvement.

    :param model: network taking ``(char_input, word_emb, sentence_emb)`` batches
    :param loss_func: loss applied to ``(logits, one-hot labels)``
    :param train_dataloader: yields ``((char_input, word_emb, sentence_emb), labels)``
    :param valid_dataloader: validation loader, or None to skip evaluation
    :param epochs: maximum number of epochs
    :param checkpoint_file: path where the best model is saved, or None
    """
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    device = next(model.parameters()).device
    print(f"{device} device for training")
    # initialize running values
    history = {
        "train_loss": [],
        "valid_loss": [],
        "acc": [],
        "f1": [],
        "epoch": [],
        "step": [],
    }
    best_acc = 0
    best_acc_epoch = 0
    nr_non_improving = 0
    patience = 3

    def evaluate_step(step, epoch, running_loss, max_eval_steps=None):
        # Evaluate, record history, and checkpoint when accuracy improved.
        nonlocal best_acc, best_acc_epoch, nr_non_improving
        if valid_dataloader is None:
            return
        print(f"Evaluating (Epoch/Step) {epoch +1} / {step}: ")
        average_train_loss = running_loss / step
        valid_loss, valid_acc, valid_f1 = evaluate(
            model, valid_dataloader, loss_func, epoch, max_eval_steps
        )
        history["epoch"].append(epoch)
        history["step"].append(step)
        history["train_loss"].append(average_train_loss)
        history["valid_loss"].append(valid_loss)
        history["acc"].append(valid_acc)
        history["f1"].append(valid_f1)
        print(
            f"Epoch [{epoch + 1}/{epochs}] - step {step}, Train Loss: {average_train_loss}, "
            f"Val Loss: {valid_loss}, Val Acc:{valid_acc}, Val F1:{valid_f1}"
        )
        if round(best_acc, 5) < round(valid_acc, 5):
            best_acc = valid_acc
            best_acc_epoch = epoch
            if checkpoint_file is not None:
                model.save(
                    checkpoint_file,
                    train_dataloader.dataset.diacritics_vocab,
                    epoch,
                    valid_acc,
                    valid_f1,
                )
            nr_non_improving = 0
        else:
            nr_non_improving += 1
            print(f"Accuracy did not improve {best_acc} < {valid_acc}")

    # Fix: guard against checkpoint_file=None (Path(None) raised TypeError)
    # and valid_dataloader=None (the batch_size expression below would fail
    # before evaluate_step could bail out).
    if (
        checkpoint_file is not None
        and valid_dataloader is not None
        and Path(checkpoint_file).exists()
    ):
        # Evaluate the existing model
        evaluate_step(1, 0, 0, 300_000 // valid_dataloader.batch_size)
    for epoch in range(epochs):
        model.train()
        step = 0
        running_loss = 0.0
        optimizer.zero_grad()
        epoch_predictions = []
        epoch_true_labels = []
        for (char_input, word_emb, sentence_emb), labels in train_dataloader:
            labels = labels.to(device)
            logits = model(
                char_input.to(device), word_emb.to(device), sentence_emb.to(device)
            )
            loss = loss_func(logits, labels)
            _, predicted = torch.max(logits, -1)
            epoch_predictions.extend(predicted.tolist())
            # Labels are one-hot; argmax recovers the class index.
            epoch_true_labels.extend([np.argmax(x) for x in labels.tolist()])
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            # update running values
            running_loss += loss.item()
            step += 1
            if step % 1000 == 0:
                epoch_accuracy = accuracy_score(epoch_true_labels, epoch_predictions)
                epoch_f1_metrics = f1_score(
                    np.array(epoch_true_labels).reshape(-1),
                    np.array(epoch_predictions).reshape(-1),
                    average="weighted",
                )
                average_train_loss = running_loss / step
                print(
                    f"Epoch {epoch+1} / Step: {step}, Loss: {loss.item()}, Avg Loss: {average_train_loss} , Train ACC: {epoch_accuracy}, Train F1: {epoch_f1_metrics}"
                )
            # Fix: same valid_dataloader=None guard as above.
            if step % 10000 == 0 and valid_dataloader is not None:
                evaluate_step(
                    step, epoch, running_loss, 300_000 // valid_dataloader.batch_size
                )
                model.train()
            if nr_non_improving >= patience:
                print(f"Early stopping, patience ({patience}) ran out ")
                break
        # print progress
        print(f"--------------------------- Epoch {epoch+1} -------------------------")
        evaluate_step(step, epoch, running_loss)
    if valid_dataloader is None and checkpoint_file is not None:
        model.save(checkpoint_file, train_dataloader.dataset.diacritics_vocab, epoch)
    print(f"Finished Training! Best Epoch = {best_acc_epoch}")
def evaluate(model, dataloader: DataLoader, loss_func, epoch=None, max_eval_steps=None):
    """Evaluate *model* on *dataloader*.

    Prints a sklearn classification report and returns
    ``(avg_loss, accuracy, weighted F1)``.

    :param epoch: current training epoch for reporting; ``None`` marks a
        stand-alone test-set evaluation.
    :param max_eval_steps: optionally cap the number of evaluated batches.
    """
    # print("***** Running prediction *****")
    model.eval()
    predict_out = []
    all_label_ids = []
    eval_loss = 0
    total = 0
    correct = 0
    steps = 0
    device = next(model.parameters()).device
    with torch.no_grad():
        for (char_input, word_emb, sentence_emb), labels in tqdm(
            dataloader, total=max_eval_steps
        ):
            labels = labels.to(device)
            logits = model(
                char_input.to(device), word_emb.to(device), sentence_emb.to(device)
            )
            loss = loss_func(logits, labels)
            eval_loss += loss.item()
            _, predicted = torch.max(logits, -1)
            predict_out.extend(predicted.tolist())
            # Labels are one-hot; argmax recovers the class index.
            all_label_ids.extend(labels.argmax(dim=1).tolist())
            eval_accuracy = predicted.eq(labels.argmax(dim=1)).sum().item()
            total += len(labels)
            correct += eval_accuracy
            steps += 1
            if max_eval_steps is not None and steps > max_eval_steps:
                break
    f1_metrics = f1_score(
        np.array(all_label_ids).reshape(-1),
        np.array(predict_out).reshape(-1),
        average="weighted",
    )
    report = classification_report(
        np.array(all_label_ids).reshape(-1),
        np.array(predict_out).reshape(-1),
        digits=4,
    )
    print("Evaluation Report")
    print(report)
    eval_acc = correct / total
    eval_loss = eval_loss / steps
    # Fix: ``if epoch:`` treated epoch 0 as falsy and mis-reported the first
    # training epoch as a stand-alone test run.
    if epoch is not None:
        print(
            f"Evaluation at Epoch: {epoch +1 }, Acc={eval_acc}, F1={f1_metrics}, Loss={eval_loss}"
        )
    else:
        print(f"Evaluation on Test, Acc={eval_acc}, F1={f1_metrics}, Loss={eval_loss}")
    return eval_loss, eval_acc, f1_metrics
def predict(model, dataloader: DataLoader):
    """Run *model* over *dataloader* and return softmax probabilities.

    Fix: the return line carried trailing extraction junk in the source;
    it has been removed.

    :param model: network taking ``(char_input, word_emb, sentence_emb)``
    :param dataloader: yields ``(char_input, word_emb, sentence_emb)`` triples
    :return: list of per-class probability lists, in input order
    """
    model.eval()
    predict_out = []
    device = next(model.parameters()).device
    with torch.no_grad():
        for char_input, word_emb, sentence_emb in dataloader:
            logits = model(
                char_input.to(device), word_emb.to(device), sentence_emb.to(device)
            )
            predicted = F.softmax(logits, dim=1)
            predict_out.extend(predicted.tolist())
    return predict_out
import pickle as pkl
from collections import Counter
from pathlib import Path
import numpy as np
import torch
from nltk.tokenize import TreebankWordTokenizer, PunktSentenceTokenizer
from nltk.tokenize import sent_tokenize, word_tokenize
from torch.utils.data import IterableDataset
from torchtext.vocab import FastText
from .diacritics_utils import (
correct_diacritics,
remove_diacritics,
has_interesting_chars,
DIACRITICS_CANDIDATES,
)
class DiacriticsVocab:
    """Word/character vocabulary plus FastText-derived word vectors.

    FastText vectors of all diacritic spellings of a word are summed and
    then averaged into a single vector keyed by the diacritics-free form.
    """
    def __init__(
        self,
        distinct_tokens: Counter,
        max_vocab,
        pad_token,
        unk_token,
        max_char_vocab,
        overflow_char,
    ):
        # itos/stoi: index<->token maps; vectors: one 300-d row per token,
        # with two extra rows reserved for <pad> and <unk>.
        self.vocab = {
            "itos": {},
            "stoi": {},
            "vectors": torch.zeros(max_vocab + 2, 300, dtype=torch.float32),
        }
        self.pad_token = pad_token
        self.unk_token = unk_token
        self.max_char_vocab = max_char_vocab
        self.overflow_char = overflow_char
        # Downloads the Romanian FastText vectors on first use.
        embedding = FastText("ro")
        # Keep only the max_vocab most frequent tokens; indices start at 1 so
        # index 0 stays free for the pad token.
        distinct_tokens = dict(distinct_tokens.most_common(max_vocab))
        self.vocab["itos"] = dict(enumerate(distinct_tokens, 1))
        self.vocab["itos"][self.pad_token] = "<pad>"
        self.vocab["itos"][self.unk_token] = "<unk>"
        self.vocab["stoi"] = {v: k for k, v in self.vocab["itos"].items()}
        # How many FastText entries collapse onto each diacritics-free form.
        fasttext_counts = Counter([remove_diacritics(word) for word in embedding.stoi])
        for (
            word,
            index,
        ) in embedding.stoi.items():  # Aggregate embeddings with diacritics
            word = remove_diacritics(word)
            if word in self.vocab["stoi"]:
                idx = self.vocab["stoi"][word]
                self.vocab["vectors"][idx] = (
                    self.vocab["vectors"][idx] + embedding.vectors[index]
                )
        for word, index in self.vocab["stoi"].items():
            if fasttext_counts[word] > 1:
                # Turn the summed variant vectors into an average.
                self.vocab["vectors"][index] = (
                    self.vocab["vectors"][index] / fasttext_counts[word]
                )
    def encode_char(self, c):
        """Map a character to its code point, clamping codes above
        max_char_vocab to overflow_char."""
        return ord(c) if ord(c) <= self.max_char_vocab else self.overflow_char
class DiacriticsDataset(IterableDataset):
    # Iterable dataset turning raw Romanian text into
    # ((char_input, word_emb, sentence_emb), one-hot label) samples, one per
    # diacritics-candidate character.
    def __init__(
        self,
        data,
        character_window,
        sentence_window,
        min_line_length=50,
        max_vocab=25000,
        max_char_vocab=770,
        overflow_char=255,
        diacritics_vocab: DiacriticsVocab = None,
    ):
        """
        :param data: Textfile, Pickle file or raw text
        :param character_window: number of context characters kept on each side
        :param sentence_window: number of tokens in the sentence context window
        :param min_line_length: lines/sentences shorter than this are dropped
        :param max_vocab: maximum size of the word vocabulary
        :param max_char_vocab: highest raw character code kept
        :param overflow_char: substitute code for characters above max_char_vocab
        :param diacritics_vocab: reuse an existing vocabulary (e.g. at inference)
        """
        self.character_window = character_window
        self.sentence_window = sentence_window
        self.min_line_length = min_line_length
        self.texts, distinct_tokens = self.load_texts(data, self.min_line_length)
        self.max_vocab = max_vocab
        self.pad_character = 0
        if diacritics_vocab is None:
            self.max_char_vocab = max_char_vocab
            self.overflow_char = overflow_char
            self.pad_token = 0
            self.unk_token = max_vocab + 1
        else:
            # Inherit the settings the supplied vocabulary was built with.
            self.max_char_vocab = diacritics_vocab.max_char_vocab
            self.overflow_char = diacritics_vocab.overflow_char
            self.pad_token = diacritics_vocab.pad_token
            self.unk_token = diacritics_vocab.unk_token
        self.diacritics_vocab = (
            DiacriticsVocab(
                distinct_tokens,
                self.max_vocab,
                self.pad_token,
                self.unk_token,
                self.max_char_vocab,
                self.overflow_char,
            )
            if diacritics_vocab is None
            else diacritics_vocab
        )
    @property
    def vocab(self):
        # The underlying {"itos", "stoi", "vectors"} mapping.
        return self.diacritics_vocab.vocab
    def encode_char(self, c):
        # Delegate character encoding to the vocabulary.
        return self.diacritics_vocab.encode_char(c)
    def __iter__(self):
        return self.parse_text()
    @staticmethod
    def get_label(original_char):
        """
        :param original_char: lowercase diacritics char
        :return: 0 if no change (not turing into diacritics),
            1 if changed to first candidate of diacritics
            2 if changed to second candidate of diacritics (a has 2 candidates)
        """
        diacritic_to_label = {
            "ă": 1,
            "â": 2,
            "î": 1,
            "ș": 1,
            "ț": 1,
        }
        label = (
            diacritic_to_label[original_char]
            if original_char in diacritic_to_label
            else 0
        )
        # Return the label one-hot encoded (a row of the 3x3 identity).
        label_tensor = torch.eye(3)
        return label_tensor[label]
    @staticmethod
    def get_char_from_label(original_char, label):
        # Inverse of get_label(): map (plain char, predicted class) back to
        # the diacritics character; non-candidate characters pass through.
        if original_char not in DIACRITICS_CANDIDATES:
            return original_char
        if label == 1:
            return {
                "a": "ă",
                "i": "î",
                "s": "ș",
                "t": "ț",
            }[original_char]
        elif label == 2:
            return "â"
        else:
            return original_char
    def get_char_input(self, line, line_orig, token_idx):
        """Build the character-window input for every candidate character of
        one token.

        :param line: tokenized, diacritics-free (lowercased) sentence
        :param line_orig: the matching original tokens (diacritics preserved)
        :param token_idx: index of the token to process
        :return: (list of encoded windows, list of one-hot labels,
                  list of in-token character positions)
        """
        prefix_s = " ".join(line[:token_idx])
        suffix_s = " ".join(line[token_idx + 1 :])
        word = line[token_idx]
        word_orig = line_orig[token_idx].lower()
        encoded_list = []
        labels = []
        char_positions = []
        for ix, c in enumerate(word):
            if c in DIACRITICS_CANDIDATES:
                # Left context, front-padded to exactly character_window codes,
                # followed by the candidate character itself.
                encode_text = (prefix_s + " " + word[:ix]).strip()
                encode_text = encode_text[-self.character_window :]
                l = (
                    [self.pad_character] * (self.character_window - len(encode_text))
                    + list(map(self.encode_char, encode_text))
                    + [self.encode_char(c)]
                )
                # Right context, end-padded to character_window codes.
                encode_text = (word[ix + 1 :] + " " + suffix_s).strip()
                encode_text = encode_text[: self.character_window]
                l = (
                    l
                    + list(map(self.encode_char, encode_text))
                    + [self.pad_character] * (self.character_window - len(encode_text))
                )
                encoded_list.append(l)
                labels.append(self.get_label(word_orig[ix]))
                char_positions.append(ix)
        return encoded_list, labels, char_positions
    def get_word_emb(self, word):
        # Look up the word vector; unknown words map to the <unk> vector.
        if word in self.vocab["stoi"]:
            idx = self.vocab["stoi"][word]
            embed = self.vocab["vectors"][idx]
        else:
            embed = self.vocab["vectors"][self.unk_token]
        return embed
    def get_sentence_emb(self, sentence):
        # Stack word vectors, padding short sentences with <pad> vectors.
        encoded = list(map(self.get_word_emb, sentence))
        if len(encoded) < self.sentence_window:
            encoded = encoded + [self.get_word_emb("<pad>")] * (
                self.sentence_window - len(encoded)
            )
        return torch.stack(encoded)
    def parse_text(self):
        "Generates one sample of data"
        # Select sample
        for line_orig in self.texts:
            line = [
                remove_diacritics(x).lower() for x in line_orig
            ]  # embeddings based on words without diacritics
            line_indices = np.arange(len(line))
            # Slide a sentence_window-wide window over long sentences; short
            # sentences become a single window.
            if len(line_indices) > self.sentence_window:
                windows = np.lib.stride_tricks.sliding_window_view(
                    line_indices, self.sentence_window
                )
            else:
                windows = [line_indices]
            for window in windows:
                sentence_emb = self.get_sentence_emb(line[window[0] : window[-1] + 1])
                for token_idx in window:
                    if has_interesting_chars(line[token_idx]):
                        char_inputs, labels, _ = self.get_char_input(
                            line, line_orig, token_idx
                        )
                        word_emb = self.get_word_emb(line[token_idx])
                        for ix, char_input in enumerate(char_inputs):
                            yield (
                                torch.tensor(char_input),
                                word_emb,
                                sentence_emb,
                            ), labels[ix]
    def gen_batch(self, text, stride=1):
        """
        :param text: input text to be processed
        :param stride: generate sentence windows with this stride (you have to pool the results since you will
            get more than 1 prediciton per character)
        :return: (list of input tensor triples, absolute character index in
            *text* for each prediction)
        """
        # NOTE(review): text_plain is computed but never used — candidate for
        # removal; confirm nothing relies on the side-effect-free call.
        text_plain = remove_diacritics(text).lower()
        lines = PunktSentenceTokenizer().span_tokenize(text)
        character_indices = []
        input_tensors = []
        for line_span in lines:
            line = text[line_span[0] : line_span[1]]
            words = list(TreebankWordTokenizer().span_tokenize(line))
            word_indices = np.arange(len(words))
            line_tokens = [line[x[0] : x[1]] for x in words]
            if len(words) > self.sentence_window:
                windows = np.lib.stride_tricks.sliding_window_view(
                    word_indices, self.sentence_window
                )[::stride, :]
                # add a reminder window (if it does not fit perfectly)
                if windows.max() < max(word_indices):
                    windows = np.vstack(
                        [windows, word_indices[-self.sentence_window :]]
                    )
            else:
                windows = [word_indices]
            for window in windows:
                sentence = [
                    line[x[0] : x[1]] for x in words[window[0] : window[-1] + 1]
                ]
                sentence_emb = self.get_sentence_emb(sentence)
                for token_idx in window:
                    word = line[words[token_idx][0] : words[token_idx][1]]
                    if has_interesting_chars(word):
                        char_inputs, _, char_positions = self.get_char_input(
                            line_tokens, line_tokens, token_idx
                        )
                        word_emb = self.get_word_emb(word)
                        for ix, char_input in enumerate(char_inputs):
                            input_tensors.append(
                                [torch.tensor(char_input), word_emb, sentence_emb]
                            )
                            # Absolute offset: sentence start + token start +
                            # position of the character inside the token.
                            character_indices.append(
                                line_span[0] + words[token_idx][0] + char_positions[ix]
                            )
        return input_tensors, character_indices
    @staticmethod
    def load_texts(data, min_line_length):
        # Load training text from a cached pickle, a text file, or a raw
        # string; returns (tokenized sentences, Counter of diacritics-free
        # tokens).
        if data and Path(data).exists():
            filename = data
            if Path(filename).suffix.lower() == ".pkl":  # loading cached pickle
                texts, distinct_no_diacritics = pkl.load(open(filename, "rb"))
            else:
                with open(filename, "r", encoding="utf-8") as f:
                    texts = [
                        correct_diacritics(line)
                        for line in f
                        if len(line) > min_line_length
                    ]
                texts = [
                    x
                    for line in texts
                    for x in sent_tokenize(line, "english")
                    if len(x) > min_line_length
                ]
                texts = [word_tokenize(line) for line in texts]
                distinct_no_diacritics = Counter(
                    [remove_diacritics(x) for line in texts for x in line]
                )
        else:
            texts = [
                x for x in sent_tokenize(data, "english") if len(x) > min_line_length
            ]
            texts = [word_tokenize(line) for line in texts]
            distinct_no_diacritics = Counter(
                [remove_diacritics(x) for line in texts for x in line]
            )
        return texts, distinct_no_diacritics
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import unittest
import logging
junitxml_present = False
try:
import junitxml
junitxml_present = True
except ImportError:
pass
def getTestSuite(testclass, testdict, select="unit"):
"""
Assemble test suite from supplied class, dictionary and selector
testclass is the test class whose methods are test cases
testdict is a dictionary of test cases in named test suite,
keyed by "unit", "component", etc., or by a named test.
select is the test suite selector:
"unit" return suite of unit tests only
"component" return suite of component tests
"integrate" return suite of integration tests
"pending" return suite of pending tests
"all" return suite of unit and component tests
name a single named test to be run
"""
suite = unittest.TestSuite()
# Named test only
if select[0:3] not in ["uni","com","all","int","pen"]:
if not hasattr(testclass, select):
print "%s: no test named '%s'"%(testclass.__name__, select)
return None
suite.addTest(testclass(select))
return suite
# Select test classes to include
if select[0:3] == "uni":
testclasses = ["unit"]
elif select[0:3] == "com":
testclasses = ["component"]
elif select[0:3] == "int":
testclasses = ["integration"]
elif select[0:3] == "pen":
testclasses = ["pending"]
elif select[0:3] == "all":
testclasses = ["unit", "component"]
else:
testclasses = ["unit"]
for c in testclasses:
for t in testdict.get(c,[]):
if not hasattr(testclass, t):
print "%s: in '%s' tests, no test named '%s'"%(testclass.__name__, c, t)
return None
suite.addTest(testclass(t))
return suite
def runTests(logname, getSuite, args):
    """
    Run unit tests based on supplied command line argument values

    logname     name for logging output file, if used
    getSuite    function to retrieve test suite, given selector value
    args        command line arguments (or equivalent values)

    Fixes: Py2 ``print`` statements and ``raise ValueError, msg`` syntax are
    errors under Python 3; converted to the forms valid in both.
    """
    sel = "unit"
    vrb = 1
    if len(args) > 1:
        sel = args[1]
    if sel == "xml":
        # Run with XML test output for use in Jenkins environment
        if not junitxml_present:
            print("junitxml module not available for XML test output")
            raise ValueError("junitxml module not available for XML test output")
        with open('xmlresults.xml', 'w') as report:
            result = junitxml.JUnitXmlResult(report)
            result.startTestRun()
            try:
                getSuite(select="unit").run(result)
            finally:
                result.stopTestRun()
    else:
        if sel[0:3] in ["uni", "com", "all", "int", "pen"]:
            logging.basicConfig(level=logging.WARNING)
            if sel[0:3] in ["com", "all"]:
                vrb = 2
        else:
            # Run single test with elevated logging to file via new handler
            logging.basicConfig(level=logging.DEBUG)
            # Enable debug logging to a file
            fileloghandler = logging.FileHandler(logname, "w")
            fileloghandler.setLevel(logging.DEBUG)
            # Use this formatter for shorter log records
            ###filelogformatter = logging.Formatter('%(levelname)s %(message)s', "%H:%M:%S")
            # Use this formatter to display timing information:
            filelogformatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s %(message)s', "%H:%M:%S")
            fileloghandler.setFormatter(filelogformatter)
            logging.getLogger('').addHandler(fileloghandler)
            vrb = 2
        runner = unittest.TextTestRunner(verbosity=vrb)
        tests = getSuite(select=sel)
        if tests:
            runner.run(tests)
    return
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, Graham Klyne, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from operator import concat, and_, or_
def concatMap(f, vs):
    """
    Map function over list and concatenate results.

    Fix: the original used the Py2 builtin ``reduce`` with operator.concat,
    which is both unavailable as a builtin in Python 3 and quadratic for
    strings; str.join is linear and works in both.
    """
    return "".join(f(v) for v in vs)
def fst(pair):
    """
    First element of a pair.

    Fix: Py2 tuple-parameter syntax ``def fst((a,b))`` is a syntax error in
    Python 3; unpack inside the body instead.
    """
    a, _ = pair
    return a
def snd(pair):
    """
    Second element of a pair.

    Fix: Py2 tuple-parameter syntax ``def snd((a,b))`` is a syntax error in
    Python 3; unpack inside the body instead.
    """
    _, b = pair
    return b
def iterAll(c, sentinel=None):
    """
    Like the built-in 'iter' function, except that when the supplied container
    has no more objects to return an indefinite sequence of 'sentinel' values is
    returned. (This is almost the converse of built-in iter(c,sentinel).)

    Fix: Py2 ``i.next()`` replaced by the builtin ``next(i)``.
    """
    i = iter(c)
    try:
        while True:
            yield next(i)
    except StopIteration:
        while True:
            yield sentinel
def zipAll(*lists):
    """
    A zip-iterator that, unlike the built-in zip function, keeps on returning
    tuples until all elements of the supplied lists have been returned. When
    the values from any list have been exhausted, None values are returned.
    The iterator stops when all lists have been exhausted.

    Fixes: Py2 ``i.next()`` calls; and the original stopped early when a row
    of genuine None values occurred in the input lists — a private sentinel
    now distinguishes "exhausted" from a stored None.
    """
    from itertools import zip_longest
    _missing = object()  # never equal to any user value
    for row in zip_longest(*lists, fillvalue=_missing):
        yield tuple(None if v is _missing else v for v in row)
def isEq(v):
    """
    Return a function that tests for equality with the supplied value.
    (Curried equality function.)
    """
    def matches(candidate):
        return v == candidate
    return matches
def isNe(v):
    """
    Return a function that tests for inequality with the supplied value.
    (Curried inequality function.)
    """
    def differs(candidate):
        return v != candidate
    return differs
def all_orig(f, c):
    """
    Do all members of c satisfy f?
    """
    for member in c:
        if f(member):
            continue
        return False
    return True
def all(p, *lsargs):
    """
    Test if all sets of members from supplied lists satisfy predicate p

    Note: folds with bitwise ``&`` (operator.and_) without short-circuiting,
    as the original did. Fix: ``reduce`` is no longer a builtin in Python 3.
    """
    from functools import reduce  # Py2 builtin moved to functools in Py3
    return reduce(and_, map(p, *lsargs), True)
def any(p, *lsargs):
    """
    Test if any set of members from supplied lists satisfies predicate p
    (docstring corrected: the original said "all").

    Note: folds with bitwise ``|`` (operator.or_) without short-circuiting,
    as the original did. Fix: ``reduce`` is no longer a builtin in Python 3.
    """
    from functools import reduce  # Py2 builtin moved to functools in Py3
    return reduce(or_, map(p, *lsargs), False)
def allEq(v, c):
    """
    Are all members of c equal to v?
    """
    sameAsV = isEq(v)
    return all(sameAsV, c)
def allNe(v, c):
    """
    Are all members of c not equal to v?
    """
    differsFromV = isNe(v)
    return all(differsFromV, c)
def filterSplit(p, values):
    """
    Function filters a list into two sub-lists, the first containing entries
    satisfying the supplied predicate p, and the second of entries not satisfying p.
    """
    satisfying, rest = [], []
    for value in values:
        target = satisfying if p(value) else rest
        target.append(value)
    return (satisfying, rest)
def cond(cond, v1, v2):
    """
    Conditional expression.
    """
    return v1 if cond else v2
def interleave(l1, l2):
    """
    Interleave lists (iteratively rather than by recursion): alternate
    elements while both lists have them, then append the remainder.
    """
    if not l1:
        return l2
    if not l2:
        return l1
    merged = []
    i = 0
    while i < len(l1) and i < len(l2):
        merged.append(l1[i])
        merged.append(l2[i])
        i += 1
    return merged + l1[i:] + l2[i:]
def endsWith(base, suff):
    """
    Test if list (sequence) ends with given suffix

    Fix: an empty suffix previously returned False for non-empty sequences,
    because ``base[-0:]`` is the whole sequence; every sequence ends with
    the empty suffix.
    """
    if not suff:
        return True
    return base[-len(suff):] == suff
def formatIntList(ints, sep=",", intfmt=str):
    """
    Format list of integers, using a supplied function to format each value,
    and inserting a supplied separator between each.
    Default comma-separated list of decimals.
    """
    return sep.join(intfmt(n) for n in ints)
def formatInt(fmt):
    """
    returns a function to format a single integer value using the supplied
    format string.
    """
    return lambda n: fmt % (n,)
def formatList(lst,left=0,right=0):
    """
    Format a list over one or more lines, using the supplied margins.
    Left margin padding is *not* added to the first line of output,
    and no final newline is included following the last line of output.
    """
    # Try for format on one line
    out = formatList1(lst,right-left)
    if not out:
        # format over multiple lines
        out = "("
        pre = " "
        # Continuation lines start with a newline plus `left` spaces of padding.
        pad = "\n"+left*" "
        for i in lst:
            out += pre
            # Nested containers recurse with the margin moved in by 2 columns.
            if isinstance(i,list) or isinstance(i,tuple):
                out += formatList(i, left+2, right)
            elif isinstance(i,dict):
                out += formatDict(i, left+2, right, left+2)
            else:
                out += repr(i)
            pre = pad+", "
        out += pad + ")"
    return out
def formatList1(lst, width):
    """
    Attempt to format a list on a single line, within supplied width,
    or return None if the list does not fit.
    """
    rendered = []
    used = 2  # the surrounding parentheses
    for item in lst:
        piece = repr(item)
        used += len(piece) + (2 if rendered else 0)  # ", " before later items
        if used > width:
            return None
        rendered.append(piece)
    return "(" + ", ".join(rendered) + ")"
def formatDict(dic,left=0,right=0,pos=0):
    """
    Format a dictionary over one or more lines, using the supplied margins.
    Left margin padding is *not* added to the first line of output,
    and no final newline is included following the last line of output.
    """
    # Try for format on one line
    out = formatDict1(dic,right-pos)
    if not out:
        # format over multiple lines
        out = "{"
        pre = " "
        # Continuation lines start with a newline plus `left` spaces of padding.
        pad = "\n"+left*" "
        for k in dic.keys():
            out += pre
            v = dic[k]
            ks = repr(k)+': '
            # Column position after the key prefix, used to decide whether the
            # value still fits on this line.
            p = pos+2+len(ks)
            if isinstance(v,dict):
                # Try the nested dict on one line; otherwise recurse multi-line.
                o = formatDict1(v, right-p)
                if not o:
                    o = pad + " " + formatDict(v, left+2, right, left+2)
                out += ks + o
            elif isinstance(v,list) or isinstance(v,tuple):
                o = formatList1(v, right-p)
                if not o:
                    o = pad + " " + formatList(v, left+2, right)
                out += ks + o
            else:
                out += ks + repr(v)
            pre = pad+", "
            pos = left+2
        out += pad + "}"
    return out
def formatDict1(dic,width):
    """
    Attempt to format a dictionary on a single line, within the supplied width,
    or return None if it does not fit.
    """
    out = "{"
    pre = ""
    ol = 2      # running output length; 2 accounts for the surrounding braces
    for k in dic.keys():
        v = dic[k]
        o = pre + repr(k)+': '
        if isinstance(v,dict):
            # NOTE(review): nested values are checked against the full width,
            # not the width remaining after the key text -- confirm intended.
            vs = formatDict1(v,width)
            if not vs: return None
            o += vs
        elif isinstance(v,list) or isinstance(v,tuple):
            vs = formatList1(v,width)
            if not vs: return None
            o += vs
        else:
            o += repr(v)
        ol += len(o)
        if ol > width: return None
        pre = ", "
        out += o
    return out+"}"
def compareLists(c1, c2):
    """
    Compare a pair of lists, returning None if the lists are identical,
    or a pair of lists containing:
    (1) elements of first list not in second, and
    (2) elements of second list not in first list.

    None arguments are treated as empty lists.
    """
    c1 = c1 or []
    c2 = c2 or []
    # Comprehensions with "not in" replace the manual append loops and the
    # unidiomatic "not (c in c2)" tests of the previous version.
    c1d = [c for c in c1 if c not in c2]
    c2d = [c for c in c2 if c not in c1]
    if c1d or c2d:
        return (c1d, c2d)
    return None
def compareDicts(d1, d2):
    """
    Return None if the dictionaries are identical, or a pair of dictionaries
    containing entries in d1 not in d2, and entries in d2 not in d1.
    """
    forward = diffDicts(d1, d2)
    backward = diffDicts(d2, d1)
    return (forward, backward) if (forward or backward) else None
def diffDicts(d1, d2):
    """
    Return dictionary of entries in d1 that are not in d2.

    Entries of d1 with "false" values (None, empty, zero) are ignored.
    Values present in both dictionaries are differenced recursively via
    diffPair.
    """
    difs = {}
    # .items() rather than the Python-2-only .iteritems(): works on both
    # Python 2 (returns a list) and Python 3 (returns a view).
    for (k, v1) in d1.items():
        if v1:
            if k not in d2:
                difs[k] = v1
            else:
                d = diffPair(v1, d2[k])
                if nonEmpty(d):
                    difs[k] = d
    return difs
def diffLists(t1, t2):
    """
    Compares pairwise elements of 2 lists, and returns a list of elements
    in the first that are not in the second.
    Where the elements are dictionaries or tuples, the element difference is
    determined recursively, otherwise the value is treated atomically.
    """
    ps = zipAll(t1, t2)
    # A list comprehension rather than filter(): on Python 3 filter() returns
    # a lazy iterator, which is not a tuple/list/dict and so would defeat the
    # isinstance-based emptiness test in nonEmpty() used by diffDicts().
    ds = [d for d in (diffPair(a, b) for (a, b) in ps) if nonEmpty(d)]
    return ds
def diffTuples(t1, t2):
    """
    Pairwise-compare two tuples, returning a tuple of the elements of the
    first that are not present in the second.
    Dictionary and tuple elements are differenced recursively; other values
    are treated atomically.
    """
    return tuple(diffLists(t1, t2))
def diffPair(v1, v2):
    """
    Return the part of v1 that is not present in v2.
    Returns None if v1 and v2 are equal, or if every element of v1 is
    also present in v2.
    """
    # Dispatch on matching container kinds; mixed or non-container values
    # fall through to the atomic comparison below.
    for kind, differ in ((tuple, diffTuples), (list, diffLists), (dict, diffDicts)):
        if isinstance(v1, kind) and isinstance(v2, kind):
            return differ(v1, v2)
    return v1 if v1 != v2 else None
def nonEmpty(v):
    """
    If v is a container (tuple, list or dictionary), return None when it is
    empty; in every other case return v itself unchanged.
    """
    if isinstance(v, (tuple, list, dict)) and not v:
        return None
    return v
# End. | /ro-manager-0.2.20.tar.gz/ro-manager-0.2.20/MiscUtils/Functions.py | 0.594316 | 0.481393 | Functions.py | pypi |
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import re # Used for link header parsing
import httplib
import urlparse
import rdflib
import logging
# Logger for this module
log = logging.getLogger(__name__)
# Mapping from RDF MIME content types to the corresponding rdflib parser
# format name (used by doRequestRDF when parsing response bodies).
RDF_CONTENT_TYPES = (
    { "application/rdf+xml": "xml"
    , "text/turtle": "n3"
    , "text/n3": "n3"
    , "text/nt": "nt"
    , "application/json": "jsonld"
    , "application/xhtml": "rdfa"
    })
# HTTP Accept header value sent when requesting RDF resources.
# NOTE(review): lists only a subset of RDF_CONTENT_TYPES -- confirm intended.
ACCEPT_RDF_CONTENT_TYPES = "application/rdf+xml, text/turtle"
def splitValues(txt, sep=",", lq='"<', rq='">'):
    """
    Helper function returns list of delimited values in a string,
    where delimiters in quotes are protected.
    sep is string of separator
    lq is string of opening quotes for strings within which separators are not recognized
    rq is string of corresponding closing quotes
    """
    segments = []
    start = 0
    pos = 0
    n = len(txt)
    while pos < n:
        ch = txt[pos]
        if ch in lq:
            # Skip over a quoted/bracketed span; the closing character is the
            # one at the matching position in rq.
            closing = rq[lq.index(ch)]
            pos += 1
            while pos < n and txt[pos] != closing:
                if txt[pos] == '\\':
                    pos += 1    # '\' quoted-pair: skip the escaped character
                pos += 1
            if pos < n:
                pos += 1        # step past the closing quote/bracket
        elif ch in sep:
            segments.append(txt[start:pos])
            pos += 1
            start = pos
        else:
            pos += 1
    # Append the final (possibly empty) segment.
    segments.append(txt[start:pos])
    return segments
def testSplitValues():
    """
    Inline unit tests (executable documentation) for splitValues.
    """
    assert splitValues("a,b,c") == ['a','b','c']
    assert splitValues('a,"b,c",d') == ['a','"b,c"','d']
    assert splitValues('a, "b, c\\", c1", d') == ['a',' "b, c\\", c1"',' d']
    assert splitValues('a,"b,c",d', ";") == ['a,"b,c",d']
    assert splitValues('a;"b;c";d', ";") == ['a','"b;c"','d']
    assert splitValues('a;<b;c>;d', ";") == ['a','<b;c>','d']
    assert splitValues('"a;b";(c;d);e', ";", lq='"(', rq='")') == ['"a;b"','(c;d)','e']
def parseLinks(headerlist):
    """
    Helper function to parse 'link:' headers,
    returning a dictionary of links keyed by link relation type
    headerlist is a list of header (name,value) pairs
    """
    linkheaders = [value for (name, value) in headerlist if name.lower() == "link"]
    log.debug("parseLinks linkheaders %s"%(repr(linkheaders)))
    links = {}
    for linkheader in linkheaders:
        # One header may carry several comma-separated link-values.
        for linkval in splitValues(linkheader, ","):
            linkparts = splitValues(linkval, ";")
            urimatch = re.match(r'''\s*<([^>]*)>\s*''', linkparts[0])
            if not urimatch:
                continue
            linkuri = urimatch.group(1)
            # Scan the remaining parameters for a rel=... value.
            for linkparam in linkparts[1:]:
                relmatch = re.match(r'''\s*rel\s*=\s*"?(.*?)"?\s*$''', linkparam) # .*? is non-greedy
                if relmatch:
                    linkrel = relmatch.group(1)
                    log.debug("parseLinks links[%s] = %s"%(linkrel, linkuri))
                    links[linkrel] = linkuri
    return links
def testParseLinks():
    """
    Inline unit tests (executable documentation) for parseLinks.
    """
    links = (
        ('Link', '<http://example.org/foo>; rel=foo'),
        ('Link', ' <http://example.org/bar> ; rel = bar '),
        ('Link', '<http://example.org/bas>; rel=bas; par = zzz , <http://example.org/bat>; rel = bat'),
        ('Link', ' <http://example.org/fie> ; par = fie '),
        ('Link', ' <http://example.org/fum> ; rel = "http://example.org/rel/fum" '),
        ('Link', ' <http://example.org/fas;far> ; rel = "http://example.org/rel/fas" '),
        )
    assert str(parseLinks(links)['foo']) == 'http://example.org/foo'
    assert str(parseLinks(links)['bar']) == 'http://example.org/bar'
    assert str(parseLinks(links)['bas']) == 'http://example.org/bas'
    assert str(parseLinks(links)['bat']) == 'http://example.org/bat'
    assert str(parseLinks(links)['http://example.org/rel/fum']) == 'http://example.org/fum'
    assert str(parseLinks(links)['http://example.org/rel/fas']) == 'http://example.org/fas;far'
# Class for exceptions raised by HTTP session
class HTTP_Error(Exception):
def __init__(self, msg="HTTP_Error", value=None, uri=None):
self._msg = msg
self._value = value
self._uri = uri
return
def __str__(self):
txt = self._msg
if self._uri: txt += " for "+str(self._uri)
if self._value: txt += ": "+repr(self._value)
return txt
def __repr__(self):
return ( "HTTP_Error(%s, value=%s, uri=%s)"%
(repr(self._msg), repr(self._value), repr(self._uri)))
# Class for handling Access in an HTTP session
class HTTP_Session(object):
    """
    Client access class for HTTP session.
    Creates a session to access a single HTTP endpoint,
    and provides methods to issue requests on this session
    This class is primarily designed to access a specific endpoint, and
    by default refuses requests for different endpoints. But the request
    methods accept an additional "exthost" parameter that can be used to
    override this behaviour. Specifying "exthost=True" causes the request
    to allow URIs that use different scheme, hostname or port than the original
    request, but such requests are not issued using the access key of the HTTP
    session.

    NOTE: this class uses Python-2-only modules (httplib, urlparse) and
    Python 2 except-clause syntax.
    """
    def __init__(self, baseuri, accesskey=None):
        """
        Initialize a session for the supplied base URI.
        accesskey, if supplied, is sent as a Bearer token on requests to the
        session host (it is never sent to external hosts).
        """
        log.debug("HTTP_Session.__init__: baseuri "+baseuri)
        self._baseuri = baseuri
        self._key = accesskey
        # Split the base URI once so later requests can detect scheme/host
        # mismatches without re-parsing.
        parseduri = urlparse.urlsplit(baseuri)
        self._scheme = parseduri.scheme
        self._host = parseduri.netloc
        self._path = parseduri.path
        self._httpcon = httplib.HTTPConnection(self._host)
        return
    def __enter__(self):
        # Context manager entry: the session itself is the managed resource.
        return self
    def __exit__(self, type, value, traceback):
        # Context manager exit: always close the connection.
        self.close()
        return
    def close(self):
        """
        Close the underlying HTTP connection and discard the access key.
        """
        self._key = None
        self._httpcon.close()
        return
    def baseuri(self):
        """
        Return the base URI of this session.
        """
        return self._baseuri
    def getpathuri(self, uripath):
        """
        Resolve uripath against the session base URI, returning a full URI.
        """
        # str used here so rdflib.URIRef values can be accepted
        return urlparse.urljoin(self._baseuri, str(uripath))
    def error(self, msg, value=None):
        """
        Construct an HTTP_Error referencing this session's base URI.
        """
        return HTTP_Error(msg=msg, value=value, uri=self._baseuri)
    def parseLinks(self, headers):
        """
        Parse link header(s), return dictionary of links keyed by link relation type
        """
        return parseLinks(headers["_headerlist"])
    def doRequest(self, uripath,
            method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
        """
        Perform HTTP request.
        Parameters:
            uripath     URI reference of resource to access, resolved against the base URI of
                        the current HTTP_Session object.
            method      HTTP method to use (default GET)
            body        request body to use (default none)
            ctype       content-type of request body (default none)
            accept      string containing list of content types for HTTP accept header
            reqheaders  dictionary of additional header fields to send with the HTTP request
            exthost     True if a request to a URI with a scheme and/or host different than
                        the session base URI is to be respected (default False).
        Return:
            status, reason(text), response headers, response body
        """
        # Construct request path
        uriparts = urlparse.urlsplit(self.getpathuri(uripath))
        path = uriparts.path
        if uriparts.query: path += ("?"+uriparts.query)
        # Sort out HTTP connection to use: session or new
        if ( (uriparts.scheme and uriparts.scheme != self._scheme) or
             (uriparts.netloc and uriparts.netloc != self._host) ):
            if exthost:
                # External host explicitly allowed: use a one-off connection
                # and do not send the session access key to it.
                newhttpcon = httplib.HTTPConnection(uriparts.netloc)
                usehttpcon = newhttpcon
                usescheme = uriparts.scheme
                usekey = None
            elif (uriparts.scheme and uriparts.scheme != self._scheme):
                raise HTTP_Error(
                    "URI scheme mismatch",
                    value=uriparts.scheme,
                    uri=self._baseuri)
            elif (uriparts.netloc and uriparts.netloc != self._host):
                raise HTTP_Error(
                    "URI host:port mismatch",
                    value=uriparts.netloc,
                    uri=self._baseuri)
        else:
            newhttpcon = None
            usehttpcon = self._httpcon
            usescheme = self._scheme
            usekey = self._key
        # Assemble request headers
        if not reqheaders:
            reqheaders = {}
        if usekey:
            reqheaders["authorization"] = "Bearer "+usekey
        if ctype:
            reqheaders["content-type"] = ctype
        if accept:
            reqheaders["accept"] = accept
        # Execute request
        log.debug("HTTP_Session.doRequest method:     "+method)
        log.debug("HTTP_Session.doRequest path:       "+path)
        log.debug("HTTP_Session.doRequest reqheaders: "+repr(reqheaders))
        log.debug("HTTP_Session.doRequest body:       "+repr(body))
        usehttpcon.request(method, path, body, reqheaders)
        # Pick out elements of response
        try:
            response = usehttpcon.getresponse()
            status = response.status
            reason = response.reason
            headerlist = [ (h.lower(),v) for (h,v) in response.getheaders() ]
            headers = dict(headerlist) # dict(...) keeps last result of multiple keys
            headers["_headerlist"] = headerlist
            data = response.read()
            # Body is only returned for 2xx responses.
            if status < 200 or status >= 300: data = None
            log.debug("HTTP_Session.doRequest response:   "+str(status)+" "+reason)
            log.debug("HTTP_Session.doRequest rspheaders: "+repr(headers))
        except Exception, e:
            # Connection-level failure: report a locally generated 900 status
            # rather than propagating the exception.
            log.warn("HTTP_Session error %r accessing %s with request headers %r"%(e, uripath, reqheaders))
            status = 900
            reason = str(e)
            headers = {"_headerlist": []}
            data = None
        ###log.debug("HTTP_Session.doRequest data:       "+repr(data))
        if newhttpcon:
            newhttpcon.close()
        return (status, reason, headers, data)
    def doRequestFollowRedirect(self, uripath,
            method="GET", body=None, ctype=None, accept=None, reqheaders=None, exthost=False):
        """
        Perform HTTP request, following any redirect returned.
        Parameters:
            uripath     URI reference of resource to access, resolved against the base URI of
                        the current HTTP_Session object.
            method      HTTP method to use (default GET)
            body        request body to use (default none)
            ctype       content-type of request body (default none)
            accept      string containing list of content types for HTTP accept header
            reqheaders  dictionary of additional header fields to send with the HTTP request
            exthost     True if a request to a URI with a scheme and/or host different than
                        the session base URI is to be respected (default False).
        Return:
            status, reason(text), response headers, final URI, response body
        """
        (status, reason, headers, data) = self.doRequest(uripath,
            method=method, accept=accept,
            body=body, ctype=ctype, reqheaders=reqheaders,
            exthost=exthost)
        # Follow a first redirect of any supported kind (302/303/307).
        if status in [302,303,307]:
            uripath = headers["location"]
            (status, reason, headers, data) = self.doRequest(uripath,
                method=method, accept=accept,
                body=body, ctype=ctype, reqheaders=reqheaders,
                exthost=exthost)
            if status in [302,307]:
                # Allow second temporary redirect
                # NOTE(review): this second-hop request omits accept= --
                # confirm whether that is intended.
                uripath = headers["location"]
                (status, reason, headers, data) = self.doRequest(uripath,
                    method=method,
                    body=body, ctype=ctype, reqheaders=reqheaders,
                    exthost=exthost)
        return (status, reason, headers, uripath, data)
    def doRequestRDF(self, uripath,
            method="GET", body=None, ctype=None, reqheaders=None, exthost=False, graph=None):
        """
        Perform HTTP request with RDF response.
        If the request succeeds, return response as RDF graph,
        or return fake 9xx status if RDF cannot be parsed.
        Otherwise return response and content per request.
        Thus, only 2xx responses include RDF data.
        Parameters:
            uripath     URI reference of resource to access, resolved against the base URI of
                        the current HTTP_Session object.
            method      HTTP method to use (default GET)
            body        request body to use (default none)
            ctype       content-type of request body (default none)
            reqheaders  dictionary of additional header fields to send with the HTTP request
            exthost     True if a request to a URI with a scheme and/or host different than
                        the session base URI is to be respected (default False).
            graph       an rdflib.Graph object to which any RDF read is added.  If not
                        provided, a new RDF graph is created and returned.
        Return:
            status, reason(text), response headers, response graph or body
        """
        (status, reason, headers, data) = self.doRequest(uripath,
            method=method, body=body,
            ctype=ctype, accept=ACCEPT_RDF_CONTENT_TYPES, reqheaders=reqheaders,
            exthost=exthost)
        if status >= 200 and status < 300:
            content_type = headers["content-type"].split(";",1)[0].strip().lower()
            if content_type in RDF_CONTENT_TYPES:
                rdfgraph = graph if graph != None else rdflib.graph.Graph()
                baseuri = self.getpathuri(uripath)
                bodyformat = RDF_CONTENT_TYPES[content_type]
                # log.debug("HTTP_Session.doRequestRDF data:\n----\n"+data+"\n------------")
                try:
                    # rdfgraph.parse(data=data, location=baseuri, format=bodyformat)
                    rdfgraph.parse(data=data, publicID=baseuri, format=bodyformat)
                    data = rdfgraph
                except Exception, e:
                    # 9xx pseudo-status codes below are generated locally,
                    # not by the server.
                    log.info("HTTP_Session.doRequestRDF: %s"%(e))
                    log.info("HTTP_Session.doRequestRDF parse failure: '%s', '%s'"%(content_type, bodyformat))
                    # log.debug("HTTP_Session.doRequestRDF data:\n----\n"+data[:200]+"\n------------")
                    status = 902
                    reason = "RDF (%s) parse failure"%bodyformat
            else:
                status = 901
                reason = "Non-RDF content-type returned"
        return (status, reason, headers, data)
    def doRequestRDFFollowRedirect(self, uripath,
            method="GET", body=None, ctype=None, reqheaders=None, exthost=False, graph=None):
        """
        Perform HTTP request with RDF response, following any redirect returned
        If the request succeeds, return response as an RDF graph,
        or return fake 9xx status if RDF cannot be parsed.
        Otherwise return response and content per request.
        Thus, only 2xx responses include RDF data.
        Parameters:
            uripath     URI reference of resource to access, resolved against the base URI of
                        the current HTTP_Session object.
            method      HTTP method to use (default GET)
            body        request body to use (default none)
            ctype       content-type of request body (default none)
            reqheaders  dictionary of additional header fields to send with the HTTP request
            exthost     True if a request to a URI with a scheme and/or host different than
                        the session base URI is to be respected (default False).
            graph       an rdflib.Graph object to which any RDF read is added.  If not
                        provided, a new RDF graph is created and returned.
        Return:
            status, reason(text), response headers, final URI, response graph or body
        """
        (status, reason, headers, data) = self.doRequestRDF(uripath,
            method=method,
            body=body, ctype=ctype, reqheaders=reqheaders,
            exthost=exthost, graph=graph)
        log.debug("%03d %s from request to %s"%(status, reason, uripath))
        # Follow a first redirect, then optionally one more temporary redirect.
        if status in [302,303,307]:
            uripath = headers["location"]
            (status, reason, headers, data) = self.doRequestRDF(uripath,
                method=method,
                body=body, ctype=ctype, reqheaders=reqheaders,
                exthost=exthost, graph=graph)
            log.debug("%03d %s from redirect to %s"%(status, reason, uripath))
            if status in [302,307]:
                # Allow second temporary redirect
                uripath = headers["location"]
                (status, reason, headers, data) = self.doRequestRDF(uripath,
                    method=method,
                    body=body, ctype=ctype, reqheaders=reqheaders,
                    exthost=exthost, graph=graph)
                log.debug("%03d %s from redirect to %s"%(status, reason, uripath))
        return (status, reason, headers, uripath, data)
# End. | /ro-manager-0.2.20.tar.gz/ro-manager-0.2.20/MiscUtils/HttpSession.py | 0.534612 | 0.251889 | HttpSession.py | pypi |
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
from django.http import HttpResponse
from django.views import generic
class ContentNegotiationView(generic.View):
"""
Generic view class with content negotiation decorators and generic error value methods
Note: generic.View dispatcher assigns HTTPRequest object to self.request.
"""
@staticmethod
def accept_types(types):
"""
Decorator to use associated function to render the indicated content types
"""
def decorator(func):
def guard(self, values):
accept_header = self.request.META.get('HTTP_ACCEPT', "*/*")
accept_types = [ a.split(';')[0].strip().lower()
for a in accept_header.split(',') ]
for t in types:
if t in accept_types:
values['accept_type'] = t
return func(self, values)
return None
return guard
return decorator
@staticmethod
def content_types(types):
"""
Decorator to use associated function when supplied with the indicated content types
"""
def decorator(func):
def guard(self, values):
content_type = self.request.META.get('CONTENT_TYPE', "application/octet-stream")
if content_type.split(';')[0].strip().lower() in types:
return func(self, values)
return None
return guard
return decorator
def get_request_uri(self):
"""
Utility function returns URI of current request
(useful when building new URIs with POST, etc.)
"""
return self.request.build_absolute_uri()
def error(self, values):
"""
Default error method using errorvalues
"""
responsebody = """
<html>
<head>
<title>Error %(status)s: %(reason)s</title>
</head>
<body>
<h1>Error %(status)s: %(reason)s</h1>
<p>%(message)s</p>
</body>
</html>
""" % values
# @@TODO: with Django 1.6, can also set reason string
return HttpResponse(responsebody, status=values['status'])
# Define values for display with common error cases.
# @@TODO: This should really be a separate mixin. Needs fleshing out.
def errorvalues(self, status, reason, message):
return (
{ 'status': status
, 'reason': reason
, 'message': message%
{ 'method': self.request.method
, 'request_uri': self.request.build_absolute_uri()
, 'accept_types': self.request.META.get('HTTP_ACCEPT',"default_type")
, 'content_type': self.request.META.get('CONTENT_TYPE', "application/octet-stream")
}
})
def error404values(self):
return self.errorvalues(404, "Not found",
"Resource %(request_uri)s not found"
)
def error405values(self):
return self.errorvalues(405, "Method not allowed",
"Method %(method)s is not recognized for %(request_uri)s"
)
def error406values(self):
return self.errorvalues(406, "Not acceptable",
"%(method)s returning %(accept_types)s not supported for %(request_uri)s"
)
def error415values(self):
return self.errorvalues(415, "Unsupported Media Type",
"%(method)s with %(content_type)s not supported for %(request_uri)s"
)
# End. | /ro-manager-0.2.20.tar.gz/ro-manager-0.2.20/roverlay/rovweb/rovserver/ContentNegotiationView.py | 0.533884 | 0.218993 | ContentNegotiationView.py | pypi |
from ro_py.utilities.url import url
# Base URL for the Roblox groups API endpoint.
endpoint = url("groups")
class RolePermissions:
    """
    Represents role permissions.

    All flags default to None ("not set"); see Role.edit_permissions for how
    set/unset flags are sent to the API.
    """
    # -- Group wall --
    view_wall = None
    post_to_wall = None
    delete_from_wall = None
    # -- Group status --
    view_status = None
    post_to_status = None
    # -- Membership management --
    change_rank = None
    invite_members = None
    remove_members = None
    manage_relationships = None
    # -- Administration --
    view_audit_logs = None
    spend_group_funds = None
    advertise_group = None
    # -- Assets and games --
    create_items = None
    manage_items = None
    manage_group_games = None
def get_rp_names(rp):
    """
    Converts permissions into something Roblox can read.
    Parameters
    ----------
    rp : ro_py.roles.RolePermissions
    Returns
    -------
    dict
    """
    # Attribute name on RolePermissions -> key name expected by the API.
    # NOTE(review): "PostToWall" breaks the lowerCamelCase pattern of the
    # other keys -- confirm against the Roblox group-role API before changing.
    attribute_to_api_key = (
        ("view_wall", "viewWall"),
        ("post_to_wall", "PostToWall"),
        ("delete_from_wall", "deleteFromWall"),
        ("view_status", "viewStatus"),
        ("post_to_status", "postToStatus"),
        ("change_rank", "changeRank"),
        ("invite_members", "inviteMembers"),
        ("remove_members", "removeMembers"),
        ("manage_relationships", "manageRelationships"),
        ("view_audit_logs", "viewAuditLogs"),
        ("spend_group_funds", "spendGroupFunds"),
        ("advertise_group", "advertiseGroup"),
        ("create_items", "createItems"),
        ("manage_items", "manageItems"),
        ("manage_group_games", "manageGroupGames"),
    )
    return {api_key: getattr(rp, attr) for attr, api_key in attribute_to_api_key}
class Role:
    """
    Represents a group role.

    Parameters
    ----------
    cso : ro_py.client.ClientSharedObject
        ClientSharedObject to use for API requests.
    group : ro_py.groups.Group
        Group the role belongs to.
    role_data : dict
        Dictionary containing role information.
    """
    def __init__(self, cso, group, role_data):
        self.cso = cso
        self.requests = cso.requests
        self.group = group
        self.id = role_data['id']
        self.name = role_data['name']
        self.description = role_data.get('description')
        self.rank = role_data['rank']
        self.member_count = role_data.get('memberCount')

    async def update(self):
        """
        Updates information of the role from the group roles endpoint.
        """
        update_req = await self.requests.get(
            url=endpoint + f"/v1/groups/{self.group.id}/roles"
        )
        data = update_req.json()
        for role in data['roles']:
            if role['id'] == self.id:
                self.name = role['name']
                self.description = role['description']
                self.rank = role['rank']
                self.member_count = role['memberCount']
                break

    async def edit(self, name=None, description=None, rank=None):
        """
        Edits the name, description or rank of a role.
        Fields left as None keep their current value.

        Parameters
        ----------
        name : str, optional
            New name for the role.
        description : str, optional
            New description for the role.
        rank : int, optional
            Number from 1-254 that determines the new rank number for the role.

        Returns
        -------
        bool
            True if the edit request succeeded (HTTP 200).
        """
        edit_req = await self.requests.patch(
            url=endpoint + f"/v1/groups/{self.group.id}/rolesets/{self.id}",
            data={
                "description": description if description else self.description,
                "name": name if name else self.name,
                "rank": rank if rank else self.rank
            }
        )
        return edit_req.status_code == 200

    async def edit_permissions(self, role_permissions):
        """
        Edits the permissions of a role.
        Only flags explicitly set to True or False are sent; flags left as
        None keep their current value on Roblox.

        Parameters
        ----------
        role_permissions : ro_py.roles.RolePermissions
            New permissions that will overwrite the old ones.

        Returns
        -------
        bool
            True if the edit request succeeded (HTTP 200).
        """
        data = {
            "permissions": {}
        }
        # Fixes two defects in the previous implementation:
        # * iterating the dict directly yielded only keys, so the
        #   (key, value) unpacking raised ValueError -- use .items();
        # * "value is True or False" parses as "(value is True) or False",
        #   which silently dropped explicit False values.
        for key, value in get_rp_names(role_permissions).items():
            if value is not None:
                data['permissions'][key] = value
        edit_req = await self.requests.patch(
            url=endpoint + f"/v1/groups/{self.group.id}/roles/{self.id}/permissions",
            data=data
        )
        return edit_req.status_code == 200
from lxml import html
from io import StringIO
class EndpointDocsPathRequestTypeProperties:
    """Internal/metric properties attached to one documented request type."""

    def __init__(self, data):
        # Snake-case the camelCase swagger key on the way in.
        self.metric_ids = data["metricIds"]
        self.internal = data["internal"]
class EndpointDocsPathRequestTypeResponse:
    """One response entry (per status code) of a documented request type."""

    def __init__(self, data):
        # Both fields are optional in the swagger JSON; default to None.
        self.description = data.get("description")
        self.schema = data.get("schema")
class EndpointDocsPathRequestTypeParameter:
    """One parameter of a documented request type."""

    def __init__(self, data):
        self.name = data["name"]
        # "in" is a Python keyword, hence the slightly odd attribute name.
        self.iin = data["in"]
        # Optional swagger fields default to None via dict.get.
        self.description = data.get("description")
        self.required = data["required"]
        self.type = data.get("type")
        self.format = data.get("format")
class EndpointDocsPathRequestType:
    """Full description of one HTTP method on a documented path."""

    def __init__(self, data):
        self.tags = data["tags"]
        # summary/description are optional in the swagger JSON.
        self.summary = data.get("summary")
        self.description = data.get("description")
        self.consumes = data["consumes"]
        self.produces = data["produces"]
        self.properties = EndpointDocsPathRequestTypeProperties(data["properties"])
        self.parameters = [
            EndpointDocsPathRequestTypeParameter(raw_parameter)
            for raw_parameter in data["parameters"]
        ]
        self.responses = {
            code: EndpointDocsPathRequestTypeResponse(raw_response)
            for code, raw_response in data["responses"].items()
        }
class EndpointDocsPath:
    """Maps HTTP method names to their parsed request-type descriptions."""

    def __init__(self, data):
        self.data = {
            method: EndpointDocsPathRequestType(raw)
            for method, raw in data.items()
        }
class EndpointDocsDataInfo:
    """The swagger "info" block: API title and version string."""

    def __init__(self, data):
        self.title = data["title"]
        self.version = data["version"]
class EndpointDocsData:
    """Top-level parsed swagger document for one API endpoint."""

    def __init__(self, data):
        self.swagger_version = data["swagger"]
        self.info = EndpointDocsDataInfo(data["info"])
        self.host = data["host"]
        self.schemes = data["schemes"]
        self.paths = {
            path: EndpointDocsPath(raw)
            for path, raw in data["paths"].items()
        }
class EndpointDocs:
    """
    Fetches and parses the swagger documentation pages of a Roblox API
    endpoint.
    """
    def __init__(self, requests, docs_url):
        # requests: shared ro_py Requests object; docs_url: endpoint base URL.
        self.requests = requests
        self.url = docs_url
    async def get_versions(self):
        """
        Return the list of documented API versions by scraping the
        version-selector element from the /docs HTML page.
        """
        docs_req = await self.requests.get(self.url + "/docs")
        root = html.parse(StringIO(docs_req.text)).getroot()
        try:
            vs_element = root.get_element_by_id("version-selector")
            return vs_element.value_options
        except KeyError:
            # Pages without a version selector only document v1.
            return ["v1"]
    async def get_data_for_version(self, version):
        """
        Fetch and parse the swagger JSON for the given version string
        (e.g. "v1"), returning an EndpointDocsData.
        """
        data_req = await self.requests.get(self.url + "/docs/json/" + version)
        version_data = data_req.json()
        return EndpointDocsData(version_data)
import enum
from ro_py.utilities.url import url
# Base URL for the account-settings API endpoint.
endpoint = url("accountsettings")
class PrivacyLevel(enum.Enum):
    """
    Represents a privacy level as you might see at https://www.roblox.com/my/account#!/privacy.
    """
    # The string values are what the Roblox API expects.
    no_one = "NoOne"
    friends = "Friends"
    everyone = "AllUsers"
class PrivacySettings(enum.Enum):
    """
    Represents a privacy setting as you might see at https://www.roblox.com/my/account#!/privacy.

    The integer values index into the endpoint/key tables used by
    AccountSettings.get_privacy_setting, so their order matters.
    """
    app_chat_privacy = 0
    game_chat_privacy = 1
    inventory_privacy = 2
    phone_discovery = 3
    phone_discovery_enabled = 4
    private_message_privacy = 5
class RobloxEmail:
    """
    Represents an obfuscated version of the email you have set on your account.

    Parameters
    ----------
    email_data : dict
        Raw data to parse from.
    """

    def __init__(self, email_data: dict):
        self.verified = email_data["verified"]
        self.email_address = email_data["emailAddress"]
class AccountSettings:
    """
    Represents authenticated client account settings (https://accountsettings.roblox.com/)
    This is only available for authenticated clients as it cannot be accessed otherwise.

    Parameters
    ----------
    cso : ro_py.client.ClientSharedObject
        ClientSharedObject.
    """

    def __init__(self, cso):
        self.cso = cso
        self.requests = cso.requests

    async def get_privacy_setting(self, privacy_setting):
        """
        Gets the value of a privacy setting.
        """
        # (endpoint path suffix, key in the JSON response), one pair per
        # PrivacySettings member, in enum-value order.
        routes = (
            ("app-chat-privacy", "appChatPrivacy"),
            ("game-chat-privacy", "gameChatPrivacy"),
            ("inventory-privacy", "inventoryPrivacy"),
            ("privacy", "phoneDiscovery"),
            ("privacy/info", "isPhoneDiscoveryEnabled"),
            ("private-message-privacy", "privateMessagePrivacy"),
        )
        path_suffix, response_key = routes[privacy_setting.value]
        privacy_req = await self.requests.get(endpoint + "v1/" + path_suffix)
        return privacy_req.json()[response_key]
from urllib.parse import quote
from math import floor
import re
from ro_py.utilities.url import url
# Base URL for the game-persistence (DataStore) API endpoint.
endpoint = url("gamepersistence")
class DataStore:
"""
Represents the in-game datastore system for storing data for games (https://gamepersistence.roblox.com).
This is only available for authenticated clients, and games that they own.
Parameters
----------
requests : ro_py.utilities.requests.Requests
Requests object to use for API requests.
place_id : int
PlaceId to modify the DataStores for,
if the currently authenticated user doesn't have sufficient permissions,
it will raise a NotAuthorizedToModifyPlaceDataStores exception
name : str
The name of the DataStore,
as in the Second Parameter of
`std::shared_ptr<RBX::Instance> DataStoreService::getDataStore(const DataStoreService* this, std::string name, std::string scope = "global")`
scope : str, optional
The scope of the DataStore,
as on the Second Parameter of
`std::shared_ptr<RBX::Instance> DataStoreService::getDataStore(const DataStoreService* this, std::string name, std::string scope = "global")`
legacy : bool, optional
Describes whether or not this will use the legacy endpoints,
over the new v1 endpoints (Does not apply to getSortedValues)
legacy_naming_scheme : bool, optional
Describes whether or not this will use legacy names for data stores, if true, the qkeys[idx].scope will match the current scope (global by default),
there will be no qkeys[idx].target (normally the key that is passed into each method),
and the qkeys[idx].key will match the key passed into each method.
"""
def __init__(self, requests, place_id, name, scope, legacy=True, legacy_naming_scheme=False):
self.requests = requests
self.place_id = place_id
self.legacy = legacy
self.legacy_naming_scheme = legacy_naming_scheme
self.name = name
self.scope = scope if scope is not None else "global"
    async def get(self, key):
        """
        Represents a get request to a data store,
        using legacy works the same
        Parameters
        ----------
        key : str
            The key of the value you wish to get,
            as in the Second Parameter of
            `void DataStore::getAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
        Returns
        -------
        typing.Any
        """
        if self.legacy:
            # Legacy API: POST a form-encoded qkeys query.  Under the legacy
            # naming scheme the requested key goes in qkeys[0].key; otherwise
            # the datastore name is the key and the requested key is the target.
            data = f"qkeys[0].scope={quote(self.scope)}&qkeys[0].target=&qkeys[0].key={quote(key)}" if self.legacy_naming_scheme == True else f"qkeys[0].scope={quote(self.scope)}&qkeys[0].target={quote(key)}&qkeys[0].key={quote(self.name)}"
            r = await self.requests.post(
                url=endpoint + f"persistence/getV2?placeId={str(self.place_id)}&type=standard&scope={quote(self.scope)}",
                headers={
                    'Roblox-Place-Id': str(self.place_id),
                    'Content-Type': 'application/x-www-form-urlencoded'
                }, data=data)
            # Legacy responses wrap results in a JSON 'data' list; an empty
            # list means the key has no stored value.
            if len(r.json()['data']) == 0:
                return None
            else:
                return r.json()['data'][0]['Value']
        else:
            # v1 API: plain GET.  NOTE(review): the literal "ro_py" path
            # segment looks suspicious, and the local name 'url' shadows the
            # module-level import -- confirm against the gamepersistence API.
            url = endpoint + f"v1/persistence/ro_py?type=standard&key={quote(key)}&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme == True else endpoint + f"v1/persistence/ro_py?type=standard&key={quote(self.name)}&scope={quote(self.scope)}&target={quote(key)}"
            r = await self.requests.get(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id)
                })
            # 204 No Content signals a missing key.
            if r.status_code == 204:
                return None
            else:
                return r.text
    async def set(self, key, value):
        """
        Represents a set request to a data store,
        using legacy works the same
        Parameters
        ----------
        key : str
            The key of the value you wish to get,
            as in the Second Parameter of
            `void DataStore::getAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
        value
            The value to set for the key,
            as in the 3rd parameter of
            `void DataStore::setAsync(const DataStore* this, std::string key, RBX::Reflection::Variant value, boost::function<void()> resumeFunction, boost::function<void(std::string)> errorFunction)`
        Returns
        -------
        typing.Any
        """
        if self.legacy:
            # Legacy API: the value travels form-encoded in the body, the key
            # (or name/target pair) in the query string.
            data = f"value={quote(str(value))}"
            url = endpoint + f"persistence/set?placeId={self.place_id}&type=standard&key={quote(key)}&type=standard&scope={quote(self.scope)}&target=&valueLength={str(len(str(value)))}" if self.legacy_naming_scheme == True else endpoint + f"persistence/set?placeId={str(self.place_id)}&type=standard&key={quote(self.name)}&type=standard&scope={quote(self.scope)}&target={quote(key)}&valueLength={str(len(str(value)))}"
            r = await self.requests.post(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id),
                    'Content-Type': 'application/x-www-form-urlencoded'
                }, data=data)
            if len(r.json()['data']) == 0:
                return None
            else:
                return r.json()['data']
        else:
            # v1 API: the URL-quoted value is the raw request body.
            # NOTE(review): local 'url' shadows the module-level import.
            url = endpoint + f"v1/persistence/ro_py?type=standard&key={quote(key)}&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme == True else endpoint + f"v1/persistence/ro_py?type=standard&key={quote(self.name)}&scope={quote(self.scope)}&target={quote(key)}"
            r = await self.requests.post(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id),
                    'Content-Type': '*/*',
                    'Content-Length': str(len(str(value)))
                }, data=quote(str(value)))
            # Implicitly returns None on any non-200 status.
            if r.status_code == 200:
                return value
    async def set_if_value(self, key, value, expected_value):
        """
        Represents a conditional set request to a data store,
        only supports legacy
        Parameters
        ----------
        key : str
            The key of the value you wish to get,
            as in the Second Parameter of
            `void DataStore::getAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
        value
            The value to set for the key,
            as in the 3rd parameter of
            `void DataStore::setAsync(const DataStore* this, std::string key, RBX::Reflection::Variant value, boost::function<void()> resumeFunction, boost::function<void(std::string)> errorFunction)`
        expected_value
            The expected_value for that key, if you know the key doesn't exist, then set this as None
        Returns
        -------
        typing.Any
        """
        # expectedValue is sent as an empty string when expected_value is None;
        # presumably the server only applies the write when the stored value
        # matches it — verify against the persistence API.
        data = f"value={quote(str(value))}&expectedValue={quote(str(expected_value)) if expected_value is not None else ''}"
        url = endpoint + f"persistence/set?placeId={str(self.place_id)}&type=standard&key={quote(key)}&type=standard&scope={quote(self.scope)}&target=&valueLength={str(len(str(value)))}&expectedValueLength={str(len(str(expected_value))) if expected_value is not None else str(0)}" if self.legacy_naming_scheme == True else endpoint + f"persistence/set?placeId={str(self.place_id)}&type=standard&key={quote(self.name)}&type=standard&scope={quote(self.scope)}&target={quote(key)}&valueLength={str(len(str(value)))}&expectedValueLength={str(len(str(expected_value))) if expected_value is not None else str(0)}"
        r = await self.requests.post(
            url=url,
            headers={
                'Roblox-Place-Id': str(self.place_id),
                'Content-Type': 'application/x-www-form-urlencoded'
            }, data=data)
        try:
            # A missing 'data' key signals an error payload, handled below.
            # NOTE(review): when r.json()['data'] == 0 this falls through and
            # implicitly returns None — confirm that is intended.
            if r.json()['data'] != 0:
                return r.json()['data']
        except KeyError:
            return r.json()['error']
    async def set_if_idx(self, key, value, idx):
        """
        Represents a conditional set request to a data store,
        only supports new endpoints,
        Parameters
        ----------
        key : str
            The key of the value you wish to get,
            as in the Second Parameter of
            `void DataStore::getAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
        value
            The value to set for the key,
            as in the 3rd parameter of
            `void DataStore::setAsync(const DataStore* this, std::string key, RBX::Reflection::Variant value, boost::function<void()> resumeFunction, boost::function<void(std::string)> errorFunction)`
        idx : int
            The expectedidx, there
        Returns
        -------
        typing.Any
        """
        # First attempt the write with a zeroed usn.
        url = endpoint + f"v1/persistence/ro_py?type=standard&key={quote(key)}&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme == True else endpoint + f"v1/persistence/ro_py?type=standard&key={quote(self.name)}&scope={quote(self.scope)}&target={quote(key)}&usn=0.0"
        r = await self.requests.post(
            url=url,
            headers={
                'Roblox-Place-Id': str(self.place_id),
                'Content-Type': '*/*',
                'Content-Length': str(len(str(value)))
            }, data=quote(str(value)))
        if r.status_code == 409:
            # 409 Conflict carries the server's current usn in a header.
            # NOTE(review): the usn appears to be "<hash>.<hex counter>" —
            # verify this format against the persistence API.
            usn = r.headers['roblox-usn']
            split = usn.split('.')
            msn_hash = split[0]
            current_value = split[1]
            # Retry with the caller's expected idx (as hex) folded into the usn.
            url = endpoint + f"v1/persistence/ro_py?type=standard&key={quote(key)}&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme == True else endpoint + f"v1/persistence/ro_py?type=standard&key={quote(self.name)}&scope={quote(self.scope)}&target={quote(key)}&usn={msn_hash}.{hex(idx).split('x')[1]}"
            r2 = await self.requests.post(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id),
                    'Content-Type': '*/*',
                    'Content-Length': str(len(str(value)))
                }, data=quote(str(value)))
            if r2.status_code == 409:
                # A second conflict means the expected idx did not match.
                return "Expected idx did not match current idx, current idx is " + str(floor(int(current_value, 16)))
            else:
                return value
async def increment(self, key, delta=0):
"""
Represents a conditional set request to a data store,
only supports legacy
Parameters
----------
key : str
The key of the value you wish to get,
as in the Second Parameter of
`void DataStore::getAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
delta : int, optional
The value to set for the key,
as in the 3rd parameter of
`void DataStore::setAsync(const DataStore* this, std::string key, RBX::Reflection::Variant value, boost::function<void()> resumeFunction, boost::function<void(std::string)> errorFunction)`
Returns
-------
typing.Any
"""
data = ""
url = endpoint + f"persistence/increment?placeId={str(self.place_id)}&type=standard&key={quote(key)}&type=standard&scope={quote(self.scope)}&target=&value={str(delta)}" if self.legacy_naming_scheme else endpoint + f"persistence/increment?placeId={str(self.place_id)}&type=standard&key={quote(self.name)}&type=standard&scope={quote(self.scope)}&target={quote(key)}&value={str(delta)}"
r = await self.requests.post(
url=url,
headers={
'Roblox-Place-Id': str(self.place_id),
'Content-Type': 'application/x-www-form-urlencoded'
}, data=data)
try:
if r.json()['data'] != 0:
return r.json()['data']
except KeyError:
cap = re.search("\(.+\)", r.json()['error'])
reason = cap.group(0).replace("(", "").replace(")", "")
if reason == "ExistingValueNotNumeric":
return "The requested key you tried to increment had a different value other than byte, short, int, long, long long, float, double or long double"
    async def remove(self, key):
        """
        Represents a remove request to a data store,
        using legacy works the same
        Parameters
        ----------
        key : str
            The key of the value you wish to remove,
            as in the Second Parameter of
            `void DataStore::removeAsync(const DataStore* this, std::string key, boost::function<void(RBX::Reflection::Variant)> resumeFunction, boost::function<void(std::string)> errorFunction)`
        Returns
        -------
        typing.Any
        """
        if self.legacy:
            data = ""
            url = endpoint + f"persistence/remove?placeId={str(self.place_id)}&type=standard&key={quote(key)}&type=standard&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme else endpoint + f"persistence/remove?placeId={str(self.place_id)}&type=standard&key={quote(self.name)}&type=standard&scope={quote(self.scope)}&target={quote(key)}"
            r = await self.requests.post(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id),
                    'Content-Type': 'application/x-www-form-urlencoded'
                }, data=data)
            # NOTE(review): 'data' is presumably None when nothing was stored
            # under the key — verify against the persistence API.
            if r.json()['data'] is None:
                return None
            else:
                return r.json()['data']
        else:
            url = endpoint + f"v1/persistence/ro_py/remove?type=standard&key={quote(key)}&scope={quote(self.scope)}&target=" if self.legacy_naming_scheme == True else endpoint + f"v1/persistence/ro_py/remove?type=standard&key={quote(self.name)}&scope={quote(self.scope)}&target={quote(key)}"
            r = await self.requests.post(
                url=url,
                headers={
                    'Roblox-Place-Id': str(self.place_id)
                })
            # 204 No Content: there was no value to remove.
            if r.status_code == 204:
                return None
            else:
                return r.text | /ro-py-1.2.0.5.tar.gz/ro-py-1.2.0.5/ro_py/gamepersistence.py | 0.811228 | 0.299502 | gamepersistence.py | pypi |
from datetime import datetime
from ro_py.gender import RobloxGender
from ro_py.utilities.url import url
endpoint = url("accountinformation")
class AccountInformationMetadata:
    """
    Represents account information metadata.
    """
    def __init__(self, metadata_raw):
        # Unsure what this flag controls upstream.
        self.is_allowed_notifications_endpoint_disabled = metadata_raw["isAllowedNotificationsEndpointDisabled"]
        # Whether the account settings policy is enabled (exact meaning unclear).
        self.is_account_settings_policy_enabled = metadata_raw["isAccountSettingsPolicyEnabled"]
        # Whether the user's linked phone number is enabled.
        self.is_phone_number_enabled = metadata_raw["isPhoneNumberEnabled"]
        # Maximum length of the user's description.
        self.max_user_description_length = metadata_raw["MaxUserDescriptionLength"]
        # Whether the user's description is enabled.
        self.is_user_description_enabled = metadata_raw["isUserDescriptionEnabled"]
        # Whether the UserBlock endpoints are updated (exact meaning unclear).
        self.is_user_block_endpoints_updated = metadata_raw["isUserBlockEndpointsUpdated"]
class PromotionChannels:
    """
    Represents account information promotion channels.
    """
    # Maps attribute names to the keys of the raw promotion payload; every
    # attribute except the first is a link to the user's page on that site.
    _FIELDS = (
        ("promotion_channels_visibility_privacy", "promotionChannelsVisibilityPrivacy"),
        ("facebook", "facebook"),
        ("twitter", "twitter"),
        ("youtube", "youtube"),
        ("twitch", "twitch"),
    )

    def __init__(self, promotion_raw):
        for attribute, raw_key in self._FIELDS:
            setattr(self, attribute, promotion_raw[raw_key])
class AccountInformation:
    """
    Represents authenticated client account information (https://accountinformation.roblox.com/)
    This is only available for authenticated clients as it cannot be accessed otherwise.
    Parameters
    ----------
    cso : ro_py.client.ClientSharedObject
        ClientSharedObject.
    """
    def __init__(self, cso):
        self.cso = cso
        self.requests = cso.requests
        # Both attributes stay None until ``update()`` is awaited.
        self.account_information_metadata = None
        self.promotion_channels = None
    async def update(self):
        """
        Updates the account information.
        """
        # NOTE(review): these URLs are hard-coded rather than built from the
        # module-level ``endpoint`` constant — confirm they should match it.
        account_information_req = await self.requests.get(
            url="https://accountinformation.roblox.com/v1/metadata"
        )
        self.account_information_metadata = AccountInformationMetadata(account_information_req.json())
        promotion_channels_req = await self.requests.get(
            url="https://accountinformation.roblox.com/v1/promotion-channels"
        )
        self.promotion_channels = PromotionChannels(promotion_channels_req.json())
    async def get_gender(self):
        """
        Gets the user's gender.
        Returns
        -------
        ro_py.gender.RobloxGender
        """
        gender_req = await self.requests.get(endpoint + "v1/gender")
        return RobloxGender(gender_req.json()["gender"])
    async def set_gender(self, gender):
        """
        Sets the user's gender.
        Parameters
        ----------
        gender : ro_py.gender.RobloxGender
        """
        await self.requests.post(
            url=endpoint + "v1/gender",
            data={
                "gender": str(gender.value)
            }
        )
    async def get_birthdate(self):
        """
        Grabs the user's birthdate.
        Returns
        -------
        datetime.datetime
        """
        birthdate_req = await self.requests.get(endpoint + "v1/birthdate")
        birthdate_raw = birthdate_req.json()
        birthdate = datetime(
            year=birthdate_raw["birthYear"],
            month=birthdate_raw["birthMonth"],
            day=birthdate_raw["birthDay"]
        )
        return birthdate
    async def set_birthdate(self, birthdate):
        """
        Sets the user's birthdate.
        Parameters
        ----------
        birthdate : datetime.datetime
        """
        await self.requests.post(
            url=endpoint + "v1/birthdate",
            data={
                "birthMonth": birthdate.month,
                "birthDay": birthdate.day,
                "birthYear": birthdate.year
            }
        ) | /ro-py-1.2.0.5.tar.gz/ro-py-1.2.0.5/ro_py/accountinformation.py | 0.929871 | 0.198646 | accountinformation.py | pypi |
from ro_py.robloxbadges import RobloxBadge
from ro_py.utilities.pages import Pages
from ro_py.assets import UserAsset
from ro_py.badges import Badge
import iso8601
from ro_py.utilities.url import url
endpoint = url("users")
def limited_handler(requests, data, args):
    """Convert raw collectible-asset entries into ``UserAsset`` objects."""
    return [
        UserAsset(requests, entry["assetId"], entry["userAssetId"])
        for entry in data
    ]
class BaseUser:
    """
    Bare-bones user object built from only a user id; heavier data is
    fetched on demand through the individual coroutines below.
    """
    def __init__(self, cso, user_id):
        self.cso = cso
        self.requests = cso.requests
        self.id = user_id
        self.profile_url = f"https://www.roblox.com/users/{self.id}/profile"
    async def expand(self):
        """
        Expands into a full User object.
        Returns
        -------
        ro_py.users.User
        """
        return await self.cso.client.get_user(self.id)
    async def get_roblox_badges(self) :
        """
        Gets the user's roblox badges.
        Returns
        -------
        List[ro_py.robloxbadges.RobloxBadge]
        """
        roblox_badges_req = await self.requests.get(
            f"https://accountinformation.roblox.com/v1/users/{self.id}/roblox-badges")
        roblox_badges = []
        for roblox_badge_data in roblox_badges_req.json():
            roblox_badges.append(RobloxBadge(roblox_badge_data))
        return roblox_badges
    async def get_friends_count(self) -> int:
        """
        Gets the user's friends count.
        Returns
        -------
        int
        """
        friends_count_req = await self.requests.get(f"https://friends.roblox.com/v1/users/{self.id}/friends/count")
        friends_count = friends_count_req.json()["count"]
        return friends_count
    async def get_followers_count(self) -> int:
        """
        Gets the user's followers count.
        Returns
        -------
        int
        """
        followers_count_req = await self.requests.get(f"https://friends.roblox.com/v1/users/{self.id}/followers/count")
        followers_count = followers_count_req.json()["count"]
        return followers_count
    async def get_followings_count(self) -> int:
        """
        Gets the user's followings count.
        Returns
        -------
        int
        """
        followings_count_req = await self.requests.get(
            f"https://friends.roblox.com/v1/users/{self.id}/followings/count")
        followings_count = followings_count_req.json()["count"]
        return followings_count
    async def get_friends(self):
        """
        Gets the user's friends.
        Returns
        -------
        List[ro_py.users.Friend]
        """
        from ro_py.friends import Friend  # Hacky circular import fix
        friends_req = await self.requests.get(f"https://friends.roblox.com/v1/users/{self.id}/friends")
        friends_raw = friends_req.json()["data"]
        friends_list = []
        for friend_raw in friends_raw:
            friends_list.append(Friend(self.cso, friend_raw))
        return friends_list
    async def get_groups(self):
        """
        Gets the user's groups.
        Returns
        -------
        List[ro_py.groups.PartialGroup]
        """
        from ro_py.groups import PartialGroup
        member_req = await self.requests.get(
            url=f"https://groups.roblox.com/v2/users/{self.id}/groups/roles"
        )
        data = member_req.json()
        groups = []
        for group in data['data']:
            # Each entry nests the group payload under the 'group' key.
            group = group['group']
            groups.append(PartialGroup(self.cso, group))
        return groups
    async def get_limiteds(self):
        """
        Gets all limiteds the user owns.
        Returns
        -------
        ro_py.utilities.pages.Pages
        """
        # NOTE(review): declared async but performs no awaiting — the Pages
        # object is constructed lazily and fetched by the caller.
        return Pages(
            cso=self.cso,
            url=f"https://inventory.roblox.com/v1/users/{self.id}/assets/collectibles?cursor=&limit=100&sortOrder=Desc",
            handler=limited_handler
        )
    async def get_status(self):
        """
        Gets the user's status.
        Returns
        -------
        str
        """
        status_req = await self.requests.get(endpoint + f"v1/users/{self.id}/status")
        return status_req.json()["status"]
    async def has_badge(self, badge: Badge):
        """
        Checks if a user was awarded a badge and grabs the time that they were awarded it.
        Functionally identical to ro_py.badges.Badge.owned_by.
        Parameters
        ----------
        badge: ro_py.badges.Badge
            Badge to check ownership of.
        Returns
        -------
        tuple[bool, datetime.datetime]
        """
        has_badge_req = await self.requests.get(
            url=url("badges") + f"v1/users/{self.id}/badges/awarded-dates",
            params={
                "badgeIds": badge.id
            }
        )
        has_badge_data = has_badge_req.json()["data"]
        # The endpoint returns one entry per owned badge; empty means not owned.
        if len(has_badge_data) >= 1:
            return True, iso8601.parse_date(has_badge_data[0]["awardedDate"])
        else:
            return False, None
class PartialUser(BaseUser):
    """
    A user object built from a partial API payload.

    Different endpoints spell the id/name keys differently, so each field is
    taken from the first key that holds a truthy value.
    """
    _ID_KEYS = ("id", "Id", "userId", "user_id", "UserId")
    _NAME_KEYS = ("name", "Name", "Username", "username")
    _DISPLAY_NAME_KEYS = ("displayName", "DisplayName", "display_name")

    @staticmethod
    def _first(data, keys):
        # Mirrors a chain of ``or`` lookups: first truthy value wins, and the
        # last key's value is returned as-is (even when falsy).
        for key in keys[:-1]:
            value = data.get(key)
            if value:
                return value
        return data.get(keys[-1])

    def __init__(self, cso, data):
        self.id = self._first(data, self._ID_KEYS)
        super().__init__(cso, self.id)
        self.name = self._first(data, self._NAME_KEYS)
        self.display_name = self._first(data, self._DISPLAY_NAME_KEYS)
class ApiError(Exception):
    """Raised when an API request fails with a status code that has no dedicated exception."""
    pass
class BadRequest(ApiError):
    """HTTP 400 Bad Request."""
    pass
class Unauthorized(ApiError):
    """HTTP 401 Unauthorized."""
    pass
class Forbidden(ApiError):
    """HTTP 403 Forbidden."""
    pass
class NotFound(ApiError):
    """HTTP 404 Not Found (also used for other things)."""
    pass
class Conflict(ApiError):
    """HTTP 409 Conflict."""
    pass
class TooManyRequests(ApiError):
    """HTTP 429 Too Many Requests."""
    pass
class InternalServerError(ApiError):
    """HTTP 500 Internal Server Error."""
    pass
class BadGateway(ApiError):
    """HTTP 502 Bad Gateway."""
    pass
# The following errors are specific to certain parts of ro.py
class NotLimitedError(Exception):
    """Raised when code attempts to read limited-only information."""
    pass
class InvalidIconSizeError(Exception):
    """Raised when code attempts to pass an improper size to a thumbnail function."""
    pass
class InvalidShotTypeError(Exception):
    """Raised when code attempts to pass an improper avatar image type to a thumbnail function."""
    pass
class ChatError(Exception):
    """Raised in chat when a chat action fails."""
class InvalidPageError(Exception):
    """Raised when an invalid page is requested."""
class UserDoesNotExistError(Exception):
    """Raised when a user does not exist."""
class GameJoinError(Exception):
    """Raised when an error occurs when joining a game."""
class InvalidPlaceIDError(Exception):
    """Raised when a place ID is invalid."""
class IncorrectKeyError(Exception):
    """Raised when the api key for 2captcha is incorrect."""
    pass
class InsufficientCreditError(Exception):
    """Raised when there is insufficient credit in 2captcha."""
    pass
class NoAvailableWorkersError(Exception):
    """Raised when there are no available 2captcha workers."""
    pass
# Maps HTTP status codes (as strings) to the exception classes raised for them.
c_errors = {
    "400": BadRequest,
    "401": Unauthorized,
    "403": Forbidden,
    "404": NotFound,
    "409": Conflict,
    "429": TooManyRequests,
    "500": InternalServerError,
    "502": BadGateway
} | /ro-py-1.2.0.5.tar.gz/ro-py-1.2.0.5/ro_py/utilities/errors.py | 0.817137 | 0.226473 | errors.py | pypi |
from ro_py.utilities.errors import InvalidPageError
import enum
class SortOrder(enum.Enum):
    """
    Order in which page data should load in.
    """
    # Values are the literal strings passed to the API's sortOrder parameter.
    Ascending = "Asc"
    Descending = "Desc"
class Page:
    """
    Represents a single page from a Pages object.
    """

    def __init__(self, cso, data, pages, handler=None, handler_args=None):
        self.cso = cso  # client shared object
        self.pages = pages  # owning Pages object, for iteration
        self.handler = handler  # optional callable that post-processes raw entries
        self.handler_args = handler_args
        self._ingest(data)

    def _ingest(self, data):
        # Pull the navigation cursors and entries out of a raw page payload,
        # running the handler (if any) over the entries.
        self.previous_page_cursor = data["previousPageCursor"]
        self.next_page_cursor = data["nextPageCursor"]
        entries = data["data"]
        if self.handler:
            entries = self.handler(self.cso, entries, self.handler_args)
        self.data = entries

    def update(self, data):
        """Replace this page's cursors and entries with a new raw payload."""
        self._ingest(data)

    def __getitem__(self, key):
        return self.data[key]
class Pages:
"""
Represents a paged object.
!!! warning
This object is *slow*, especially with a custom handler.
Automatic page caching will be added in the future. It is suggested to
cache the pages yourself if speed is required.
"""
def __init__(self, cso, url, sort_order=SortOrder.Ascending, limit=10, extra_parameters=None, handler=None, handler_args=None):
if extra_parameters is None:
extra_parameters = {}
self.handler = handler
"""Function that is passed to Page as data handler."""
extra_parameters["sortOrder"] = sort_order.value
extra_parameters["limit"] = limit
self.parameters = extra_parameters
"""Extra parameters for the request."""
self.cso = cso
self.requests = cso.requests
"""Requests object."""
self.url = url
"""URL containing the paginated data, accessible with a GET request."""
self.page = 0
"""Current page number."""
self.handler_args = handler_args
self.data = None
self.i = 0
def __aiter__(self):
return self
async def __anext__(self):
if self.i == len(self.data.data):
if not self.data.next_page_cursor:
self.i = 0
raise StopAsyncIteration
await self.next()
self.i = 0
data = self.data.data[self.i]
self.i += 1
return data
async def get_page(self, cursor=None):
"""
Gets a page at the specified cursor position.
"""
this_parameters = self.parameters
if cursor:
this_parameters["cursor"] = cursor
for name, value in self.parameters.items():
this_parameters[name] = value
page_req = await self.requests.get(
url=self.url,
params=this_parameters
)
if self.data:
self.data.update(page_req.json())
return
self.data = Page(
cso=self.cso,
data=page_req.json(),
pages=self,
handler=self.handler,
handler_args=self.handler_args
)
async def previous(self):
"""
Moves to the previous page.
"""
if self.data.previous_page_cursor:
await self.get_page(self.data.previous_page_cursor)
else:
raise InvalidPageError
async def next(self):
"""
Moves to the next page.
"""
if self.data.next_page_cursor:
await self.get_page(self.data.next_page_cursor)
else:
raise InvalidPageError | /ro-py-1.2.0.5.tar.gz/ro-py-1.2.0.5/ro_py/utilities/pages.py | 0.840881 | 0.190969 | pages.py | pypi |
from datetime import datetime
from LatLon import LatLon, Latitude, Longitude
class Agent():
"""
Agents are located at ther *point*, which is a lat-lon coordinate pair.
They move through their route, which is a series of points that
connect their current location to their destination.
"""
def __init__(self,
point,
dest,
router,
speed=3):
"""
:param point: current location of agent
:param dest: destination point of agent
:param speed: speed in metres per second
:param router: a router instance
:type point: LatLon point
:type dest: LatLon point
:type speed: float
:type router: NXRouter or BRouter instance
"""
self.set_point(point)
self.set_destination(dest)
self.speed = speed
self.heading = 0
self.destination_heading = 0
self.stamp = datetime.now()
self.route = []
self.length = 0
self.router = router
def point(self):
"""
:return: LatLon point object with agent's current location
"""
return LatLon(Latitude(self.lat),
Longitude(self.lon))
def set_point(self, point):
"""
Get lat and lon from LatLon point object, set them as agent's point.
:param point: set agent's point to here
:type point: LatLon point object
"""
self.lat = float(point.lat)
self.lon = float(point.lon)
def destination(self):
"""
:return: LatLon point object with agent's destination point
"""
return LatLon(Latitude(self.dest_lat),
Longitude(self.dest_lon))
def set_destination(self, point):
"""
Get lat and lon from LatLon point object, set them as agent's
destination point.
:param point: set agent's destination to point
:type point: LatLon point
"""
self.dest_lat = float(point.lat)
self.dest_lon = float(point.lon)
def update(self, new_point, update_speed=False):
"""
Updates time stamp and speed.
uses @new_point to update:
- point
- heading
- destination_heading
- speed, if update_speed=True
:param new_point: new current point to update to
:param bool update_speed: wether to update agent's speed attribute
:type new_point: LatLon point
"""
self.heading = self.point().heading_initial(new_point)
self.destination_heading = new_point.heading_initial(
self.destination())
if update_speed:
tdelta = datetime.now() - self.stamp
seconds = tdelta.total_seconds()
distance = self.point().distance(new_point) / 1000.0
self.speed = distance / seconds
self.stamp = datetime.now()
self.set_point(new_point)
def heading_to(self, other_point):
"""
:return: Heading from my point to @other_point, in degrees.
:rtype: float
:param other_point: return heading from agent's point to here
:type other_point: LatLon point
"""
return self.point().heading_initial(other_point)
def distance_to(self, other_point):
"""
:return: distance from agent to another point, in metres.
:rtype: float
:param other_point: return distance from agent's point to here
:type other_point: LatLon point
"""
return self.point().distance(other_point) * 1000.0
def got_there(self):
"""
:return: True if one step or less away
:rtype: bool
"""
if self.distance_to(self.destination()) < self.speed:
return True
else:
return False
def update_route(self, points=[]):
"""
Query route server for points connecting current location to
destination.
If @points is given, route connects current location, sequence
of points, and destination.
:param points: list of LatLon points
:return: True if succesful update of route, False if route is empty
"""
assert self.router is not None
route = self.router.get_route(points=[self.point(), ]
+ points
+ [self.destination(), ],
speed=self.speed)
if route:
self.route = route
self.length = self.router.length
return True
else:
return False
def step(self):
"""
Calls update method to move agent to next point in route.
Pops first item of route point list.
"""
if self.route:
p = self.route.pop(0)
p = LatLon(Latitude(p[1]),
Longitude(p[0]))
else:
p = self.destination()
self.update(p)
def __str__(self):
return "<A-%s %0.2fm @%sm/s %s>" % (id(self),
self.distance_to(
self.destination()),
self.speed,
self.point()) | /road_agent-1.0.1.tar.gz/road_agent-1.0.1/road_agent/__init__.py | 0.939464 | 0.532729 | __init__.py | pypi |
import uuid
from typing import (
Any,
Iterable,
Mapping
)
class GenericObjects():
    """
    Base container holding a list of ``child_class`` items parsed from raw data.
    """
    def __init__(self, *args, **kwargs):
        '''
        :kwarg uuid: optional pre-assigned identifier (lazily generated otherwise)
        :kwarg data: iterable of raw items, each parsed via ``child_class.parse``
        :kwarg child_class: class used for each contained item (default: GenericObject)
        '''
        self._uuid = kwargs.get('uuid', None)
        self.child_class = kwargs.get('child_class', GenericObject)
        self._data = [
            self.child_class.parse(d) for d in kwargs.get('data', [])
        ]
    @property
    def uuid(self):
        # Lazily generate the identifier on first access, then cache it.
        if self._uuid is None:
            self._uuid = uuid.uuid4()
        return self._uuid
    def __getitem__(self, i):
        return self._data[i]
    def __iter__(self):
        return (i for i in self._data)
    def __len__(self):
        return len(self._data)
    def append(self, obj: Any) -> None:
        '''
        :param obj: item (or raw data for one); parsed if not already a child_class
        '''
        if not isinstance(obj, self.child_class):
            obj = self.child_class.parse(obj)
        self._data.append(obj)
    def extend(self, objs: Iterable[Any]) -> None:
        '''
        :param objs: iterable of items (or raw data) appended one by one
        '''
        for obj in objs:
            self.append(obj)
    def serialize(self) -> Iterable[Mapping[str, Any]]:
        return [obj.serialize() for obj in self.data]
    def reload(self) -> None:
        '''
        Reload to get rid of object cached properties. This should be done on
        any modifications to the data within any of the object's objects.
        '''
        # NOTE(review): relies on each item exposing a ``data`` attribute,
        # which GenericObject itself does not define — presumably supplied by
        # subclasses; verify.
        self._data = [
            self.child_class(data=obj.data) for obj in self.data if len(obj.data) > 0
        ]
    def remove(self, obj: Any) -> None:
        # NOTE(review): compares ``obj.id``, which GenericObject itself does
        # not define — presumably supplied by subclasses; verify.
        if not isinstance(obj, self.child_class):
            raise TypeError('Bad type: %s' % (type(obj)))
        self._data = [
            o for o in self._data if o.id != obj.id
        ]
    @staticmethod
    def parse(objs):
        # Subclasses must implement parsing of a raw collection.
        raise NotImplementedError()
    @property
    def data(self) -> Iterable[Any]:
        return self._data
class GenericObject():
    """
    Base class for a single parsed record with a lazily generated uuid.
    """
    def __init__(self, *args, **kwargs):
        '''
        :kwarg uuid: optional pre-assigned identifier (lazily generated otherwise)
        '''
        self._uuid = kwargs.get('uuid', None)

    @property
    def uuid(self):
        # Generate the identifier on first access, then cache it.
        if self._uuid is None:
            self._uuid = uuid.uuid4()
        return self._uuid

    def __repr__(self):
        return f'{type(self).__name__}({self.uuid})'

    @staticmethod
    def parse(obj):
        # Subclasses must implement parsing of one raw record.
        raise NotImplementedError()

    def serialize(self):
        # Subclasses must implement serialization of one record.
        raise NotImplementedError()
class Casualties():
    """
    A list-like container of ``Casualty`` records.
    """
    def __init__(self, *args, **kwargs):
        self._data = kwargs.get('data', [])
    def __getitem__(self, i):
        return self._data[i]
    def __iter__(self):
        return (i for i in self._data)
    def __len__(self):
        return len(self._data)
    def append(self, data):
        self._data.append(data)
    def extend(self, data):
        self._data.extend(data)
    def serialize(self):
        return [
            d.serialize() for d in self
        ]
    @staticmethod
    def parse(data):
        """
        Build a ``Casualties`` container from one raw dict or a list of
        raw dicts; any other input shape raises NotImplementedError.
        """
        casualties = Casualties()
        if isinstance(data, list):
            for d in data:
                if isinstance(d, dict):
                    casualties.append(
                        Casualty(
                            **d
                        )
                    )
                else:
                    raise NotImplementedError()
        elif isinstance(data, dict):
            casualties.append(
                Casualty(
                    **data
                )
            )
        else:
            # Removed a leftover ``import pdb; pdb.set_trace()`` debugger
            # breakpoint that would hang non-interactive runs.
            raise NotImplementedError()
        return casualties
class Casualty():
    """
    One casualty record from the French road-collision dataset.

    Raw field values arrive as strings; numeric fields are parsed to int,
    with empty/None raw values becoming ``None``. ``actp`` and ``num_veh``
    are kept as-is.
    """
    __slots__ = [
        'year_of_birth',
        'sex',
        'actp',
        'secu',
        'grav',
        'locp',
        'num_veh',
        'place',
        'catu',
        'etatp',
        'trajet'
    ]
    def __init__(self, *args, **kwargs):
        self.year_of_birth = int(kwargs['an_nais']) if kwargs['an_nais'] else None
        self.sex = int(kwargs['sexe']) if kwargs['sexe'] else None
        self.actp = kwargs['actp']
        # .get() because 'secu' may be absent from the raw record, unlike
        # the other fields.
        self.secu = int(kwargs['secu']) if kwargs.get('secu', None) else None
        self.grav = int(kwargs['grav']) if kwargs['grav'] else None
        self.locp = int(kwargs['locp']) if kwargs['locp'] else None
        self.num_veh = kwargs['num_veh']
        self.place = int(kwargs['place']) if kwargs['place'] else None
        self.catu = int(kwargs['catu']) if kwargs['catu'] else None
        self.etatp = int(kwargs['etatp']) if kwargs['etatp'] else None
        self.trajet = int(kwargs['trajet']) if kwargs['trajet'] else None
    def serialize(self):
        return {
            'year_of_birth': self.year_of_birth,
            'sex': self.sex,
            'actp': self.actp,
            'secu': self.secu,
            'grav': self.grav,
            'locp': self.locp,
            'num_veh': self.num_veh,
            'place': self.place,
            'catu': self.catu,
            'etatp': self.etatp,
            # Bug fix: this previously serialized ``etatp`` under 'trajet'.
            'trajet': self.trajet
        }
class Vehicles():
    """
    A list-like container of ``Vehicle`` records.
    """
    def __init__(self, *args, **kwargs):
        self._data = kwargs.get('data', [])

    def __getitem__(self, i):
        return self._data[i]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def append(self, data):
        self._data.append(data)

    def extend(self, data):
        self._data.extend(data)

    def serialize(self):
        return [record.serialize() for record in self]

    @staticmethod
    def parse(data):
        """Build a container from one raw dict or a list of raw dicts."""
        vehicles = Vehicles()
        if isinstance(data, dict):
            vehicles.append(Vehicle(**data))
        elif isinstance(data, list):
            for record in data:
                if not isinstance(record, dict):
                    raise NotImplementedError()
                vehicles.append(Vehicle(**record))
        else:
            raise NotImplementedError()
        return vehicles
class Vehicle():
    """
    One vehicle record from the French road-collision dataset.

    Raw field values arrive as strings; every field except ``num_veh`` is
    parsed to int, with empty/None raw values becoming ``None``.
    """
    __slots__ = [
        'choc',
        'manv',
        'senc',
        'obsm',
        'catv',
        'num_veh',
        'obs',
        'occutc'
    ]

    # Every slot except ``num_veh`` holds a parsed int.
    _INT_FIELDS = ('choc', 'manv', 'senc', 'obsm', 'catv', 'obs', 'occutc')

    def __init__(self, *args, **kwargs):
        for field in self._INT_FIELDS:
            raw = kwargs[field]
            setattr(self, field, int(raw) if raw else None)
        self.num_veh = kwargs['num_veh']

    def serialize(self):
        return {field: getattr(self, field) for field in self.__slots__}
class Casualties():
    """
    A list-like container of ``Casualty`` records.
    """
    def __init__(self, *args, **kwargs):
        self._data = kwargs.get('data', [])

    def __getitem__(self, i):
        return self._data[i]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def append(self, data):
        self._data.append(data)

    def extend(self, data):
        self._data.extend(data)

    def serialize(self):
        return [record.serialize() for record in self]

    @staticmethod
    def parse(data):
        """Build a container from one raw dict or a list of raw dicts."""
        casualties = Casualties()
        if isinstance(data, dict):
            casualties.append(Casualty(**data))
        elif isinstance(data, list):
            for record in data:
                if not isinstance(record, dict):
                    raise NotImplementedError()
                casualties.append(Casualty(**record))
        else:
            raise NotImplementedError()
        return casualties
class Casualty():
    """A single casualty record from the UK road-collision data.

    Every attribute is an integer code taken verbatim from the source
    row; construction raises ``ValueError`` on non-numeric input and
    ``KeyError`` on missing fields, exactly like direct ``int()`` casts.
    """

    __slots__ = [
        'vehicle_reference',
        'casualty_reference',
        'casualty_class',
        'sex_of_casualty',
        'age_of_casualty',
        'age_band_of_casualty',
        'casualty_severity',
        'pedestrian_location',
        'pedestrian_movement',
        'car_passenger',
        'bus_or_coach_passenger',
        'pedestrian_road_maintenance_worker',
        'casualty_type',
        'casualty_home_area_type',
        'casualty_imd_decile',
    ]

    def __init__(self, *args, **kwargs):
        # All fields are integer-coded; drive the assignments off the
        # slot list so the two stay in sync.
        for name in self.__slots__:
            setattr(self, name, int(kwargs[name]))

    def serialize(self):
        """Return the record as a plain dict in declaration order."""
        return {name: getattr(self, name) for name in self.__slots__}
class Casualties():
    """Ordered collection of :class:`Casualty` records.

    Behaves like a thin list wrapper: indexing, iteration, ``len()``,
    ``append``/``extend`` and bulk serialization.
    """

    def __init__(self, *args, **kwargs):
        # Backing list; optionally seeded via the ``data`` kwarg.
        self._data = kwargs.get('data', [])

    def __getitem__(self, idx):
        return self._data[idx]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)

    def append(self, data):
        """Add a single record to the collection."""
        self._data.append(data)

    def extend(self, data):
        """Add every record from an iterable to the collection."""
        self._data.extend(data)

    def serialize(self):
        """Return the collection as a list of plain dicts."""
        return [record.serialize() for record in self._data]

    @staticmethod
    def parse(data):
        """Build a ``Casualties`` collection from a dict or a list of dicts.

        Raises ``NotImplementedError`` for any other input shape.
        """
        collection = Casualties()
        if isinstance(data, list):
            for entry in data:
                if not isinstance(entry, dict):
                    raise NotImplementedError()
                collection.append(Casualty(**entry))
        elif isinstance(data, dict):
            collection.append(Casualty(**data))
        else:
            raise NotImplementedError()
        return collection
class Casualty():
    """A single casualty record from the US road-collision data.

    Every attribute is an integer code taken verbatim from the source
    row; construction raises ``ValueError`` on non-numeric input and
    ``KeyError`` on missing fields, exactly like direct ``int()`` casts.
    """

    __slots__ = [
        'vehicle_reference',
        'casualty_reference',
        'casualty_class',
        'sex_of_casualty',
        'age_of_casualty',
        'age_band_of_casualty',
        'casualty_severity',
        'pedestrian_location',
        'pedestrian_movement',
        'car_passenger',
        'bus_or_coach_passenger',
        'pedestrian_road_maintenance_worker',
        'casualty_type',
        'casualty_home_area_type',
        'casualty_imd_decile',
    ]

    def __init__(self, *args, **kwargs):
        # All fields are integer-coded; drive the assignments off the
        # slot list so the two stay in sync.
        for name in self.__slots__:
            setattr(self, name, int(kwargs[name]))

    def serialize(self):
        """Return the record as a plain dict in declaration order."""
        return {name: getattr(self, name) for name in self.__slots__}
import uuid
from typing import (
Any,
Iterable,
Mapping
)
class GenericObjects():
    """Base container for collections of ``GenericObject`` subclasses.

    Incoming raw items are normalised through ``child_class.parse`` so the
    container always holds parsed child instances.
    """
    def __init__(self, *args, **kwargs):
        '''
        :kwarg uuid: optional pre-assigned identifier for this collection
        :kwarg data: iterable of raw items; each is passed through
            ``child_class.parse``
        :kwarg child_class: class used to parse items (default GenericObject)
        '''
        self._uuid = kwargs.get('uuid', None)
        self.child_class = kwargs.get('child_class', GenericObject)
        self._data = [
            self.child_class.parse(d) for d in kwargs.get('data', [])
        ]
    @property
    def uuid(self):
        # Lazily create the identifier on first access.
        if self._uuid is None:
            self._uuid = uuid.uuid4()
        return self._uuid
    def __getitem__(self, i):
        return self._data[i]
    def __iter__(self):
        return (i for i in self._data)
    def __len__(self):
        return len(self._data)
    def append(self, obj: Any) -> None:
        '''
        Append one item, parsing it first if it is not already a
        ``child_class`` instance.

        :param obj: child instance or raw data acceptable to
            ``child_class.parse``
        '''
        if not isinstance(obj, self.child_class):
            obj = self.child_class.parse(obj)
        self._data.append(obj)
    def extend(self, objs: Iterable[Any]) -> None:
        '''
        Append every item from an iterable (each via :meth:`append`).

        :param objs: iterable of child instances or raw data
        '''
        for obj in objs:
            self.append(obj)
    def serialize(self) -> Iterable[Mapping[str, Any]]:
        # Delegates to each child's serialize(); children must implement it.
        return [obj.serialize() for obj in self.data]
    def reload(self) -> None:
        '''
        Reload to get rid of object cached properties. This should be done on
        any modifications to the data within any of the object's objects.
        '''
        # NOTE(review): assumes every child exposes a `.data` attribute and a
        # `data=` constructor kwarg; GenericObject itself defines neither, so
        # only subclasses that do can use reload() — TODO confirm.
        self._data = [
            self.child_class(data=obj.data) for obj in self.data if len(obj.data) > 0
        ]
    def remove(self, obj: Any) -> None:
        # Removes all children whose `.id` matches the given object's `.id`.
        # NOTE(review): compares `.id`, which GenericObject does not define
        # (it only has `uuid`); subclasses are presumably expected to provide
        # an `id` attribute — verify against callers.
        if not isinstance(obj, self.child_class):
            raise TypeError('Bad type: %s' % (type(obj)))
        self._data = [
            o for o in self._data if o.id != obj.id
        ]
    @staticmethod
    def parse(objs):
        # Abstract hook: subclasses build a concrete collection from raw data.
        raise NotImplementedError()
    @property
    def data(self) -> Iterable[Any]:
        # Direct (mutable) view of the backing list.
        return self._data
class GenericObject():
    """Base record type with a lazily created UUID.

    Subclasses must implement :meth:`parse` and :meth:`serialize`.
    """

    def __init__(self, *args, **kwargs):
        '''
        :kwarg uuid: optional pre-assigned identifier
        '''
        self._uuid = kwargs.get('uuid', None)

    @property
    def uuid(self):
        # Create the identifier only on first access.
        if self._uuid is None:
            self._uuid = uuid.uuid4()
        return self._uuid

    def __repr__(self):
        return f'{type(self).__name__}({self.uuid})'

    @staticmethod
    def parse(obj):
        """Abstract hook: build an instance from raw data."""
        raise NotImplementedError()

    def serialize(self):
        """Abstract hook: return the record as a plain dict."""
        raise NotImplementedError()
import ast
import calendar
import configparser
import logging
import shutil
import time
from datetime import datetime
from functools import partial
from pathlib import Path
from dateutil.relativedelta import relativedelta
from road_data_scraper import TMP_LOG_PATH
from road_data_scraper.report.report import run_reports
from road_data_scraper.steps.download import THREAD_POOL, download
from road_data_scraper.steps.file_handler import (
dump_config,
file_handler,
gcp_upload_from_directory,
)
from road_data_scraper.steps.metadata import get_sensor_urls, get_sites_by_sensor
LOGGER = logging.getLogger(__name__)
def run(config: dict, api_run: bool) -> None:
    """
    Orchestrates the web scraping pipeline that extracts Road Traffic Sensor Data
    from the Highways England WebTRIS API: configuration setup, data fetching,
    report generation, and optional upload to a Google Cloud Platform bucket and
    directory removal.

    Args:
        config (dict): Configuration for the pipeline run. Contains settings such
            as start and end dates, test-run flag, report-generation flag, and
            Google Cloud Storage details.
        api_run (bool): Whether the pipeline is being run through the FastAPI
            framework. FastAPI passes already-parsed values, while the CLI
            ``config.ini`` stores Python literals that still need evaluating.

    Pipeline steps:
        1. `file_handler` creates the run directories.
        2. `get_sites_by_sensor` / `get_sensor_urls` fetch sensor metadata.
        3. `download` scrapes the sensor data per sensor type.
        4. `run_reports` optionally generates an HTML report.
        5. `dump_config` saves the configuration to metadata.
        6. `gcp_upload_from_directory` optionally uploads the run to GCP.
        7. The run directory is optionally removed.

    Returns:
        None.

    Raises:
        ValueError: If the output directory in config is not valid (from
            `file_handler`). Download, report, and GCP-upload stages may raise
            their own exceptions.

    Example usage:
        >>> config = configparser.ConfigParser()
        >>> config.read("./config.ini")
        >>> run(config, api_run=False)
    """
    start_time = time.time()
    # FastAPI supplies already-parsed values; CLI config values are Python
    # literals that must be evaluated.
    if api_run:
        def my_ast(*args):
            return args[0]
    else:
        def my_ast(*args):
            return ast.literal_eval(*args)
    start_date = my_ast(config["user_settings"]["start_date"])
    end_date = my_ast(config["user_settings"]["end_date"])
    if not start_date and not end_date:
        # Default window: the latest full month available, allowing for the
        # WebTRIS API's 2-month publication lag (date today minus two months).
        date_object_today = datetime.strptime(time.strftime("%Y-%m"), "%Y-%m")
        minus_two_months = date_object_today - relativedelta(months=2)
        year, month = map(int, minus_two_months.strftime("%Y %m").split())
        last_day_of_month = calendar.monthrange(year, month)[1]
        # Zero-pad the month so the generated dates are valid ISO-8601
        # (previously e.g. "2024-3-01").
        start_date = f"{year}-{month:02d}-01"
        end_date = f"{year}-{month:02d}-{last_day_of_month}"
    data_path, metadata_path, report_path, run_id_path = file_handler(
        config, api_run, start_date, end_date
    )
    LOGGER.info(f"Using {THREAD_POOL} threads")
    test_run = my_ast(config["user_settings"]["test_run"])
    generate_report = my_ast(config["user_settings"]["generate_report"])
    LOGGER.info("Getting Road Sensor Lookup Table")
    sensor_tables, lookup_df = get_sites_by_sensor()
    lookup_df.to_csv(f"{str(metadata_path)}/road_data_sensor_lookup.csv", index=False)
    midas_metadata, tmu_metadata, tame_metadata = get_sensor_urls(
        sensor_tables, start_date, end_date
    )
    LOGGER.info("Processed Road Sensor Lookup Table")
    if test_run:
        # Trim each sensor's URL list to a single entry for a quick dry run.
        LOGGER.info("Test Run")
        midas_metadata = midas_metadata[1:2]
        tmu_metadata = tmu_metadata[1:2]
        tame_metadata = tame_metadata[1:2]
    download_partial = partial(
        download,
        start_date=start_date,
        end_date=end_date,
        test_run=test_run,
        run_id_path=data_path,
    )
    download_partial(site_name="midas", metadata=midas_metadata)
    download_partial(site_name="tmu", metadata=tmu_metadata)
    download_partial(site_name="tame", metadata=tame_metadata)
    if generate_report:
        run_reports(lookup_df, report_path, start_date, end_date)
    # Persist the exact configuration used for this run alongside its output.
    dump_config(config, metadata_path, api_run=api_run)
    LOGGER.info(f"Script Run Time: {round((time.time() - start_time)/60, 2)} minutes")
    log_file_path = f"{metadata_path}/road_data_pipeline.log"
    shutil.copyfile(TMP_LOG_PATH, log_file_path)
    gcp_storage = my_ast(config["user_settings"]["gcp_storage"])
    if gcp_storage:
        gcp_upload_from_directory(
            run_id_path,
            destination_bucket_name=my_ast(config["user_settings"]["gcp_bucket_name"]),
            destination_blob_name=my_ast(config["user_settings"]["gcp_blob_name"]),
            gcp_credentials=my_ast(config["user_settings"]["gcp_credentials"]),
        )
    rm_dir = my_ast(config["user_settings"]["rm_dir"])
    if rm_dir:
        LOGGER.info(
            f"Removing {run_id_path[run_id_path.find('output_data/'):].split('/')[1]} folder"
        )
        shutil.rmtree(Path(run_id_path))
if __name__ == "__main__":
    # Stand-alone CLI entry point: read settings from a local config.ini and
    # launch the pipeline outside the FastAPI wrapper.
    config = configparser.ConfigParser()
    config.read("./config.ini")
    run(config, api_run=False)
import glob
import logging
import warnings
from pathlib import Path
import pandas as pd
# Import papermill/nbconvert with FutureWarnings suppressed so their
# import-time deprecation noise does not pollute the pipeline log.
with warnings.catch_warnings():
    warnings.simplefilter(action="ignore", category=FutureWarning)
    import papermill as pm
    from nbconvert.exporters import HTMLExporter
    from nbconvert.preprocessors import TagRemovePreprocessor
    from nbconvert.writers import FilesWriter
    from traitlets.config import Config
warnings.simplefilter(action="ignore")
# nbconvert configuration shared by the report exporter: hide all code input
# and strip any notebook cell tagged remove_cell / remove_output / remove_input.
c = Config()
c.TemplateExporter.exclude_input = True
c.TagRemovePreprocessor.remove_cell_tags = ("remove_cell",)
c.TagRemovePreprocessor.remove_all_outputs_tags = ("remove_output",)
c.TagRemovePreprocessor.remove_input_tags = ("remove_input",)
c.TagRemovePreprocessor.enabled = True
c.HTMLExporter.preprocessors = ["nbconvert.preprocessors.TagRemovePreprocessor"]
# Module-level exporter wired with the tag-removal preprocessor.
exporter = HTMLExporter(config=c)
exporter.register_preprocessor(TagRemovePreprocessor(config=c), True)
def run_reports(
    data: pd.DataFrame, full_path: str, start_date: str, end_date: str
) -> None:
    """
    Execute the template report notebook with the run's data and export the
    result as an HTML report.

    The template notebook at ``./report/road_data_report_template.ipynb`` is run
    via Papermill with the report title and the lookup data injected as
    parameters, then converted to HTML with nbconvert (using the module-level
    config ``c``, which hides code cells) and written into ``full_path``.

    Args:
        data (pd.DataFrame): Data to be included in the report.
        full_path (str): Existing directory where the report will be saved.
        start_date (str): Start date of the data range covered by the report.
        end_date (str): End date of the data range covered by the report.

    Returns:
        None
    """
    logging.info(f"Generating HTML Report at {full_path}")
    template_path = "./report/road_data_report_template.ipynb"
    executed_notebook_path = f"{full_path}/road_data_report.ipynb"
    report_title = (
        f"__Road Data Sensors API Scraping Report__\n Date: {start_date} to {end_date}"
    )
    # Silence papermill's per-cell INFO chatter while the notebook executes.
    logging.disable(logging.INFO)
    pm.execute_notebook(
        input_path=template_path,
        output_path=executed_notebook_path,
        parameters={"title": report_title, "data": data.to_json()},
        report_mode=True,
    )
    # Convert the executed notebook (the only .ipynb in the report dir) to HTML.
    executed_notebook = Path(glob.glob(f"{str(full_path)}/*.ipynb")[0])
    body, resources = HTMLExporter(config=c).from_filename(executed_notebook)
    FilesWriter(config=c).write(body, resources, notebook_name="road_data_report")
    logging.disable(logging.NOTSET)
```
# This cell is tagged parameters
title = "Road Data Dumps"
data = None
import warnings
warnings.filterwarnings("ignore")
from IPython.display import Markdown as md
from IPython.display import HTML
import pandas as pd
import panel as pn
import plotly.express as px
def create_counts_table(df):
    """Summarise sensor status counts as a small table.

    Returns a DataFrame with columns ``ID Status`` (the distinct status
    labels, most frequent first), ``Count`` and ``Percentage`` (rounded to
    2 decimal places).

    Note: built directly from the value_counts Series rather than via
    ``reset_index().rename(...)``, because pandas >= 2.0 renamed the
    reset columns to ``status``/``count``, which silently broke the old
    ``{"index": ..., "status": ...}`` rename.
    """
    counts = df["status"].value_counts(normalize=False)
    table = pd.DataFrame({"ID Status": counts.index, "Count": counts.values})
    table["Percentage"] = ((table["Count"] / table["Count"].sum()) * 100).round(2)
    return table
def plotly_bar_counts(df, sensor_name):
    """Bar chart of active vs. inactive sensor IDs for one sensor family."""
    status_colours = {"Active": "#2C3E50", "Inactive": "#E31A1C"}
    fig = px.bar(df, y="Count", color="ID Status", color_discrete_map=status_colours)
    fig.update_layout(
        title=f"{sensor_name}: Active vs. Inactive ID'S",
        xaxis_title="",
        yaxis_title="ID Counts",
    )
    fig.update_traces(hovertemplate="Count:%{y}")
    # Keep the axis line but drop the (meaningless) categorical tick labels.
    fig.update_xaxes(visible=True, showticklabels=False)
    return fig
pn.extension()
%%html
<style>
h1,h2 {
color: #2C3E50;
position: static;
font-weight: bold;
text-transform: uppercase;
}
</style>
df = pd.read_json(data)
df = df.astype({"easting": "object", "northing": "object"})
md(f"# {title}")
md(f"# __Lookup Table__")
df_widget = pn.widgets.DataFrame(
df, name="DataFrame", height=400, width=800
)
df_widget
md(f"# __Active vs. Inactive ID's for MIDAS, TMU, and TAME__")
md(f"## __MIDAS__")
midas_df = df.query("name.str.contains('midas', case = True)")
midas_counts = create_counts_table(midas_df)
midas_counts
plotly_bar_counts(midas_counts, sensor_name="MIDAS")
md(f"## __TMU__")
tmu_df = df.query("name.str.contains('tmu', case = True)")
tmu_counts = create_counts_table(tmu_df)
tmu_counts
plotly_bar_counts(tmu_counts, sensor_name="TMU")
md(f"## __TAME__")
tame_df = df.query("name.str.contains('tame', case = True)")
tame_counts = create_counts_table(tame_df)
tame_counts
plotly_bar_counts(tame_counts, sensor_name="TAME")
md(f"## __Other__")
other_df = df.query("name.str.contains('midas|tame|tmu', case = True)==False")
other_counts = create_counts_table(other_df)
other_counts
plotly_bar_counts(other_counts, sensor_name="Other")
```
| /road_data_scraper-0.0.20-py3-none-any.whl/road_data_scraper/report/road_data_report_template.ipynb | 0.472927 | 0.357876 | road_data_report_template.ipynb | pypi |
import ast
import datetime
import glob
import logging
import os
from pathlib import Path
from google.cloud import storage
LOGGER = logging.getLogger(__name__)
def file_handler(config: dict, api_run: bool, start_date: str, end_date: str) -> tuple:
    """
    Create the per-run output directories and return them as Path objects.

    A unique run directory named after the current timestamp and the scraped
    date range is created under ``<output_path>/output_data/``, with ``data``,
    ``metadata`` and ``report`` subdirectories.

    Args:
        config (dict): Configuration for this run, including user settings.
        api_run (bool): True when invoked through FastAPI; controls how
            ``output_path`` is extracted from config.
        start_date (str): Start date of the scraped range, format '%Y-%m-%d'.
        end_date (str): End date of the scraped range, format '%Y-%m-%d'.

    Raises:
        ValueError: If no valid output directory is configured.

    Returns:
        tuple: (data dir Path, metadata dir Path, report dir Path,
        run directory as a trailing-slash string).
    """
    run_id = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S.%f")

    def _verbose(iso_date: str) -> str:
        # '2022-01-31' -> 'January-31-2022' for a readable directory name.
        return datetime.datetime.strptime(iso_date, "%Y-%m-%d").strftime("%B-%d-%Y")

    start_label = _verbose(start_date)
    end_label = _verbose(end_date)
    if api_run:
        # FastAPI delivers a plain string, possibly wrapped in quotes.
        base_dir = config["user_settings"]["output_path"].strip("\"'")
    else:
        # config.ini stores a Python string literal.
        base_dir = ast.literal_eval(config["user_settings"]["output_path"])
    if not base_dir:
        raise ValueError("Please provide a valid output directory.")
    # Kept as a trailing-slash string: callers slice and concatenate it.
    run_id_path = f"{base_dir}/output_data/{run_id}_{start_label}_to_{end_label}/"
    data_dir = Path(f"{run_id_path}data/")
    metadata_dir = Path(f"{run_id_path}metadata/")
    report_dir = Path(f"{run_id_path}report/")
    for label, directory in (
        ("Data", data_dir),
        ("Metadata", metadata_dir),
        ("Report", report_dir),
    ):
        LOGGER.info(f"Making {label} Directory at: {directory}")
        directory.mkdir(parents=True, exist_ok=True)
    return data_dir, metadata_dir, report_dir, run_id_path
def dump_config(config: dict, metadata_path: Path, api_run: bool) -> None:
    """
    Persist the run's configuration into the metadata directory.

    Writing the effective settings next to the output makes runs auditable
    and reproducible.

    Args:
        config (dict): Configuration used for this run (dict when api_run,
            otherwise a ConfigParser).
        metadata_path (Path): Metadata output directory.
        api_run (bool): True when invoked through FastAPI; a ConfigParser
            must first be flattened into a plain dict.

    Returns:
        None
    """
    LOGGER.info(f"Dumping config.ini for Run at {metadata_path}")
    if api_run:
        config_dict = config
    else:
        # Flatten the ConfigParser into {section: {key: value}}.
        config_dict = {
            section: dict(config.items(section)) for section in config.sections()
        }
    with open(f"{str(metadata_path)}/config_metadata.txt", "w") as out:
        out.write(f"{config_dict}\n")
def gcp_upload_from_directory(
    directory_path: str,
    destination_bucket_name: str,
    destination_blob_name: str,
    gcp_credentials: str,
) -> None:
    """
    Upload an entire pipeline-run output directory to a GCP bucket.

    Every file under ``directory_path`` (recursively) is uploaded; its remote
    path is the local path from ``output_data/`` onwards, nested under
    ``destination_blob_name``.

    Args:
        directory_path (str): Directory tree to upload.
        destination_bucket_name (str): Target GCP bucket name.
        destination_blob_name (str): Blob (folder-like) prefix inside the bucket.
        gcp_credentials (str): Path to the GCP credentials JSON file.

    Returns:
        None
    """
    # The storage client picks up credentials from this environment variable.
    os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = gcp_credentials.strip("\"'")
    bucket = storage.Client().get_bucket(destination_bucket_name)
    for candidate in glob.glob(directory_path + "/**", recursive=True):
        candidate = Path(candidate)
        if not candidate.is_file():
            continue
        local_str = str(candidate)
        # Mirror the path from "output_data/" downwards under the blob prefix.
        remote_path = f"{destination_blob_name}/{local_str[local_str.find('output_data/'):]}"
        bucket.blob(remote_path).upload_from_filename(candidate)
        LOGGER.info(
            f"Uploading {candidate} to Google Cloud Bucket: {destination_bucket_name} \n"
            f"Subfolder {destination_blob_name}"
        )
import csv
import logging
import multiprocessing
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from itertools import repeat
from pathlib import Path
from typing import Callable
import pandas as pd
import requests
THREAD_POOL = multiprocessing.cpu_count()
session = requests.Session()
session.mount(
"https://",
requests.adapters.HTTPAdapter(
pool_maxsize=THREAD_POOL, max_retries=3, pool_block=True
),
)
LOGGER = logging.getLogger(__name__)
@dataclass(frozen=True)
class UrlMetadata:
    """
    A class used to represent the metadata of a URL.

    (Docstring moved above the fields: previously it sat after them, where it
    is just a no-op string statement and never becomes the class ``__doc__``.)

    Attributes
    ----------
    url : str
        The URL to be scraped.
    site_id : str
        The unique ID of the road traffic sensor.
    site_type : str
        The type of the road traffic sensor, e.g., 'midas', 'tmu', 'tame'.
    direction : str
        The direction of the road traffic sensor.
    longitude : str
        The longitude of the road traffic sensor.
    latitude : str
        The latitude of the road traffic sensor.
    status : str
        The status of the road traffic sensor.
    easting : str
        The easting of the road traffic sensor.
    northing : str
        The northing of the road traffic sensor.
    """

    url: str
    site_id: str
    site_type: str
    direction: str
    longitude: str
    latitude: str
    status: str
    easting: str
    northing: str
def _get_headers() -> list[str]:
"""
Returns a list of headers that are used to create a CSV file of data
downloaded from the Highways England WebTRIS API. These headers correspond
to the columns of the CSV file.
The headers include information like the unique site ID, site name,
report date, time period, speed intervals, vehicle length intervals,
average speed, total volume, geographical information and status of
the Road Traffic Sensor.
Returns:
list[str]: A list of strings, where each string is a header name.
"""
return [
"site_id",
"site_name",
"report_date",
"time_period_end",
"interval",
"len_0_520_cm",
"len_521_660_cm",
"len_661_1160_cm",
"len_1160_plus_cm",
"speed_0_10_mph",
"speed_11_15_mph",
"speed_16_20_mph",
"speed_21_25_mph",
"speed_26_30_mph",
"speed_31_35_mph",
"speed_36_40_mph",
"speed_41_45_mph",
"speed_46_50_mph",
"speed_51_55_mph",
"speed_56_60_mph",
"speed_61_70_mph",
"speed_71_80_mph",
"speed_80_plus_mph",
"speed_avg_mph",
"total_vol",
"longitude",
"latitude",
"sites_status",
"type",
"direction",
"easting",
"northing",
]
def _response_to_df(response, metadata: UrlMetadata):
"""
Converts JSON stored `requests` response object to a Pandas DataFrame.
Then adds additional metadata/columns to the DataFrame using an instance of UrlMetadata.
Args:
response (requests.Response): `requests` response object.
metadata (UrlMetadata): An instance of UrlMetadata containing the metadata for a URL.
Returns:
df (pd.DataFrame): Pandas DataFrame.
"""
df = pd.DataFrame.from_dict(response.json()["Rows"])
df.insert(0, "site_id", metadata.site_id)
df = df.assign(
longitude=metadata.longitude,
latitude=metadata.latitude,
sites_status=metadata.status,
type=metadata.site_type,
direction=metadata.direction,
easting=metadata.easting,
northing=metadata.northing,
)
return df
def make_get_url() -> Callable:
    """
    Construct a new `get_url` function with its own thread-safe counter.

    The closure encloses a counter and lock shared by all worker threads,
    so progress can be logged every ~10% of processed URLs.

    Returns:
        Callable: The `get_url` function. It takes `site_name`, `start_date`,
        `end_date`, `test_run`, `full_csv_name`, `metadata` (UrlMetadata) and
        `total_urls`, and returns a requests.Response object.
    """
    counter = [0]
    counter_lock = threading.Lock()

    def get_url(
        site_name: str,
        start_date: str,
        end_date: str,
        test_run: bool,
        full_csv_name: str,
        metadata: UrlMetadata,
        total_urls: int,
    ):
        """
        Scrape one WebTRIS /reports/daily/ URL, append the rows to the CSV,
        and log progress.

        Args:
            site_name (str): The name of the road traffic sensor.
            start_date (str): The start date in the format %Y-%m-%d.
            end_date (str): The end date in the format %Y-%m-%d.
            test_run (bool): If True, only a small data subset is downloaded.
            full_csv_name (str): The output CSV file path.
            metadata (UrlMetadata): Metadata for the URL being scraped.
            total_urls (int): The total number of URLs to be processed.

        Returns:
            response: The HTTP response received when accessing the URL.
        """
        message = "Parallel request of data for use in ONS. Emerging Platforms Team. @GitHub: dombean/road_data_scraper"
        headers = {"Message": f"{message}"}
        response = session.get(metadata.url, headers=headers)
        with counter_lock:
            counter[0] += 1
            remaining = total_urls - counter[0]
            log_interval = max(
                1, total_urls // 10
            )  # log after processing about 10% of the URLs, but at least once
            if counter[0] % log_interval == 0 or counter[0] == total_urls:
                LOGGER.info(
                    f"Processed {counter[0]} URLs. Remaining: {remaining}. Last request was completed in {response.elapsed.total_seconds()} seconds. [{response.url}]"
                )
        # Bug fix: check the status BEFORE parsing. Previously the JSON body
        # was parsed and appended to the CSV first, so an error page either
        # raised inside the worker (skipping this handling entirely) or wrote
        # garbage rows to the output.
        if response.status_code != 200:
            logging.error(
                "request failed, error code %s [%s]", response.status_code, response.url
            )
            if 500 <= response.status_code < 600:
                # server is overloaded? give it a break
                time.sleep(5)
            return response
        df = _response_to_df(response=response, metadata=metadata)
        df.to_csv(f"{full_csv_name}", mode="a", header=False, index=False)
        return response

    return get_url
def download(
    site_name: str,
    start_date: str,
    end_date: str,
    metadata: list[tuple],
    test_run: bool,
    run_id_path: Path,
):
    """
    Scrape one sensor family's data from the Highways England WebTRIS API
    using a thread pool, writing all rows into a single CSV.

    Args:
        site_name (str): Sensor family; one of "midas", "tame", "tmu".
        start_date (str): Start date in the format %Y-%m-%d.
        end_date (str): End date in the format %Y-%m-%d.
        metadata (list[tuple]): One tuple of (url, id, name, direction,
            longitude, latitude, status, easting, northing) per sensor.
        test_run (bool): If True, marks the output CSV as a test run.
        run_id_path (Path): Directory in which the CSV is created.

    Raises:
        ValueError: If `site_name` is not a recognised sensor family.
    """
    LOGGER.info(f"Downloading {site_name} URLs.")
    if site_name not in ("midas", "tame", "tmu"):
        raise ValueError("Available sites are: midas, tame, tmu.")
    suffix = "_TEST_RUN.csv" if test_run else ".csv"
    full_csv_name = f"{str(run_id_path)}/{site_name}_{start_date}-{end_date}{suffix}"
    # Write the header row once; workers append data rows concurrently.
    with open(full_csv_name, "w") as csvfile:
        csv.writer(csvfile).writerow(_get_headers())
    url_metadata_list = [UrlMetadata(*row) for row in metadata]
    fetch = make_get_url()
    with ThreadPoolExecutor(max_workers=THREAD_POOL) as executor:
        executor.map(
            fetch,
            repeat(site_name),
            repeat(start_date),
            repeat(end_date),
            repeat(test_run),
            repeat(full_csv_name),
            url_metadata_list,
            repeat(len(metadata)),
        )
    LOGGER.info(f"Finish Downloading {site_name} URLs.")
import datetime
from functools import partial
import pandas as pd
import requests
BASE_URL = "https://webtris.highwaysengland.co.uk/api/v1/"
def create_sensor_metadata_tuples(
    sensor_tables: dict[str, pd.DataFrame],
    start_date: str,
    end_date: str,
    sensor_name: str = None,
) -> list[tuple[str, int, str, str, float, float, str, float, float]]:
    """
    Build one (url, id, name, direction, longitude, latitude, status,
    easting, northing) tuple per sensor of the requested family.

    The report URL for each sensor is assembled from BASE_URL and the given
    date range, then zipped with the sensor's metadata columns.

    Args:
        sensor_tables (dict): Maps sensor family name ('midas', 'tame',
            'tmu') to its metadata DataFrame.
        start_date (str): Start date in the format 'YYYY-MM-DD'.
        end_date (str): End date in the format 'YYYY-MM-DD'.
        sensor_name (str, optional): Sensor family to build tuples for.

    Raises:
        ValueError: If sensor_name is not 'midas', 'tame' or 'tmu'.

    Returns:
        list[tuple]: One metadata tuple per sensor.
    """
    if sensor_name not in ("midas", "tame", "tmu"):
        raise ValueError("Available Sensors are: midas, tame, tmu.")
    table = sensor_tables[sensor_name]
    report_urls = [
        f"{BASE_URL}reports/{start_date}/to/{end_date}/daily?sites={site}&page=1&page_size=40000"
        for site in table["id"]
    ]
    metadata_columns = (
        "id",
        "name",
        "direction",
        "longitude",
        "latitude",
        "status",
        "easting",
        "northing",
    )
    return list(zip(report_urls, *(table[column] for column in metadata_columns)))
def get_sensor_urls(
    sensor_tables: dict[str, pd.DataFrame], start_date: str, end_date: str
) -> tuple[
    list[tuple[str, int, str, str, float, float, str, float, float]],
    list[tuple[str, int, str, str, float, float, str, float, float]],
    list[tuple[str, int, str, str, float, float, str, float, float]],
]:
    """
    Generates URLs and associated metadata for all road sensors: MIDAS, TAME, TMU.

    The incoming ISO dates are reformatted once to the ddmmYYYY form used in
    the report URLs, then create_sensor_metadata_tuples is invoked for each
    sensor family.

    Args:
        sensor_tables (dict): Maps sensor family names (midas, tame, tmu) to
            their metadata stored in a pandas DataFrame.
        start_date (str): Start date in the format 'YYYY-MM-DD'.
        end_date (str): End date in the format 'YYYY-MM-DD'.

    Returns:
        tuple[list]: Metadata tuples for MIDAS, TMU and TAME sensors respectively.
    """
    api_date_format = "%d%m%Y"
    api_start = datetime.datetime.strptime(start_date, "%Y-%m-%d").strftime(api_date_format)
    api_end = datetime.datetime.strptime(end_date, "%Y-%m-%d").strftime(api_date_format)

    def metadata_for(family: str):
        # Shared call with only the family name varying.
        return create_sensor_metadata_tuples(
            sensor_tables=sensor_tables,
            start_date=api_start,
            end_date=api_end,
            sensor_name=family,
        )

    return metadata_for("midas"), metadata_for("tmu"), metadata_for("tame")
def direction_string_cleaner(record: str) -> str:
    """
    Cleans a string in the "direction" column of the sensor metadata.

    Maps a raw direction string onto a small set of canonical values based on
    substring matching; unknown values pass through unchanged.

    Args:
        record (str): Strings in "direction" column.

    Returns:
        str: Cleaned string (or the original record when nothing matches).
    """
    text = str(record)
    # BUG FIX: in the original if/elif chain the "anti-clockwise" branch was
    # unreachable because "clockwise" is a substring of "anti-clockwise" and
    # was tested first. Both values deliberately collapse to "clockwise", so
    # behavior is unchanged; the dead branch is simply eliminated by checking
    # the longer needle first.
    substring_map = (
        ("eastbound", "eastbound"),
        ("northbound", "northbound"),
        ("southbound", "southbound"),
        ("westbound", "westbound"),
        ("anti-clockwise", "clockwise"),
        ("clockwise", "clockwise"),
        ("legacy site", "legacy site"),
        ("on connector", "carriageway connector"),
    )
    for needle, cleaned in substring_map:
        if needle in text:
            return cleaned
    return record
def name_string_cleaner(record: str) -> str:
    """
    Cleans a string in the "name" column of the sensor metadata.

    Replaces the raw name with a canonical family label when one of the known
    markers appears in it; otherwise the original value is returned.

    Args:
        record (str): Strings in "name" column.

    Returns:
        str: Cleaned string (or the original record when nothing matches).
    """
    text = str(record)
    # First matching marker wins, mirroring the original if/elif order.
    for marker, cleaned in (
        ("MIDAS", "midas"),
        ("TMU", "tmu"),
        ("TAME", "tame"),
        ("Legacy Site", "Legacy Site"),
    ):
        if marker in text:
            return cleaned
    return record
def get_sites_by_sensor() -> tuple[dict[str, pd.DataFrame], pd.DataFrame]:
    """
    Retrieves site metadata from the Highways England WebTRIS API.

    This function sends a GET request to the API and processes the response into a Pandas DataFrame.
    The function also performs data cleaning operations on the "name" and "direction" columns.

    Returns:
        sensor_tables (dict[pd.DataFrame]): A dictionary mapping sensor names to their respective metadata DataFrames.
        lookup_df (pd.DataFrame): A DataFrame containing metadata for all road traffic sensors.
    """
    response = requests.get(f"{BASE_URL}sites")
    lookup_df = pd.DataFrame.from_dict(response.json()["sites"])
    # Normalise column names to lower case and make the ids numeric.
    lookup_df.columns = [col.lower() for col in lookup_df.columns]
    lookup_df["id"] = lookup_df["id"].astype(int)
    # The raw "name" field packs several attributes separated by "; "; the
    # last component is treated as the direction — assumption based on the
    # parsing below, confirm against the WebTRIS API docs.
    lookup_df["direction"] = lookup_df["name"].str.split("; ").str[-1]
    # An embedded "easting;northing" digit pair in the name is split into two
    # separate columns (values remain strings here).
    lookup_df[["easting", "northing"]] = (
        lookup_df["name"].str.extract(r"(\d+;\d+)")[0].str.split(";", expand=True)
    )
    lookup_df["direction"] = lookup_df["direction"].str.lower()
    lookup_df["direction"] = lookup_df["direction"].apply(direction_string_cleaner)
    lookup_df["name"] = lookup_df["name"].apply(name_string_cleaner)
    # Partition the lookup table by sensor family; "other" catches anything
    # whose cleaned name matches none of midas/tmu/tame.
    midas_df = lookup_df.query("name.str.contains('midas', case = True)")
    tmu_df = lookup_df.query("name.str.contains('tmu', case = True)")
    tame_df = lookup_df.query("name.str.contains('tame', case = True)")
    other_df = lookup_df.query(
        "name.str.contains('midas|tmu|tame', case = True)==False"
    )
    sensor_tables = {
        "midas": midas_df,
        "tmu": tmu_df,
        "tame": tame_df,
        "other": other_df,
    }
    return sensor_tables, lookup_df
from .base_dataset import BaseDataset
from .file_operation import download_file, unzip_file, random_filename
import os
from xml.dom import minidom
from cv2 import cv2
class KaggleRoadSign(BaseDataset):
    """Downloader/converter for the Kaggle road-sign database.

    Fetches a zipped copy of the dataset from GitHub, then crops every
    annotated sign out of its source image, resizes it to 128x128 and records
    an entry for it in the shared CSV dataset.
    """

    website_prefix = "https://github.com/elem3ntary/roadsign_db/raw/master/"
    download_folder_name = "KaggleRoadSign"

    def __init__(self, *args):
        super().__init__(*args)

    def download_files(self):
        """Download the dataset archive (if missing) and unpack it."""
        archive_name = "Kaggle_db.zip"
        archive_path = self._at_mydir(archive_name)
        if not os.path.exists(archive_path):
            download_file(self.website_prefix + archive_name, archive_path)
        # The archive is unpacked unconditionally, matching the original flow.
        unzip_file(archive_path, self.directory_name)

    def get_value_by_tag(self, doc, tag):
        """Return the text of the first child of the first element with ``tag``."""
        first_element = doc.getElementsByTagName(tag)[0]
        return first_element.firstChild.data

    def convert_and_add(self):
        """Crop, resize and register every annotated sign in the dataset."""
        annotations_dir = os.path.join(self.directory_name, 'annotations')
        images_dir = os.path.join(self.directory_name, 'images')
        xml_names = [
            name for name in os.listdir(annotations_dir)
            if os.path.splitext(name)[1] == '.xml'
        ]
        for file_num, file_path in enumerate(xml_names):
            if file_num % 10 == 0:
                print(f"Processing file: {file_path}", end=' \r')
            doc = minidom.parse(os.path.join(annotations_dir, file_path))
            image_filename = self.get_value_by_tag(doc, 'filename')
            sign_type = self.get_value_by_tag(doc, 'name').upper()
            # Bounding box corners come from the element children of <bndbox>.
            box = doc.getElementsByTagName('bndbox')[0]
            corners = [
                int(node.firstChild.data)
                for node in box.childNodes
                if node.nodeType == 1
            ]
            x1, y1, x2, y2 = corners
            cropped = cv2.imread(os.path.join(images_dir, image_filename))[y1:y2, x1:x2]
            resized = cv2.resize(cropped, (128, 128), interpolation=cv2.INTER_AREA)
            image_path_new = self.generate_new_image_file_path('jpg')
            cv2.imwrite(image_path_new, resized)
            self.spa.add_entry(
                image_name=os.path.basename(image_path_new),
                initial_size_x=x2 - x1,
                initial_size_y=y2 - y1,
                country='GERMANY',
                occlusions='VISIBLE',
                sign_class='OTHER',
                sign_type=sign_type,
            )
        self.append_data_to_file()
if __name__ == '__main__':
    MAIN_PATH = os.path.dirname(os.path.realpath(__file__))
    dataset_filename = os.path.join(MAIN_PATH, 'DATASET.csv')
    images_dirname = os.path.join(MAIN_PATH, 'images')
    DATABASES_PREFIX = os.path.join(MAIN_PATH, "Databases")
    # Create the necessary directories.
    # BUG FIX: the original loop also ran os.mkdir(dataset_filename), creating
    # a *directory* named DATASET.csv; the later open(..., mode='a') on that
    # path (SignPointArray.to_file) would then fail. Only real directories are
    # created here — the CSV file is created on first append.
    for directory in (images_dirname, DATABASES_PREFIX):
        if not os.path.exists(directory):
            os.mkdir(directory)
    data = KaggleRoadSign(dataset_filename, images_dirname, DATABASES_PREFIX)
    data.download_files()
    data.convert_and_add()
import os
from abc import ABC, abstractmethod
from .AnalysisADT import SignPointArray
class BaseDataset(ABC):
    """
    Abstract base class that every database downloader & processor inherits.
    """
    @property
    @abstractmethod
    def download_folder_name(self) -> str:
        """
        This property is to be set to the name of the class
        """
        return "BaseDataset"
    @property
    def image_file_prefix(self) -> str:
        """
        This property is to be set to the prefix of an image.
        If not set (recommended), returns download_folder_name
        """
        return self.download_folder_name
    def __init__(self, app_dataset_filename, images_dirname, databases_prefix):
        """
        Initialise the class working directory.

        app_dataset_filename -- dataset csv path to append to
        images_dirname -- directory to download images to
        databases_prefix -- the 'Databases' directory path.

        Creates a USEFUL self._at_mydir() function.
        USE IT to read the downloaded images:
            cv2.imread(self._at_mydir(image_name))
        OR use self.directory_name ( = self._at_mydir("")) to
        UNPACK them beforehand:
            unzip_file("path/to/zipfile.zip", self.directory_name)
        """
        self.app_dataset_filename = app_dataset_filename
        self.images_dirname = images_dirname
        # Per-dataset working directory: <databases_prefix>/<download_folder_name>
        self.directory_name = os.path.join(
            databases_prefix, self.download_folder_name
        )
        self.create_dirs()
        # Accumulator for parsed sign entries, flushed by append_data_to_file().
        self.spa = SignPointArray()
        # Resolve a path relative to this dataset's working directory.
        self._at_mydir = lambda p: os.path.join(self.directory_name, p)
        # Counter used by generate_new_image_file_path() to build unique names.
        self._free_img_num = 0
    def append_data_to_file(self):
        """
        Appends data to the dataset file. Use this in convert_and_add() function.
        """
        self.spa.to_file(self.app_dataset_filename)
    def generate_new_image_file_path(self, image_postfix):
        """
        Generate a unique file path for the next converted image.

        image_postfix -- file extension without the leading dot, e.g. 'jpg'
        """
        self._free_img_num += 1
        return os.path.join(
            self.images_dirname,
            (
                self.image_file_prefix +
                # Right-align the counter to width 8, then swap the padding
                # spaces for zeros — i.e. zero-pad the number to 8 digits.
                f'{self._free_img_num:8}'.replace(' ', '0') +
                '.' + image_postfix
            )
        )
    def create_dirs(self):
        """
        Create the necessary directories.
        Override this (don't forget super()!) if you want
        to create more dirs.
        """
        if not os.path.exists(self.directory_name):
            os.mkdir(self.directory_name)
    @abstractmethod
    def download_files(self):
        """
        Download all the needed files here.
        For EXAMPLE:
            smth_url = "example.com/smth.txt"
            smth_filename = smth_url.split("/")[-1]
            self._smth_path = self._at_mydir(smth_filename)
            if not os.path.exists(self._smth_path):
                download_file(smth_url, self._smth_path)
            # you may also unpack the ZIP files here...
        """
    @abstractmethod
    def convert_and_add(self):
        """
        Convert all the downloaded files here.
        Save the images to self.generate_new_image_file_path()
        For EXAMPLE:
            img = cv2.imread(self._at_mydir("some_img.jpg"))
            img_conv = img  # convert your image here
            image_path_new = self.generate_new_image_file_path()
            if not os.path.exists(image_path_new):
                img_conv = cv2.resize(
                    img_conv, (128, 128), interpolation=cv2.INTER_AREA
                )
                cv2.imwrite(image_path_new, img_conv)
        WHEN writing to the CSV, obtain the image name using the following:
            os.path.basename(image_path_new)
        """
import os
import sys
import ctypes
import numpy as np
from cv2 import cv2
# Contains all info about a point taken from the database.
class _SignPointStruct(ctypes.Structure):
    """Fixed-size binary record holding one sign annotation.

    Stored in a contiguous ctypes array by SignPointArray; string fields are
    fixed-width NUL-padded byte buffers.
    """
    # Disable field alignment padding so the layout is exactly the sum of the
    # field sizes (do not reorder fields — that changes the binary layout).
    _pack_ = 1
    _fields_ = [
        ('image_name', ctypes.c_char * 128),   # image file name (bytes)
        ('initial_size_x', ctypes.c_int),      # original crop width in pixels
        ('initial_size_y', ctypes.c_int),      # original crop height in pixels
        ('country', ctypes.c_char * 64),       # origin country label
        ('occlusions', ctypes.c_char * 64),    # visibility/occlusion label
        ('sign_class', ctypes.c_char * 64),    # coarse sign class
        ('sign_type', ctypes.c_char * 64),     # specific sign type
    ]
class SignPointProcess:
    """
    Contains all processing methods for the _SignPointStruct class.
    """
    @staticmethod
    def from_props(
        image_name: str, initial_size_x: int, initial_size_y: int, country: str='-1',
        occlusions: str='-1', sign_class: str='-1', sign_type: str='-1'
    ):
        """
        Build a struct from individual properties.

        >>> SignPointProcess.from_props("img.jpg", 100, 50, "GERMANY", 'VISIBLE', 'PROHIBITORY', '120_SIGN').image_name
        b'img.jpg'
        """
        point = _SignPointStruct()
        point.image_name = image_name.encode('utf-8')
        point.initial_size_x = int(initial_size_x)
        point.initial_size_y = int(initial_size_y)
        point.country = country.encode('utf-8')
        point.occlusions = occlusions.encode('utf-8')
        point.sign_class = sign_class.encode('utf-8')
        point.sign_type = sign_type.encode('utf-8')
        return point
    @staticmethod
    def to_repr(sign_point):
        """
        Transform a struct into its comma-separated text representation.

        >>> SignPointProcess.to_repr(SignPointProcess.from_repr("img.jpg,100,50,GERMANY,VISIBLE,PROHIBITORY,120_SIGN"))
        'img.jpg,100,50,GERMANY,VISIBLE,PROHIBITORY,120_SIGN'
        """
        fields = [
            sign_point.image_name.decode('utf-8'),
            str(sign_point.initial_size_x),
            str(sign_point.initial_size_y),
            sign_point.country.decode('utf-8'),
            sign_point.occlusions.decode('utf-8'),
            sign_point.sign_class.decode('utf-8'),
            sign_point.sign_type.decode('utf-8'),
        ]
        return ','.join(fields)
    @staticmethod
    def from_repr(file_line):
        """
        Parse one comma-separated line into a struct.

        >>> SignPointProcess.from_repr("img.jpg,100,50,GERMANY,VISIBLE,PROHIBITORY,120_SIGN").image_name
        b'img.jpg'
        """
        values = file_line.strip().split(",")
        point = _SignPointStruct()
        point.image_name = values[0].encode('utf-8')
        point.initial_size_x = int(values[1])
        point.initial_size_y = int(values[2])
        point.country = values[3].encode('utf-8')
        point.occlusions = values[4].encode('utf-8')
        point.sign_class = values[5].encode('utf-8')
        point.sign_type = values[6].encode('utf-8')
        return point
class SignPointArray:
    """
    Growable array of sign records backed by a contiguous ctypes array.

    Only the first ``_free_ind`` slots of the backing array hold valid data;
    the remaining slots are zero-initialised spare capacity.
    """
    def __init__(self):
        self._arr_size = 16                                 # allocated capacity
        self._rows = (_SignPointStruct * self._arr_size)()  # backing storage
        self._free_ind = 0                                  # number of valid entries
        self._append_from = 0                               # first row not yet flushed
    def __len__(self):
        """Return the number of valid entries (not the allocated capacity)."""
        return self._free_ind
    def _filled_rows(self):
        """Yield only the populated rows.

        BUG FIX: the analyse_* methods previously iterated ``self._rows``,
        which also visits the empty spare slots of the backing array — that
        skewed every statistic and could divide by zero in analyse_ratio().
        """
        for i in range(self._free_ind):
            yield self._rows[i]
    def from_file(self, filename):
        """
        Initialise the array from a file of comma-separated records.
        Returns self.
        """
        with open(filename, 'r') as file_in:
            lines = file_in.readlines()
        self._arr_size = len(lines)
        self._rows = (_SignPointStruct * self._arr_size)()
        for row, line in enumerate(lines):
            self._rows[row] = SignPointProcess.from_repr(line)
        self._free_ind = self._arr_size
        self._append_from = self._free_ind
        return self
    def add_entry(
        self, image_name: str, initial_size_x: int, initial_size_y: int, country: str='-1',
        occlusions: str='-1', sign_class: str='-1', sign_type: str='-1'
    ):
        """
        Add a new entry, growing the backing array when it is full.
        """
        sign_point = SignPointProcess.from_props(
            image_name, initial_size_x, initial_size_y, country, occlusions, sign_class, sign_type
        )
        while self._free_ind >= self._arr_size:
            self._enlarge()
        self._rows[self._free_ind] = sign_point
        self._free_ind += 1
    def _enlarge(self, times=2):
        """
        Grow the backing array ``times``-fold, preserving existing data.
        """
        # ``or 1`` guards against a zero-capacity array (e.g. after
        # from_file() on an empty file), which would otherwise keep the
        # add_entry() grow-loop spinning forever.
        self._arr_size = self._arr_size * times or 1
        new_rows = (_SignPointStruct * self._arr_size)()
        ctypes.memmove(new_rows, self._rows, ctypes.sizeof(self._rows))
        self._rows = new_rows
    def to_file(self, filename, mode='a'):
        """
        Write the data to the file.
        mode='a' appends only the rows added since the last flush;
        mode='w' rewrites the whole file.
        """
        ind_from = self._append_from if mode == 'a' else 0
        with open(filename, mode=mode) as f_out:
            f_out.write('\n'.join(
                SignPointProcess.to_repr(self._rows[i])
                for i in range(ind_from, self._free_ind)
            ) + "\n")
        # BUG FIX: remember what has been flushed so a second append does not
        # duplicate rows (previously only from_file() updated this marker).
        self._append_from = self._free_ind
    def __sizeof__(self):
        """
        Calculates class's size in bytes.
        """
        size = ctypes.sizeof(self._rows)
        size += sum(
            sys.getsizeof(elem)
            for elem in self.__dict__.values()
        )
        return size
    def _count_by(self, attribute):
        """Tally the populated rows by one struct attribute (bytes-keyed).

        BUG FIX: the original analyse_* loops initialised a new key's count to
        0 instead of 1, undercounting every category by one.
        """
        counts = {}
        for row in self._filled_rows():
            key = getattr(row, attribute)
            counts[key] = counts.get(key, 0) + 1
        return counts
    def analyse_country(self, show=True, save=True, save_dir='./analysis/'):
        """
        Analyse origin country of the signs.
        (show/save/save_dir are kept for interface compatibility but unused.)
        """
        return self._count_by('country')
    def analyse_types(self):
        """
        Analyse the variety of the sign types.
        """
        return self._count_by('sign_type')
    def analyse_visibility(self):
        """
        Analyse sign visibility/occlusion labels.
        """
        return self._count_by('occlusions')
    def analyse_brightness(self, images_dirname):
        """
        Analyse sign image brightness distribution (mean channel value per image).
        """
        brightness_list = []
        for row in self._filled_rows():
            img_name = os.path.join(images_dirname, str(row.image_name, encoding='utf-8'))
            img = cv2.imread(img_name)
            pixels_num = img.shape[0] * img.shape[1] * img.shape[2]
            brightness_list.append(np.sum(img) / pixels_num)
        return np.array(brightness_list)
    def analyse_ratio(self):
        """
        Analyse image x / y aspect-ratio distribution.
        """
        return np.array([
            row.initial_size_x / row.initial_size_y
            for row in self._filled_rows()
        ])
    def analyse(self, images_dirname):
        """
        Performs a full analysis of the dataset.
        """
        analysis = dict()
        print("Analysing dataset sign countries...")
        analysis['country'] = self.analyse_country()
        print("Analysing dataset sign types...")
        analysis['type'] = self.analyse_types()
        print("Analysing dataset sign visibility...")
        analysis['visibility'] = self.analyse_visibility()
        print("Analysing dataset sign image brightness...")
        analysis['brightness'] = self.analyse_brightness(images_dirname)
        print("Analysing dataset sign image initial resolution ratio...")
        analysis['ratio'] = self.analyse_ratio()
        return analysis
from . import model
class Items(model.Model):
    """API resource for roadmap items (ideas, stories, feedback) and their comments.

    Every call follows the callback convention ``cb(err)`` on failure and
    ``cb(None, result)`` on success.
    """
    # item.type discriminator values used by the backend.
    itemTypes = {
        "idea": 0,
        "story": 1,
        "done": 2,
        "attached": 3
    }
    # item.column values. Note the indexes are context-dependent:
    # 2 is both "current" and "team", 1 both "soon" and "user", etc.
    columnIndexes = {
        "current": 2,
        "soon": 1,
        "future": 0,
        "team": 2,
        "user": 1,
        "widget": 0,
        "completed": 99
    }
    commentTypes = {
        "system": 0,
        "message": 1,
        "like": 2,
        "email": 3
    }
    def is_feedback(self, item):
        """True when the item is an idea (plain or attached) in the user column."""
        return (
            item.type in (self.itemTypes["idea"], self.itemTypes["attached"])
            and item.column == self.columnIndexes["user"]
        )
    def is_idea(self, item):
        """True when the item is an idea (plain or attached) in the widget or team column."""
        return (
            item.type in (self.itemTypes["idea"], self.itemTypes["attached"])
            and item.column in (self.columnIndexes["widget"], self.columnIndexes["team"])
        )
    def is_story(self, item):
        """True when the item is a story."""
        return item.type == self.itemTypes["story"]
    def get_new_id(self, cb):
        """Request a fresh item id from the server."""
        self.get(
            "/items/newid",
            lambda id: cb(None, id),
            lambda err: cb(err)
        )
    def get_by_id(self, id, cb):
        """Fetch one item addressed by the compound "<id>|<token>" string."""
        if not id or id.find("|") == -1:
            cb("invalid parameter")
            return
        parts = id.split("|")
        # BUG FIX: the original tested ``parts.length`` (a JavaScript-ism);
        # Python lists have no ``length`` attribute, so every call that got
        # this far raised AttributeError instead of fetching the item.
        if len(parts) != 2:
            cb("invalid format")
            return
        self.get(
            "/item/" + self.compress_id(parts[0], parts[1]),
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def get_by_type(self, roadmap_id, is_story, cb):
        """List a roadmap's items of one kind: stories or ideas."""
        # Renamed local (was ``type``) to avoid shadowing the builtin.
        item_type = "story" if is_story else "idea"
        self.get(
            "/items/" + roadmap_id + "/" + item_type,
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def add_comment(self, comment, cb):
        """Create a comment on an item."""
        self.post(
            "/items/comment",
            comment,
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def update_comment(self, roadmap_id, item_id, comment_id, body, cb):
        """Replace the body of an existing comment."""
        self.put(
            "/items/comment",
            {
                "roadmapId": roadmap_id,
                "itemId": item_id,
                "commentId": comment_id,
                "body": body
            },
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def delete_comment(self, id, token, comment_id, cb):
        """Delete a single comment from an item."""
        self.delete(
            "/items/" + self.compress_id(id, token) + "/comment/" + comment_id,
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def move_to_roadmap(self, id, token, roadmap_id, cb):
        """Move an item to a different roadmap."""
        self.put(
            "/items/" + id + "/roadmap",
            {"itemId": id, "token": token, "toRoadmapId": roadmap_id},
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def force_delete(self, id, token, cb):
        """Permanently delete an item."""
        self.delete(
            "/items/force/" + self.compress_id(id, token),
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
    def get_engagements(self, params, cb):
        """Search engagements matching ``params``."""
        self.post(
            "/items/findengagements",
            params,
            lambda results: cb(None, results),
            lambda err: cb(err)
        )
    def remove_pm_links(self, roadmap_id, item_id, token, cb):
        """Strip project-management tool links from an item."""
        data = {
            "roadmapId": roadmap_id,
            "itemId": item_id,
            "token": token
        }
        self.post(
            "/items/removepmlinks",
            data,
            lambda item: cb(None, item),
            lambda err: cb(err)
        )
    def get_parent(self, roadmap_id, idea_id, idea_token, cb):
        """Fetch the parent item an attached idea belongs to."""
        self.get(
            "/items/" + roadmap_id + "/fromattached/" +
            self.compress_id(idea_id, idea_token),
            lambda result: cb(None, result),
            lambda err: cb(err)
        )
import functools
import re
def coroutine(func):
    """Decorator that creates and primes a generator-based coroutine.

    Calling the wrapped function instantiates the generator and advances it
    to its first ``yield`` so callers can immediately ``send()`` values in.
    """
    @functools.wraps(func)
    def start(*args, **kwargs):
        cr = func(*args, **kwargs)
        # BUG FIX: generators have no ``.next()`` method in Python 3 (PEP
        # 3114); the ``next()`` builtin primes the coroutine correctly.
        next(cr)
        return cr
    return start
class Router(dict):
    # Maps compiled regex -> {'func': handler, 'pass_obj': bool}. Routing an
    # object pushes it through a pipeline of primed coroutines
    # (find_match -> handle_match -> process_pair -> processor).
    def __init__(self, processor):
        '''Creates a new :class:`roadmap.Router` and assigns a processor.

        :param processor: single variable function that *does something* with output

        Because of Roadmap's coroutine based architechture, routing an object
        returns no value. At first it may seem strange that nothing can be
        returned, but the mindset of Roadmap is that you don't return values
        just for the sake of returning them; you return values to *do something*
        with them. In the case of my IRC bot, I wanted my functions to return
        strings. My :func:`processor` function received these strings and sent
        them over the socket. Other processing functions I can easily imagine are
        printing, logging, or commiting an object to a database.

        This concept might be a little tricky, but the example will show
        how simple it actually is.
        '''
        # Wrap the plain callable in a primed coroutine so each handler's
        # return value can simply be ``.send()``-ed into it.
        @coroutine
        @functools.wraps(processor)
        def _processor():
            try:
                while True:
                    obj = (yield)
                    processor(obj)
            except GeneratorExit:
                pass
        self.processor = _processor()
        self.find_match_target = self.find_match()
    def destination(self, reg_str, pass_obj=True):
        '''Decorates functions to be called from a :class:`roadmap.Router`
        instance.

        :param reg_str: string that a regular expression can be created from
        :param pass_obj: whether or not to pass object to decorated function
        :type pass_obj: boolean

        It is generally a good idea to use raw strings to create regular
        expressions. If the input string matches :obj:`reg_str`, the decorated
        function will be called.
        '''
        regex = re.compile(reg_str)
        def decorator(func):
            self[regex] = {'func': func, 'pass_obj': pass_obj}
            return func
        return decorator
    def get_function(self, obj, key=None):
        """Return the first registered handler whose regex matches ``key``
        (or ``obj`` itself when no key is given); ``None`` when nothing
        matches. NOTE(review): dict iteration order decides which handler
        wins when several patterns match.
        """
        string = key or obj
        for regex in self.keys():
            match = regex.match(string)
            if match:
                return self[regex]['func']
    def route(self, obj, key=None):
        '''Maps an object to its appropriate function

        :param obj: object to route
        :param key: an optional string to route :obj`obj` by
        :rtype: :obj:`None`

        :meth:`~roadmap.Router.route` is the method that will receive the input
        and begin the routing process for an object. The :obj:`key` parameter
        must be used if the object itself can't be match by a regular
        expression, which, to the extent of my knowledge, means that that
        :obj:`obj` isn't a string. Even if :obj:`obj` is matchable, :obj:`key`
        will be used if defined.
        '''
        string = key or obj
        self.find_match_target.send((obj, string))
    @coroutine
    def find_match(self):
        """Coroutine stage: match the routing string against every registered
        pattern and forward each hit downstream. Note that *all* matching
        patterns fire, not just the first one."""
        handle_match = self.handle_match()
        while True:
            obj, string = (yield)
            for regex in self.keys():
                match = regex.match(string)
                if match:
                    handle_match.send((match, obj))
    @coroutine
    def handle_match(self):
        """Coroutine stage: split a regex match into positional and keyword
        arguments for the handler."""
        process_pair = self.process_pair()
        while True:
            match, obj = (yield)
            groups = match.groups()
            groupdict = match.groupdict()
            if groupdict:
                if len(groupdict) == len(groups):
                    # Every captured group is named -> keyword args only.
                    process_pair.send((match.re, obj, (), groupdict))
                else:
                    # Unnamed groups go positionally, named ones as kwargs.
                    # NOTE(review): filtering by *value* assumes an unnamed
                    # group never captures the same text as a named one —
                    # confirm this holds for the patterns in use.
                    exclusives = [s for s in groups if s not in groupdict.values()]
                    process_pair.send((match.re, obj, exclusives, groupdict))
            else:
                process_pair.send((match.re, obj, groups, {}))
    @coroutine
    def process_pair(self):
        """Coroutine stage: invoke the matched handler and feed its return
        value into the processor coroutine."""
        while True:
            regex, obj, args, kwargs = (yield)
            if self[regex]['pass_obj']:
                if hasattr(obj, '__iter__'):
                    # Iterable objects are unpacked into separate arguments.
                    objects = [o for o in obj]
                    objects.extend(args)
                    self.processor.send(self[regex]['func'](*objects, **kwargs))
                else:
                    self.processor.send(self[regex]['func'](obj, *args, **kwargs))
            else:
                self.processor.send(self[regex]['func'](*args, **kwargs))
import json
from numpy import isnan
from schematics import Model
from schematics.types import StringType, FloatType, ListType
class CbaResult(Model):
    """Validated container for one cost-benefit-analysis result row.

    Scalar work/economic indicators plus several 20-year time series; see
    to_dict() for the flattened export format.
    """
    orma_way_id = StringType(max_length=20, min_length=1)
    work_class = StringType(required=True, min_length=1)
    work_type = StringType(required=True, min_length=1)
    work_name = StringType(required=True, min_length=1)
    work_cost = FloatType(required=True)
    work_cost_km = FloatType(required=True)
    work_year = FloatType(required=True)
    npv = FloatType(required=True)
    npv_km = FloatType(required=True)
    npv_cost = FloatType(required=True)
    eirr = FloatType(required=True)
    aadt = ListType(FloatType, required=True, min_size=20, max_size=20)
    truck_percent = FloatType(required=True)
    vehicle_utilization = FloatType(required=True)
    esa_loading = FloatType(required=True)
    iri_projection = ListType(FloatType, required=True, min_size=20, max_size=20)
    iri_base = ListType(FloatType, required=True, min_size=20, max_size=20)
    con_projection = ListType(FloatType, required=True, min_size=20, max_size=20)
    con_base = ListType(FloatType, required=True, min_size=20, max_size=20)
    financial_recurrent_cost = ListType(FloatType, required=True, min_size=20, max_size=20)
    net_benefits = ListType(FloatType, required=True, min_size=20, max_size=20)

    def __repr__(self):
        """Debug representation: the primitive (dict) form of the model."""
        return str(self.to_primitive())

    @classmethod
    def from_file(cls, filename):
        """Load a CbaResult from a JSON file."""
        with open(filename) as f:
            data = json.load(f)
        return CbaResult(data)

    def to_dict(self):
        """Flatten the result into a single dict, expanding each 20-year
        series into numbered keys (``aadt_1`` .. ``aadt_20``, etc.)."""
        flat = {
            "orma_way_id": self.orma_way_id,
            "work_class": self.work_class,
            "work_type": self.work_type,
            "work_name": self.work_name,
            "work_cost": self.work_cost,
            "work_cost_km": self.work_cost_km,
            "work_year": self.work_year,
            "npv": self.npv,
            "npv_km": self.npv_km,
            "npv_cost": self.npv_cost,
            "eirr": self.eirr,
            "truck_percent": self.truck_percent,
            "vehicle_utilization": self.vehicle_utilization,
            "esa_loading": self.esa_loading,
        }
        yearly_series = (
            ("aadt", self.aadt),
            ("iri_base", self.iri_base),
            ("iri_projection", self.iri_projection),
            ("con_projection", self.con_projection),
            ("con_base", self.con_base),
            ("net_benefits", self.net_benefits),
            ("financial_recurrent_cost", self.financial_recurrent_cost),
        )
        for prefix, values in yearly_series:
            for year, value in enumerate(values, start=1):
                flat[f"{prefix}_{year}"] = value
        return flat

    def compare(self, other):
        """Field-by-field comparison over the keys both results share.

        String fields map to a "x == y" / "x != y" display line; numeric
        fields map to 0 when equal (NaN == NaN, tolerance 1e-6) or to their
        difference otherwise.
        """
        mine = self.to_dict()
        theirs = other.to_dict()

        def diff(x, y):
            if isinstance(x, str) or isinstance(y, str):
                eq = "==" if x == y else "!="
                return f"{x: >20} {eq} {y: >20}"
            if isnan(x) and isnan(y):
                return 0
            if abs(x - y) < 0.000001:
                return 0
            return x - y

        shared_keys = sorted(set(mine.keys()) & set(theirs.keys()))
        return {k: diff(mine[k], theirs[k]) for k in shared_keys}
import json
from os import stat
from typing import List, Optional
from schematics.exceptions import ConversionError
from schematics.models import Model
from schematics.types import IntType, StringType, FloatType
class InvalidSection(object):
    """Pairs a section's validation errors with the raw input that caused them."""

    def __init__(self, errors, original_data):
        self.errors = InvalidSection.clean_errors(errors)
        self.original_data = original_data

    def invalid_reason(self):
        """Return the human-readable list of error messages."""
        return self.errors

    @staticmethod
    def clean_errors(errors):
        """Render each (field, error) pair in the mapping as a short message."""
        return [InvalidSection.clean_error(field, err) for field, err in errors.items()]

    @staticmethod
    def clean_error(k, v):
        """Render a single schematics error as a short message."""
        if isinstance(v, ConversionError):
            return f"Invalid characters in '{k}', expected float"
        return f"Generic error: {k}"
def parse_section(json):
    """Build a Section from raw data, returning InvalidSection on failure.

    NOTE: the parameter name shadows the ``json`` module; kept for
    backward compatibility with keyword callers.
    """
    try:
        return Section(json)
    except Exception as err:
        # BUG FIX: not every exception carries schematics' ``.errors``
        # mapping; the original would raise AttributeError here for any other
        # failure. Fall back to a minimal mapping so InvalidSection.clean_errors
        # (which iterates ``.items()``) always works.
        return InvalidSection(getattr(err, "errors", {"exception": err}), json)
class Section(Model):
    """Schematics model describing one road section: identity, location,
    physical attributes, condition and AADT traffic counts."""
    # --- identifiers ---------------------------------------------------
    orma_way_id = StringType(max_length=20, min_length=1, required=True)
    vpromm_id = StringType(max_length=20, min_length=1)
    # section_id = StringType(max_length=30, required=True)
    road_number = StringType(max_length=10)
    road_name = StringType(max_length=255, min_length=1)
    road_start = StringType(max_length=255, min_length=1)
    road_end = StringType(max_length=255, min_length=1)
    section_order = IntType()
    # --- administrative location --------------------------------------
    province = StringType(max_length=255, min_length=1)
    district = StringType(max_length=255, min_length=1)
    # province = ForeignKey('administrations.AdminUnit', null=True,
    #                       blank=True, on_delete=PROTECT,
    #                       related_name='province_sections')
    # district = ForeignKey('administrations.AdminUnit', null=True,
    #                       blank=True, on_delete=PROTECT,
    #                       related_name='district_sections')
    commune = StringType(max_length=25)
    management = IntType()
    # --- extent -------------------------------------------------------
    start_km = FloatType()
    end_km = FloatType()
    length = FloatType(required=True)
    vpromms_length = FloatType()
    # --- physical attributes (integer fields are coded categories) ----
    lanes = IntType(default=0)
    width = FloatType(default=0)
    road_class = IntType(default=0)
    terrain = IntType(default=0)
    temperature = IntType(default=0)
    moisture = IntType(default=0)
    road_type = IntType(default=0)
    surface_type = IntType(default=0)
    # --- condition ----------------------------------------------------
    condition_class = IntType(default=0)
    roughness = FloatType(default=0)
    traffic_level = IntType(default=0)
    traffic_growth = IntType(default=0)
    structural_no = FloatType(default=0)
    pavement_age = IntType(default=0)
    # --- AADT counts per vehicle class --------------------------------
    aadt_motorcyle = FloatType(default=0.0)
    aadt_carsmall = FloatType(default=0.0)
    aadt_carmedium = FloatType(default=0.0)
    aadt_delivery = FloatType(default=0.0)
    aadt_4wheel = FloatType(default=0.0)
    aadt_smalltruck = FloatType(default=0.0)
    aadt_mediumtruck = FloatType(default=0.0)
    aadt_largetruck = FloatType(default=0.0)
    aadt_articulatedtruck = FloatType(default=0.0)
    aadt_smallbus = FloatType(default=0.0)
    aadt_mediumbus = FloatType(default=0.0)
    aadt_largebus = FloatType(default=0.0)
    aadt_total = FloatType(default=0.0)
    def __str__(self):
        """Human-readable dump of the section's primitive (dict) form."""
        return str(self.to_primitive())
@classmethod
def from_file(cls, filename):
with open(filename) as f:
return Section(json.load(f))
@staticmethod
def maybe_int(maybe_int: Optional[int]):
return int(maybe_int) if maybe_int else 0.0
@staticmethod
def maybe_float(maybe_float: Optional[float]):
return float(maybe_float) if maybe_float else 0.0
    @classmethod
    def from_row(cls, row):
        """Build a Section from a raw import row (dict-like, spreadsheet-style keys).

        Returns an ``InvalidSection`` wrapper instead of raising when the row
        fails schematics validation, so callers can collect bad rows.
        """
        # NOTE(review): both orma_way_id and section_id come from the same
        # "way_id_district" column -- confirm the duplication is intentional.
        # NOTE(review): the aadt_* fields are FloatType but are populated via
        # maybe_int (truncating) -- confirm integer AADT counts are intended.
        in_data = {
            "orma_way_id": row["way_id_district"],
            "section_id": row["way_id_district"],
            "road_number": row["road number"],
            "road_name": row["name"],
            "road_start": row["road start location"],
            "road_end": row["road end location"],
            "province": row["province"],
            "district": row["district"],
            "commune": row["section_commune_gso"],
            "management": Section.maybe_int(row["management"]),
            # "start_km": float(row["Start_Km"]),
            # "end_km": float(row["End_Km"]),
            "length": row["length"],
            "lanes": Section.maybe_int(row["section_lanes"]),
            # "6+" is a legacy sentinel meaning "six metres or wider"
            "width": 6.0 if row["width"] == "6+" else Section.maybe_float(row["width"]),
            "road_class": row["link_class"],
            "terrain": row["section_terrain"],
            "temperature": row["section_temperature"],
            "moisture": row["section_moisture"],
            "surface_type": row["section_surface"],
            "condition_class": row["condition"],
            "roughness": Section.maybe_float(row["iri"]),
            "traffic_level": row["section_traffic"],
            "traffic_growth": row["section_traffic_growth"],
            "pavement_age": Section.maybe_int(row["section_pavement_age"]),
            "aadt_motorcyle": Section.maybe_int(row["section_motorcycle"]),
            "aadt_carsmall": Section.maybe_int(row["section_small_car"]),
            "aadt_carmedium": Section.maybe_int(row["section_medium_car"]),
            "aadt_delivery": Section.maybe_int(row["section_delivery_vehicle"]),
            "aadt_4wheel": Section.maybe_int(row["section_four_wheel"]),
            "aadt_smalltruck": Section.maybe_int(row["section_light_truck"]),
            "aadt_mediumtruck": Section.maybe_int(row["section_medium_truck"]),
            "aadt_largetruck": Section.maybe_int(row["section_heavy_truck"]),
            "aadt_articulatedtruck": Section.maybe_int(row["section_articulated_truck"]),
            "aadt_smallbus": Section.maybe_int(row["section_small_bus"]),
            "aadt_mediumbus": Section.maybe_int(row["section_medium_bus"]),
            "aadt_largebus": Section.maybe_int(row["section_large_bus"]),
            "aadt_total": Section.maybe_int(row["aadt"]),
        }
        try:
            return Section(in_data)
        except Exception as err:
            # Keep the offending data alongside the error for diagnostics
            return InvalidSection(err, in_data)
def to_dict(self):
return {
"orma_way_id": self.id,
"section_id": self.section_id,
"road_number": self.road_number,
"road_name": self.road_name,
"road_start": self.road_start,
"road_end": self.road_end,
"province": self.province,
"district": self.district,
"commune": self.commune,
"management": self.management,
"road_length": self.length,
"lanes": self.lanes,
"width": self.width,
"road_class": self.road_class,
"terrain": self.terrain,
"temperature": self.temperature,
"moisture": self.moisture,
"surface_type": self.surface_type,
"condition_class": self.condition_class,
"roughness": self.roughness,
"pavement_age": self.pavement_age,
"aadt_total": self.aadt_total,
}
def set_aadts(self, aadts: List[float]):
(
self.aadt_motorcyle,
self.aadt_carsmall,
self.aadt_carmedium,
self.aadt_delivery,
self.aadt_4wheel,
self.aadt_smalltruck,
self.aadt_mediumtruck,
self.aadt_largetruck,
self.aadt_articulatedtruck,
self.aadt_smallbus,
self.aadt_mediumbus,
self.aadt_largebus,
) = aadts
def get_aadts(self):
return (
self.aadt_motorcyle,
self.aadt_carsmall,
self.aadt_carmedium,
self.aadt_delivery,
self.aadt_4wheel,
self.aadt_smalltruck,
self.aadt_mediumtruck,
self.aadt_largetruck,
self.aadt_articulatedtruck,
self.aadt_smallbus,
self.aadt_mediumbus,
self.aadt_largebus,
)
REQUIRED_FIELDS = ["lanes", "width"]
def invalid_reason(self):
errors = []
if is_missing(self.road_type) and is_missing(self.surface_type):
errors.append("Must define either road type or road surface type")
if is_missing(self.width) and is_missing(self.lanes):
errors.append("Must define either road width or number of lanes")
if is_missing(self.roughness) and is_missing(self.condition_class):
errors.append("Must define either roughness or road condition")
if is_missing(self.traffic_level) and is_missing(self.aadt_total):
errors.append("Must define either aadt_total or traffic_level")
if is_missing(self.terrain):
errors.append("No terrain data")
return errors if errors else None
def is_missing(val):
    """True when *val* counts as absent: None, or anything comparing equal to 0."""
    if val is None:
        return True
    return val == 0
import re
from collections import defaultdict
from itertools import takewhile
from typing import List, Match, Tuple, Dict
def format_markdown(contents: Dict[str, str]) -> Dict[str, str]:
    """Format a {file name: markdown} mapping: convert TODO markers and links,
    and append a "# Backlinks" section to every page that is linked to."""
    # Invert the link graph: target file -> [(source file, match), ...]
    back_links: Dict[str, List[Tuple[str, Match]]] = defaultdict(list)
    for source_name, text in contents.items():
        for match in extract_links(text):
            back_links[f"{match.group(1)}.md"].append((source_name, match))
    # Apply all the transformations, dropping files that end up empty
    formatted = {}
    for file_name, text in contents.items():
        text = format_to_do(text)
        text = add_backward_links(text, back_links[file_name])
        text = format_link(text)
        if text:
            formatted[file_name] = text
    return formatted
def format_to_do(contents: str):
    """Replace Roam {{[[TODO]]}}/{{[[DONE]]}} markers with Markdown checkboxes."""
    replacements = (
        (r"{{\[\[TODO\]\]}} *", "[ ] "),
        (r"{{\[\[DONE\]\]}} *", "[x] "),
    )
    for pattern, checkbox in replacements:
        contents = re.sub(pattern, checkbox, contents)
    return contents
def extract_links(string: str) -> List[Match]:
    """Return all [[wiki-style]] link matches found in *string*, in order."""
    link_pattern = re.compile(r"\[\[([^\]]+)\]\]")
    return list(link_pattern.finditer(string))
def add_backward_links(content: str, back_links: List[Tuple[str, Match]]) -> str:
    """Append a "# Backlinks" section to *content*.

    For every (source file, link match) pair, emit a heading linking back to the
    source file followed by the full line of context around the link.

    Bug fix: the end-of-line context previously used ``match.string[match.end()]``
    (a single character, raising IndexError when the match ends the string)
    instead of the slice ``match.string[match.end():]``, truncating the context.
    """
    if not back_links:
        return content
    # De-duplicate and order by (source file, position of the link)
    files = sorted(set((file_name[:-3], match) for file_name, match in back_links),
                   key=lambda e: (e[0], e[1].start()))
    new_lines = []
    for file, match in files:
        new_lines.append(f"## [{file}](<{file}.md>)")
        # Text from the start of the line up to the link (built by walking
        # backwards from the match until a newline, then re-reversing).
        start_context_ = list(takewhile(lambda c: c != "\n", match.string[:match.start()][::-1]))
        start_context = "".join(start_context_[::-1])
        middle_context = match.string[match.start():match.end()]
        # Text from the link to the end of the line.
        end_context_ = takewhile(lambda c: c != "\n", match.string[match.end():])
        end_context = "".join(end_context_)
        context = (start_context + middle_context + end_context).strip()
        new_lines.extend([context, ""])
    backlinks_str = "\n".join(new_lines)
    return f"{content}\n# Backlinks\n{backlinks_str}\n"
def format_link(string: str) -> str:
    """Transform a RoamResearch-like link to a Markdown link."""
    # Regex are read-only and can't parse [[[[recursive]] [[links]]]], but they do the job.
    # The <...> form allows SPACES inside the link target.
    conversions = (
        (r"\[\[([^\]]+)\]\]", r"[\1](<\1.md>)"),  # [[Page Name]]
        (r"#([a-zA-Z-_0-9]+)", r"[\1](<\1.md>)"),  # #tag
    )
    for pattern, replacement in conversions:
        string = re.sub(pattern, replacement, string)
    return string
import asyncio
import os
from pathlib import Path
import pyppeteer.connection
def patch_pyppeteer():
    """Fix https://github.com/miyakogi/pyppeteer/issues/178"""
    import pyppeteer.connection
    original_connect = pyppeteer.connection.websockets.client.connect
    def patched_connect(*args, **kwargs):
        # Disable the websocket keep-alive ping entirely; long-running
        # downloads otherwise get their connection closed mid-transfer.
        kwargs['ping_interval'] = None
        kwargs['ping_timeout'] = None
        return original_connect(*args, **kwargs)
    pyppeteer.connection.websockets.client.connect = patched_connect
async def get_text(page, b, norm=True):
    """Return the textContent of element *b*; lowercased and stripped when *norm*."""
    raw = await page.evaluate('(element) => element.textContent', b)
    if not norm:
        return raw
    return raw.lower().strip()
async def download_rr_archive(output_type: str,
                              output_directory: Path,
                              devtools=False,
                              sleep_duration=1.,
                              slow_motion=10):
    """Download an archive in RoamResearch by driving a headless browser.

    Signs in with the ROAMRESEARCH_USER / ROAMRESEARCH_PASSWORD environment
    variables, opens the "Export All" dialog, selects *output_type* and waits
    for the resulting .zip to appear in *output_directory*.

    :param output_type: Download "json" or "markdown"
    :param output_directory: Directory where to stock the outputs
    :param devtools: Open Chrome with devtools (non-headless); downloads are
        then NOT redirected, so completion cannot be verified
    :param sleep_duration: How many seconds to wait after the clicks
    :param slow_motion: Passed to pyppeteer's slowMo -- delay applied to each
        browser operation (presumably milliseconds -- TODO confirm)
    """
    print("Creating browser")
    # NOTE(review): relies on `import pyppeteer.connection` above binding the
    # top-level `pyppeteer` name so pyppeteer.launch is reachable.
    browser = await pyppeteer.launch(devtools=devtools, slowMo=slow_motion)
    document = await browser.newPage()
    if not devtools:
        print("Configure downloads to", output_directory)
        # Redirect Chrome downloads into our target directory via CDP
        cdp = await document.target.createCDPSession()
        await cdp.send('Page.setDownloadBehavior',
                       {'behavior': 'allow', 'downloadPath': str(output_directory)})
    print("Opening signin page")
    await document.goto('https://roamresearch.com/#/signin')
    await asyncio.sleep(sleep_duration)
    print("Fill email")
    email_elem = await document.querySelector("input[name='email']")
    await email_elem.click()
    await email_elem.type(os.environ["ROAMRESEARCH_USER"])
    print("Fill password")
    passwd_elem = await document.querySelector("input[name='password']")
    await passwd_elem.click()
    await passwd_elem.type(os.environ["ROAMRESEARCH_PASSWORD"])
    print("Click on sign-in")
    buttons = await document.querySelectorAll('button')
    signin_confirm, = [b for b in buttons if await get_text(document, b) == 'sign in']
    await signin_confirm.click()
    await asyncio.sleep(sleep_duration)
    print("Wait for interface to load")
    # Poll for the "..." menu button; the app can take a while to boot
    dot_button = None
    for _ in range(100):
        # Starting is a little bit slow
        dot_button = await document.querySelector(".bp3-icon-more")
        if dot_button is None:
            await asyncio.sleep(sleep_duration)
        else:
            break
    assert dot_button is not None
    await dot_button.click()
    print("Launch popup")
    divs_pb3 = await document.querySelectorAll(".bp3-fill")
    export_all, = [b for b in divs_pb3 if await get_text(document, b) == 'export all']
    await export_all.click()
    await asyncio.sleep(sleep_duration)
    async def get_dropdown_button():
        # Returns the export-format dropdown and its current (normalized) label
        dropdown_button = await document.querySelector(".bp3-button-text")
        dropdown_button_text = await get_text(document, dropdown_button)
        # Defensive check if the interface change
        assert dropdown_button_text in ["markdown", "json"], dropdown_button_text
        return dropdown_button, dropdown_button_text
    print("Checking download type")
    button, button_text = await get_dropdown_button()
    if button_text != output_type:
        print("Changing output type to", output_type)
        await button.click()
        await asyncio.sleep(sleep_duration)
        # The dropdown has exactly one alternative entry: the other format
        output_type_elem, = await document.querySelectorAll(".bp3-text-overflow-ellipsis")
        await output_type_elem.click()
        # defensive check
        await asyncio.sleep(sleep_duration)
        _, button_text_ = await get_dropdown_button()
        assert button_text_ == output_type, (button_text_, output_type)
    print("Downloading output of type", output_type)
    buttons = await document.querySelectorAll('button')
    export_all_confirm, = [b for b in buttons if await get_text(document, b) == 'export all']
    await export_all_confirm.click()
    # Wait for download to finish
    if devtools:
        # No way to check because download location is not specified
        return
    # Poll up to ~100s (1000 * 0.1s) for the .zip to show up on disk
    for _ in range(1000):
        await asyncio.sleep(0.1)
        for file in output_directory.iterdir():
            if file.name.endswith(".zip"):
                print("File", file, "found")
                await asyncio.sleep(1)
                await browser.close()
                return
    await browser.close()
    raise FileNotFoundError(f"Impossible to download {output_type} in {output_directory}")
import datetime
import json
import zipfile
from pathlib import Path
import git
from roam_to_git.formatter import format_markdown
def get_zip_path(zip_dir_path: Path) -> Path:
    """Return the path to the single zip file in a directory, and fail if there
    is not exactly one zip file."""
    candidates = [entry for entry in zip_dir_path.iterdir()
                  if entry.name.endswith(".zip")]
    assert len(candidates) == 1, (candidates, zip_dir_path)
    return candidates[0]
def reset_git_directory(git_path: Path):
    """Remove all files in a git directory, leaving the .git store untouched.

    Bug fix: the previous ``git_path.glob("**")`` only yields directories
    (pathlib's ``**`` matches directories, not files), so ``is_file()`` was
    never true and nothing was ever deleted. ``glob("**/*")`` yields files too.
    """
    for file in git_path.glob("**/*"):
        if not file.is_file():
            continue
        if ".git" in file.parts:
            # Never touch the repository metadata
            continue
        file.unlink()
def unzip_markdown_archive(zip_dir_path: Path, git_path: Path):
    """Extract the markdown zip found in *zip_dir_path*, format every page and
    write the results under *git_path*."""
    archive = get_zip_path(zip_dir_path)
    with zipfile.ZipFile(archive) as zf:
        raw_contents = {info.filename: zf.read(info.filename).decode()
                        for info in zf.infolist()
                        if not info.is_dir()}
    formatted = format_markdown(raw_contents)
    # Format and write the markdown files
    for file_name, text in formatted.items():
        target = git_path / file_name
        target.parent.mkdir(parents=True, exist_ok=True)  # Needed if a new directory is used
        # We have to specify encoding because crontab on Mac don't use UTF-8
        # https://stackoverflow.com/questions/11735363/python3-unicodeencodeerror-crontab
        with target.open("w", encoding="utf-8") as fh:
            fh.write(text)
def unzip_json_archive(zip_dir_path: Path, git_path: Path):
    """Extract the JSON zip found in *zip_dir_path*, re-serialise every file
    deterministically (sorted keys, indented) and write it under *git_path*."""
    archive = get_zip_path(zip_dir_path)
    with zipfile.ZipFile(archive) as zf:
        for member in zf.namelist():
            assert member.endswith(".json")
            parsed = json.loads(zf.read(member).decode())
            with open(git_path / member, "w") as fh:
                json.dump(parsed, fh, sort_keys=True, indent=2, ensure_ascii=True)
def commit_git_directory(git_path: Path):
    """Add an automatic commit in a git directory if it has changed, and push it."""
    repo = git.Repo(git_path)
    assert not repo.bare
    has_changes = repo.is_dirty() or bool(repo.untracked_files)
    if not has_changes:
        # No change, nothing to do
        return
    print("Committing in", git_path)
    # 'git add -A': https://github.com/gitpython-developers/GitPython/issues/292
    repo.git.add(A=True)
    repo.index.commit(f"Automatic commit {datetime.datetime.now().isoformat()}")
    print("Pushing to origin")
    repo.remote(name='origin').push()
__version__ = "0.3.1"
class _RoamMissingItem:
    """ Falsey class used to flag item "missing" from traversal path """
    def __bool__(self):
        # Always falsey, so `if roamer:` style checks fail naturally
        return False
    def __len__(self):
        # Looks empty to len() and emptiness checks
        return 0
    def __iter__(self):
        # Iterating a missing item yields nothing (see __next__)
        return self
    def __next__(self):
        raise StopIteration()
    def __repr__(self):
        return "<MISSING>"
# Module-level singleton sentinel; compared with `is` throughout the library.
MISSING = _RoamMissingItem()
class _Path:
    """Record the sequence of lookups applied to a ``Roamer`` so invalid
    traversals can be reported with a rich, human-readable description."""
    # NOTE: class-level defaults; __init__ always rebinds both on the
    # instance, so the mutable [] default is never actually shared.
    _r_root_item_ = None
    _r_steps_ = []
    def __init__(self, initial_item, path_to_clone=None):
        """Start a fresh path at *initial_item*, or clone *path_to_clone*."""
        if path_to_clone is not None:
            self._r_root_item_ = path_to_clone._r_root_item_
            self._r_steps_ = list(path_to_clone._r_steps_)  # Shallow copy list
        else:
            self._r_root_item_ = initial_item
            self._r_steps_ = []
    def log_getattr(self, attr_name: str, roamer: "Roamer"):
        """
        Log the fact that a ``.dot`` attribute lookup was performed using a
        given name and the given ``Roamer`` shim was produced.
        """
        self._r_steps_.append((f".{attr_name}", unwrap(roamer)))
    def log_getitem(self, slice_value: slice, roamer: "Roamer"):
        """
        Log the fact that a ``["slice"]`` attribute lookup was performed using a
        given slice value and the given ``Roamer`` shim was produced.
        """
        if isinstance(slice_value, slice):
            # Render as [start:stop] or [start:stop:step]
            item_desc = (
                f"[{slice_value.start or ''}:{slice_value.stop or ''}"
                f"{slice_value.step and ':' + slice_value.step or ''}]"
            )
        else:
            item_desc = f"[{slice_value!r}]"
        self._r_steps_.append((item_desc, unwrap(roamer)))
    def _last_found(self):
        """Return (1-based index, description, data) of the last step that
        produced real data, or (None, None, root item) when none did."""
        last_found_step = None, None, self._r_root_item_
        for i, step in enumerate(self._r_steps_, 1):
            desc, data = step
            if data is not MISSING:
                last_found_step = i, desc, data
        return last_found_step
    def _first_missing(self):
        """Return (1-based index, description, MISSING) of the first invalid
        step, or (None, None, root item) when the whole path is valid."""
        for i, step in enumerate(self._r_steps_, 1):
            desc, data = step
            if data is MISSING:
                return i, desc, data
        return None, None, self._r_root_item_
    def description(self) -> str:
        """
        Return a text description of this path, capturing:
        - the first step at which the path was invalid (if applicable)
        - the type of the root data object
        - path steps applied
        - hints about the type and content of data at the point the path became
          invalid (if applicable)
        """
        result = []
        first_missing_index, first_missing_desc, _ = self._first_missing()
        if first_missing_index:
            result.append(
                f"missing step {first_missing_index} {first_missing_desc} for path "
            )
        result.append(f"<{type(self._r_root_item_).__name__}>")
        result += [desc for desc, _ in self._r_steps_]
        if first_missing_index:
            _, _, last_found_data = self._last_found()
            if last_found_data is not MISSING:
                result.append(f" at <{type(last_found_data).__name__}>")
                # Generate hints
                if isinstance(last_found_data, (tuple, list, set, range)):
                    # Detect an integer key slice operation like `[3]` or `[-2]`
                    if first_missing_desc[0] == "[" and first_missing_desc[-1] == "]":
                        try:
                            int(first_missing_desc[1:-1])
                            result.append(f" with length {len(last_found_data)}")
                        except ValueError:
                            pass
                elif isinstance(
                    last_found_data, (str, int, float, complex, bool, bytes, bytearray)
                ):
                    pass  # No hint for primitive types
                elif last_found_data:
                    # Prefer listing dict keys; fall back to public attributes
                    try:
                        keys = last_found_data.keys()
                        if keys:
                            result.append(
                                f" with keys [{', '.join([repr(k) for k in keys])}]"
                            )
                    except AttributeError:
                        attrs = dir(last_found_data)
                        if attrs and not isinstance(
                            last_found_data, (str, tuple, list)
                        ):
                            result.append(
                                f" with attrs [{', '.join([a for a in attrs if not a.startswith('_')])}]"
                            )
        return "".join(result)
    def __eq__(self, other):
        """Paths are equal when they share the root object and the step log."""
        if isinstance(other, _Path):
            return (
                self._r_root_item_ == other._r_root_item_
                and self._r_steps_ == other._r_steps_
            )
        return False
class RoamPathException(Exception):
    """
    Raised by a ``Roamer`` shim on an invalid path step when the ``_raise``
    option is set, or provided when returning data.
    The ``str()`` representation is a rich description of where the traversal
    path went wrong.
    """
    def __init__(self, path):
        super().__init__(self)
        self.path = path
    def __str__(self):
        described = self.path.description()
        return "<RoamPathException: {}>".format(described)
class Roamer:
    """
    Act as a shim over your data objects, to intercept Python operations and do
    the extra work required to more easily traverse nested data.
    """
    # Internal state variables
    _r_item_ = None  # Data currently pointed at (or the MISSING sentinel)
    _r_path_ = None  # _Path log of every traversal step taken so far
    _r_is_multi_item_ = False  # True after a slice: item is a tuple of results
    # Options
    _r_raise_ = False  # Raise RoamPathException immediately on an invalid step
    # Temporary flags
    _r_via_alternate_lookup_ = False  # Guards the getattr<->getitem fallback recursion
    _r_item__iter = None  # Live iterator backing __iter__/__next__
    def __init__(self, item, _raise=None):
        """Wrap *item*; if *item* is itself a Roamer, clone its state."""
        # Handle `item` that is itself a `Roamer`
        if isinstance(item, Roamer):
            for attr in ("_r_item_", "_r_is_multi_item_", "_r_raise_"):
                setattr(self, attr, getattr(item, attr))
            self._r_path_ = _Path(item._r_item_, item._r_path_)
        else:
            self._r_item_ = item
            self._r_path_ = _Path(self._r_item_)
        # Set or override raise flag if user provided a value
        if _raise is not None:
            self._r_raise_ = bool(_raise)
    def __getattr__(self, attr_name):
        """Traverse one ``.attr`` step, falling back to item lookup on failure."""
        # Stop here if no item to traverse
        if self._r_item_ is MISSING:
            if not self._r_via_alternate_lookup_:
                self._r_path_.log_getattr(attr_name, self)
            return self
        copy = Roamer(self)
        # Multi-item: `.xyz` => `(i.xyz for i in item)`
        if self._r_is_multi_item_:
            multi_items = []
            for i in self._r_item_:
                lookup = None
                try:
                    lookup = getattr(i, attr_name)
                except (TypeError, AttributeError):
                    try:
                        lookup = i[attr_name]
                    except (TypeError, LookupError):
                        pass
                # Flatten sequence results into the multi-item tuple
                if isinstance(lookup, (tuple, list, range)):
                    multi_items += lookup
                elif lookup is not None:
                    multi_items.append(lookup)
            copy._r_item_ = tuple(multi_items)
        # Single item: `.xyz` => `item.xyz`
        else:
            try:
                copy._r_item_ = getattr(copy._r_item_, attr_name)
            except (TypeError, AttributeError):
                # Attr lookup failed, no more attr lookup options
                copy._r_item_ = MISSING
        # Fall back to `self.__getitem__()` if lookup failed so far and we didn't come from there
        if copy._r_item_ is MISSING and not self._r_via_alternate_lookup_:
            try:
                self._r_via_alternate_lookup_ = True
                copy = self[attr_name]
            except RoamPathException:
                pass
            finally:
                copy._r_path_.log_getattr(attr_name, copy)
                self._r_via_alternate_lookup_ = False
        elif not self._r_via_alternate_lookup_:
            copy._r_path_.log_getattr(attr_name, copy)
        if copy._r_item_ is MISSING and copy._r_raise_:
            raise RoamPathException(copy._r_path_)
        return copy
    def __getitem__(self, key_or_index_or_slice):
        """Traverse one ``[key]`` step, falling back to attr lookup on failure."""
        # Stop here if no item to traverse
        if self._r_item_ is MISSING:
            if not self._r_via_alternate_lookup_:
                self._r_path_.log_getitem(key_or_index_or_slice, self)
            return self
        copy = Roamer(self)
        # Multi-item: `[xyz]` => `(i[xyz] for i in item)`
        if copy._r_is_multi_item_ and not isinstance(key_or_index_or_slice, slice):
            # Flatten item if we have selected a specific integer index
            if isinstance(key_or_index_or_slice, int):
                try:
                    copy._r_item_ = copy._r_item_[key_or_index_or_slice]
                except (TypeError, LookupError):
                    copy._r_item_ = MISSING
                # No longer in a multi-item if we have selected a specific index item
                copy._r_is_multi_item_ = False
            # Otherwise apply slice lookup to each of multiple items
            else:
                multi_items = []
                for i in copy._r_item_:
                    lookup = None
                    try:
                        lookup = i[key_or_index_or_slice]
                    except (TypeError, LookupError):
                        try:
                            lookup = getattr(i, key_or_index_or_slice)
                        except (TypeError, AttributeError):
                            pass
                    # Flatten sequence results into the multi-item tuple
                    if isinstance(lookup, (tuple, list, range)):
                        multi_items += lookup
                    elif lookup is not None:
                        multi_items.append(lookup)
                copy._r_item_ = tuple(multi_items)
        # Lookup for non-multi item data, or for slice lookups in all cases
        else:
            try:
                copy._r_item_ = copy._r_item_[key_or_index_or_slice]
            except (TypeError, LookupError):
                # Index lookup failed, no more index lookup options
                copy._r_item_ = MISSING
            # Flag the fact our item actually has multiple elements
            if isinstance(key_or_index_or_slice, slice):
                copy._r_is_multi_item_ = True
        # Fall back to `self.__getattr__()` if lookup failed so far and we didn't come from there
        if (
            copy._r_item_ is MISSING
            and not self._r_via_alternate_lookup_
            # Cannot do an integer attr lookup
            and not isinstance(key_or_index_or_slice, int)
        ):
            try:
                self._r_via_alternate_lookup_ = True
                copy = getattr(self, key_or_index_or_slice)
            except RoamPathException:
                pass
            finally:
                copy._r_path_.log_getitem(key_or_index_or_slice, copy)
                self._r_via_alternate_lookup_ = False
        elif not self._r_via_alternate_lookup_:
            copy._r_path_.log_getitem(key_or_index_or_slice, copy)
        if copy._r_item_ is MISSING and copy._r_raise_:
            raise RoamPathException(copy._r_path_)
        return copy
    def __call__(self, *args, _raise=False, _roam=False, _invoke=None, **kwargs):
        """Unwrap the item, optionally calling it (or *_invoke*) with *args*/*kwargs*."""
        if _raise and self._r_item_ is MISSING:
            raise RoamPathException(self._r_path_)
        # If an explicit callable is provided, call `_invoke(item, x, y, z)`
        if _invoke is not None:
            call_result = _invoke(self._r_item_, *args, **kwargs)
        # If item is callable: `.(x, y, z)` => `item(x, y, z)`
        elif callable(self._r_item_):
            call_result = self._r_item_(*args, **kwargs)
        # If item is not callable but we were given parameters, try to apply
        # them even though we know it won't work, to generate the appropriate
        # exception to let the user know their action failed
        elif args or kwargs:
            call_result = self._r_item_(*args, **kwargs)
        # If item is not callable: `.()` => return wrapped item unchanged
        else:
            call_result = self._r_item_
        # Re-wrap return as a `Roamer` if requested
        if _roam:
            copy = Roamer(self)
            copy._r_item_ = call_result
            return copy
        return call_result
    def __iter__(self):
        """Iterate the underlying item; a non-iterable item yields nothing."""
        try:
            self._r_item__iter = iter(self._r_item_)
        except (TypeError, AttributeError):
            self._r_item__iter = None
        return self
    def __next__(self):
        # Each element is re-wrapped so traversal can continue per-element
        if self._r_item__iter is None:
            raise StopIteration()
        next_value = next(self._r_item__iter)
        return Roamer(next_value)
    def __eq__(self, other):
        """Equal to another Roamer with identical state, or to the raw item."""
        if isinstance(other, Roamer):
            for attr in ("_r_item_", "_r_path_", "_r_is_multi_item_", "_r_raise_"):
                if getattr(other, attr) != getattr(self, attr):
                    return False
            return True
        else:
            return other == self._r_item_
    def __bool__(self):
        # Delegates truthiness to the wrapped item (MISSING is falsey)
        return bool(self._r_item_)
    def __len__(self):
        try:
            return len(self._r_item_)
        except TypeError:
            # Here we know we have a non-MISSING item, but it doesn't support length lookups so
            # must be a single thing...
            # WARNING: This is black magic, does it make enough sense?
            return 1
    def __repr__(self):
        return f"<Roamer: {self._r_path_.description()} => {self._r_item_!r}>"
def r(item: object, _raise: bool = None) -> Roamer:
    """
    A shorter alias for constructing a ``Roamer`` shim class.
    """
    shim = Roamer(item, _raise=_raise)
    return shim
def r_strict(item: object) -> Roamer:
    """
    A shorter alias for constructing a ``Roamer`` shim class in "strict" mode:
    the ``_raise`` flag is set, so the shim raises ``RoamPathException`` as
    soon as an invalid path step is expressed.
    """
    shim = Roamer(item, _raise=True)
    return shim
def unwrap(roamer: Roamer, _raise: bool = None) -> object:
    """
    Return the underlying data in the given ``Roamer`` shim object without
    the need to call that shim object.
    Not the recommended way to get data out of **roam**, but it avoids the
    semi-magical call behaviour when that causes surprises.
    """
    item = roamer._r_item_
    if _raise and item is MISSING:
        raise RoamPathException(roamer._r_path_)
    return item
import contextlib
import re
import shutil
import sys
from difflib import get_close_matches
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, List, Union
from ultralytics.yolo.utils import (DEFAULT_CFG, DEFAULT_CFG_DICT, DEFAULT_CFG_PATH, LOGGER, ROOT, USER_CONFIG_DIR,
IterableSimpleNamespace, __version__, checks, colorstr, yaml_load, yaml_print)
CLI_HELP_MSG = \
f"""
Arguments received: {str(['yolo'] + sys.argv[1:])}. Ultralytics 'yolo' commands use the following syntax:
yolo TASK MODE ARGS
Where TASK (optional) is one of [detect, segment, classify]
MODE (required) is one of [train, val, predict, export, track]
ARGS (optional) are any number of custom 'arg=value' pairs like 'imgsz=320' that override defaults.
See all ARGS at https://docs.ultralytics.com/cfg or with 'yolo cfg'
1. Train a detection model for 10 epochs with an initial learning_rate of 0.01
yolo train data=coco128.yaml model=yolov8n.pt epochs=10 lr0=0.01
2. Predict a YouTube video using a pretrained segmentation model at image size 320:
yolo predict model=yolov8n-seg.pt source='https://youtu.be/Zgi9g1ksQHc' imgsz=320
3. Val a pretrained detection model at batch-size 1 and image size 640:
yolo val model=yolov8n.pt data=coco128.yaml batch=1 imgsz=640
4. Export a YOLOv8n classification model to ONNX format at image size 224 by 128 (no TASK required)
yolo export model=yolov8n-cls.pt format=onnx imgsz=224,128
5. Run special commands:
yolo help
yolo checks
yolo version
yolo settings
yolo copy-cfg
yolo cfg
Docs: https://docs.ultralytics.com/cli
Community: https://community.ultralytics.com
GitHub: https://github.com/ultralytics/ultralytics
"""
# Define keys for arg type checks
# Keys whose values must be numeric (int or float, unbounded)
CFG_FLOAT_KEYS = 'warmup_epochs', 'box', 'cls', 'dfl', 'degrees', 'shear', 'fl_gamma'
# NOTE(review): 'iou' appears twice in this tuple -- harmless for membership
# tests, but confirm the second occurrence is not a typo for another key.
CFG_FRACTION_KEYS = ('dropout', 'iou', 'lr0', 'lrf', 'momentum', 'weight_decay', 'warmup_momentum', 'warmup_bias_lr',
                     'label_smoothing', 'hsv_h', 'hsv_s', 'hsv_v', 'translate', 'scale', 'perspective', 'flipud',
                     'fliplr', 'mosaic', 'mixup', 'copy_paste', 'conf', 'iou')  # fractional floats limited to 0.0 - 1.0
# Keys whose values must be int
CFG_INT_KEYS = ('epochs', 'patience', 'batch', 'workers', 'seed', 'close_mosaic', 'mask_ratio', 'max_det', 'vid_stride',
                'line_thickness', 'workspace', 'nbs', 'save_period')
# Keys whose values must be bool
CFG_BOOL_KEYS = ('save', 'exist_ok', 'pretrained', 'verbose', 'deterministic', 'single_cls', 'image_weights', 'rect',
                 'cos_lr', 'overlap_mask', 'val', 'save_json', 'save_hybrid', 'half', 'dnn', 'plots', 'show',
                 'save_txt', 'save_conf', 'save_crop', 'hide_labels', 'hide_conf', 'visualize', 'augment',
                 'agnostic_nms', 'retina_masks', 'boxes', 'keras', 'optimize', 'int8', 'dynamic', 'simplify', 'nms',
                 'v5loader')
# Define valid tasks and modes
TASKS = 'detect', 'segment', 'classify'
MODES = 'train', 'val', 'predict', 'export', 'track', 'benchmark'
def cfg2dict(cfg):
    """
    Convert a configuration object to a dictionary, whether it is a file path, a string, or a SimpleNamespace object.
    Inputs:
        cfg (str) or (Path) or (SimpleNamespace): Configuration object to be converted to a dictionary.
    Returns:
        cfg (dict): Configuration object in dictionary format.
    """
    if isinstance(cfg, (str, Path)):
        return yaml_load(cfg)  # load dict from YAML file
    if isinstance(cfg, SimpleNamespace):
        return vars(cfg)  # convert namespace to dict
    return cfg  # already a dict (or dict-like)
def get_cfg(cfg: Union[str, Path, Dict, SimpleNamespace] = DEFAULT_CFG_DICT, overrides: Dict = None):
    """
    Load and merge configuration data from a file or dictionary.
    Args:
        cfg (str) or (Path) or (Dict) or (SimpleNamespace): Configuration data.
        overrides (str) or (Dict), optional: Overrides in the form of a file name or a dictionary. Default is None.
    Returns:
        (SimpleNamespace): Training arguments namespace.
    Raises:
        SyntaxError: via check_cfg_mismatch, if an override key is unknown.
        TypeError/ValueError: if a value fails the per-key type/range checks.
    """
    cfg = cfg2dict(cfg)
    # Merge overrides
    if overrides:
        overrides = cfg2dict(overrides)
        # Unknown override keys abort with a helpful SyntaxError
        check_cfg_mismatch(cfg, overrides)
        cfg = {**cfg, **overrides}  # merge cfg and overrides dicts (prefer overrides)
    # Special handling for numeric project/names
    for k in 'project', 'name':
        if k in cfg and isinstance(cfg[k], (int, float)):
            cfg[k] = str(cfg[k])
    # Type and Value checks, driven by the CFG_*_KEYS groupings above
    for k, v in cfg.items():
        if v is not None:  # None values may be from optional args
            if k in CFG_FLOAT_KEYS and not isinstance(v, (int, float)):
                raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
                                f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
            elif k in CFG_FRACTION_KEYS:
                # Fraction keys must be numeric AND within [0.0, 1.0]
                if not isinstance(v, (int, float)):
                    raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
                                    f"Valid '{k}' types are int (i.e. '{k}=0') or float (i.e. '{k}=0.5')")
                if not (0.0 <= v <= 1.0):
                    raise ValueError(f"'{k}={v}' is an invalid value. "
                                     f"Valid '{k}' values are between 0.0 and 1.0.")
            elif k in CFG_INT_KEYS and not isinstance(v, int):
                raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
                                f"'{k}' must be an int (i.e. '{k}=8')")
            elif k in CFG_BOOL_KEYS and not isinstance(v, bool):
                raise TypeError(f"'{k}={v}' is of invalid type {type(v).__name__}. "
                                f"'{k}' must be a bool (i.e. '{k}=True' or '{k}=False')")
    # Return instance
    return IterableSimpleNamespace(**cfg)
def check_cfg_mismatch(base: Dict, custom: Dict, e=None):
    """
    Check for keys in *custom* that do not exist in *base*.
    On a mismatch, raise a SyntaxError listing close matches from *base*
    (with their defaults where known); otherwise return None.
    Inputs:
        - custom (Dict): a dictionary of custom configuration options
        - base (Dict): a dictionary of base configuration options
    """
    base_keys = set(base.keys())
    custom_keys = set(custom.keys())
    mismatched = [k for k in custom_keys if k not in base_keys]
    if not mismatched:
        return
    message_parts = []
    for k in mismatched:
        matches = get_close_matches(k, base_keys)  # key list
        matches = [f'{m}={DEFAULT_CFG_DICT[m]}' if DEFAULT_CFG_DICT.get(m) is not None else m for m in matches]
        match_str = f'Similar arguments are i.e. {matches}.' if matches else ''
        message_parts.append(f"'{colorstr('red', 'bold', k)}' is not a valid YOLO argument. {match_str}\n")
    raise SyntaxError(''.join(message_parts) + CLI_HELP_MSG) from e
def merge_equals_args(args: List[str]) -> List[str]:
    """
    Merges arguments around isolated '=' args in a list of strings.
    Handles the three shapes ['arg', '=', 'val'], ['arg=', 'val'] and
    ['arg', '=val'], producing ['arg=val'] for each.

    Bug fix: the previous implementation deleted consumed elements from the
    caller's list (``del args[i + 1]`` while enumerating), mutating the input
    as a side effect. This version walks with an explicit index and leaves
    *args* untouched; return values are identical.

    Args:
        args (List[str]): A list of strings where each element is an argument.

    Returns:
        List[str]: A list of strings where the arguments around isolated '=' are merged.
    """
    new_args = []
    i = 0
    while i < len(args):
        arg = args[i]
        if arg == '=' and 0 < i < len(args) - 1:  # merge ['arg', '=', 'val']
            new_args[-1] += f'={args[i + 1]}'
            i += 2
            continue
        if arg.endswith('=') and i < len(args) - 1 and '=' not in args[i + 1]:  # merge ['arg=', 'val']
            new_args.append(f'{arg}{args[i + 1]}')
            i += 2
            continue
        if arg.startswith('=') and i > 0:  # merge ['arg', '=val']
            new_args[-1] += arg
        else:
            new_args.append(arg)
        i += 1
    return new_args
def entrypoint(debug=''):
    """
    Ultralytics package entrypoint: parse the command line arguments passed to the package and dispatch.

    This function allows for:
    - passing mandatory YOLO args as a list of strings
    - specifying the task to be performed, either 'detect', 'segment' or 'classify'
    - specifying the mode, either 'train', 'val', 'test', or 'predict'
    - running special modes like 'checks'
    - passing overrides to the package's configuration

    It builds an overrides dict from the passed args on top of the package's default cfg,
    then runs the requested mode on a YOLO model.

    Args:
        debug (str): space-separated command line to parse instead of sys.argv (for testing).
    """
    args = (debug.split(' ') if debug else sys.argv)[1:]
    if not args:  # no arguments passed
        LOGGER.info(CLI_HELP_MSG)
        return

    # Special single-word commands that run immediately and exit
    special = {
        'help': lambda: LOGGER.info(CLI_HELP_MSG),
        'checks': checks.check_yolo,
        'version': lambda: LOGGER.info(__version__),
        'settings': lambda: yaml_print(USER_CONFIG_DIR / 'settings.yaml'),
        'cfg': lambda: yaml_print(DEFAULT_CFG_PATH),
        'copy-cfg': copy_default_cfg}
    full_args_dict = {**DEFAULT_CFG_DICT, **{k: None for k in TASKS}, **{k: None for k in MODES}, **special}

    # Define common mis-uses of special commands, i.e. -h, -help, --help
    special.update({k[0]: v for k, v in special.items()})  # first-letter shortcuts, i.e. 'h' for 'help'
    special.update({k[:-1]: v for k, v in special.items() if len(k) > 1 and k.endswith('s')})  # singular, i.e. 'check'
    special = {**special, **{f'-{k}': v for k, v in special.items()}, **{f'--{k}': v for k, v in special.items()}}

    overrides = {}  # basic overrides, i.e. imgsz=320
    for a in merge_equals_args(args):  # merge spaces around '=' sign
        if a.startswith('--'):
            LOGGER.warning(f"WARNING ⚠️ '{a}' does not require leading dashes '--', updating to '{a[2:]}'.")
            a = a[2:]
        if a.endswith(','):
            LOGGER.warning(f"WARNING ⚠️ '{a}' does not require trailing comma ',', updating to '{a[:-1]}'.")
            a = a[:-1]
        if '=' in a:
            try:
                # FIX: the re.sub result was previously discarded, so spaces around '=' were never removed
                a = re.sub(r' *= *', '=', a)  # remove spaces around equals sign
                k, v = a.split('=', 1)  # split on first '=' sign
                assert v, f"missing '{k}' value"
                if k == 'cfg':  # custom.yaml passed
                    LOGGER.info(f'Overriding {DEFAULT_CFG_PATH} with {v}')
                    overrides = {k: val for k, val in yaml_load(checks.check_yaml(v)).items() if k != 'cfg'}
                else:
                    if v.lower() == 'none':
                        v = None
                    elif v.lower() == 'true':
                        v = True
                    elif v.lower() == 'false':
                        v = False
                    else:
                        with contextlib.suppress(Exception):
                            v = eval(v)  # NOTE: eval of CLI-supplied value; only safe on trusted input
                    overrides[k] = v
            except (NameError, SyntaxError, ValueError, AssertionError) as e:
                check_cfg_mismatch(full_args_dict, {a: ''}, e)
        elif a in TASKS:
            overrides['task'] = a
        elif a in MODES:
            overrides['mode'] = a
        elif a in special:
            special[a]()
            return
        elif a in DEFAULT_CFG_DICT and isinstance(DEFAULT_CFG_DICT[a], bool):
            overrides[a] = True  # auto-True for default bool args, i.e. 'yolo show' sets show=True
        elif a in DEFAULT_CFG_DICT:
            raise SyntaxError(f"'{colorstr('red', 'bold', a)}' is a valid YOLO argument but is missing an '=' sign "
                              f"to set its value, i.e. try '{a}={DEFAULT_CFG_DICT[a]}'\n{CLI_HELP_MSG}")
        else:
            check_cfg_mismatch(full_args_dict, {a: ''})

    # Check keys
    check_cfg_mismatch(full_args_dict, overrides)

    # Mode
    mode = overrides.get('mode', None)
    if mode is None:
        mode = DEFAULT_CFG.mode or 'predict'
        LOGGER.warning(f"WARNING ⚠️ 'mode' is missing. Valid modes are {MODES}. Using default 'mode={mode}'.")
    elif mode not in MODES:
        # Simplified from `mode not in ('checks', checks)`: comparing a string against the
        # `checks` module object was always unequal, so this is behavior-identical.
        if mode != 'checks':
            raise ValueError(f"Invalid 'mode={mode}'. Valid modes are {MODES}.\n{CLI_HELP_MSG}")
        LOGGER.warning("WARNING ⚠️ 'yolo mode=checks' is deprecated. Use 'yolo checks' instead.")
        checks.check_yolo()
        return

    # Task
    task = overrides.get('task')
    if task and task not in TASKS:
        raise ValueError(f"Invalid 'task={task}'. Valid tasks are {TASKS}.\n{CLI_HELP_MSG}")

    # Model
    model = overrides.pop('model', DEFAULT_CFG.model)
    if model is None:
        model = 'yolov8n.pt'
        LOGGER.warning(f"WARNING ⚠️ 'model' is missing. Using default 'model={model}'.")
    from ultralytics.yolo.engine.model import YOLO
    overrides['model'] = model
    model = YOLO(model, task=task)

    # Task Update
    if task and task != model.task:
        LOGGER.warning(f"WARNING ⚠️ conflicting 'task={task}' passed with 'task={model.task}' model. "
                       f'This may produce errors.')
    task = task or model.task
    overrides['task'] = task

    # Mode defaults: fill in 'source', 'data' or 'format' when the mode needs one and none was given
    if mode in {'predict', 'track'} and 'source' not in overrides:
        overrides['source'] = DEFAULT_CFG.source or ROOT / 'assets' if (ROOT / 'assets').exists() \
            else 'https://ultralytics.com/images/bus.jpg'
        LOGGER.warning(f"WARNING ⚠️ 'source' is missing. Using default 'source={overrides['source']}'.")
    elif mode in ('train', 'val'):
        if 'data' not in overrides:
            task2data = dict(detect='coco128.yaml', segment='coco128-seg.yaml', classify='imagenet100')
            overrides['data'] = task2data.get(task or DEFAULT_CFG.task, DEFAULT_CFG.data)
            LOGGER.warning(f"WARNING ⚠️ 'data' is missing. Using default 'data={overrides['data']}'.")
    elif mode == 'export':
        if 'format' not in overrides:
            overrides['format'] = DEFAULT_CFG.format or 'torchscript'
            LOGGER.warning(f"WARNING ⚠️ 'format' is missing. Using default 'format={overrides['format']}'.")

    # Run the requested mode in python, with default args taken from the model
    getattr(model, mode)(**overrides)
# Special modes --------------------------------------------------------------------------------------------------------
def copy_default_cfg():
    """Copy the default config YAML into the current working directory as '*_copy.yaml' and log a usage example."""
    destination = Path.cwd() / DEFAULT_CFG_PATH.name.replace('.yaml', '_copy.yaml')
    shutil.copy2(DEFAULT_CFG_PATH, destination)
    LOGGER.info(f'{DEFAULT_CFG_PATH} copied to {destination}\n'
                f"Example YOLO command with this new custom cfg:\n    yolo cfg='{destination}' imgsz=320 batch=8")
if __name__ == '__main__':
# entrypoint(debug='yolo predict model=yolov8n.pt')
entrypoint(debug='') | /roar_yolo-9.0.0-py3-none-any.whl/ultralytics/yolo/cfg/__init__.py | 0.686055 | 0.232768 | __init__.py | pypi |
import torch
from ultralytics.yolo.engine.predictor import BasePredictor
from ultralytics.yolo.engine.results import Results
from ultralytics.yolo.utils import DEFAULT_CFG, ROOT, ops
from ultralytics.yolo.utils.plotting import Annotator, colors, save_one_box
class DetectionPredictor(BasePredictor):
    """Predictor for YOLO object detection models.

    Extends BasePredictor with detection-specific preprocessing (numpy batch -> normalized
    device tensor), postprocessing (NMS plus box rescaling into Results objects) and
    result writing (label txt files, crops, annotated images).
    """

    def get_annotator(self, img):
        """Return an Annotator for drawing boxes/labels on `img` with the configured line thickness."""
        return Annotator(img, line_width=self.args.line_thickness, example=str(self.model.names))

    def preprocess(self, img):
        """Convert a uint8 numpy image batch to a float16/32 tensor on the model device, scaled to [0, 1]."""
        img = torch.from_numpy(img).to(self.model.device)
        img = img.half() if self.model.fp16 else img.float()  # uint8 to fp16/32
        img /= 255  # 0 - 255 to 0.0 - 1.0
        return img

    def postprocess(self, preds, img, orig_img):
        """Apply NMS, rescale boxes to the original image size and wrap detections in Results.

        Returns:
            (list[Results], feat): per-image Results plus a feature output.
            NOTE(review): this fork's ops.non_max_suppression returns (preds, feat, probs),
            unlike upstream ultralytics which returns only preds — confirm against ops.py.
        """
        preds, feat, probs = ops.non_max_suppression(preds,
                                                     self.args.conf,
                                                     self.args.iou,
                                                     agnostic=self.args.agnostic_nms,
                                                     max_det=self.args.max_det,
                                                     classes=self.args.classes)
        results = []
        for i, pred in enumerate(preds):
            orig_img = orig_img[i] if isinstance(orig_img, list) else orig_img
            shape = orig_img.shape
            # Map boxes from the inference (letterboxed) size back to the original image size
            pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], shape).round()
            path, _, _, _, _ = self.batch
            img_path = path[i] if isinstance(path, list) else path
            results.append(Results(orig_img=orig_img, path=img_path, names=self.model.names, boxes=pred, probs=probs[i]))
        return results, feat

    def write_results(self, idx, results, batch):
        """Log, annotate and optionally save (txt/crop/image) the detections for image `idx` of the batch.

        Returns:
            (str): a human-readable per-image summary string for logging.
        """
        p, im, im0 = batch
        log_string = ''
        if len(im.shape) == 3:
            im = im[None]  # expand for batch dim
        self.seen += 1
        imc = im0.copy() if self.args.save_crop else im0  # copy only if crops will be saved from it
        if self.source_type.webcam or self.source_type.from_img:  # batch_size >= 1
            log_string += f'{idx}: '
            frame = self.dataset.count
        else:
            frame = getattr(self.dataset, 'frame', 0)
        self.data_path = p
        # Per-image txt path; video/stream sources get a frame suffix to keep files distinct
        self.txt_path = str(self.save_dir / 'labels' / p.stem) + ('' if self.dataset.mode == 'image' else f'_{frame}')
        log_string += '%gx%g ' % im.shape[2:]  # print string
        self.annotator = self.get_annotator(im0)
        det = results[idx].boxes  # TODO: make boxes inherit from tensors
        if len(det) == 0:
            return f'{log_string}(no detections), '
        for c in det.cls.unique():
            n = (det.cls == c).sum()  # detections per class
            log_string += f"{n} {self.model.names[int(c)]}{'s' * (n > 1)}, "
        # write
        for d in reversed(det):
            cls, conf = d.cls.squeeze(), d.conf.squeeze()
            if self.args.save_txt:  # Write to file
                line = (cls, *(d.xywhn.view(-1).tolist()), conf) \
                    if self.args.save_conf else (cls, *(d.xywhn.view(-1).tolist()))  # label format
                with open(f'{self.txt_path}.txt', 'a') as f:
                    f.write(('%g ' * len(line)).rstrip() % line + '\n')
            if self.args.save or self.args.save_crop or self.args.show:  # Add bbox to image
                c = int(cls)  # integer class
                # Prefix tracker id when present (tracking mode)
                name = f'id:{int(d.id.item())} {self.model.names[c]}' if d.id is not None else self.model.names[c]
                label = None if self.args.hide_labels else (name if self.args.hide_conf else f'{name} {conf:.2f}')
                self.annotator.box_label(d.xyxy.squeeze(), label, color=colors(c, True))
            if self.args.save_crop:
                save_one_box(d.xyxy,
                             imc,
                             file=self.save_dir / 'crops' / self.model.model.names[c] / f'{self.data_path.stem}.jpg',
                             BGR=True)
        return log_string
def predict(cfg=DEFAULT_CFG, use_python=False):
    """Run YOLO detection inference on a source, via the CLI predictor or the Python API."""
    model = cfg.model or 'yolov8n.pt'
    # Resolve the input source: explicit cfg value, else bundled assets, else a sample URL
    if cfg.source is not None:
        source = cfg.source
    elif (ROOT / 'assets').exists():
        source = ROOT / 'assets'
    else:
        source = 'https://ultralytics.com/images/bus.jpg'
    args = dict(model=model, source=source)
    if use_python:
        from ultralytics import YOLO
        YOLO(model)(**args)
    else:
        DetectionPredictor(overrides=args).predict_cli()
if __name__ == '__main__':
predict() | /roar_yolo-9.0.0-py3-none-any.whl/ultralytics/yolo/v8/detect/predict.py | 0.408159 | 0.153867 | predict.py | pypi |
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
import cv2
import numpy as np
import torch
import torchvision
from tqdm import tqdm
from ..utils import NUM_THREADS, TQDM_BAR_FORMAT, is_dir_writeable
from .augment import Compose, Format, Instances, LetterBox, classify_albumentations, classify_transforms, v8_transforms
from .base import BaseDataset
from .utils import HELP_URL, LOCAL_RANK, LOGGER, get_hash, img2label_paths, verify_image_label
class YOLODataset(BaseDataset):
    """
    Dataset class for loading images object detection and/or segmentation labels in YOLO format.

    Args:
        img_path (str): path to the folder containing images.
        imgsz (int): image size (default: 640).
        cache (bool): if True, a cache file of the labels is created to speed up future creation of dataset instances
            (default: False).
        augment (bool): if True, data augmentation is applied (default: True).
        hyp (dict): hyperparameters to apply data augmentation (default: None).
        prefix (str): prefix to print in log messages (default: '').
        rect (bool): if True, rectangular training is used (default: False).
        batch_size (int): size of batches (default: None).
        stride (int): stride (default: 32).
        pad (float): padding (default: 0.0).
        single_cls (bool): if True, single class training is used (default: False).
        use_segments (bool): if True, segmentation masks are used as labels (default: False).
        use_keypoints (bool): if True, keypoints are used as labels (default: False).
        names (list): class names (default: None).

    Returns:
        A PyTorch dataset object that can be used for training an object detection or segmentation model.
    """
    # FIX: this docstring was previously placed *after* the class attributes below, making it a
    # no-op string statement and leaving YOLODataset.__doc__ == None. It must be the first statement.
    cache_version = '1.0.1'  # dataset labels *.cache version, >= 1.0.0 for YOLOv8
    rand_interp_methods = [cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_LANCZOS4]

    def __init__(self,
                 img_path,
                 imgsz=640,
                 cache=False,
                 augment=True,
                 hyp=None,
                 prefix='',
                 rect=False,
                 batch_size=None,
                 stride=32,
                 pad=0.0,
                 single_cls=False,
                 use_segments=False,
                 use_keypoints=False,
                 names=None):
        self.use_segments = use_segments
        self.use_keypoints = use_keypoints
        self.names = names
        assert not (self.use_segments and self.use_keypoints), 'Can not use both segments and keypoints.'
        super().__init__(img_path, imgsz, cache, augment, hyp, prefix, rect, batch_size, stride, pad, single_cls)

    def cache_labels(self, path=Path('./labels.cache')):
        """Cache dataset labels, check images and read shapes.

        Args:
            path (Path): path where to save the cache file (default: Path('./labels.cache')).
        Returns:
            (dict): labels.
        """
        x = {'labels': []}
        nm, nf, ne, nc, msgs = 0, 0, 0, 0, []  # number missing, found, empty, corrupt, messages
        desc = f'{self.prefix}Scanning {path.parent / path.stem}...'
        total = len(self.im_files)
        with ThreadPool(NUM_THREADS) as pool:
            results = pool.imap(func=verify_image_label,
                                iterable=zip(self.im_files, self.label_files, repeat(self.prefix),
                                             repeat(self.use_keypoints), repeat(len(self.names))))
            pbar = tqdm(results, desc=desc, total=total, bar_format=TQDM_BAR_FORMAT)
            for im_file, lb, shape, segments, keypoint, nm_f, nf_f, ne_f, nc_f, msg in pbar:
                nm += nm_f
                nf += nf_f
                ne += ne_f
                nc += nc_f
                if im_file:
                    x['labels'].append(
                        dict(
                            im_file=im_file,
                            shape=shape,
                            cls=lb[:, 0:1],  # n, 1
                            bboxes=lb[:, 1:],  # n, 4
                            segments=segments,
                            keypoints=keypoint,
                            normalized=True,
                            bbox_format='xywh'))
                if msg:
                    msgs.append(msg)
                pbar.desc = f'{desc} {nf} images, {nm + ne} backgrounds, {nc} corrupt'
            pbar.close()
        if msgs:
            LOGGER.info('\n'.join(msgs))
        if nf == 0:
            LOGGER.warning(f'{self.prefix}WARNING ⚠️ No labels found in {path}. {HELP_URL}')
        x['hash'] = get_hash(self.label_files + self.im_files)
        x['results'] = nf, nm, ne, nc, len(self.im_files)
        x['msgs'] = msgs  # warnings
        x['version'] = self.cache_version  # cache version
        if is_dir_writeable(path.parent):
            if path.exists():
                path.unlink()  # remove *.cache file if exists
            np.save(str(path), x)  # save cache for next time
            path.with_suffix('.cache.npy').rename(path)  # remove .npy suffix
            LOGGER.info(f'{self.prefix}New cache created: {path}')
        else:
            LOGGER.warning(f'{self.prefix}WARNING ⚠️ Cache directory {path.parent} is not writeable, cache not saved.')
        return x

    def get_labels(self):
        """Return dataset labels, loading them from a valid *.cache file or rebuilding the cache."""
        self.label_files = img2label_paths(self.im_files)
        cache_path = Path(self.label_files[0]).parent.with_suffix('.cache')
        try:
            cache, exists = np.load(str(cache_path), allow_pickle=True).item(), True  # load dict
            assert cache['version'] == self.cache_version  # matches current version
            assert cache['hash'] == get_hash(self.label_files + self.im_files)  # identical hash
        except (FileNotFoundError, AssertionError, AttributeError):
            cache, exists = self.cache_labels(cache_path), False  # run cache ops

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupt, total
        if exists and LOCAL_RANK in {-1, 0}:
            d = f'Scanning {cache_path}... {nf} images, {nm + ne} backgrounds, {nc} corrupt'
            tqdm(None, desc=self.prefix + d, total=n, initial=n, bar_format=TQDM_BAR_FORMAT)  # display cache results
            if cache['msgs']:
                LOGGER.info('\n'.join(cache['msgs']))  # display warnings
        if nf == 0:  # number of labels found
            raise FileNotFoundError(f'{self.prefix}No labels found in {cache_path}, can not start training. {HELP_URL}')

        # Read cache
        for k in ('hash', 'version', 'msgs'):  # remove bookkeeping items (was a side-effect list comprehension)
            cache.pop(k)
        labels = cache['labels']
        self.im_files = [lb['im_file'] for lb in labels]  # update im_files

        # Check if the dataset is all boxes or all segments
        lengths = ((len(lb['cls']), len(lb['bboxes']), len(lb['segments'])) for lb in labels)
        len_cls, len_boxes, len_segments = (sum(x) for x in zip(*lengths))
        if len_segments and len_boxes != len_segments:
            LOGGER.warning(
                f'WARNING ⚠️ Box and segment counts should be equal, but got len(segments) = {len_segments}, '
                f'len(boxes) = {len_boxes}. To resolve this only boxes will be used and all segments will be removed. '
                'To avoid this please supply either a detect or segment dataset, not a detect-segment mixed dataset.')
            for lb in labels:
                lb['segments'] = []
        if len_cls == 0:
            raise ValueError(f'All labels empty in {cache_path}, can not start training without labels. {HELP_URL}')
        return labels

    # TODO: use hyp config to set all these augmentations
    def build_transforms(self, hyp=None):
        """Build and return the transform pipeline (augmented or plain letterbox + Format)."""
        if self.augment:
            hyp.mosaic = hyp.mosaic if self.augment and not self.rect else 0.0
            hyp.mixup = hyp.mixup if self.augment and not self.rect else 0.0
            transforms = v8_transforms(self, self.imgsz, hyp)
        else:
            transforms = Compose([LetterBox(new_shape=(self.imgsz, self.imgsz), scaleup=False)])
        transforms.append(
            Format(bbox_format='xywh',
                   normalize=True,
                   return_mask=self.use_segments,
                   return_keypoint=self.use_keypoints,
                   batch_idx=True,
                   mask_ratio=hyp.mask_ratio,
                   mask_overlap=hyp.overlap_mask))
        return transforms

    def close_mosaic(self, hyp):
        """Disable mosaic/copy-paste/mixup augmentations and rebuild the transform pipeline."""
        hyp.mosaic = 0.0  # set mosaic ratio=0.0
        hyp.copy_paste = 0.0  # keep the same behavior as previous v8 close-mosaic
        hyp.mixup = 0.0  # keep the same behavior as previous v8 close-mosaic
        self.transforms = self.build_transforms(hyp)

    def update_labels_info(self, label):
        """custom your label format here"""
        # NOTE: cls is not with bboxes now, classification and semantic segmentation need an independent cls label
        # we can make it also support classification and semantic segmentation by add or remove some dict keys there.
        bboxes = label.pop('bboxes')
        segments = label.pop('segments')
        keypoints = label.pop('keypoints', None)
        bbox_format = label.pop('bbox_format')
        normalized = label.pop('normalized')
        label['instances'] = Instances(bboxes, segments, keypoints, bbox_format=bbox_format, normalized=normalized)
        return label

    @staticmethod
    def collate_fn(batch):
        """Collate a list of sample dicts into one batch dict (stacking images, concatenating labels)."""
        new_batch = {}
        keys = batch[0].keys()
        values = list(zip(*[list(b.values()) for b in batch]))
        for i, k in enumerate(keys):
            value = values[i]
            if k == 'img':
                value = torch.stack(value, 0)
            if k in ['masks', 'keypoints', 'bboxes', 'cls']:
                value = torch.cat(value, 0)
            new_batch[k] = value
        new_batch['batch_idx'] = list(new_batch['batch_idx'])
        for i in range(len(new_batch['batch_idx'])):
            new_batch['batch_idx'][i] += i  # add target image index for build_targets()
        new_batch['batch_idx'] = torch.cat(new_batch['batch_idx'], 0)
        return new_batch
# Classification dataloaders -------------------------------------------------------------------------------------------
class ClassificationDataset(torchvision.datasets.ImageFolder):
    """
    YOLOv5 Classification Dataset.

    Arguments
        root: Dataset path
        transform: torchvision transforms, used by default
        album_transform: Albumentations transforms, used if installed
    """

    def __init__(self, root, augment, imgsz, cache=False):
        """Initialize transforms and optional RAM/disk caching for all samples under ``root``."""
        super().__init__(root=root)
        self.torch_transforms = classify_transforms(imgsz)
        self.album_transforms = classify_albumentations(augment, imgsz) if augment else None
        self.cache_ram = cache is True or cache == 'ram'
        self.cache_disk = cache == 'disk'
        # Extend each (file, class-index) sample with a .npy cache path and a slot for the in-RAM image
        extended = []
        for file, index in self.samples:
            extended.append([file, index, Path(file).with_suffix('.npy'), None])
        self.samples = extended

    def __getitem__(self, i):
        """Return {'img': transformed image, 'cls': class index} for sample ``i``, honoring the cache mode."""
        file, cls_idx, npy_file, cached_im = self.samples[i]
        if self.cache_ram and cached_im is None:
            cached_im = self.samples[i][3] = cv2.imread(file)
        elif self.cache_disk:
            if not npy_file.exists():  # decode once and keep the raw array on disk
                np.save(npy_file.as_posix(), cv2.imread(file))
            cached_im = np.load(npy_file)
        else:  # no caching: read the image every time
            cached_im = cv2.imread(file)  # BGR
        if self.album_transforms:
            sample = self.album_transforms(image=cv2.cvtColor(cached_im, cv2.COLOR_BGR2RGB))['image']
        else:
            sample = self.torch_transforms(cached_im)
        return {'img': sample, 'cls': cls_idx}

    def __len__(self) -> int:
        """Return the number of samples in the dataset."""
        return len(self.samples)
# TODO: support semantic segmentation
class SemanticDataset(BaseDataset):
    """Placeholder dataset for semantic segmentation (TODO: not yet implemented)."""

    def __init__(self):
        # NOTE(review): intentionally does not call super().__init__() — BaseDataset requires
        # constructor arguments this placeholder does not yet accept; confirm before use.
        pass
import platform
import time
from pathlib import Path
import pandas as pd
from ultralytics import YOLO
from ultralytics.yolo.engine.exporter import export_formats
from ultralytics.yolo.utils import LINUX, LOGGER, ROOT, SETTINGS
from ultralytics.yolo.utils.checks import check_yolo
from ultralytics.yolo.utils.downloads import download
from ultralytics.yolo.utils.files import file_size
from ultralytics.yolo.utils.torch_utils import select_device
def benchmark(model=Path(SETTINGS['weights_dir']) / 'yolov8n.pt', imgsz=160, half=False, device='cpu', hard_fail=False):
    """
    Benchmark a YOLO model across all export formats: export, predict and validate each format,
    collecting file size, accuracy metric and inference speed into a DataFrame.

    Args:
        model (str | Path | YOLO): model weights path or loaded YOLO model.
        imgsz (int): inference image size.
        half (bool): use FP16 where supported.
        device (str): 'cpu' or CUDA device string.
        hard_fail (bool | float): if truthy, re-raise non-AssertionError failures; if a float,
            additionally assert every collected metric exceeds this floor.

    Returns:
        (pd.DataFrame): one row per export format with status, size, metric and speed.
    """
    device = select_device(device, verbose=False)
    if isinstance(model, (str, Path)):
        model = YOLO(model)

    # FIX: resolve the validation dataset and metric key up-front. Previously `data` and `key`
    # were assigned only inside the per-format try-block, so if every format failed before
    # validation (or the task was unrecognized) the DataFrame/summary below raised NameError.
    task_map = {
        'detect': ('coco128.yaml', 'metrics/mAP50-95(B)'),
        'segment': ('coco128-seg.yaml', 'metrics/mAP50-95(M)'),
        'classify': ('imagenet100', 'metrics/accuracy_top5')}
    data, key = task_map.get(model.task, ('coco128.yaml', 'metrics/mAP50-95(B)'))

    y = []
    t0 = time.time()
    for i, (name, fmt, suffix, cpu, gpu) in export_formats().iterrows():  # index, (name, format, suffix, CPU, GPU)
        emoji = '❌'  # indicates export failure
        try:
            assert i != 11, 'paddle exports coming soon'
            assert i != 9 or LINUX, 'Edge TPU export only supported on Linux'
            if 'cpu' in device.type:
                assert cpu, 'inference not supported on CPU'
            if 'cuda' in device.type:
                assert gpu, 'inference not supported on GPU'

            # Export
            if fmt == '-':
                filename = model.ckpt_path or model.cfg
                export = model  # PyTorch format requires no export
            else:
                filename = model.export(imgsz=imgsz, format=fmt, half=half, device=device)  # all others
                export = YOLO(filename, task=model.task)
            assert suffix in str(filename), 'export failed'
            emoji = '❎'  # indicates export succeeded

            # Predict
            assert i not in (9, 10), 'inference not supported'  # Edge TPU and TF.js are unsupported
            assert i != 5 or platform.system() == 'Darwin', 'inference only supported on macOS>=10.13'  # CoreML
            if not (ROOT / 'assets/bus.jpg').exists():
                download(url='https://ultralytics.com/images/bus.jpg', dir=ROOT / 'assets')
            export.predict(ROOT / 'assets/bus.jpg', imgsz=imgsz, device=device, half=half)

            # Validate
            results = export.val(data=data, batch=1, imgsz=imgsz, plots=False, device=device, half=half, verbose=False)
            metric, speed = results.results_dict[key], results.speed['inference']
            y.append([name, '✅', round(file_size(filename), 1), round(metric, 4), round(speed, 2)])
        except Exception as e:
            if hard_fail:
                assert type(e) is AssertionError, f'Benchmark hard_fail for {name}: {e}'
            LOGGER.warning(f'ERROR ❌️ Benchmark failure for {name}: {e}')
            y.append([name, emoji, None, None, None])  # mAP, t_inference

    # Print results
    check_yolo(device=device)  # print system info
    df = pd.DataFrame(y, columns=['Format', 'Status❔', 'Size (MB)', key, 'Inference time (ms/im)'])

    name = Path(model.ckpt_path).name
    s = f'\nBenchmarks complete for {name} on {data} at imgsz={imgsz} ({time.time() - t0:.2f}s)\n{df}\n'
    LOGGER.info(s)
    with open('benchmarks.log', 'a', errors='ignore', encoding='utf-8') as f:
        f.write(s)

    if hard_fail and isinstance(hard_fail, float):
        metrics = df[key].array  # values to compare to floor
        floor = hard_fail  # minimum metric floor to pass, i.e. = 0.29 mAP for YOLOv5n
        assert all(x > floor for x in metrics if pd.notna(x)), f'HARD FAIL: one or more metric(s) < floor {floor}'

    return df
if __name__ == '__main__':
benchmark() | /roar_yolo-9.0.0-py3-none-any.whl/ultralytics/yolo/utils/benchmarks.py | 0.464659 | 0.228608 | benchmarks.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.