from __future__ import annotations
import os
import sys
import _setup_returnn_env # noqa
import returnn.__main__ as rnn
from returnn.log import log
from returnn.config import Config
import argparse
from returnn.util.basic import hms, human_size, parse_orthography, parse_orthography_into_symbols, unicode
import gzip
from xml.etree import ElementTree
import wave
import time
def found_sub_seq(sub_seq, seq):
    """
    Checks whether ``sub_seq`` occurs as a contiguous subsequence of ``seq``.

    :param list[str] sub_seq: sequence to search for (the empty sequence always matches)
    :param list[str] seq: sequence to search in
    :rtype: bool
    """
    if not sub_seq:
        # The empty sequence is a sub-sequence of anything, also of an empty seq.
        # (The old loop-based check wrongly returned False for found_sub_seq([], []).)
        return True
    # Naive O(len(seq) * len(sub_seq)) scan; fine for the short orthography sequences used here.
    n = len(sub_seq)
    for i in range(len(seq) - n + 1):
        if seq[i : i + n] == sub_seq:
            return True
    return False
def iter_dataset(dataset, options, callback):
    """
    Iterates over all seqs of the dataset and calls ``callback`` per sequence.

    :type dataset: Dataset.Dataset
    :param options: argparse.Namespace (unused here, kept for a uniform iter interface)
    :param callback: called as ``callback(frame_len=..., orth=...)`` per sequence
    """
    dataset.init_seq_order(epoch=1)
    assert "orth" in dataset.get_target_list()
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        # Fix: load_seqs takes an exclusive end index; (seq_idx, seq_idx) would load nothing.
        # Same pattern as in other RETURNN tools, e.g. dump-dataset-raw-strings.
        dataset.load_seqs(seq_idx, seq_idx + 1)
        frame_len = dataset.get_seq_length(seq_idx)["data"]
        orth = dataset.get_targets("orth", seq_idx)
        callback(frame_len=frame_len, orth=orth)
        seq_idx += 1
def get_wav_time_len(filename):
    """
    :param str filename: path to a WAV file
    :return: duration of the recording in seconds
    :rtype: float
    """
    # Context manager ensures the file is closed also when reading the header raises
    # (the old open/close pair leaked the handle on error).
    with wave.open(filename) as f:
        return f.getnframes() / float(f.getframerate())
def iter_bliss(filename, options, callback):
    """
    Iterates through a Bliss corpus XML file and calls ``callback`` once per segment.

    :param str filename: Bliss corpus XML file, optionally gzipped
    :param options: argparse.Namespace; uses ``collect_time`` and ``frame_time``
    :param callback: called as ``callback(frame_len=..., orth=...)`` per segment
    """
    corpus_file = open(filename, "rb")
    if filename.endswith(".gz"):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)

    def getelements(tag):
        """Yield *tag* elements from *filename_or_file* xml incrementaly."""
        # Incremental parse to keep memory usage low on large corpora.
        context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
        _, root = next(context)  # get root element
        tree = [root]  # stack of currently open elements, so ancestors stay accessible
        for event, elem in context:
            if event == "start":
                tree += [elem]
            elif event == "end":
                assert tree[-1] is elem
                tree = tree[:-1]
            if event == "end" and elem.tag == tag:
                yield tree, elem
                root.clear()  # free memory

    time_via_wav = False
    for tree, elem in getelements("segment"):
        if options.collect_time:
            start = float(elem.attrib.get("start", 0))
            if "end" in elem.attrib:
                end = float(elem.attrib["end"])
            else:
                # No end time in the XML -> fall back to reading the WAV duration directly.
                if not time_via_wav:
                    time_via_wav = True
                    print(
                        "Time will be read from WAV recordings. Can be slow. Maybe use `--collect_time False`.",
                        file=log.v3,
                    )
                rec_elem = tree[-1]  # direct parent; segments are expected inside a <recording>
                assert rec_elem.tag == "recording"
                wav_filename = rec_elem.attrib["audio"]
                end = get_wav_time_len(wav_filename)
            assert end > start
            frame_len = (end - start) * (1000.0 / options.frame_time)  # duration in number of frames
        else:
            frame_len = 0
        elem_orth = elem.find("orth")
        orth_raw = elem_orth.text or ""  # should be unicode
        orth_split = orth_raw.split()
        orth = " ".join(orth_split)  # normalize whitespace
        callback(frame_len=frame_len, orth=orth)
def iter_txt(filename, options, callback):
    """
    Iterates through a plain text corpus (optionally gzipped) line by line.
    Empty lines are skipped. There is no time info in txt, so frame_len is always 0.

    :param str filename: text file, optionally gzipped
    :param options: argparse.Namespace; ``collect_time`` gets disabled here
    :param callback: called as ``callback(frame_len=..., orth=...)`` per non-empty line
    """
    raw_file = open(filename, "rb")
    stream = gzip.GzipFile(fileobj=raw_file) if filename.endswith(".gz") else raw_file
    if options.collect_time:
        print("No time-info in txt.", file=log.v3)
        options.collect_time = False
    for raw_line in stream:
        stripped = raw_line.strip()
        if stripped:
            callback(frame_len=0, orth=stripped)
def collect_stats(options, iter_corpus):
    """
    Iterates through the corpus, collects the orthography symbols and statistics,
    prints them, and optionally writes the symbol set to ``options.output``.

    :param options: argparse.Namespace
    :param iter_corpus: function taking a callback(frame_len=..., orth=...) which iterates the corpus
    """
    orth_symbols_filename = options.output
    if orth_symbols_filename:
        assert not os.path.exists(orth_symbols_filename)  # refuse to overwrite an existing file

    class Stats:
        # Mutable state shared with the callback below (class attrs instead of nonlocal).
        count = 0
        process_last_time = time.time()
        total_frame_len = 0
        total_orth_len = 0
        orth_syms_set = set()

    # Optionally pre-seed the symbol set with standard character ranges.
    if options.add_numbers:
        Stats.orth_syms_set.update(map(chr, list(range(ord("0"), ord("9") + 1))))
    if options.add_lower_alphabet:
        Stats.orth_syms_set.update(map(chr, list(range(ord("a"), ord("z") + 1))))
    if options.add_upper_alphabet:
        Stats.orth_syms_set.update(map(chr, list(range(ord("A"), ord("Z") + 1))))

    def cb(frame_len, orth):
        """Called per sequence; filters by max lengths and collects symbols/stats."""
        if frame_len >= options.max_seq_frame_len:
            return
        orth_syms = parse_orthography(orth)
        if len(orth_syms) >= options.max_seq_orth_len:
            return
        Stats.count += 1
        Stats.total_frame_len += frame_len
        if options.dump_orth_syms:
            print("Orth:", "".join(orth_syms), file=log.v3)
        if options.filter_orth_sym:
            if options.filter_orth_sym in orth_syms:
                print("Found orth:", "".join(orth_syms), file=log.v3)
        if options.filter_orth_syms_seq:
            filter_seq = parse_orthography_into_symbols(options.filter_orth_syms_seq)
            if found_sub_seq(filter_seq, orth_syms):
                print("Found orth:", "".join(orth_syms), file=log.v3)
        Stats.orth_syms_set.update(orth_syms)
        Stats.total_orth_len += len(orth_syms)
        # Show some progress if it takes long.
        if time.time() - Stats.process_last_time > 2:
            Stats.process_last_time = time.time()
            if options.collect_time:
                print(
                    "Collect process, total frame len so far:",
                    hms(Stats.total_frame_len * (options.frame_time / 1000.0)),
                    file=log.v3,
                )
            else:
                print("Collect process, total orth len so far:", human_size(Stats.total_orth_len), file=log.v3)

    iter_corpus(cb)
    if options.remove_symbols:
        filter_syms = parse_orthography_into_symbols(options.remove_symbols)
        Stats.orth_syms_set -= set(filter_syms)
    # Print the collected statistics.
    if options.collect_time:
        print(
            "Total frame len:",
            Stats.total_frame_len,
            "time:",
            hms(Stats.total_frame_len * (options.frame_time / 1000.0)),
            file=log.v3,
        )
    else:
        print("No time stats (--collect_time False).", file=log.v3)
    print("Total orth len:", Stats.total_orth_len, "(%s)" % human_size(Stats.total_orth_len), end=" ", file=log.v3)
    if options.collect_time:
        print("fraction:", float(Stats.total_orth_len) / Stats.total_frame_len, file=log.v3)
    else:
        print("", file=log.v3)
    print("Average orth len:", float(Stats.total_orth_len) / Stats.count, file=log.v3)
    print("Num symbols:", len(Stats.orth_syms_set), file=log.v3)
    # Optionally write the sorted symbol set, one symbol per line (UTF-8).
    if orth_symbols_filename:
        orth_syms_file = open(orth_symbols_filename, "wb")
        for orth_sym in sorted(Stats.orth_syms_set):
            orth_syms_file.write(b"%s\n" % unicode(orth_sym).encode("utf8"))
        orth_syms_file.close()
        print("Wrote orthography symbols to", orth_symbols_filename, file=log.v3)
    else:
        print("Provide --output to save the symbols.", file=log.v3)
def init(config_filename=None):
    """
    Initializes RETURNN (logging, fault handler, and, if a config is given, config and train data).

    :param str|None config_filename: optional RETURNN config file
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    if config_filename:
        rnn.init_config(config_filename, command_line_options=[])
        rnn.init_log()
    else:
        # No config -> only set up basic logging.
        log.initialize()
    print("RETURNN collect-orth-symbols starting up.", file=log.v3)
    rnn.init_faulthandler()
    if config_filename:
        rnn.init_data()
        rnn.print_task_properties()
def is_bliss(filename):
    """
    :param str filename:
    :return: whether the file looks like a Bliss corpus XML file (optionally gzipped)
    :rtype: bool
    """
    raw_file = open(filename, "rb")
    try:
        corpus_file = raw_file
        if filename.endswith(".gz"):
            corpus_file = gzip.GzipFile(fileobj=raw_file)
        context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
        _, root = next(context)  # get root element; parsing it is enough to identify XML
        return True
    except IOError:  # 'Not a gzipped file' or so
        pass
    except ElementTree.ParseError:  # 'syntax error' or so
        pass
    finally:
        # Fix: the original leaked the file handle on every call.
        raw_file.close()
    return False
def is_crnn_config(filename):
    """
    :param str filename:
    :return: whether the file can be loaded as a RETURNN config
    :rtype: bool
    """
    if filename.endswith(".gz"):
        return False
    # noinspection PyBroadException
    try:
        Config().load_file(filename)
    except Exception:
        return False
    return True
def main(argv):
    """
    Main entry. Parses arguments, detects the input type (Bliss XML, RETURNN config or txt)
    and collects the orthography symbol statistics.

    :param list[str] argv: as sys.argv
    """
    argparser = argparse.ArgumentParser(description="Collect orth symbols.")
    argparser.add_argument("input", help="RETURNN config, Corpus Bliss XML or just txt-data")
    argparser.add_argument(
        "--frame_time", type=int, default=10, help="time (in ms) per frame. not needed for Corpus Bliss XML"
    )
    # Note: type=int (not bool), so pass e.g. `--collect_time 0` to disable.
    argparser.add_argument(
        "--collect_time", type=int, default=True, help="collect time info. can be slow in some cases"
    )
    argparser.add_argument("--dump_orth_syms", action="store_true", help="dump all orthographies")
    argparser.add_argument("--filter_orth_sym", help="dump orthographies which match this filter")
    argparser.add_argument("--filter_orth_syms_seq", help="dump orthographies which match this filter")
    argparser.add_argument(
        "--max_seq_frame_len", type=int, default=float("inf"), help="collect only orthographies <= this max frame len"
    )
    argparser.add_argument(
        "--max_seq_orth_len", type=int, default=float("inf"), help="collect only orthographies <= this max orth len"
    )
    argparser.add_argument("--add_numbers", type=int, default=True, help="add chars 0-9 to orth symbols")
    argparser.add_argument("--add_lower_alphabet", type=int, default=True, help="add chars a-z to orth symbols")
    argparser.add_argument("--add_upper_alphabet", type=int, default=True, help="add chars A-Z to orth symbols")
    argparser.add_argument("--remove_symbols", default="(){}$", help="remove these chars from orth symbols")
    argparser.add_argument("--output", help="where to store the symbols (default: dont store)")
    args = argparser.parse_args(argv[1:])

    # Detect the input type by content, in this priority: Bliss XML, RETURNN config, plain txt.
    bliss_filename = None
    crnn_config_filename = None
    txt_filename = None
    if is_bliss(args.input):
        bliss_filename = args.input
    elif is_crnn_config(args.input):
        crnn_config_filename = args.input
    else:  # treat just as txt
        txt_filename = args.input
    init(config_filename=crnn_config_filename)

    # Select the matching corpus iterator; each takes just the per-seq callback.
    if bliss_filename:
        iter_corpus = lambda cb: iter_bliss(bliss_filename, options=args, callback=cb)
    elif txt_filename:
        iter_corpus = lambda cb: iter_txt(txt_filename, options=args, callback=cb)
    else:
        # RETURNN config given -> iterate over its train dataset.
        iter_corpus = lambda cb: iter_dataset(rnn.train_data, options=args, callback=cb)
    collect_stats(args, iter_corpus)
    if crnn_config_filename:
        rnn.finalize()
if __name__ == "__main__":
    main(sys.argv)
from __future__ import annotations
import sys
import argparse
import _setup_returnn_env # noqa
from returnn.log import log
import returnn.__main__ as rnn
import returnn.datasets.hdf as hdf_dataset_mod
from returnn.datasets import Dataset, init_dataset
from returnn.config import Config
def hdf_dataset_init(file_name):
    """
    Creates a writer for a new HDF dataset file.

    :param str file_name: filename of hdf dataset file in the filesystem
    :rtype: hdf_dataset_mod.HDFDatasetWriter
    """
    writer = hdf_dataset_mod.HDFDatasetWriter(filename=file_name)
    return writer
def hdf_dump_from_dataset(dataset, hdf_dataset, parser_args):
    """
    Dumps the dataset (or the seq range selected via the CLI args) into the HDF writer.

    :param Dataset dataset: could be any dataset implemented as child of Dataset
    :param hdf_dataset_mod.HDFDatasetWriter hdf_dataset:
    :param parser_args: argparse object from main(); uses epoch, start_seq, end_seq
    """
    hdf_dataset.dump_from_dataset(
        dataset=dataset,
        epoch=parser_args.epoch,
        start_seq=parser_args.start_seq,
        end_seq=parser_args.end_seq,
        use_progress_bar=True,
    )
def hdf_close(hdf_dataset):
    """
    Closes the HDF writer, flushing all remaining data to disk.

    :param HDFDataset.HDFDatasetWriter hdf_dataset: to close
    """
    hdf_dataset.close()
def init(config_filename, cmd_line_opts, dataset_config_str):
    """
    Initializes RETURNN and constructs the source dataset to dump.

    :param str config_filename: global config for CRNN
    :param list[str] cmd_line_opts: options for init_config method
    :param str dataset_config_str: dataset via init_dataset_via_str()
    :return: the dataset to dump (train data from the config, or built from the init string)
    :rtype: Dataset
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    if config_filename:
        rnn.init_config(config_filename, cmd_line_opts)
        rnn.init_log()
    else:
        log.initialize(verbosity=[5])
    print("Returnn hdf_dump starting up.", file=log.v3)
    rnn.init_faulthandler()
    if config_filename:
        rnn.init_data()
        rnn.print_task_properties()
        assert isinstance(rnn.train_data, Dataset)
        dataset = rnn.train_data
    else:
        # No config -> a dataset init string must have been given instead.
        assert dataset_config_str
        dataset = init_dataset(dataset_config_str)
    print("Source dataset:", dataset.len_info(), file=log.v3)
    return dataset
def _is_crnn_config(filename):
    """
    :param str filename:
    :rtype: bool
    """
    if filename.endswith(".gz"):
        return False
    if filename.endswith(".config"):
        return True
    # noinspection PyBroadException
    try:
        Config().load_file(filename)
    except Exception:
        return False
    return True
def main(argv):
    """
    Main entry.
    """
    parser = argparse.ArgumentParser(description="Dump dataset or subset of dataset into external HDF dataset")
    parser.add_argument(
        "config_file_or_dataset", type=str, help="Config file for RETURNN, or directly the dataset init string"
    )
    parser.add_argument("hdf_filename", type=str, help="File name of the HDF dataset, which will be created")
    parser.add_argument("--start_seq", type=int, default=0, help="Start sequence index of the dataset to dump")
    parser.add_argument("--end_seq", type=int, default=float("inf"), help="End sequence index of the dataset to dump")
    parser.add_argument("--epoch", type=int, default=1, help="Optional start epoch for initialization")
    args = parser.parse_args(argv[1:])

    # The positional arg is either a RETURNN config file or a dataset init string.
    returnn_config = None
    dataset_config_str = None
    if _is_crnn_config(args.config_file_or_dataset):
        returnn_config = args.config_file_or_dataset
    else:
        dataset_config_str = args.config_file_or_dataset
    dataset = init(config_filename=returnn_config, cmd_line_opts=[], dataset_config_str=dataset_config_str)
    hdf_dataset = hdf_dataset_init(args.hdf_filename)
    hdf_dump_from_dataset(dataset, hdf_dataset, args)
    hdf_close(hdf_dataset)
    rnn.finalize()
if __name__ == "__main__":
    main(sys.argv)
from __future__ import annotations
import sys
import argparse
import gzip
import pickle
import typing
import numpy
import h5py
from operator import itemgetter
from itertools import islice, zip_longest
UNKNOWN_LABEL = "<UNK>"
POSTFIX = "</S>"
class HDFTranslationDatasetCreator(object):
    """
    Creates the same HDF file as hdf_dump.py called on a TranslationDataset, but does so much faster
    and using much less memory.
    Input parameters are basically the files in a TranslationDataset folder. Additionally, the number of lines has to
    be given.
    """

    def __init__(
        self,
        hdf_file,
        source_file,
        target_file,
        source_vocabularies,
        target_vocabularies,
        source_factors,
        target_factors,
        number_of_lines,
        factor_separator="|",
        compression_method=None,
        line_buffer_size=100000,
        data_buffer_size=5000000,
    ):
        """
        :param str hdf_file: filename for the hdf file being created
        :param str source_file: filename of the source text file
        :param str target_file: filename of the target text file
        :param list[str] source_vocabularies: Filenames of the source vocabularies (in pickle format).
            Usually there is just one. In case of source factors, provide one per factor.
        :param list[str] target_vocabularies: Filenames of the target vocabularies (in pickle format).
            Usually there is just one. In case of target factors, provide one per factor.
        :param list[str] source_factors: Data keys for the source factors. First data key is always 'data'
            and must not be contained in the list.
        :param list[str] target_factors: Data keys for the target factors. First data key is always 'classes'
            and must not be contained in the list.
        :param str factor_separator: String used to separate factors of the words.
            E.g. if "|", words are expected to be of format "<lemma>|<factor>|...".
        :param int number_of_lines: line count in source_file and target_file
        :param str compression_method: Optional compression method as supported by h5py.File.create_dataset().
            Applied to the main data only ('inputs' and 'target/data/classes').
        :param int line_buffer_size: number of corpus lines to read and process at once
        :param int data_buffer_size: space to reserve inside the hdf file at once, in numbers of integers
        """
        self.hdf_file_name = hdf_file
        self.hdf_file = None  # created lazily in _init_hdf_file()
        self.compression_method = compression_method
        self.source_file_handle = self._open_file(source_file)
        self.target_file_handle = self._open_file(target_file)
        source_vocabularies = [self._read_vocabulary(v) for v in source_vocabularies]
        target_vocabularies = [self._read_vocabulary(v) for v in target_vocabularies]
        self.source_data_keys = ["data"] + source_factors
        self.target_data_keys = ["classes"] + target_factors
        self._vocabularies = {"source": source_vocabularies, "target": target_vocabularies}
        self._vocabulary_sizes = {
            "source": [len(v) for v in source_vocabularies],
            "target": [len(v) for v in target_vocabularies],
        }
        # Index used for out-of-vocabulary words; may be None if UNKNOWN_LABEL is not in a vocabulary.
        self._unknown_ids = {
            "source": [v.get(UNKNOWN_LABEL) for v in source_vocabularies],
            "target": [v.get(UNKNOWN_LABEL) for v in target_vocabularies],
        }
        self.number_of_lines = number_of_lines
        self.line_buffer_size = line_buffer_size
        self.data_buffer_size = data_buffer_size
        self._number_of_processed_lines = 0
        # Per data key: how many word indices have been written to the HDF datasets so far.
        self._write_offsets = {data_key: 0 for data_key in self.source_data_keys + self.target_data_keys}
        self.factor_separator = factor_separator

    def create(self):
        """
        Main function writing the HDF file.
        """
        self._init_hdf_file()
        print("Setting attributes...", file=sys.stderr)
        sys.stderr.flush()
        self._write_attributes()
        print("Done.", file=sys.stderr)
        print("Writing labels (vocabulary)...", file=sys.stderr)
        sys.stderr.flush()
        self._write_labels()
        print("Done.", file=sys.stderr)
        print("Writing source, target, sequence lengths and tags for all lines...", file=sys.stderr)
        sys.stderr.flush()
        # Process the corpus chunk-wise until _write_data signals end-of-file.
        end_of_file = False
        while not end_of_file:
            end_of_file = self._write_data()
            if not end_of_file:
                print("> Processed {} lines.".format(self._number_of_processed_lines), file=sys.stderr)
                sys.stderr.flush()
        print("Done.", file=sys.stderr)
        sys.stderr.flush()
        self.hdf_file.close()

    def _init_hdf_file(self):
        """
        Sets up the HDF file and initializes the datasets that will be filled.
        """
        self.hdf_file = h5py.File(self.hdf_file_name, "w")
        self.hdf_file.create_group("targets/data")
        self.hdf_file.create_group("targets/size")
        self.hdf_file.create_group("targets/labels")
        num_data_keys = len(self.source_data_keys) + len(self.target_data_keys)
        self.hdf_file.create_dataset("seqLengths", (self.number_of_lines, num_data_keys), dtype="int32")
        # Tags are "line-<idx>", so the longest one is bounded by the number of lines.
        max_tag_length = len("line-") + len(str(self.number_of_lines))
        self._tag_dtype = "S{}".format(max_tag_length)
        self.hdf_file.create_dataset("seqTags", (self.number_of_lines,), dtype=self._tag_dtype)
        # The main data is resizable: space is reserved in chunks and trimmed in _finalize_data().
        self.hdf_file.create_dataset(
            "inputs", (self.data_buffer_size,), maxshape=(None,), dtype="int32", compression=self.compression_method
        )
        # HDF format expects one input only, so store source factors as target too.
        for data_key in self.source_data_keys[1:] + self.target_data_keys:
            self.hdf_file["targets/data"].create_dataset(
                data_key, (self.data_buffer_size,), maxshape=(None,), dtype="int32", compression=self.compression_method
            )

    def _write_attributes(self):
        """
        Writes several attributes to the HDF file.
        """
        # Vocabulary size (plus feature dim 1) per data key; source factors count as targets.
        for index, data_key in enumerate(self.source_data_keys[1:], start=1):
            self.hdf_file["targets/size"].attrs[data_key] = (self._vocabulary_sizes["source"][index], 1)
        for index, data_key in enumerate(self.target_data_keys):
            self.hdf_file["targets/size"].attrs[data_key] = (self._vocabulary_sizes["target"][index], 1)
        # Those should be deprecated, but include nevertheless to exactly reproduce hdf_dump.
        self.hdf_file.attrs["inputPattSize"] = self._vocabulary_sizes["source"][0]
        self.hdf_file.attrs["numLabels"] = self._vocabulary_sizes["target"][0]

    def _write_labels(self):
        """
        Writes the labels (i.e. target vocabulary) to the HDF file.
        """
        for side in ["source", "target"]:
            # We have to write it for the source factors too, because they are treated like targets.
            data_keys = self.source_data_keys if side == "source" else self.target_data_keys
            for index, data_key in enumerate(data_keys):
                if data_key == "data":
                    continue
                # Order the words by their index so that position in the list == word id.
                sorted_vocabulary_tuples = sorted(self._vocabularies[side][index].items(), key=itemgetter(1))
                labels = [word.encode("utf8") for (word, _) in sorted_vocabulary_tuples]
                assert len(labels) == self._vocabulary_sizes[side][index], "Word ids were not unique."
                max_label_length = max([len(label) for label in labels])
                dtype = "S{}".format(max_label_length + 1)
                labels = [numpy.array(label, dtype=dtype, ndmin=1) for label in labels]
                labels = numpy.concatenate(labels)
                self.hdf_file["targets/labels"].create_dataset(
                    data_key, (self._vocabulary_sizes[side][index],), data=labels, dtype=dtype
                )

    def _write_data(self):
        """
        Loads a chunk of lines from the corpus and writes all corresponding data to the HDF file.

        :return: whether the end of the corpus is reached
        :rtype: bool
        """
        data_chunks = self._get_chunk()
        if not data_chunks["data"]:
            self._finalize_data()
            return True
        source_lengths = [len(line) for line in data_chunks["data"]]
        target_lengths = [len(line) for line in data_chunks["classes"]]
        chunk_size = len(data_chunks["data"])
        self._write_lengths(source_lengths, target_lengths)
        self._write_tags(chunk_size)
        for data_key, data_chunk in data_chunks.items():
            self._write_data_indices(data_chunk, data_key)
        self._number_of_processed_lines += chunk_size
        return False

    def _get_chunk(self):
        """
        Reads in the next chunk of lines from the corpus files.

        :return: a dict 'data_key' -> word indices for 'data_key' (int32, shape [Lines]) for all source and target data
        :rtype: dict[str,list[numpy.ndarray]]
        """
        source_lines = islice(self.source_file_handle, self.line_buffer_size)
        target_lines = islice(self.target_file_handle, self.line_buffer_size)
        data_chunks = {data_key: [] for data_key in self.source_data_keys + self.target_data_keys}
        # zip_longest so that a length mismatch between the two corpora is detected.
        for source_line, target_line in zip_longest(source_lines, target_lines):
            assert source_line is not None and target_line is not None, "Number of source and target lines differ."
            source_indices = self._line_to_indices(source_line, "source")
            for data_key, indices in zip(self.source_data_keys, source_indices):
                data_chunks[data_key].append(indices)
            target_indices = self._line_to_indices(target_line, "target")
            for data_key, indices in zip(self.target_data_keys, target_indices):
                data_chunks[data_key].append(indices)
        return data_chunks

    def _write_lengths(self, source_lengths, target_lengths):
        """
        Writes the sequence lengths to the HDF file.

        :param list[int] source_lengths: lengths of all source lines in current chunk
        :param list[int] target_lengths: lengths of all target lines in current chunk
        """
        # We treat source factors as targets internally because HDF format does not support multiple inputs.
        # For each sequence, seqLengths is expected to contain the length of the input and the length of each of the
        # targets, in alphabetical order of the target data keys. As all source and all target factors share the
        # same lengths we just choose between the source and target lengths.
        target_sequence_lengths = {}
        for data_key in self.source_data_keys[1:]:
            target_sequence_lengths[data_key] = source_lengths
        for data_key in self.target_data_keys:
            target_sequence_lengths[data_key] = target_lengths
        # Now sort by key.
        key_lengths_tuples_sorted = sorted(
            target_sequence_lengths.items(), key=lambda x: x[0]
        )  # type: typing.List[typing.Tuple[str,typing.List[int]]] # nopep8
        target_lengths_sorted = [key_length_tuple[1] for key_length_tuple in key_lengths_tuples_sorted]
        # Finally, add one time the source lengths for the input ("data") and convert to numpy.
        lengths = numpy.array([source_lengths] + target_lengths_sorted, dtype="int32").transpose()
        offset = self._number_of_processed_lines
        assert len(lengths) + offset <= self.number_of_lines, "More lines in the corpus files than specified."
        self.hdf_file["seqLengths"][offset : offset + len(lengths), :] = lengths

    def _write_tags(self, chunk_size):
        """
        Writes the sequence tags to the HDF file.

        :param int chunk_size: number of lines in the current chunk
        """
        offset = self._number_of_processed_lines
        tags = [numpy.array("line-" + str(offset + i), dtype=self._tag_dtype, ndmin=1) for i in range(chunk_size)]
        tags = numpy.concatenate(tags)
        self.hdf_file["seqTags"][offset : offset + chunk_size] = tags

    def _write_data_indices(self, chunk, data_key):
        """
        Writes the main data (word indices for the source or target corpus) to the HDF file.

        :param list[numpy.ndarray] chunk: word indices for all lines in the current chunk
        :param str data_key: "data", "classes" or a name of a factor
        """
        indices = numpy.concatenate(chunk)
        if data_key == "data":
            dataset = self.hdf_file["inputs/"]
        else:
            dataset = self.hdf_file["targets/data/{}".format(data_key)]
        offset = self._write_offsets[data_key]
        length = len(indices)
        # Grow the dataset in buffer-sized steps if the reserved space is exhausted.
        if offset + length > len(dataset):
            buffer_size = max(self.data_buffer_size, length)
            dataset.resize((offset + buffer_size,))
        dataset[offset : offset + length] = indices
        self._write_offsets[data_key] += length

    def _finalize_data(self):
        """
        Called after all data is written. Checks number of lines and resizes datasets down to actual data size.
        """
        # Make sure the number of lines given by the user was correct.
        # Otherwise lengths and labels would have trailing zeros.
        assert (
            self.number_of_lines == self._number_of_processed_lines
        ), "Fewer lines ({}) in the corpus files " "than specified ({}).".format(
            self._number_of_processed_lines, self.number_of_lines
        )
        # Trim datasets to actually occupied length, i.e. remove unused reserved space.
        self.hdf_file["inputs"].resize((self._write_offsets["data"],))
        for data_key in self.source_data_keys[1:] + self.target_data_keys:
            self.hdf_file["targets/data/{}".format(data_key)].resize((self._write_offsets[data_key],))

    def _line_to_indices(self, line, side):
        """
        Converts a line of text to arrays of word indices.

        :param str line: input line
        :param str side: "source" or "target"
        :return: word indices (int32, shape [num_words]) for all source or target factors
        :rtype: list[numpy.ndarray]
        """
        data_keys = self.source_data_keys if side == "source" else self.target_data_keys
        words = line.strip().split()
        if len(data_keys) == 1:
            # No factors: just one word list, with the sentence-end token appended.
            word_list_per_factor = [words + [POSTFIX]]
        else:
            if words:
                # Split each word "lemma|factor|..." into its factors.
                words_split_into_factors = [word.split(self.factor_separator) for word in words]
                assert all(
                    len(factors) == len(data_keys) for factors in words_split_into_factors
                ), "All words must have all factors. Expected: " + self.factor_separator.join(data_keys)
                word_list_per_factor = zip(*words_split_into_factors)
            else:
                word_list_per_factor = [[]] * len(data_keys)
            word_list_per_factor = [list(words) + [POSTFIX] for words in word_list_per_factor]
        indices_list = []
        for index, (data_key, words) in enumerate(zip(data_keys, word_list_per_factor)):
            # Unknown words map to the id of UNKNOWN_LABEL (may be None if not in the vocabulary).
            indices = [self._vocabularies[side][index].get(word, self._unknown_ids[side][index]) for word in words]
            indices_numpy = numpy.array(indices, dtype=numpy.int32)
            indices_list.append(indices_numpy)
        return indices_list

    @staticmethod
    def _open_file(file_name):
        """
        :param str file_name: filename of a plain text file, possibly zipped
        :return: file handle
        :rtype: io.TextIOWrapper|gzip.GzipFile
        """
        if file_name.endswith(".gz"):
            return gzip.open(file_name, "rt")
        else:
            return open(file_name, "r")

    @staticmethod
    def _read_vocabulary(file_name):
        """
        :param str file_name: filename of the vocabulary (in pickle format)
        :return: mapping from words to indices
        :rtype: dict[str,int]
        """
        # NOTE(review): the file handle is not closed explicitly; relies on GC.
        file_handle = open(file_name, "rb")
        vocabulary = pickle.load(file_handle)
        return vocabulary
def parse_args():
    """
    Defines and parses the command line arguments of this tool.

    :rtype: argparse.Namespace
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("-s", "--source_corpus", required=True, help="Source corpus file, possibly zipped.")
    arg_parser.add_argument("-t", "--target_corpus", required=True, help="Target corpus file, possibly zipped.")
    arg_parser.add_argument(
        "-v",
        "--source_vocabulary",
        required=True,
        help=(
            "Source vocabulary in pickle format."
            "In case of source factors provide a comma separated list containing vocabularies for each factor."
        ),
    )
    arg_parser.add_argument(
        "-w",
        "--target_vocabulary",
        required=True,
        help=(
            "Target vocabulary in pickle format."
            "In case of target factors provide a comma separated list containing vocabularies for each factor."
        ),
    )
    arg_parser.add_argument("-o", "--hdf_file", required=True, help="Output HDF file name.")
    arg_parser.add_argument("--source_factors", help="Comma separated list of data keys for the source factors.")
    arg_parser.add_argument("--target_factors", help="Comma separated list of data keys for the target factors.")
    arg_parser.add_argument(
        "--factor_separator",
        default="|",
        help=(
            "String used to separate factors of the words, E.g. if '|', words are expected to be "
            "of format '<lemma>|<factor>|...'"
        ),
    )
    arg_parser.add_argument(
        "-n", "--number_of_lines", required=True, type=int, help="The number of total lines in the corpus files."
    )
    arg_parser.add_argument(
        "-c", "--compression", help="Type of compression (e.g. 'gzip', 'lzf'). Turned off if not given."
    )
    arg_parser.add_argument("-l", "--line_buffer_size", type=int, help="How many lines to read at once.", default=100000)
    arg_parser.add_argument(
        "-d",
        "--data_buffer_size",
        type=int,
        help="How much space to reserve in the HDF dataset " "at once (in number of integers).",
        default=5000000,
    )
    return arg_parser.parse_args()
def main():
    """
    Main entry.
    """
    args = parse_args()
    # In case of source or target factors we need a vocabularies for each.
    source_vocabularies = args.source_vocabulary.split(",")
    target_vocabularies = args.target_vocabulary.split(",")
    source_factors = args.source_factors.split(",") if args.source_factors else []
    target_factors = args.target_factors.split(",") if args.target_factors else []
    # The first factor ('data'/'classes') is implicit, hence "+ 1".
    assert len(source_factors) + 1 == len(source_vocabularies), (
        "Number of source factors must be one less "
        "than number of source vocabularies (first factor is always called 'data')"
    )
    assert len(target_factors) + 1 == len(target_vocabularies), (
        "Number of target factors must be one less "
        "than number of target vocabularies (first factor is always called 'classes')"
    )
    HDFTranslationDatasetCreator(
        args.hdf_file,
        args.source_corpus,
        args.target_corpus,
        source_vocabularies,
        target_vocabularies,
        source_factors,
        target_factors,
        args.number_of_lines,
        args.factor_separator,
        args.compression,
        args.line_buffer_size,
        args.data_buffer_size,
    ).create()
if __name__ == "__main__":
    sys.exit(main())
from __future__ import annotations
import os
import sys
import time
import numpy
import argparse
import typing
import _setup_returnn_env # noqa
import returnn.__main__ as rnn
from returnn.log import log
from returnn.util.basic import Stats, hms
from returnn.datasets import Dataset, init_dataset
import returnn.util.basic as util
def get_raw_strings(dataset, options):
    """
    Iterates through the dataset and collects the raw string for each sequence,
    printing progress along the way.

    :param Dataset dataset:
    :param options: argparse.Namespace; uses startseq, endseq and key
    :return: list of (seq tag, string)
    :rtype: list[(str,str)]
    """
    refs = []
    start_time = time.time()
    seq_len_stats = Stats()
    seq_idx = options.startseq
    if options.endseq < 0:
        options.endseq = float("inf")  # -1 means: until the end of the dataset
    # Only use the progress bar on an interactive terminal without per-seq logging.
    interactive = util.is_tty() and not log.verbose[5]
    print("Iterating over %r." % dataset, file=log.v2)
    while dataset.is_less_than_num_seqs(seq_idx) and seq_idx <= options.endseq:
        dataset.load_seqs(seq_idx, seq_idx + 1)
        complete_frac = dataset.get_complete_frac(seq_idx)
        start_elapsed = time.time() - start_time
        try:
            num_seqs_s = str(dataset.num_seqs)
        except NotImplementedError:
            # Not all datasets know their total num seqs; fall back to an estimate or "?".
            try:
                num_seqs_s = "~%i" % dataset.estimated_num_seqs
            except TypeError:  # a number is required, not NoneType
                num_seqs_s = "?"
        progress_prefix = "%i/%s" % (
            seq_idx,
            num_seqs_s,
        )
        progress = "%s (%.02f%%)" % (progress_prefix, complete_frac * 100)
        if complete_frac > 0:
            # Extrapolate the remaining time from the elapsed time and the completed fraction.
            total_time_estimated = start_elapsed / complete_frac
            remaining_estimated = total_time_estimated - start_elapsed
            progress += " (%s)" % hms(remaining_estimated)
        seq_tag = dataset.get_tag(seq_idx)
        assert isinstance(seq_tag, str)
        ref = dataset.get_data(seq_idx, options.key)
        if isinstance(ref, numpy.ndarray):
            # Strings can come back as 0-dim object arrays or as uint8 byte arrays.
            assert ref.shape == () or (ref.ndim == 1 and ref.dtype == numpy.uint8)
            if ref.shape == ():
                ref = ref.flatten()[0]  # get the entry itself (str or bytes)
            else:
                ref = ref.tobytes()
        if isinstance(ref, bytes):
            ref = ref.decode("utf8")
        assert isinstance(ref, str)
        seq_len_stats.collect([len(ref)])
        refs.append((seq_tag, ref))
        if interactive:
            util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
        elif log.verbose[5]:
            print(progress_prefix, "seq tag %r, ref len %i chars" % (seq_tag, len(ref)))
        seq_idx += 1
    print("Done. Num seqs %i. Total time %s." % (seq_idx, hms(time.time() - start_time)), file=log.v1)
    print("More seqs which we did not dumped: %s." % (dataset.is_less_than_num_seqs(seq_idx),), file=log.v1)
    seq_len_stats.dump(stream_prefix="Seq-length %r " % (options.key,), stream=log.v2)
    return refs
# Global RETURNN config instance; set by init() when a config file is given.
config = None  # type: typing.Optional["returnn.config.Config"]
def init(config_filename, log_verbosity):
    """
    Initializes RETURNN: exception hooks, optionally the config, logging, fault handler.

    :param str config_filename: filename to config-file
    :param int log_verbosity:
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    if config_filename:
        print("Using config file %r." % config_filename)
        assert os.path.exists(config_filename)
    rnn.init_config(config_filename=config_filename, command_line_options=[])
    global config
    config = rnn.config
    # Override some config settings for this tool.
    config.set("task", "dump")
    config.set("log", None)
    config.set("log_verbosity", log_verbosity)
    rnn.init_log()
    print("Returnn dump-dataset-raw-strings starting up.", file=log.v1)
    rnn.returnn_greeting()
    rnn.init_faulthandler()
def generic_open(filename, mode="r"):
"""
:param str filename:
:param str mode: text mode by default
:rtype: typing.TextIO|typing.BinaryIO
"""
if filename.endswith(".gz"):
import gzip
if "b" not in mode:
mode += "t"
return gzip.open(filename, mode)
return open(filename, mode)
def main(argv):
    """
    Main entry. Parses args, initializes RETURNN, iterates the dataset and
    writes all raw strings to the ``--out`` file in Python-dict format
    (same format as RETURNN task=search output).
    """
    arg_parser = argparse.ArgumentParser(description="Dump raw strings from dataset. Same format as in search.")
    arg_parser.add_argument("--config", help="filename to config-file. will use dataset 'eval' from it")
    arg_parser.add_argument("--dataset", help="dataset, overwriting config")
    arg_parser.add_argument("--startseq", type=int, default=0, help="start seq idx (inclusive) (default: 0)")
    arg_parser.add_argument("--endseq", type=int, default=-1, help="end seq idx (inclusive) or -1 (default: -1)")
    arg_parser.add_argument("--key", default="raw", help="data-key, e.g. 'data' or 'classes'. (default: 'raw')")
    arg_parser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)")
    arg_parser.add_argument("--out", required=True, help="out-file. py-format as in task=search")
    args = arg_parser.parse_args(argv[1:])
    assert args.config or args.dataset
    init(config_filename=args.config, log_verbosity=args.verbosity)
    if args.dataset:
        dataset = init_dataset(args.dataset)
    elif config.value("dump_data", "eval") in ["train", "dev", "eval"]:
        # NOTE(review): this branch tests the "dump_data" key but then reads
        # "search_data" -- looks like a copy-paste inconsistency; confirm which
        # config key is intended before relying on the config-based path.
        dataset = init_dataset(config.opt_typed_value(config.value("search_data", "eval")))
    else:
        dataset = init_dataset(config.opt_typed_value("wer_data"))
    dataset.init_seq_order(epoch=1)
    try:
        # Output format: a Python dict literal {seq_tag: raw_string, ...}.
        with generic_open(args.out, "w") as output_file:
            refs = get_raw_strings(dataset=dataset, options=args)
            output_file.write("{\n")
            for seq_tag, ref in refs:
                output_file.write("%r: %r,\n" % (seq_tag, ref))
            output_file.write("}\n")
        print("Done. Wrote to %r." % args.out)
    except KeyboardInterrupt:
        print("KeyboardInterrupt")
        sys.exit(1)
    finally:
        rnn.finalize()


if __name__ == "__main__":
    main(sys.argv)
from __future__ import annotations
import os
import numpy
import logging
import tensorflow as tf
import _setup_returnn_env # noqa
import returnn.tf.compat as tf_compat
from returnn.util import better_exchook
better_exchook.install()

# TF1-style command-line flags for this script.
flags = tf_compat.v1.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("checkpoints", "", "Comma-separated list of checkpoints to average.")
flags.DEFINE_integer(
    "num_last_checkpoints", 0, "Averages the last N saved checkpoints. If the checkpoints flag is set, this is ignored."
)
flags.DEFINE_string("prefix", "", "Prefix (e.g., directory) to append to each checkpoint.")
flags.DEFINE_string("output_path", "/tmp/averaged.ckpt", "Path to output the averaged checkpoint to.")
def checkpoint_exists(path):
    """
    Whether a TF checkpoint exists at ``path``.

    A checkpoint may be referenced by its bare path or via its ".meta"/".index"
    companion files, so all three variants are probed.

    :param str path:
    :rtype: bool
    """
    gfile = tf_compat.v1.gfile
    return any(gfile.Exists(candidate) for candidate in (path, path + ".meta", path + ".index"))
def main(_):
    """
    Averages the variables of the given checkpoints and writes the result as a
    new checkpoint to FLAGS.output_path.

    Integer variables (e.g. a global step counter) are not averaged; the value
    from the last checkpoint is taken instead.

    :param _: unused args list, passed in by tf.app.run()
    """
    _logger = logging.getLogger("tensorflow")
    _logger.setLevel("INFO")
    tf_compat.v1.logging.info("%s startup. TF version: %s" % (__file__, tf.__version__))
    if FLAGS.checkpoints:
        # Get the checkpoints list from flags and run some basic checks.
        checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")]
        checkpoints = [c for c in checkpoints if c]
        if not checkpoints:
            raise ValueError("No checkpoints provided for averaging.")
        if FLAGS.prefix:
            checkpoints = [FLAGS.prefix + c for c in checkpoints]
    else:
        assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model"
        assert FLAGS.prefix, "Prefix must be provided when averaging last N checkpoints"
        checkpoint_state = tf.train.get_checkpoint_state(os.path.dirname(FLAGS.prefix))
        # Checkpoints are ordered from oldest to newest.
        checkpoints = checkpoint_state.all_model_checkpoint_paths[-FLAGS.num_last_checkpoints :]
    checkpoints = [c for c in checkpoints if checkpoint_exists(c)]
    if not checkpoints:
        if FLAGS.checkpoints:
            raise ValueError("None of the provided checkpoints exist. %s" % FLAGS.checkpoints)
        else:
            raise ValueError("Could not find checkpoints at %s" % os.path.dirname(FLAGS.prefix))
    # Read variables from all checkpoints and average them.
    tf_compat.v1.logging.info("Reading variables and averaging checkpoints:")
    for c in checkpoints:
        tf_compat.v1.logging.info("%s ", c)
    # The first checkpoint defines the set of variables (name, shape).
    var_list = tf.train.list_variables(checkpoints[0])
    var_values, var_dtypes = {}, {}
    for (name, shape) in var_list:
        var_values[name] = numpy.zeros(shape)
    for checkpoint in checkpoints:
        reader = tf.train.load_checkpoint(checkpoint)
        for name in var_values:
            tensor = reader.get_tensor(name)
            if not isinstance(tensor, numpy.ndarray):  # e.g. int (scalar)
                tensor = numpy.array(tensor)
            assert isinstance(tensor, numpy.ndarray)
            var_dtypes[name] = tensor.dtype
            # Bugfix: tensor.dtype is a numpy.dtype *instance*, so the previous
            # isinstance(tensor.dtype, numpy.integer) check was always False and
            # integer variables (e.g. global_step) got wrongly summed/averaged.
            # numpy.issubdtype correctly tests the dtype kind.
            if numpy.issubdtype(tensor.dtype, numpy.integer):
                var_values[name] = tensor  # just take last
            else:
                var_values[name] += tensor
        tf_compat.v1.logging.info("Read from checkpoint %s", checkpoint)
    for name in var_values:  # Average.
        if not numpy.issubdtype(var_dtypes[name], numpy.integer):
            var_values[name] /= len(checkpoints)
    with tf_compat.v1.variable_scope(tf_compat.v1.get_variable_scope(), reuse=tf_compat.v1.AUTO_REUSE):
        tf_vars = [tf_compat.v1.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) for v in var_values]
    placeholders = [tf_compat.v1.placeholder(v.dtype, shape=v.shape) for v in tf_vars]
    assign_ops = [tf_compat.v1.assign(v, p) for (v, p) in zip(tf_vars, placeholders)]
    saver = tf_compat.v1.train.Saver(tf_compat.v1.all_variables())
    # Build a model consisting only of variables, set them to the average values.
    with tf_compat.v1.Session() as sess:
        sess.run(tf_compat.v1.global_variables_initializer())
        for p, assign_op, (name, value) in zip(placeholders, assign_ops, var_values.items()):
            sess.run(assign_op, {p: value})
        # Use the built saver to save the averaged checkpoint.
        saver.save(sess, FLAGS.output_path)
    tf_compat.v1.logging.info("Averaged checkpoints saved in %s", FLAGS.output_path)


if __name__ == "__main__":
    tf_compat.v1.app.run()
from __future__ import annotations
import torch
from typing import Callable, Optional, Dict
import argparse
import os
from random import random
import _setup_returnn_env # noqa
from returnn.config import Config
from returnn.log import log
from returnn.tensor import TensorDict
# noinspection PyProtectedMember
from returnn.torch.frontend.bridge import _RFModuleAsPTModule
import returnn.frontend as rf
import returnn.util.basic as util
from returnn.tensor.utils import tensor_dict_fill_random_numpy_
from returnn.torch.data.tensor_utils import tensor_dict_numpy_to_torch_
import returnn.__main__ as rnn
# Global RETURNN config instance; populated by init() below.
config = None  # type: Optional[Config]
def init(config_filename: str, checkpoint: str, log_verbosity: int, device: str):
    """
    Initializes RETURNN (config, logging, backend engine) for the ONNX export.
    Sets the module-level ``config`` global as a side effect.

    :param config_filename: Filename to config file.
    :param checkpoint: Filename to the trained model.
    :param log_verbosity: 5 for all seqs (default: 4)
    :param device:
    """
    assert os.path.exists(checkpoint), "The specified checkpoint doesn't exist."
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    assert os.path.exists(config_filename), "The specified config doesn't exist."
    print("Using config file %r." % config_filename)
    rnn.init_config(
        config_filename=config_filename,
        extra_updates={
            "log": None,
            "log_verbosity": log_verbosity,
            "task": __file__,  # just extra info for the config
            "device": device,
        },
    )
    global config
    config = rnn.config
    rnn.init_log()
    print("RETURNN frontend module to ONNX conversion.", file=log.v1)
    rnn.returnn_greeting()
    # Default to the PyTorch backend if the config does not specify one.
    config.typed_dict.setdefault("backend", "torch")
    rnn.init_backend_engine()
    assert util.BackendEngine.is_torch_selected(), "For now only the torch backend is supported."
    rnn.init_faulthandler()
class ForwardModulePT(torch.nn.Module):
    """
    Wraps a plain PyTorch module such that calling it runs ``forward_step``
    from the config and returns the run-context outputs as a raw tensor dict
    (the calling interface expected by torch.onnx.export).
    """

    def __init__(self, pt_module: torch.nn.Module, forward_step: Callable, extern_data: TensorDict):
        """
        :param pt_module: RF module as obtained from the config.
        :param forward_step: forward_step function as obtained from the config.
        :param extern_data: template describing the model inputs.
        """
        super().__init__()
        self.model = pt_module
        self.forward_step_func = forward_step
        self.extern_data = extern_data

    def __call__(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Runs ``forward_step`` on ``data`` and returns the raw output tensors.
        """
        filled = self.extern_data.copy_template()
        filled.assign_from_raw_tensor_dict_(data, with_scalar_dyn_sizes=False, duplicate_dims_are_excluded=True)
        self.forward_step_func(model=self.model, extern_data=filled)
        _check_matching_outputs()
        return rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
class ForwardModuleRF(_RFModuleAsPTModule):
    """
    Wraps a RETURNN-frontend module as a torch.nn.Module such that calling it
    runs ``forward_step`` from the config and returns the run-context outputs
    as a raw tensor dict (the calling interface expected by torch.onnx.export).
    """

    def __init__(self, rf_module: rf.Module, forward_step: Callable, extern_data: TensorDict):
        """
        :param rf_module: RF module as obtained from the config.
        :param forward_step: forward_step function as obtained from the config.
        :param extern_data: template describing the model inputs.
        """
        super().__init__(rf_module)
        self.forward_step_func = forward_step
        self.extern_data = extern_data

    def __call__(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Runs ``forward_step`` on ``data`` and returns the raw output tensors.
        """
        filled = self.extern_data.copy_template()
        filled.assign_from_raw_tensor_dict_(data, with_scalar_dyn_sizes=False, duplicate_dims_are_excluded=True)
        self.forward_step_func(model=self.rf_module, extern_data=filled)
        _check_matching_outputs()
        return rf.get_run_ctx().outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
def _check_matching_outputs():
    """
    Verifies that forward_step produced exactly the declared model outputs,
    and that none of the produced raw tensors is None.
    """
    run_ctx = rf.get_run_ctx()
    run_ctx.check_outputs_complete()
    expected_keys = set(_get_model_outputs_raw_keys())
    outputs_raw = run_ctx.outputs.as_raw_tensor_dict(include_scalar_dyn_sizes=False)
    actual_keys = set(outputs_raw.keys())
    assert expected_keys == actual_keys, (
        f"Model outputs raw keys and output raw keys from forward_step don't match.\n"
        f"Model outputs raw keys: {sorted(expected_keys)}\n"
        f"Output raw keys: {sorted(actual_keys)}"
    )
    none_valued_keys = [k for k, v in outputs_raw.items() if v is None]
    assert not none_valued_keys, (
        f"Output raw keys from forward_step contain None values.\n"
        f"Output raw keys with None: {none_valued_keys}"
    )
def _get_model_outputs_raw_keys():
    """
    Collects the raw tensor-dict keys of the declared model outputs, including
    a "<key>:size<i>" entry for each axis with a non-scalar dynamic size.

    :rtype: list[str]
    """
    raw_keys = []
    for key, tensor in rf.get_run_ctx().expected_outputs.data.items():
        raw_keys.append(key)
        raw_keys.extend(
            f"{key}:size{axis}"
            for axis, dim in enumerate(tensor.dims)
            if dim.dyn_size_ext and dim.dyn_size_ext.dims
        )
    return raw_keys
def main():
    """
    Main entry point: parses args, builds the model from the config, fills the
    declared inputs with random dummy data, wraps the model so that calling it
    runs ``forward_step``, and exports the result to ONNX.
    """
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "config",
        type=str,
        help="Filename to config file. Must have `get_model()` and `forward_step()`. Can optionally have `export()`.",
    )
    parser.add_argument("checkpoint", type=str, help="Checkpoint to RF module, considering the backend.")
    parser.add_argument("out_onnx_filename", type=str, help="Filename of the final ONNX model.")
    parser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)")
    parser.add_argument("--device", type=str, default="cpu", help="'cpu' (default) or 'gpu'.")
    args = parser.parse_args()
    init(config_filename=args.config, checkpoint=args.checkpoint, log_verbosity=args.verbosity, device=args.device)

    # Explicit model outputs are required so the ONNX output names and dynamic
    # axes are known up-front.
    model_outputs_dict = config.typed_value("model_outputs")
    assert (
        model_outputs_dict is not None
    ), "The specified config needs to have explicit model outputs. Please define `model_outputs` in your config."
    model_outputs = TensorDict()
    model_outputs.update(model_outputs_dict, auto_convert=True)

    loaded_checkpoint = torch.load(args.checkpoint, map_location=torch.device(args.device))
    epoch = loaded_checkpoint["epoch"]
    step = loaded_checkpoint["step"]
    rf.init_forward_step_run_ctx(expected_outputs=model_outputs, step=step)
    rf.set_random_seed(42)  # fixed seed, so the random dummy inputs are reproducible

    get_model_func = config.typed_value("get_model")
    assert get_model_func, "get_model() isn't specified in the config passed as a parameter."
    # Random sentinel kwarg, to enforce that get_model() accepts (and ignores)
    # unknown kwargs, for forward compatibility.
    sentinel_kw = {"__fwd_compatible_random_arg_%i" % int(random() * 100): None}
    model = get_model_func(epoch=epoch, step=step, **sentinel_kw)
    is_rf_module = isinstance(model, rf.Module)
    is_pt_module = isinstance(model, torch.nn.Module)
    assert (
        is_rf_module or is_pt_module
    ), "The module returned by get_model() isn't a returnn.frontend.Module or a torch.nn.Module."

    export_func = config.typed_value("export") or torch.onnx.export
    forward_step_func = config.typed_value("forward_step")
    assert forward_step_func is not None, "forward_step() must be defined in the config."

    extern_data_dict = config.typed_value("extern_data")
    extern_data = TensorDict()
    extern_data.update(extern_data_dict, auto_convert=True)
    extern_data.reset_content()
    # Inputs not available at inference time are not part of the exported graph.
    for k, v in list(extern_data.data.items()):
        if not v.available_for_inference:
            del extern_data.data[k]
    # Fill the declared inputs with random dummy data to trace the model.
    tensor_dict_fill_random_numpy_(extern_data)
    tensor_dict_numpy_to_torch_(extern_data)
    extern_data_raw = extern_data.as_raw_tensor_dict(include_scalar_dyn_sizes=False, exclude_duplicate_dims=True)

    model_outputs_raw_keys = _get_model_outputs_raw_keys()

    if is_pt_module:
        model.load_state_dict(loaded_checkpoint["model"])
        model.eval()
        pt_model_fwd = ForwardModulePT(model, forward_step_func, extern_data)
    elif is_rf_module:
        pt_model_fwd = ForwardModuleRF(model, forward_step_func, extern_data)
        pt_model_fwd.load_state_dict(loaded_checkpoint["model"])
        pt_model_fwd.eval()
    else:
        assert False, "PT/RF module?"  # should not get here

    # Mark batch/dynamic dims as dynamic ONNX axes, for both inputs and outputs,
    # including the corresponding "<key>:size<i>" seq-length tensors.
    dynamic_axes = {}
    for k, v in list(extern_data.data.items()) + list(model_outputs.data.items()):
        dynamic_axes[k] = {i: dim.name for i, dim in enumerate(v.dims) if dim.is_dynamic() or dim.is_batch_dim()}
        for i, dim in enumerate(v.dims):
            if dim.dyn_size_ext and dim.dyn_size_ext.dims == ():
                continue  # scalar dyn size, no separate size tensor
            if dim.dyn_size_ext:
                dynamic_axes[f"{k}:size{i}"] = {
                    j: dim_.name
                    for j, dim_ in enumerate(dim.dyn_size_ext.dims)
                    if dim_.is_dynamic() or dim_.is_batch_dim()
                }

    print("*** Input names:", list(extern_data_raw.keys()))
    print("*** Output names:", model_outputs_raw_keys)
    print("*** Dynamic axes:", dynamic_axes)

    export_func(
        pt_model_fwd,
        (extern_data_raw, {}),
        f=args.out_onnx_filename,
        verbose=True,
        input_names=list(extern_data_raw.keys()),
        output_names=model_outputs_raw_keys,
        dynamic_axes=dynamic_axes,
    )


if __name__ == "__main__":
    main()
from __future__ import annotations
import os
import gzip
from argparse import ArgumentParser
from pprint import pprint
from xml.etree import ElementTree
import collections
from collections import defaultdict
import typing
import _setup_returnn_env # noqa
from returnn.datasets import init_dataset
from returnn.datasets.lm import Lexicon, AllophoneState
from returnn.log import log
from returnn.util.basic import uniq
def get_segment_name(tree):
    """
    Builds the full segment name from a path of corpus XML elements.

    :param list tree: XML elements from the corpus root down to the segment
    :return: the elements' "name" attributes joined by "/";
        an unnamed <segment> element contributes "1"
    :rtype: str
    """

    def _name_of(elem):
        name = elem.attrib.get("name")
        if name is not None:
            return name
        if elem.tag == "segment":
            return "1"  # default name for unnamed segments
        assert False, "unknown name: %r, %r" % (elem, vars(elem))

    return "/".join(_name_of(elem) for elem in tree)
def iter_bliss_orth(filename):
    """
    Iterates over a Bliss corpus XML file (optionally gzipped) and yields
    (segment name, orthography) pairs, one per <segment> element.

    :param str filename: path to Bliss XML; a ".gz" suffix enables gzip decompression
    :return: generator over (full segment name, whitespace-normalized orth)
    """
    raw_file = open(filename, "rb")
    if filename.endswith(".gz"):
        corpus_file = gzip.GzipFile(fileobj=raw_file)
    else:
        corpus_file = raw_file

    # noinspection PyShadowingNames
    def getelements(tag):
        """Yield *tag* elements from the corpus xml incrementally."""
        context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
        _, root = next(context)  # get root element
        tree = [root]
        for event, elem in context:
            if event == "start":
                tree += [elem]
            elif event == "end":
                assert tree[-1] is elem
                tree = tree[:-1]
            if event == "end" and elem.tag == tag:
                yield tree, elem

    try:
        for tree, elem in getelements("segment"):
            elem_orth = elem.find("orth")
            orth_raw = elem_orth.text or ""  # should be unicode
            orth_split = orth_raw.split()
            orth = " ".join(orth_split)  # normalize all whitespace to single spaces
            yield get_segment_name(tree + [elem]), orth
    finally:
        # Bugfix: the file handles were never closed before (resource leak).
        # The finally-clause also runs when the consumer abandons the generator.
        corpus_file.close()
        if corpus_file is not raw_file:
            raw_file.close()
def iter_dataset_targets(dataset):
    """
    Iterates over all sequences of the dataset and yields
    (segment name, targets) pairs, where targets is a 1D int32 array
    of sparse "classes" indices.

    :type dataset: Dataset.Dataset
    """
    dataset.init_seq_order(epoch=1)
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        dataset.load_seqs(seq_idx, seq_idx + 1)
        tag = dataset.get_tag(seq_idx)
        targets = dataset.get_targets("classes", seq_idx)
        assert targets.ndim == 1  # sparse
        yield tag, targets.astype("int32")
        seq_idx += 1
class OrthHandler:
    """
    Orthography handler.

    Maps orthographies (via a :class:`Lexicon`) to allophone state sequences,
    and enumerates the possible allophone context variations per phoneme.
    """

    # If enabled, allow any context-type phoneme as left/right context,
    # instead of only the contexts actually observed in the lexicon.
    allo_add_all = False  # only via lexicon

    def __init__(self, lexicon, si_label=None, allo_num_states=3, allo_context_len=1, allow_ci_in_words=True):
        """
        :param Lexicon lexicon:
        :param int si_label: label index of silence (can also be set later)
        :param int allo_num_states: e.g. 3 -> 3-state HMM per (non-silence) phone
        :param int allo_context_len: e.g. 1 -> one left&right context, i.e. triphone
        :param bool allow_ci_in_words: allow context-independent variants inside words
        """
        self.lexicon = lexicon
        # Phonemes sorted by their lexicon index.
        self.phonemes = sorted(self.lexicon.phonemes.keys(), key=lambda s: self.lexicon.phonemes[s]["index"])
        # Phones observed at word boundaries: key -1 -> word-initial, key 1 -> word-final.
        self.word_boundary_phones = {-1: set(), 1: set()}
        # Per direction (-1 left, 1 right): phone -> set of possible context phones,
        # collected from all pronunciations in the lexicon ("" = word boundary).
        self.phon_to_possible_ctx_via_lex = {-1: {}, 1: {}}
        for lemma in self.lexicon.lemmas.values():
            for pron in lemma["phons"]:
                phons = pron["phon"].split()
                assert phons
                self.word_boundary_phones[-1].add(phons[0])
                self.word_boundary_phones[1].add(phons[-1])
                for i in range(len(phons)):
                    # ps = [left neighbor, phone, right neighbor]; "" at word boundary.
                    ps = [phons[i + j] if (0 <= (i + j) < len(phons)) else "" for j in [-1, 0, 1]]
                    self.phon_to_possible_ctx_via_lex[1].setdefault(ps[1], set()).add(ps[2])
                    self.phon_to_possible_ctx_via_lex[-1].setdefault(ps[1], set()).add(ps[0])
        # A "" (word boundary) context means any cross-word context is possible,
        # so add all phones seen at the opposite word boundary.
        for phone in self.lexicon.phoneme_list:
            if "" in self.phon_to_possible_ctx_via_lex[-1][phone]:
                self.phon_to_possible_ctx_via_lex[-1][phone].update(self.word_boundary_phones[1])
            if "" in self.phon_to_possible_ctx_via_lex[1][phone]:
                self.phon_to_possible_ctx_via_lex[1][phone].update(self.word_boundary_phones[-1])
        if allow_ci_in_words:
            for phone in self.lexicon.phoneme_list:
                self.phon_to_possible_ctx_via_lex[-1][phone].add("")
                self.phon_to_possible_ctx_via_lex[1][phone].add("")
        self.si_lemma = self.lexicon.lemmas["[SILENCE]"]
        self.si_phone = self.si_lemma["phons"][0]["phon"]  # type: str
        self.si_label = si_label
        self.allo_num_states = allo_num_states  # e.g. 3 -> 3-state HMM
        self.allo_context_len = allo_context_len  # e.g. 1 -> one left&right, i.e. triphone

    def expected_num_labels_for_monophone_state_tying(self):
        """
        Silence has 1 state, all others have allo_num_states.

        :rtype: int
        """
        num_phones = len(self.lexicon.phonemes)
        return (num_phones - 1) * self.allo_num_states + 1

    def iter_orth(self, orth):
        """
        Iterates over the words of ``orth`` and yields the corresponding lexicon
        lemmas. Unknown symbols containing "/" or "-" are split at those
        separators and retried; otherwise a KeyError is raised.

        :param str orth:
        :return: yields lemmas
        """
        symbols = list(orth.split())
        i = 0
        while i < len(symbols):
            symbol = symbols[i]
            try:
                lemma = self.lexicon.lemmas[symbol]
            except KeyError:
                if "/" in symbol:
                    symbols[i : i + 1] = symbol.split("/")
                    continue
                if "-" in symbol:
                    symbols[i : i + 1] = symbol.split("-")
                    continue
                raise
            i += 1
            yield lemma

    def _iter_possible_ctx(self, phon_id, direction):
        """
        All possible context tuples for this phone in the given direction.
        () means context-independent / word boundary.

        :param str phon_id: e.g. "aa", "aw", "uh", "z", etc.
        :param int direction: 1 (right) or -1 (left)
        :rtype: list[tuple[str]]
        """
        if self.lexicon.phonemes[phon_id]["variation"] == "none":
            # Phone without context variation (e.g. silence/noise).
            return [()]
        if self.allo_add_all:
            res = [()]  # type: typing.List[typing.Tuple[str, ...]]
            res += [
                (p,) for p in sorted(self.lexicon.phonemes.keys()) if self.lexicon.phonemes[p]["variation"] == "context"
            ]
            return res
        return [((p,) if p else ()) for p in sorted(self.phon_to_possible_ctx_via_lex[direction][phon_id])]

    def num_states_for_phone(self, phon_id):
        """
        :param str phon_id:
        :return: number of allophone states for this phone (1 for silence)
        :rtype: int
        """
        if phon_id == self.si_phone:
            return 1
        return self.allo_num_states

    def all_allophone_variations(self, phon, states=None, all_boundary_variations=False):
        """
        Yields all allophone state variations (contexts x states x boundaries)
        for the given phone.

        :param str phon:
        :param None|list[int] states: which states to yield for this phone
        :param bool all_boundary_variations: if False, boundary flags are derived
            from an empty left/right context (word-initial/final)
        :return: yields AllophoneState's
        :rtype: list[AllophoneState]
        """
        if states is None:
            states = range(self.num_states_for_phone(phon))
        if all_boundary_variations:
            boundary_variations = [0, 1, 2, 3]
        else:
            boundary_variations = [0]
        for left_ctx in self._iter_possible_ctx(phon, -1):
            for right_ctx in self._iter_possible_ctx(phon, 1):
                for state in states:
                    for boundary in boundary_variations:
                        a = AllophoneState()
                        a.id = phon
                        a.context_history = left_ctx
                        a.context_future = right_ctx
                        a.state = state
                        a.boundary = boundary
                        if not all_boundary_variations:
                            if not left_ctx:
                                a.mark_initial()
                            if not right_ctx:
                                a.mark_final()
                        yield a

    # noinspection PyMethodMayBeStatic
    def _phones_to_allos(self, phones):
        """Converts a plain phone sequence into context-less AllophoneState's."""
        for p in phones:
            a = AllophoneState()
            a.id = p
            yield a

    def _allos_set_context(self, allos):
        """
        Fills context_history/context_future in-place for all allophones,
        limited to allo_context_len. Context tracking resets at phones whose
        lexicon "variation" is not "context" (e.g. silence).
        """
        if self.allo_context_len == 0:
            return
        # Forward pass: left context.
        ctx = []
        for a in allos:
            if self.lexicon.phonemes[a.id]["variation"] == "context":
                a.context_history = tuple(ctx)
                ctx += [a.id]
                ctx = ctx[-self.allo_context_len :]
            else:
                ctx = []
        # Backward pass: right context.
        ctx = []
        for a in reversed(allos):
            if self.lexicon.phonemes[a.id]["variation"] == "context":
                a.context_future = tuple(reversed(ctx))
                ctx += [a.id]
                ctx = ctx[-self.allo_context_len :]
            else:
                ctx = []

    def _allos_add_states(self, allos):
        """Expands each allophone into its HMM states (silence stays 1-state)."""
        for _a in allos:
            if _a.id == self.si_phone:
                yield _a
            else:  # non-silence
                for state in range(self.allo_num_states):
                    a = AllophoneState()
                    a.id = _a.id
                    a.context_history = _a.context_history
                    a.context_future = _a.context_future
                    a.boundary = _a.boundary
                    a.state = state
                    yield a

    def orth_to_allophone_states(self, orth):
        """
        :param str orth: orthography as a str. orth.split() should give words in the lexicon
        :rtype: list[AllophoneState]
        :returns allophone state list. those will have repetitions etc
        """
        allos = []
        for lemma in self.iter_orth(orth):
            assert len(lemma["phons"]) == 1, "TODO..."  # only single-pronunciation lemmas supported
            phon = lemma["phons"][0]
            l_allos = list(self._phones_to_allos(phon["phon"].split()))
            l_allos[0].mark_initial()
            l_allos[-1].mark_final()
            allos += l_allos
        self._allos_set_context(allos)
        allos = list(self._allos_add_states(allos))
        return allos
def main():
    """
    Main entry. Builds an allophone-state -> label-index mapping ("state tying")
    either analytically ("full") or by aligning dataset targets against the
    corpus orthography ("monophone"), and optionally writes it to a file.
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--action")
    arg_parser.add_argument("--print_seq", action="store_true")
    arg_parser.add_argument("--print_allos", action="store_true")
    arg_parser.add_argument("--print_targets", action="store_true")
    arg_parser.add_argument("--dataset")
    arg_parser.add_argument("--corpus")
    arg_parser.add_argument("--lexicon", help="filename")
    arg_parser.add_argument("--silence", type=int, help="index")
    arg_parser.add_argument("--context", default=1, type=int)
    arg_parser.add_argument("--hmm_states", default=3, type=int)
    arg_parser.add_argument("--state_tying_type", help="'monophone' or 'full'")
    arg_parser.add_argument("--state_tying_output", help="filename")
    arg_parser.add_argument("--allo_add_all", action="store_true")
    args = arg_parser.parse_args()
    dataset = init_dataset(args.dataset) if args.dataset else None
    corpus = dict(iter_bliss_orth(filename=args.corpus)) if args.corpus else None  # segment name -> orth
    lexicon = Lexicon(filename=args.lexicon) if args.lexicon else None
    silence_label = args.silence
    if args.action == "show_corpus":
        pprint(corpus)
        return
    print("Num phones: %i" % len(lexicon.phonemes), file=log.v1)
    print("Phones: %r" % sorted(lexicon.phonemes.keys()), file=log.v1)
    orth_handler = OrthHandler(lexicon=lexicon, allo_context_len=args.context, allo_num_states=args.hmm_states)
    map_idx_to_allo = defaultdict(set)  # type: typing.Dict[int, typing.Set[AllophoneState]]
    map_allo_to_idx = {}  # type: typing.Dict[AllophoneState, int]
    if args.allo_add_all:
        orth_handler.allo_add_all = True
    print("Num HMM states: %i" % orth_handler.allo_num_states, file=log.v1)
    if args.state_tying_type == "monophone":
        print("Monophone state tying.", file=log.v1)
        num_labels = orth_handler.expected_num_labels_for_monophone_state_tying()
        all_label_idx_are_used = True
    elif args.state_tying_type == "full":
        print("Full state tying.", file=log.v1)
        phone_idxs = {
            k: i + 1 for (i, k) in enumerate(lexicon.phoneme_list)
        }  # +1 to keep 0 reserved as the term-symbol
        # Enumerate every allophone variation; its index fully determines the label.
        for phon in lexicon.phoneme_list:
            for allo in orth_handler.all_allophone_variations(phon, all_boundary_variations=True):
                allo_idx = allo.index(
                    phone_idxs=phone_idxs,
                    num_states=orth_handler.allo_num_states,
                    context_length=orth_handler.allo_context_len,
                )
                map_idx_to_allo[allo_idx].add(allo)
        num_labels = max(map_idx_to_allo.keys()) + 1
        all_label_idx_are_used = False
    else:
        raise Exception("invalid state tying type %r" % args.state_tying_type)
    print("Num labels: %i" % num_labels, file=log.v1)
    if dataset:
        count = 0
        for segment_name, targets in iter_dataset_targets(dataset):
            count += 1
            if silence_label is None or count == 1:
                # Heuristic: the most frequent target label is assumed to be silence.
                likely_silence_label = collections.Counter(targets).most_common(1)[0][0]
                if silence_label is None:
                    silence_label = likely_silence_label
                if silence_label != likely_silence_label:
                    print("warning: silence %i but likely %i" % (silence_label, likely_silence_label), file=log.v2)
                print("Silence label: %i" % silence_label, file=log.v1)
                orth_handler.si_label = silence_label
                # Monophone state tying:
                for allo in orth_handler.all_allophone_variations(orth_handler.si_phone):
                    map_idx_to_allo[silence_label].add(allo)
                    map_allo_to_idx[allo] = silence_label
            assert segment_name in corpus
            orth = corpus[segment_name]
            allo_states = orth_handler.orth_to_allophone_states(orth=orth)
            if args.print_seq:
                print("%r %r" % (segment_name, orth))
            if args.print_allos:
                print(" allophone state seq: %r" % allo_states)
            # Collapse repeated target labels and drop silence, so the remaining
            # sequence should align 1:1 with the expected allophone state seq.
            tgt_seq = [t for t in uniq(targets) if t != silence_label]
            if args.print_targets:
                print(" target seq: %r" % (tgt_seq,))
            assert len(allo_states) == len(tgt_seq), "check --hmm_states or so"
            for allo, t in zip(allo_states, tgt_seq):
                allo.boundary = 0  # do not differ between boundaries
                allos = map_idx_to_allo[t]
                if allo in map_allo_to_idx:
                    assert allo in allos, "bad mapping"
                else:
                    assert allo not in allos
                    allos.add(allo)
                    map_allo_to_idx[allo] = t
            if len(map_idx_to_allo) >= num_labels:
                # All label indices were observed; mapping is complete.
                assert len(map_idx_to_allo) == num_labels
                assert 0 in map_idx_to_allo
                assert num_labels - 1 in map_idx_to_allo
                print("Finished with uniq mapping after %i sequences." % count, file=log.v1)
                break
            if count % 100 == 0:
                print("Have indices: %i (num labels: %i)" % (len(map_idx_to_allo), num_labels), file=log.v1)
        print("Finished. Have indices: %i (num labels: %i)" % (len(map_idx_to_allo), num_labels), file=log.v1)
        if len(map_idx_to_allo) < num_labels:
            # Report which phonemes were never observed in the dataset targets.
            found = []
            not_found = []
            for p in sorted(lexicon.phonemes.keys()):
                allo = AllophoneState(p, state=0)
                if allo in map_allo_to_idx:
                    found.append(p)
                else:
                    not_found.append(p)
            print("Phonemes found: %r" % found)
            print("Phonemes not found: %r" % not_found)
    if args.state_tying_output:
        assert not os.path.exists(args.state_tying_output)
        if all_label_idx_are_used:
            assert len(map_idx_to_allo) == num_labels
            assert 0 in map_idx_to_allo
            assert num_labels - 1 in map_idx_to_allo
        # Output format: one "<allophone> <label index>" line per allophone.
        f = open(args.state_tying_output, "w")
        for i, allos in sorted(map_idx_to_allo.items()):
            for allo in allos:
                f.write("%s %i\n" % (allo.format(), i))
        f.close()
        print("Wrote state tying to %r." % args.state_tying_output, file=log.v1)
    print("The end.")


if __name__ == "__main__":
    from returnn.util import better_exchook

    better_exchook.install()
    log.initialize(verbosity=[2])
    main()
from __future__ import annotations
import os
import sys
import time
import typing
import argparse
import numpy
from pprint import pformat
import _setup_returnn_env # noqa
import returnn.__main__ as rnn
from returnn.log import log
import returnn.util.basic as util
from returnn.util.basic import Stats, hms, NumbersDict
from returnn.datasets.basic import Batch, Dataset, init_dataset
from returnn.config import Config
# Globals set up by init() (config/dataset to analyze).
config = None  # type: typing.Optional[Config]
dataset = None  # type: typing.Optional[Dataset]
def analyze_dataset(options):
    """
    Iterates over the dataset batch-by-batch (using the batching options from
    the global config) and prints statistics: number of batches/seqs, total vs.
    actually used frames per data key (i.e. padding overhead).

    :param options: argparse.Namespace
    """
    print("Epoch: %i" % options.epoch, file=log.v3)
    print("Dataset keys:", dataset.get_data_keys(), file=log.v3)
    print("Dataset target keys:", dataset.get_target_list(), file=log.v3)
    assert options.key in dataset.get_data_keys()
    terminal_width, _ = util.terminal_size()
    show_interactive_process_bar = log.verbose[3] and (not log.verbose[5]) and terminal_width > 0
    start_time = time.time()
    num_seqs_stats = Stats()
    if options.endseq < 0:
        options.endseq = float("inf")  # -1 means: no end, iterate everything
    recurrent = True
    used_data_keys = dataset.get_data_keys()
    # Batching options come from the config, to mimic real training batching.
    batch_size = config.typed_value("batch_size", 1)
    max_seqs = config.int("max_seqs", -1)
    seq_drop = config.float("seq_drop", 0.0)
    max_seq_length = config.typed_value("max_seq_length", None) or config.float("max_seq_length", 0)
    max_pad_size = config.typed_value("max_pad_size", None)
    batches = dataset.generate_batches(
        recurrent_net=recurrent,
        batch_size=batch_size,
        max_seqs=max_seqs,
        max_seq_length=max_seq_length,
        max_pad_size=max_pad_size,
        seq_drop=seq_drop,
        used_data_keys=used_data_keys,
    )
    step = 0
    total_num_seqs = 0
    total_num_frames = NumbersDict()  # incl. padding
    total_num_used_frames = NumbersDict()  # excl. padding
    try:
        while batches.has_more():
            # See FeedDictDataProvider.
            (batch,) = batches.peek_next_n(1)
            assert isinstance(batch, Batch)
            if batch.start_seq > options.endseq:
                break
            dataset.load_seqs(batch.start_seq, batch.end_seq)
            complete_frac = batches.completed_frac()
            start_elapsed = time.time() - start_time
            try:
                num_seqs_s = str(dataset.num_seqs)
            except NotImplementedError:
                try:
                    num_seqs_s = "~%i" % dataset.estimated_num_seqs
                except TypeError:  # a number is required, not NoneType
                    num_seqs_s = "?"
            progress_prefix = "%i/%s" % (batch.start_seq, num_seqs_s)
            progress = "%s (%.02f%%)" % (progress_prefix, complete_frac * 100)
            if complete_frac > 0:
                # Estimate remaining time from the completed fraction so far.
                total_time_estimated = start_elapsed / complete_frac
                remaining_estimated = total_time_estimated - start_elapsed
                progress += " (%s)" % hms(remaining_estimated)
            # Padded size: longest seq in the batch times number of seqs.
            batch_max_time = NumbersDict.max([seq.frame_length for seq in batch.seqs]) * len(batch.seqs)
            batch_num_used_frames = sum([seq.frame_length for seq in batch.seqs], NumbersDict())
            total_num_seqs += len(batch.seqs)
            num_seqs_stats.collect(numpy.array([len(batch.seqs)]))
            total_num_frames += batch_max_time
            total_num_used_frames += batch_num_used_frames
            print(
                "%s, batch %i, num seqs %i, frames %s, used %s (%s)"
                % (
                    progress,
                    step,
                    len(batch.seqs),
                    batch_max_time,
                    batch_num_used_frames,
                    batch_num_used_frames / batch_max_time,
                ),
                file=log.v5,
            )
            if show_interactive_process_bar:
                util.progress_bar_with_time(complete_frac, prefix=progress_prefix)
            step += 1
            batches.advance(1)
    finally:
        print(
            "Done. Total time %s. More seqs which we did not dumped: %s"
            % (hms(time.time() - start_time), batches.has_more()),
            file=log.v2,
        )
        print("Dataset epoch %i, order %r." % (dataset.epoch, dataset.seq_ordering))
        print("Num batches (steps): %i" % step, file=log.v1)
        print("Num seqs: %i" % total_num_seqs, file=log.v1)
        num_seqs_stats.dump(stream=log.v1, stream_prefix="Batch num seqs ")
        for key in used_data_keys:
            print("Data key %r:" % key, file=log.v1)
            print(" Num frames: %s" % total_num_frames[key], file=log.v1)
            print(" Num used frames: %s" % total_num_used_frames[key], file=log.v1)
            print(" Fraction used frames: %s" % (total_num_used_frames / total_num_frames)[key], file=log.v1)
        dataset.finish_epoch()
def init(config_str, config_dataset, use_pretrain, epoch, verbosity):
    """
    Initializes RETURNN (logging, config, backend engine) and the global ``dataset``.

    :param str config_str: either filename to config-file, or dict for dataset
    :param str|None config_dataset: if a config file is given, selects the dataset key, e.g. "dev"
    :param bool use_pretrain: might overwrite config options, or even the dataset
    :param int epoch: epoch used to init the dataset seq order (and pretrain network, if enabled)
    :param int verbosity: log verbosity level
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    dataset_opts = None
    config_filename = None
    if config_str.strip().startswith("{"):
        # Looks like a Python dict literal with dataset options.
        print("Using dataset %s." % config_str)
        # NOTE(review): eval on a command-line string; only acceptable for trusted input.
        dataset_opts = eval(config_str.strip())
    elif config_str.endswith(".hdf"):
        # Single HDF file: wrap it into an HDFDataset spec.
        dataset_opts = {"class": "HDFDataset", "files": [config_str]}
        print("Using dataset %r." % dataset_opts)
        assert os.path.exists(config_str)
    else:
        config_filename = config_str
        print("Using config file %r." % config_filename)
        assert os.path.exists(config_filename)
    rnn.init_config(config_filename=config_filename, default_config={"cache_size": "0"})
    global config
    config = rnn.config
    config.set("log", None)  # log to stdout
    config.set("log_verbosity", verbosity)
    rnn.init_log()
    print("Returnn %s starting up." % __file__, file=log.v2)
    rnn.returnn_greeting()
    rnn.init_faulthandler()
    util.BackendEngine.select_engine(config=config)
    if not dataset_opts:
        # No explicit dataset given on the command line; take it from the config.
        if config_dataset:
            dataset_opts = "config:%s" % config_dataset
        else:
            dataset_opts = "config:train"
    if use_pretrain:
        from returnn.pretrain import pretrain_from_config
        pretrain = pretrain_from_config(config)
        if pretrain:
            print("Using pretrain %s, epoch %i" % (pretrain, epoch), file=log.v2)
            net_dict = pretrain.get_network_json_for_epoch(epoch=epoch)
            if "#config" in net_dict:
                # Pretrain can override config options per epoch; apply and log each diff.
                config_overwrites = net_dict["#config"]
                print("Pretrain overwrites these config options:", file=log.v2)
                assert isinstance(config_overwrites, dict)
                for key, value in sorted(config_overwrites.items()):
                    assert isinstance(key, str)
                    orig_value = config.typed_dict.get(key, None)
                    if isinstance(orig_value, dict) and isinstance(value, dict):
                        # Structured diff for dict-valued options.
                        diff_str = "\n" + util.obj_diff_str(orig_value, value)
                    elif isinstance(value, dict):
                        diff_str = "\n%r ->\n%s" % (orig_value, pformat(value))
                    else:
                        diff_str = " %r -> %r" % (orig_value, value)
                    print("Config key %r for epoch %i:%s" % (key, epoch, diff_str), file=log.v2)
                    config.set(key, value)
            else:
                print("No config overwrites for this epoch.", file=log.v2)
        else:
            print("No pretraining used.", file=log.v2)
    elif config.typed_dict.get("pretrain", None):
        print("Not using pretrain.", file=log.v2)
    dataset_default_opts = {}
    Dataset.kwargs_update_from_config(config, dataset_default_opts)
    print("Using dataset:", dataset_opts, file=log.v2)
    global dataset
    dataset = init_dataset(dataset_opts, default_kwargs=dataset_default_opts)
    assert isinstance(dataset, Dataset)
    dataset.init_seq_order(epoch=epoch)
def main():
    """
    Main entry.
    Parses command-line arguments, initializes RETURNN and the dataset,
    then runs the batch analysis (``analyze_dataset``) until done or interrupted.
    """
    # Fixed typo in the description ("Anaylize" -> "Analyze") and corrected the
    # --endseq / --verbosity help texts, which contradicted the actual defaults.
    arg_parser = argparse.ArgumentParser(description="Analyze dataset batches.")
    arg_parser.add_argument("returnn_config", help="either filename to config-file, or dict for dataset")
    arg_parser.add_argument("--dataset", help="if given the config, specifies the dataset. e.g. 'dev'")
    arg_parser.add_argument("--epoch", type=int, default=1)
    arg_parser.add_argument("--endseq", type=int, default=-1, help="end seq idx (inclusive) or -1 for all (default: -1)")
    arg_parser.add_argument("--verbosity", type=int, default=5, help="overwrites log_verbosity (default: 5)")
    arg_parser.add_argument("--key", default="data", help="data-key, e.g. 'data' or 'classes'. (default: 'data')")
    arg_parser.add_argument("--use_pretrain", action="store_true")
    args = arg_parser.parse_args()
    init(
        config_str=args.returnn_config,
        config_dataset=args.dataset,
        epoch=args.epoch,
        use_pretrain=args.use_pretrain,
        verbosity=args.verbosity,
    )
    try:
        analyze_dataset(args)
    except KeyboardInterrupt:
        print("KeyboardInterrupt")
        sys.exit(1)
    finally:
        # Always shut down RETURNN cleanly, even on error/interrupt.
        rnn.finalize()
if __name__ == "__main__":
    main()
from __future__ import annotations
import sys
from argparse import ArgumentParser
import gzip
from xml.etree import ElementTree
import itertools
import _setup_returnn_env # noqa
class BlissItem:
    """
    Represents a single ``<segment>`` entry of a Bliss corpus XML file.
    """

    def __init__(self, segment_name, recording_filename, start_time, end_time, orth):
        """
        :param str segment_name: full hierarchical segment name (slash-separated)
        :param str recording_filename: audio file of the enclosing recording
        :param float start_time: segment start within the recording (seconds)
        :param float end_time: segment end within the recording (seconds)
        :param str orth: orthography (transcription) of the segment
        """
        self.segment_name = segment_name
        self.recording_filename = recording_filename
        self.start_time = start_time
        self.end_time = end_time
        self.orth = orth

    def __repr__(self):
        attrs = ("segment_name", "recording_filename", "start_time", "end_time", "orth")
        parts = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
        return "BlissItem(%s)" % ", ".join(parts)

    @property
    def delta_time(self):
        """
        :return: segment duration (end minus start)
        :rtype: float
        """
        return self.end_time - self.start_time
def iter_bliss(filename):
    """
    Incrementally parse a Bliss corpus XML file and yield one :class:`BlissItem`
    per ``<segment>`` element. Uses a streaming parse (``iterparse``) with
    periodic ``root.clear()``, so large corpora need bounded memory.

    :param str filename: path to the Bliss XML, optionally gzip-compressed (".gz")
    :return: yields BlissItem
    :rtype: list[BlissItem]
    """
    corpus_file = open(filename, "rb")
    if filename.endswith(".gz"):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
    _, root = next(context)  # get root element
    # Parallel stacks tracking the current XML hierarchy:
    # element names (to build the segment name), the elements themselves,
    # and per-level child counts (used as fallback names for unnamed elements).
    name_tree = [root.attrib["name"]]
    elem_tree = [root]
    count_tree = [0]
    recording_filename = None
    for event, elem in context:
        if elem.tag == "recording":
            # Remember the audio file while inside a <recording>; reset on leave.
            recording_filename = elem.attrib["audio"] if event == "start" else None
        if event == "end" and elem.tag == "segment":
            elem_orth = elem.find("orth")
            orth_raw = elem_orth.text or ""  # should be unicode
            orth_split = orth_raw.split()
            orth = " ".join(orth_split)  # normalize whitespace
            segment_name = "/".join(name_tree)
            yield BlissItem(
                segment_name=segment_name,
                recording_filename=recording_filename,
                start_time=float(elem.attrib["start"]),
                end_time=float(elem.attrib["end"]),
                orth=orth,
            )
            root.clear()  # free memory
        if event == "start":
            count_tree[-1] += 1
            count_tree.append(0)
            elem_tree += [elem]
            elem_name = elem.attrib.get("name", None)
            if elem_name is None:
                # Unnamed element: use its 1-based child index as the name part.
                elem_name = str(count_tree[-2])
            assert isinstance(elem_name, str)
            name_tree += [elem_name]
        elif event == "end":
            assert elem_tree[-1] is elem
            elem_tree = elem_tree[:-1]
            name_tree = name_tree[:-1]
            count_tree = count_tree[:-1]
def main():
    """
    Main entry.
    Reads one or more Bliss corpus XML files and writes a Python-dict-like text
    file mapping segment name -> estimated sequence length in frames
    (assuming a 10ms frame shift).
    """
    arg_parser = ArgumentParser()
    arg_parser.add_argument("bliss_filename", nargs="+")
    arg_parser.add_argument("--output", default="/dev/stdout")
    args = arg_parser.parse_args()
    if args.output.endswith(".gz"):
        out = gzip.GzipFile(args.output, mode="wb")
    else:
        out = open(args.output, "wb")
    out.write(b"{\n")
    for bliss_item in itertools.chain(*[iter_bliss(fn) for fn in args.bliss_filename]):
        assert isinstance(bliss_item, BlissItem)
        seq_len = round(bliss_item.delta_time * 100.0)  # assume 10ms frames, round
        # b"%r" yields the ascii repr of the segment name (quoted, like a dict key).
        out.write(b"%r: %i,\n" % (bliss_item.segment_name, seq_len))
    out.write(b"}\n")
    out.close()
if __name__ == "__main__":
from returnn.util import better_exchook
better_exchook.install()
try:
main()
except BrokenPipeError:
print("BrokenPipeError")
        sys.exit(1)
from __future__ import annotations
import sys
import _setup_returnn_env # noqa
import returnn.__main__ as rnn
from returnn.log import log
from returnn.config import Config
import argparse
from returnn.util.basic import human_size, parse_orthography
from returnn.datasets.lm import Lexicon
import gzip
from xml.etree import ElementTree
import time
def iter_dataset(dataset, callback):
    """
    Iterate through all sequences of the dataset and invoke the callback
    with the orthography ("orth" target) of each sequence.

    :param Dataset.Dataset dataset:
    :param (*)->None callback: called as ``callback(orth=...)``
    """
    dataset.init_seq_order(epoch=1)
    assert "orth" in dataset.get_target_list()
    seq_idx = 0
    while dataset.is_less_than_num_seqs(seq_idx):
        # Load one seq at a time to keep memory usage low.
        dataset.load_seqs(seq_idx, seq_idx)
        orth = dataset.get_targets("orth", seq_idx)
        callback(orth=orth)
        seq_idx += 1
def iter_bliss(filename, callback):
    """
    Iterate through a Sprint Bliss XML file and invoke the callback with the
    whitespace-normalized orthography of every <segment>.

    :param str filename: path to the Bliss XML, optionally gzip-compressed (".gz")
    :param callback: called as ``callback(orth=...)``
    """
    corpus_file = open(filename, "rb")
    if filename.endswith(".gz"):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    # noinspection PyShadowingNames
    def get_elements(tag):
        """Yield *tag* elements from *filename_or_file* xml incrementally."""
        context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
        _, root = next(context)  # get root element
        tree = [root]  # stack of currently open elements
        for event, elem in context:
            if event == "start":
                tree += [elem]
            elif event == "end":
                assert tree[-1] is elem
                tree = tree[:-1]
            if event == "end" and elem.tag == tag:
                yield tree, elem
                root.clear()  # free memory
    for tree, elem in get_elements("segment"):
        elem_orth = elem.find("orth")
        orth_raw = elem_orth.text or ""  # should be unicode
        orth_split = orth_raw.split()
        orth = " ".join(orth_split)  # normalize whitespace
        callback(orth=orth)
def iter_txt(filename, callback):
    """
    Iterate through a pure text file and invoke the callback once per non-empty line.

    :param str filename: path to a text file, optionally gzip-compressed (".gz")
    :param callback: called as ``callback(orth=line)``; note that ``line`` is bytes,
        since the file is opened in binary mode
    """
    raw_file = open(filename, "rb")
    try:
        # Transparently decompress if the file is gzipped.
        f = gzip.GzipFile(fileobj=raw_file) if filename.endswith(".gz") else raw_file
        for line in f:
            line = line.strip()
            if not line:
                continue  # skip empty lines
            callback(orth=line)
    finally:
        # Fix: the original leaked the file handle; always close it.
        raw_file.close()
class CollectCorpusStats:
    """
    Collect stats.
    Gathers the vocabulary (unique word symbols), the total word count, and the
    average orthography length over a corpus, and prints them at the end.
    """
    def __init__(self, options, iter_corpus):
        """
        :param options: argparse.Namespace
        :param iter_corpus: callable taking our callback; iterates over the corpus
        """
        self.options = options
        self.seq_count = 0  # number of sequences seen so far
        self.words = set()  # vocabulary of unique word symbols
        self.total_word_len = 0  # total number of words over all sequences
        self.process_last_time = time.time()  # for periodic progress output
        iter_corpus(self._callback)
        print("Total word len:", self.total_word_len, "(%s)" % human_size(self.total_word_len), file=log.v3)
        print("Average orth len:", float(self.total_word_len) / self.seq_count, file=log.v3)
        print("Num word symbols:", len(self.words), file=log.v3)
    def _callback(self, orth):
        """
        Process the orthography of one sequence.

        :param str orth:
        """
        orth_words = parse_orthography(orth, prefix=[], postfix=[], word_based=True)
        self.seq_count += 1
        if self.options.dump_orth:
            print("Orth:", orth_words, file=log.v3)
        self.words.update(orth_words)
        self.total_word_len += len(orth_words)
        # Show some progress if it takes long.
        if time.time() - self.process_last_time > 2:
            self.process_last_time = time.time()
            print("Collect process, total word len so far:", human_size(self.total_word_len), file=log.v3)
def init(config_filename=None):
    """
    Initializes RETURNN logging and, if a config is given, also the config and data.

    :param str config_filename: optional RETURNN config file
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    if config_filename:
        rnn.init_config(config_filename, command_line_options=[])
        rnn.init_log()
    else:
        # No config: plain default logging.
        log.initialize()
    print("Returnn collect-words starting up.", file=log.v3)
    rnn.init_faulthandler()
    if config_filename:
        rnn.init_data()
        rnn.print_task_properties()
def is_bliss(filename):
    """
    Heuristically check whether the given file looks like a Bliss corpus XML file,
    i.e. whether it can be opened and its root element parsed as XML.

    :param str filename: path, optionally gzip-compressed (".gz")
    :rtype: bool
    """
    raw_file = None
    try:
        raw_file = open(filename, "rb")
        corpus_file = gzip.GzipFile(fileobj=raw_file) if filename.endswith(".gz") else raw_file
        context = iter(ElementTree.iterparse(corpus_file, events=("start", "end")))
        next(context)  # try to read the root element
        return True
    except IOError:  # 'Not a gzipped file' or so
        pass
    except ElementTree.ParseError:  # 'syntax error' or so
        pass
    finally:
        # Fix: the original leaked the file handle; always close it.
        if raw_file is not None:
            raw_file.close()
    return False
def is_returnn_config(filename):
    """
    Check whether the given file can be loaded as a RETURNN config.

    :param str filename:
    :rtype: bool
    """
    if filename.endswith(".gz"):
        return False
    # noinspection PyBroadException
    try:
        Config().load_file(filename)
    except Exception:
        return False
    return True
def main(argv):
    """
    Main entry.
    Determines the input type (Bliss XML, RETURNN config, or plain text),
    collects corpus statistics over it, and optionally checks the collected
    words against a lexicon.
    """
    arg_parser = argparse.ArgumentParser(description="Collect orth symbols.")
    arg_parser.add_argument("input", help="RETURNN config, Corpus Bliss XML or just txt-data")
    arg_parser.add_argument("--dump_orth", action="store_true")
    arg_parser.add_argument("--lexicon")
    args = arg_parser.parse_args(argv[1:])
    bliss_filename = None
    crnn_config_filename = None
    txt_filename = None
    # Detect the input type by trying the more specific formats first.
    if is_bliss(args.input):
        bliss_filename = args.input
        print("Read Bliss corpus:", bliss_filename)
    elif is_returnn_config(args.input):
        crnn_config_filename = args.input
        print("Read corpus from RETURNN config:", crnn_config_filename)
    else:  # treat just as txt
        txt_filename = args.input
        print("Read corpus from txt-file:", txt_filename)
    init(config_filename=crnn_config_filename)
    if bliss_filename:
        def _iter_corpus(cb):
            return iter_bliss(bliss_filename, callback=cb)
    elif txt_filename:
        def _iter_corpus(cb):
            return iter_txt(txt_filename, callback=cb)
    else:
        def _iter_corpus(cb):
            return iter_dataset(rnn.train_data, callback=cb)
    corpus_stats = CollectCorpusStats(args, _iter_corpus)
    if args.lexicon:
        # Report all corpus words which are missing from the lexicon.
        print("Lexicon:", args.lexicon)
        lexicon = Lexicon(args.lexicon)
        print("Words not in lexicon:")
        c = 0
        for w in sorted(corpus_stats.words):
            if w not in lexicon.lemmas:
                print(w)
                c += 1
        print("Count: %i (%f%%)" % (c, 100.0 * float(c) / len(corpus_stats.words)))
    else:
        print("No lexicon provided (--lexicon).")
    if crnn_config_filename:
        rnn.finalize()
if __name__ == "__main__":
    main(sys.argv)
from __future__ import annotations
import typing
import os
import sys
from pprint import pprint
import _setup_returnn_env # noqa
import returnn.__main__ as rnn
from returnn.log import log
from returnn.config import Config
import argparse
import returnn.util.basic as util
from returnn.tf.engine import Engine
from returnn.datasets import init_dataset
from returnn.datasets.meta import MetaDataset
from returnn.util import better_exchook
config = None # type: typing.Optional[Config]
def init(config_filename, log_verbosity, remaining_args=()):
    """
    Initializes RETURNN from the given config for the "search" task (TensorFlow only).

    :param str config_filename: filename to config-file
    :param int log_verbosity:
    :param list[str] remaining_args: passed through to the config as command-line options
    """
    rnn.init_better_exchook()
    rnn.init_thread_join_hack()
    print("Using config file %r." % config_filename)
    assert os.path.exists(config_filename)
    rnn.init_config(
        config_filename=config_filename,
        command_line_options=remaining_args,
        extra_updates={
            "use_tensorflow": True,
            "log": None,  # log to stdout
            "log_verbosity": log_verbosity,
            "task": "search",
        },
        default_config={
            "debug_print_layer_output_template": True,
        },
    )
    global config
    config = rnn.config
    rnn.init_log()
    print("Returnn %s starting up." % os.path.basename(__file__), file=log.v1)
    rnn.returnn_greeting()
    rnn.init_backend_engine()
    assert util.BackendEngine.is_tensorflow_selected(), "this is only for TensorFlow"
    rnn.init_faulthandler()
    better_exchook.replace_traceback_format_tb()  # makes some debugging easier
def prepare_compile(rec_layer_name, net_dict, cheating, dump_att_weights, hdf_filename, possible_labels):
    """
    Modifies the network dict so that, besides the regular search output,
    additional debug outputs (choice values, beam origins, beam scores, and
    optionally attention weights) are collected and written to an HDF file
    via an "hdf_dump" layer.

    :param str rec_layer_name: name of the rec layer that performs the search
    :param dict[str] net_dict: modify inplace
    :param bool cheating: add the ground truth to the beam
    :param bool dump_att_weights: also dump all softmax_over_spatial layers
    :param str hdf_filename: target file for the "hdf_dump" layer
    :param dict[str,list[str]] possible_labels: dataset labels, per target key
    """
    assert isinstance(net_dict, dict)
    assert rec_layer_name in net_dict
    rec_layer_dict = net_dict[rec_layer_name]
    assert rec_layer_dict["class"] == "rec"
    rec_layer_dict["include_eos"] = True
    rec_unit = rec_layer_dict["unit"]
    assert isinstance(rec_unit, dict)
    relevant_layer_names = []
    target = None
    for name, layer_desc in sorted(rec_unit.items()):
        assert isinstance(name, str)
        if name.startswith("#"):
            continue
        assert isinstance(layer_desc, dict)
        assert "class" in layer_desc
        class_name = layer_desc["class"]
        assert isinstance(class_name, str)
        if dump_att_weights and class_name == "softmax_over_spatial":
            print("Dump softmax_over_spatial layer %r." % name)
            # Copy with is_output_layer so the att weights get collected.
            rec_unit["_%s_spatial_sm_value" % name] = {"class": "copy", "from": name, "is_output_layer": True}
            relevant_layer_names.append("_%s_spatial_sm_value" % name)
            continue
        if class_name != "choice":  # only use choice layers for now
            continue
        if cheating and layer_desc["target"]:
            print("Enable cheating for layer %r with target %r." % (name, layer_desc["target"]))
            layer_desc["cheating"] = True
        if name == "output":
            target = layer_desc["target"]
        # Similar to test_search_multi_choice.
        rec_unit["_%s_value" % name] = {"class": "copy", "from": name}
        rec_unit["_%s_src_beams" % name] = {"class": "choice_get_src_beams", "from": name}
        rec_unit["_%s_beam_scores" % name] = {"class": "choice_get_beam_scores", "from": name}
        for name_ in ["_%s_value" % name, "_%s_src_beams" % name, "_%s_beam_scores" % name]:
            rec_unit[name_]["is_output_layer"] = True
            relevant_layer_names.append(name_)
            rec_unit["%s_raw" % name_] = {"class": "decide_keep_beam", "from": name_, "is_output_layer": True}
            relevant_layer_names.append("%s_raw" % name_)
    print("Collected layers:")
    pprint(relevant_layer_names)
    for i, name in enumerate(list(relevant_layer_names)):
        full_name = "%s/%s" % (rec_layer_name, name)
        if name.endswith("_raw"):
            relevant_layer_names[i] = full_name
        else:
            # Resolve to the final (decided) beam, outside the rec layer.
            net_dict["%s_%s_final" % (rec_layer_name, name)] = {"class": "decide_keep_beam", "from": full_name}
            relevant_layer_names[i] = "%s_%s_final" % (rec_layer_name, name)
    net_dict["%s__final_beam_scores_" % rec_layer_name] = {"class": "choice_get_beam_scores", "from": rec_layer_name}
    net_dict["%s__final_beam_scores" % rec_layer_name] = {
        "class": "decide_keep_beam",
        "from": "%s__final_beam_scores_" % rec_layer_name,
    }
    relevant_layer_names.append("%s__final_beam_scores" % rec_layer_name)
    net_dict["%s_final_decided_" % rec_layer_name] = {"class": "decide", "from": rec_layer_name}
    net_dict["%s_final_decided" % rec_layer_name] = {
        "class": "decide_keep_beam",
        "from": "%s_final_decided_" % rec_layer_name,
    }
    if target and target in possible_labels:
        print("Using labels from target %r." % target)
    net_dict["debug_search_dump"] = {
        "class": "hdf_dump",
        "filename": hdf_filename,
        "from": "%s_final_decided" % rec_layer_name,
        "extra": {name.replace("/", "_"): name for name in relevant_layer_names},
        "labels": possible_labels.get(target, None),
        "is_output_layer": True,
        "dump_whole_batches": True,  # needed if there are different beam sizes...
    }
def main(argv):
    """
    Main entry.
    Runs search on the given dataset with an augmented network
    (see :func:`prepare_compile`) and dumps scores / att-weights to an HDF file.
    """
    arg_parser = argparse.ArgumentParser(description="Dump search scores and other info to HDF file.")
    arg_parser.add_argument("config", help="filename to config-file")
    arg_parser.add_argument("--dataset", default="config:train")
    arg_parser.add_argument("--epoch", type=int, default=-1, help="-1 for last epoch")
    arg_parser.add_argument("--output_file", help="hdf", required=True)
    arg_parser.add_argument("--rec_layer_name", default="output")
    arg_parser.add_argument("--cheating", action="store_true", help="add ground truth to the beam")
    arg_parser.add_argument("--att_weights", action="store_true", help="dump all softmax_over_spatial layers")
    arg_parser.add_argument("--verbosity", default=4, type=int, help="5 for all seqs (default: 4)")
    arg_parser.add_argument("--seq_list", nargs="+", help="use only these seqs")
    args, remaining_args = arg_parser.parse_known_args(argv[1:])
    init(config_filename=args.config, log_verbosity=args.verbosity, remaining_args=remaining_args)
    dataset = init_dataset(args.dataset)
    print("Dataset:")
    pprint(dataset)
    if args.seq_list:
        dataset.seq_tags_filter = set(args.seq_list)
        dataset.partition_epoch = 1  # reset
        if isinstance(dataset, MetaDataset):
            for sub_dataset in dataset.datasets.values():
                # Bug fix: the filter must be set on each sub-dataset;
                # the original mistakenly re-assigned it on the meta-dataset itself.
                sub_dataset.seq_tags_filter = set(args.seq_list)
                sub_dataset.partition_epoch = 1
        dataset.finish_epoch()  # enforce reset
    if dataset.seq_tags_filter is not None:
        print("Using sequences:")
        pprint(dataset.seq_tags_filter)
    if args.epoch >= 1:
        config.set("load_epoch", args.epoch)
    def net_dict_post_proc(net_dict):
        """
        Augment the network dict with the debug/dump layers.

        :param dict[str] net_dict:
        :return: net_dict
        :rtype: dict[str]
        """
        prepare_compile(
            rec_layer_name=args.rec_layer_name,
            net_dict=net_dict,
            cheating=args.cheating,
            dump_att_weights=args.att_weights,
            hdf_filename=args.output_file,
            possible_labels=dataset.labels,
        )
        return net_dict
    engine = Engine(config=config)
    engine.use_search_flag = True
    engine.init_network_from_config(config, net_dict_post_proc=net_dict_post_proc)
    engine.search(dataset, do_eval=config.bool("search_do_eval", True), output_layer_names=args.rec_layer_name)
    engine.finalize()
    print("Search finished.")
    assert os.path.exists(args.output_file), "hdf file not dumped?"
if __name__ == "__main__":
    main(sys.argv)
from __future__ import annotations
import typing
import os
import sys
from argparse import ArgumentParser
from decimal import Decimal
import tempfile
import gzip
import numpy
import xml.etree.ElementTree as ElementTree
import zipfile
import shutil
from subprocess import check_call
from glob import glob
import _setup_returnn_env # noqa
import returnn.sprint.cache
class BlissItem:
    """
    Represents a single ``<segment>`` entry of a Bliss corpus XML file.
    """

    def __init__(self, segment_name, recording_filename, start_time, end_time, orth, speaker_name=None):
        """
        :param str segment_name: full hierarchical segment name (slash-separated)
        :param str recording_filename: audio file of the enclosing recording
        :param Decimal start_time: segment start within the recording (seconds)
        :param Decimal end_time: segment end within the recording (seconds)
        :param str orth: orthography (transcription) of the segment
        :param str|None speaker_name: speaker name, if annotated
        """
        self.segment_name = segment_name
        self.recording_filename = recording_filename
        self.start_time = start_time
        self.end_time = end_time
        self.orth = orth
        self.speaker_name = speaker_name

    def __repr__(self):
        attrs = ("segment_name", "recording_filename", "start_time", "end_time", "orth", "speaker_name")
        parts = ["%s=%r" % (attr, getattr(self, attr)) for attr in attrs]
        return "BlissItem(%s)" % ", ".join(parts)

    @property
    def delta_time(self):
        """
        :return: segment duration (end minus start)
        :rtype: float
        """
        return self.end_time - self.start_time
def iter_bliss(filename):
    """
    Incrementally parse a Bliss corpus XML file and yield one :class:`BlissItem`
    per ``<segment>`` element (with exact Decimal times and optional speaker name).
    Uses a streaming parse with periodic ``root.clear()``, so large corpora need
    bounded memory.

    :param str filename: path to the Bliss XML, optionally gzip-compressed (".gz")
    :return: yields BlissItem
    :rtype: list[BlissItem]
    """
    corpus_file = open(filename, "rb")
    if filename.endswith(".gz"):
        corpus_file = gzip.GzipFile(fileobj=corpus_file)
    parser = ElementTree.XMLParser(target=ElementTree.TreeBuilder(), encoding="utf-8")
    context = iter(ElementTree.iterparse(corpus_file, parser=parser, events=("start", "end")))
    _, root = next(context)  # get root element
    # Parallel stacks tracking the current XML hierarchy:
    # element names (to build the segment name), the elements themselves,
    # and per-level child counts (used as fallback names for unnamed elements).
    name_tree = [root.attrib["name"]]
    elem_tree = [root]
    count_tree = [0]
    recording_filename = None
    for event, elem in context:
        if elem.tag == "recording":
            # Remember the audio file while inside a <recording>; reset on leave.
            recording_filename = elem.attrib["audio"] if event == "start" else None
        if event == "end" and elem.tag == "segment":
            elem_orth = elem.find("orth")
            orth_raw = elem_orth.text or ""  # should be unicode
            orth_split = orth_raw.split()
            orth = " ".join(orth_split)  # normalize whitespace
            elem_speaker = elem.find("speaker")
            if elem_speaker is not None:
                speaker_name = elem_speaker.attrib["name"]
            else:
                speaker_name = None
            segment_name = "/".join(name_tree)
            yield BlissItem(
                segment_name=segment_name,
                recording_filename=recording_filename,
                start_time=Decimal(elem.attrib["start"]),
                end_time=Decimal(elem.attrib["end"]),
                orth=orth,
                speaker_name=speaker_name,
            )
            root.clear()  # free memory
        if event == "start":
            count_tree[-1] += 1
            count_tree.append(0)
            elem_tree += [elem]
            elem_name = elem.attrib.get("name", None)
            if elem_name is None:
                # Unnamed element: use its 1-based child index as the name part.
                elem_name = str(count_tree[-2])
            assert isinstance(elem_name, str)
            name_tree += [elem_name]
        elif event == "end":
            assert elem_tree[-1] is elem
            elem_tree = elem_tree[:-1]
            name_tree = name_tree[:-1]
            count_tree = count_tree[:-1]
class SprintCacheHandler:
    """
    This is just to apply the same silence trimming on the raw audio samples
    which was applied on the features in the Sprint cache.
    We can reconstruct this information because the Sprint cache also has the exact timing information.
    """
    def __init__(self, opt, bliss_opt, raw_sample_rate, feat_sample_rate):
        """
        :param str opt: either filename or filename pattern (glob) of the Sprint cache(s)
        :param str bliss_opt: either filename or filename pattern (glob) of the Bliss XML(s)
        :param int raw_sample_rate: sample rate of the raw audio (Hz)
        :param int feat_sample_rate: frame rate of the cached features (Hz)
        """
        self.sprint_cache = self._load_sprint_cache(opt)
        self.seg_times = self._collect_seg_times_from_bliss(bliss_opt)
        self.raw_sample_rate = raw_sample_rate
        self.feat_sample_rate = feat_sample_rate
        self.pp_counter = 0  # number of feature_post_process calls so far
    @staticmethod
    def _load_sprint_cache(opt):
        """
        :param str opt: either filename or filename pattern (glob)
        :rtype: SprintCache.FileArchiveBundle|SprintCache.FileArchive
        """
        if "*" in opt:
            # Pattern: load all matching caches into one bundle.
            sprint_cache_fns = glob(opt)
            assert sprint_cache_fns, "nothing found under sprint cache pattern %r" % (opt,)
            sprint_cache = returnn.sprint.cache.FileArchiveBundle()
            for fn in sprint_cache_fns:
                print("Load Sprint cache:", fn)
                sprint_cache.add_bundle_or_archive(fn)
        else:
            print("Load Sprint cache:", opt)
            sprint_cache = returnn.sprint.cache.open_file_archive(opt, must_exists=True)
        return sprint_cache
    @staticmethod
    def _collect_seg_times_from_bliss(opt):
        """
        :param str opt: either filename or filename pattern (glob)
        :return: segment name -> (start time, end time)
        :rtype: dict[str,(Decimal,Decimal)]
        """
        if "*" in opt:
            items = []
            fns = glob(opt)
            assert fns, "nothing found under Bliss XML cache pattern %r" % (opt,)
            for fn in fns:
                print("Load Bliss XML:", fn)
                items.extend(iter_bliss(fn))
        else:
            print("Load Bliss XML:", opt)
            items = list(iter_bliss(opt))
        return {seq.segment_name: (seq.start_time, seq.end_time) for seq in items}
    # noinspection PyUnusedLocal
    def feature_post_process(self, feature_data, seq_name, **kwargs):
        """
        Select/stitch raw sample windows such that each cached feature frame
        corresponds to exactly ``raw_sample_rate // feat_sample_rate`` samples.

        :param numpy.ndarray feature_data: raw audio samples, shape (time, 1)
        :param str seq_name:
        :return: features
        :rtype: numpy.ndarray
        """
        assert feature_data.shape[1] == 1  # raw audio
        self.pp_counter += 1
        assert self.raw_sample_rate % self.feat_sample_rate == 0
        num_frames_per_feat = self.raw_sample_rate // self.feat_sample_rate
        assert num_frames_per_feat % 2 == 0
        allowed_variance_num_frames = num_frames_per_feat // 2  # allow some variance
        times, data = self.sprint_cache.read(seq_name, "feat")
        assert len(times) == len(data)
        prev_end_frame = None
        res_feature_data = []
        seq_time_offset = float(self.seg_times[seq_name][0])
        for (start_time, end_time), feat in zip(times, data):
            # Cache times are absolute in the recording; shift to segment-relative.
            start_time -= seq_time_offset
            end_time -= seq_time_offset
            center_time = (start_time + end_time) / 2.0
            # Take num_frames_per_feat raw samples centered on this feature frame.
            start_frame = int(center_time * self.raw_sample_rate) - num_frames_per_feat // 2
            assert 0 <= start_frame < feature_data.shape[0]
            if prev_end_frame is not None:
                if (
                    prev_end_frame - allowed_variance_num_frames
                    <= start_frame
                    <= prev_end_frame + allowed_variance_num_frames
                ):
                    # Snap to the previous window's end so the output is contiguous.
                    start_frame = prev_end_frame
                assert start_frame >= prev_end_frame
            end_frame = start_frame + num_frames_per_feat
            if feature_data.shape[0] < end_frame <= feature_data.shape[0] + allowed_variance_num_frames:
                # Window slightly exceeds the audio; zero-pad the remainder.
                res_feature_data.append(feature_data[start_frame:])
                res_feature_data.append(numpy.zeros((end_frame - feature_data.shape[0], 1), dtype=feature_data.dtype))
            else:
                assert end_frame <= feature_data.shape[0]
                res_feature_data.append(feature_data[start_frame:end_frame])
            prev_end_frame = end_frame
        res_feature_data = numpy.concatenate(res_feature_data, axis=0)
        assert res_feature_data.shape[0] % num_frames_per_feat == 0
        assert res_feature_data.shape[0] // num_frames_per_feat == len(data)
        return res_feature_data
def longest_common_prefix(strings):
    """
    Return the longest prefix shared by all given strings.

    :param list[str]|set[str] strings:
    :rtype: str
    """
    if not strings:
        return ""
    # The lexicographic min and max bound all other strings, so their common
    # prefix is the common prefix of the whole collection.
    lo = min(strings)
    hi = max(strings)
    if not lo:
        return ""
    prefix_len = 0
    for ch_lo, ch_hi in zip(lo, hi):
        if ch_lo != ch_hi:
            break
        prefix_len += 1
    return lo[:prefix_len]
def longest_common_postfix(strings):
    """
    Return the longest suffix shared by all given strings.

    :param list[str]|set[str] strings:
    :rtype: str
    """
    # Reverse every string, take the common prefix, then reverse back.
    reversed_strings = [s[::-1] for s in strings]
    common = longest_common_prefix(reversed_strings)
    return common[::-1]
def hms(s):
    """
    :param float|int s: seconds
    :return: e.g. "1:23:45" (hs:ms:secs). see hms_fraction if you want to get fractional seconds
    :rtype: str
    """
    hours, rem = divmod(s, 3600)
    minutes, seconds = divmod(rem, 60)
    return f"{int(hours)}:{int(minutes):02d}:{int(seconds):02d}"
def main():
"""
Main entry.
"""
arg_parser = ArgumentParser()
arg_parser.add_argument("bliss_filename")
arg_parser.add_argument("--subset_segment_file")
arg_parser.add_argument("--no_ogg", help="skip generating ogg files", action="store_true")
arg_parser.add_argument(
"--no_conversion", help="skip ffmpeg call, assume audio is correct already", action="store_true"
)
arg_parser.add_argument("--no_cleanup", help="don't delete our temp files", action="store_true")
arg_parser.add_argument("--sprint_cache", help="filename of feature cache for synchronization")
arg_parser.add_argument("--raw_sample_rate", help="sample rate of audio input", type=int, default=8000)
arg_parser.add_argument("--feat_sample_rate", help="sample rate of features for sync", type=int, default=100)
arg_parser.add_argument("--ffmpeg_loglevel", help="loglevel for ffmpeg calls", type=str, default="info")
arg_parser.add_argument("--ffmpeg_acodec", help="force audio codec for ffmpeg calls", type=str)
arg_parser.add_argument(
"--number_of_channels", help="force number of channels for output audio", type=int, default=0
)
arg_parser.add_argument("--output", help="output zip filename (if empty, dummy run)", required=True)
args = arg_parser.parse_args()
subset_segment_list = None
if args.subset_segment_file:
subset_segment_list = set(open(args.subset_segment_file).read().splitlines())
assert subset_segment_list
rec_filenames = set()
seqs = [] # type: typing.List[BlissItem]
for bliss_item in iter_bliss(args.bliss_filename):
if subset_segment_list and bliss_item.segment_name not in subset_segment_list:
continue
seqs.append(bliss_item)
rec_filenames.add(bliss_item.recording_filename)
assert seqs
if subset_segment_list:
seq_names = set([seq.segment_name for seq in seqs])
for seq_name in subset_segment_list:
assert seq_name in seq_names
print("Num seqs:", len(seqs))
print("Num recordings:", len(rec_filenames))
rec_filename_common_prefix = longest_common_prefix(rec_filenames)
if not rec_filename_common_prefix.endswith("/"):
if "/" in rec_filename_common_prefix:
rec_filename_common_prefix = rec_filename_common_prefix[: rec_filename_common_prefix.rfind("/") + 1]
else:
rec_filename_common_prefix = ""
print("Recordings common dir prefix:", rec_filename_common_prefix)
rec_filename_common_postfix = longest_common_postfix(rec_filenames)
if not rec_filename_common_postfix.startswith("."):
if "." in rec_filename_common_postfix:
rec_filename_common_postfix = rec_filename_common_postfix[rec_filename_common_postfix.find(".") :]
else:
rec_filename_common_postfix = ""
print("Recordings common postfix:", rec_filename_common_postfix)
if args.output:
zip_filename = args.output
name, ext = os.path.splitext(os.path.basename(zip_filename))
assert ext == ".zip"
else:
name = "dummy"
zip_filename = None
print("Dataset name:", name)
sprint_cache_handler = None
if args.sprint_cache:
sprint_cache_handler = SprintCacheHandler(
opt=args.sprint_cache,
bliss_opt=args.bliss_filename,
raw_sample_rate=args.raw_sample_rate,
feat_sample_rate=args.feat_sample_rate,
)
total_duration = Decimal(0)
total_num_chars = 0
temp_dir = tempfile.mkdtemp()
print("Temp dir for data:", temp_dir)
dest_dirname = "%s/%s" % (temp_dir, name)
dest_meta_filename = "%s/%s.txt" % (temp_dir, name)
dest_meta_file = open(dest_meta_filename, "w")
dest_meta_file.write("[\n")
os.makedirs(dest_dirname, exist_ok=True)
# --- Convert every segment into its own Ogg file below dest_dirname. ---
for seq in seqs:
    rec_filename = seq.recording_filename
    assert os.path.isfile(rec_filename)
    assert seq.start_time < seq.end_time and seq.delta_time > 0
    # NOTE(review): total_duration is a Decimal, so seq.delta_time is
    # presumably a Decimal as well (Decimal + float raises TypeError) -- confirm.
    duration = seq.delta_time
    assert duration > 0
    total_duration += duration
    # Strip the common prefix/postfix to get a short per-recording name that
    # is used as a subdirectory inside the dataset.
    assert rec_filename.startswith(rec_filename_common_prefix) and rec_filename.endswith(
        rec_filename_common_postfix
    )
    rec_name = rec_filename[len(rec_filename_common_prefix) : -len(rec_filename_common_postfix)]
    if args.sprint_cache:
        # Sprint-cache mode: first cut the segment into a temporary wav,
        # post-process ("sync") the samples via the Sprint cache handler, and
        # later convert that corrected wav to Ogg.
        wav_tmp_filename = "%s/%s/%s_%s.wav" % (dest_dirname, rec_name, seq.start_time, seq.end_time)
        os.makedirs(os.path.dirname(wav_tmp_filename), exist_ok=True)
        cmd = ["ffmpeg"]
        if args.ffmpeg_acodec:
            cmd += ["-acodec", args.ffmpeg_acodec]  # https://trac.ffmpeg.org/ticket/2810
        cmd += ["-i", rec_filename, "-ss", str(seq.start_time), "-t", str(duration)]
        if args.number_of_channels > 0:
            cmd += ["-ac", str(args.number_of_channels)]
        cmd += [wav_tmp_filename, "-loglevel", args.ffmpeg_loglevel]
        print("$ %s" % " ".join(cmd))
        check_call(cmd)
        import soundfile  # pip install pysoundfile

        audio, sample_rate = soundfile.read(wav_tmp_filename)
        assert sample_rate == args.raw_sample_rate
        audio_synced = sprint_cache_handler.feature_post_process(numpy.expand_dims(audio, axis=1), seq.segment_name)
        # Overwrite the temp wav with the post-processed samples.
        soundfile.write(wav_tmp_filename, audio_synced, args.raw_sample_rate)
        source_filename = wav_tmp_filename
        start_time = 0  # the temp wav already starts at the segment start
        limit_duration = False
    else:
        soundfile = audio_synced = sample_rate = wav_tmp_filename = None
        source_filename = rec_filename
        start_time = seq.start_time
        limit_duration = True
    dest_filename = "%s/%s/%s_%s.ogg" % (dest_dirname, rec_name, seq.start_time, seq.end_time)
    os.makedirs(os.path.dirname(dest_filename), exist_ok=True)
    if args.no_ogg:
        # Dry-run mode: only report what would be converted.
        print("no Ogg (%s -> %s)" % (os.path.basename(rec_filename), dest_filename[len(dest_dirname) + 1 :]))
    else:
        if os.path.exists(dest_filename):
            print("already exists, delete: %s" % os.path.basename(dest_filename))
            os.remove(dest_filename)
        if args.no_conversion:
            # Input is already an Ogg covering exactly the segment: just copy.
            assert source_filename.endswith(".ogg")
            assert not start_time and not limit_duration, (
                f"With no_conversion=True, start_time {start_time} or duration {duration} is not supported. "
                "Use no_conversion=False, even if the input is already in OGG."
            )
            print(
                "skip ffmpeg, copy instead (%s -> %s)"
                % (os.path.basename(source_filename), dest_filename[len(dest_dirname) + 1 :])
            )
            shutil.copy(src=source_filename, dst=dest_filename)
        else:
            cmd = ["ffmpeg"]
            if args.ffmpeg_acodec:
                cmd += ["-acodec", args.ffmpeg_acodec]  # https://trac.ffmpeg.org/ticket/2810
            cmd += ["-i", source_filename]
            if args.number_of_channels > 0:
                cmd += ["-ac", str(args.number_of_channels)]
            if start_time:
                cmd += ["-ss", str(start_time)]
            if limit_duration:
                cmd += ["-t", str(duration)]
            cmd += [dest_filename, "-loglevel", args.ffmpeg_loglevel]
            print("$ %s" % " ".join(cmd))
            check_call(cmd)
        if args.sprint_cache:
            # Sanity-check that the Ogg conversion kept the synced audio intact.
            audio_ogg, sample_rate_ogg = soundfile.read(dest_filename)
            assert len(audio_synced) == len(audio_ogg), "Number of frames in synced wav and converted ogg do not match"
            assert sample_rate == sample_rate_ogg, "Sample rates in synced wav and converted ogg do not match"
            os.remove(wav_tmp_filename)
    # One Python-literal dict per segment, appended to the meta file.
    dest_meta_file.write(
        "{'text': %r, 'speaker_name': %r, 'file': %r, 'seq_name': %r, 'duration': %s},\n"
        % (seq.orth, seq.speaker_name, dest_filename[len(dest_dirname) + 1 :], seq.segment_name, duration)
    )
    total_num_chars += len(seq.orth)
# --- Close the metadata file, report totals, zip the temp dir, clean up. ---
dest_meta_file.write("]\n")
dest_meta_file.close()
print("Total duration:", total_duration, "secs", "(%s)" % hms(total_duration))
print("Total num chars:", total_num_chars)
print("Dataset zip filename:", zip_filename if zip_filename else "(dummy run, no zip file)")
if zip_filename:
    print("Zipping...")
    # Use the ZipFile as a context manager: zipfile only writes the central
    # directory on close(), so an unclosed archive would end up corrupt.
    with zipfile.ZipFile(zip_filename, mode="a", compression=zipfile.ZIP_DEFLATED) as zip_file:
        for dirpath, dirnames, filenames in os.walk(temp_dir):
            # entry_name (not "name") so we do not clobber the dataset name.
            for entry_name in sorted(dirnames + filenames):
                path = "%s/%s" % (dirpath, entry_name)
                assert path.startswith(temp_dir + "/")
                zip_path = path[len(temp_dir) + 1 :]
                print(" Adding:", zip_path)
                zip_file.write(path, zip_path)
if not args.no_cleanup:
    print("Cleaning up...")
    shutil.rmtree(temp_dir)
else:
    print("Keeping temp dir:", temp_dir)
print("Finished.")
if __name__ == "__main__":
    # Install better_exchook for more informative stack traces on errors.
    from returnn.util import better_exchook

    better_exchook.install()
    try:
        main()
    except BrokenPipeError:
        # E.g. stdout was piped into `head` and the reader went away;
        # report briefly instead of dumping a full traceback.
        print("BrokenPipeError")
        sys.exit(1)
import pandas
import numpy
from collections import Counter
from ipaddress import ip_network
from requests import get # to make GET request
import os
def get_series_from_counters(counter1, counter2, name=None):
    """
    Builds two pandas Series that cover the same dense index range.

    The shared index runs from the smallest key to the largest key found in
    either counter; keys missing from a counter contribute 0 (Counter default).

    :param counter1: first Counter
    :param counter2: second Counter
    :param name: optional label applied to both Series
    :return: Two Panda Series with shared index
    """
    lo = min(min(counter1.keys()), min(counter2.keys()))
    hi = max(max(counter1.keys()), max(counter2.keys()))
    shared_index = numpy.asarray(range(lo, hi + 1))
    first_values = numpy.asarray([counter1[i] for i in range(lo, hi + 1)])
    second_values = numpy.asarray([counter2[i] for i in range(lo, hi + 1)])
    series1 = pandas.Series(first_values, index=shared_index, name=name)
    series2 = pandas.Series(second_values, index=shared_index, name=name)
    return series1, series2
def get_series_from_counter(counter, name=None):
    """
    :param counter: Counter()
    :param name: label for Series
    :return: Panda Series with counter values, densely indexed from 0 up to
             the largest counter key (missing keys contribute 0)
    """
    if len(counter) == 0:
        # Bug fix: propagate the requested name also for an empty counter, and
        # give an explicit dtype -- a bare pandas.Series() defaults to object
        # dtype and emits a FutureWarning in recent pandas versions.
        return pandas.Series(dtype="int64", name=name)
    max_index = max(counter.keys())
    indexes = range(0, max_index + 1)
    x_input = numpy.asarray(indexes)
    y_input = numpy.asarray([counter[i] for i in indexes])
    return pandas.Series(y_input, index=x_input, name=name)
def normalize_counter(counter, total):
    """
    Normalizes a counter by dividing each value by 'total'.

    The result is dense: every index between the smallest and largest key is
    present, with gaps stored as an explicit 0.0.

    :param counter: Counter
    :param total: A number
    :return: Normalized Counter()
    """
    if not counter:
        return counter
    lo, hi = min(counter.keys()), max(counter.keys())
    divisor = float(total)
    return Counter({i: counter[i] / divisor for i in range(lo, hi + 1)})
def is_subprefix(pref1, pref2):
    """
    :param pref1: IP Prefix
    :param pref2: IP Prefix
    :return: True if pref1 is a strict subprefix of pref2 (a prefix is not
             considered a subprefix of itself)
    """
    net1 = ip_network(pref1)
    net2 = ip_network(pref2)
    # subnet_of replaces the deprecated compare_networks-based ordering check.
    # The inequality keeps the original strictness: equal networks -> False.
    return net1 != net2 and net1.subnet_of(net2)
def make_dirs(directory):
    """
    Creates directory and all directories that lie on the path between working dir and directory.

    Safe to call repeatedly or concurrently: exist_ok replaces the previous
    exists-then-create sequence, which raced if another process created the
    directory between the os.path.exists check and os.makedirs.

    :param directory: path of the directory to create
    """
    os.makedirs(directory, exist_ok=True)
def init_dic_with(dic, key, default_value):
    """
    Inits dic[key] with default value, if key isn't in dic yet.

    :param dic: A dictionary
    :param key: A key for dic
    :param default_value: Value to set dic[key] to if key isn't in dic
    :return: dic[key]
    """
    try:
        return dic[key]
    except KeyError:
        dic[key] = default_value
        return default_value
def download_file(url, out, verify_cert=True):
    """
    Downloads url via GET and writes the response body to the file 'out'.

    :param url: url to download
    :param out: path of the output file
    :param verify_cert: whether to verify the server's TLS certificate
    :raises: requests.HTTPError on a 4xx/5xx response
    """
    response = get(url, verify=verify_cert)
    # Fail loudly instead of silently writing an HTTP error page to disk.
    # The request happens before 'out' is opened, so a failed download no
    # longer leaves an empty file behind.
    response.raise_for_status()
    with open(out, "wb") as file:
        file.write(response.content)
def get_sum_of_values(counter):
    """
    Returns the sum of all values in a counter.

    :param counter: Counter()
    :return: sum of values in counter
    """
    # Built-in sum over the mapping's values replaces the manual loop.
    return sum(counter.values())
# reuterspy
<h2 align="center">Financial Data Extraction from Reuters.com with Python</h2>
reuterspy is a Python package to retrieve data from [reuters.com](https://www.reuters.com/), which provides **Balance sheet, Cash Flow, Income Statement and Key Metrics**.
reuterspy allows the user to download both recent and historical data from all the financial products indexed at reuters.com
reuterspy aims to be a simple Python package for financial data extraction, so that users can stop relying on public/private APIs, since reuterspy is **FREE** and has **NO LIMITATIONS**.
## Installation
In order to get this package working you will need to **install it via pip** (with Python 3.5 or higher) on the terminal by typing:
``$ pip install reuterspy``
## Usage
<h2 align="center">Income Statement</h2>
<h3>What is an Income Statement?</h3>
An income statement is one of the three important financial statements used for reporting a company's financial performance over a specific accounting period, with the other two key statements being the balance sheet and the statement of cash flows.
Also known as the profit and loss statement or the statement of revenue and expense, the income statement primarily focuses on the company’s revenues and expenses during a particular period.
In the example presented below, the yearly income statement of a stock is retrieved.
```python
from reuterspy import Reuters
reuters = Reuters()
ticker_list = ['NFLX.O']
df = reuters.get_income_statement(ticker_list)
print(df.head())
```
```{r, engine='python', count_lines}
ticker financialReport year metric value
NFLX.O income_statement 2019 Revenue 20156.447000
NFLX.O income_statement 2018 Revenue 15794.341000
NFLX.O income_statement 2017 Revenue 11692.713000
NFLX.O income_statement 2016 Revenue 8830.669000
NFLX.O income_statement 2015 Revenue 6779.511000
```
<h2 align="center">Balance Sheet</h2>
<h3>What Is a Balance Sheet?</h3>
A balance sheet is a financial statement that reports a company's assets, liabilities and shareholders' equity at a specific point in time, and provides a basis for computing rates of return and evaluating its capital structure. It is a financial statement that provides a snapshot of what a company owns and owes, as well as the amount invested by shareholders.
The balance sheet is used alongside other important financial statements such as the income statement and statement of cash flows in conducting fundamental analysis or calculating financial ratios.
In the example presented below, the yearly Balance Sheet of a stock is retrieved.
```python
from reuterspy import Reuters
reuters = Reuters()
ticker_list = ['NFLX.O']
df = reuters.get_balance_sheet(ticker_list)
print(df.head())
```
```{r, engine='python', count_lines}
ticker financialReport year metric value
NFLX.O balance_sheet 2019 Cash 3103.525000
NFLX.O balance_sheet 2018 Cash 2572.685000
NFLX.O balance_sheet 2016 Cash 1264.126000
NFLX.O balance_sheet 2015 Cash 1706.592000
NFLX.O balance_sheet 2019 Cash & Equivalents 1914.912000
```
<h2 align="center">Cash Flow</h2>
<h3>What Is a Cash Flow?</h3>
Cash flow is the net amount of cash and cash-equivalents being transferred into and out of a business. At the most fundamental level, a company’s ability to create value for shareholders is determined by its ability to generate positive cash flows, or more specifically, maximize long-term free cash flow (FCF).
In the example presented below, the yearly Cash Flow of a stock is retrieved.
```python
from reuterspy import Reuters
reuters = Reuters()
ticker_list = ['NFLX.O']
df = reuters.get_cash_flow(ticker_list)
print(df.head())
```
```{r, engine='python', count_lines}
ticker financialReport year metric value
NFLX.O cash_flow 2019 Net Income/Starting Line 1866.916000
NFLX.O cash_flow 2018 Net Income/Starting Line 1211.242000
NFLX.O cash_flow 2017 Net Income/Starting Line 558.929000
NFLX.O cash_flow 2016 Net Income/Starting Line 186.678000
NFLX.O cash_flow 2015 Net Income/Starting Line 122.641000
```
<h2 align="center">Key Metrics</h2>
In the example presented below, the key metrics of a stock are retrieved.
```python
from reuterspy import Reuters
reuters = Reuters()
ticker_list = ['NFLX.O']
df = reuters.get_key_metrics(ticker_list)
print(df.head())
```
```{r, engine='python', count_lines}
ticker metric value financialReport
NFLX.O Price closing or last bid 510.40 Price and Volume
NFLX.O 52 Week High 575.37 Price and Volume
NFLX.O 52 Week Low 290.25 Price and Volume
NFLX.O Pricing date 2021-01-08 Price and Volume
NFLX.O 10 Day Average Trading Volume 3.49 Price and Volume
```
## Disclaimer
This Python package has been made for research purposes in order to fit the needs that reuters.com does not cover, so this package works like an Application Programming Interface (API) of reuters.com developed in an altruistic way.
Note that this package is not related in any way to reuters.com or any dependent company; the only requirement specified by reuters.com in order to develop this package was "mention the source where data is retrieved from".
"""Client used or interacting with out sentiment analysis api"""
from .generic_api_client import GenericApiClient
from .models import SentimentAnalysisJob, SentimentAnalysisResult
class SentimentAnalysisClient(GenericApiClient):
    """Client for interacting with the Rev AI sentiment analysis api"""

    # Version of the Rev AI sentiment analysis api targeted by this client
    api_version = 'v1'

    # Name under which the sentiment analysis api is exposed
    api_name = 'sentiment_analysis'

    def __init__(self, access_token):
        """Constructor

        :param access_token: access token which authorizes all requests and links them to your
                             account. Generated on the settings page of your account dashboard
                             on Rev AI.
        """
        GenericApiClient.__init__(
            self, access_token, self.api_name, self.api_version,
            SentimentAnalysisJob.from_json, SentimentAnalysisResult.from_json)

    def submit_job_from_text(self,
                             text=None,
                             metadata=None,
                             callback_url=None,
                             delete_after_seconds=None,
                             language=None,
                             notification_config=None):
        """Submit a plain text string to the Rev AI sentiment analysis api.

        :param text: plain text string to be run through sentiment analysis
        :param metadata: info to associate with the job
        :param callback_url: callback url to invoke on job completion as a webhook
        :param delete_after_seconds: number of seconds after job completion when the job
            is auto-deleted
        :param language: one of the supported ISO 639-1 (2-letter) or ISO 639-3 (3-letter)
            language codes as defined in the API Reference
        :param notification_config: CustomerUrlData object containing the callback url to
            invoke on job completion as a webhook and optional authentication headers to
            use when calling the callback url
        :returns: SentimentAnalysisJob object
        :raises: HTTPError
        """
        options = {'text': text, 'language': language}
        payload = self._enhance_payload(options, metadata, callback_url,
                                        delete_after_seconds, notification_config)
        return self._submit_job(payload)

    def submit_job_from_transcript(self,
                                   transcript=None,
                                   metadata=None,
                                   callback_url=None,
                                   delete_after_seconds=None,
                                   language=None,
                                   notification_config=None):
        """Submit an async-transcription Transcript to the Rev AI sentiment analysis api.

        :param transcript: Transcript object from the Rev AI async transcription client to
            be run through sentiment analysis
        :param metadata: info to associate with the job
        :param callback_url: callback url to invoke on job completion as a webhook
        :param delete_after_seconds: number of seconds after job completion when the job
            is auto-deleted
        :param language: one of the supported ISO 639-1 (2-letter) or ISO 639-3 (3-letter)
            language codes as defined in the API Reference
        :param notification_config: CustomerUrlData object containing the callback url to
            invoke on job completion as a webhook and optional authentication headers to
            use when calling the callback url
        :returns: SentimentAnalysisJob object
        :raises: HTTPError
        """
        options = {'json': transcript.to_dict(), 'language': language}
        payload = self._enhance_payload(options, metadata, callback_url,
                                        delete_after_seconds, notification_config)
        return self._submit_job(payload)

    def get_result_json(self, id_, filter_for=None):
        """Get the result of a sentiment analysis job as raw json.

        :param id_: id of job to be requested
        :param filter_for: SentimentValue to filter for.
            If specified only sentiments of this type will be returned
        :returns: job result data as raw json
        :raises: HTTPError
        """
        return self._get_result_json(id_, self._filter_params(filter_for))

    def get_result_object(self, id_, filter_for=None):
        """Get the result of a sentiment analysis job as a SentimentAnalysisResult object.

        :param id_: id of job to be requested
        :param filter_for: SentimentValue to filter for.
            If specified only sentiments of this type will be returned
        :returns: job result data as SentimentAnalysisResult object
        :raises: HTTPError
        """
        return self._get_result_object(id_, self._filter_params(filter_for))

    @staticmethod
    def _filter_params(filter_for):
        # str() yields the query-string form of the SentimentValue;
        # a None value means "no filtering" and is dropped by the base client.
        return {'filter_for': str(filter_for) if filter_for else None}
"""Generic client used to interact with our newer style apis"""
from .baseclient import BaseClient
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class GenericApiClient(BaseClient):
    """Generic client which handles logic for making requests to almost any Rev AI Api.
    Intended to be inherited and extended by a specific client per API"""

    def __init__(self, access_token, api_name, api_version, parse_job_info, parse_job_result):
        """Constructor
        :param access_token: access token which authorizes all requests and links them to your
                             account. Generated on the settings page of your account dashboard
                             on Rev AI.
        :param api_name: name of the api to submit to
        :param api_version: version of the api to submit to
        :param parse_job_info: method to be used to parse job information
        :param parse_job_result: method to be used to parse job results
        """
        BaseClient.__init__(self, access_token)
        # Every endpoint below is resolved relative to this per-api, per-version url.
        self.base_url = 'https://api.rev.ai/{0}/{1}/'.format(api_name, api_version)
        self.parse_job_info = parse_job_info
        self.parse_job_result = parse_job_result

    def _submit_job(self, payload):
        """Submit a job to the api. This method is special in that it is intended to be hidden by
        the implementation this is done because python standard is to pass options individually
        instead of as an object and our true clients should match this standard
        :param payload: payload to be sent with job request
        :returns: job info parsed via the parse_job_info callback
        :raises: HTTPError
        """
        response = self._make_http_request(
            "POST",
            urljoin(self.base_url, 'jobs'),
            json=payload
        )
        return self.parse_job_info(response.json())

    def get_job_details(self, id_):
        """View information about a specific job.
        The server will respond with the status and creation date.
        :param id_: id of the job to be requested
        :returns: Job info object
        :raises: HTTPError
        """
        if not id_:
            raise ValueError('id_ must be provided')
        response = self._make_http_request(
            "GET",
            urljoin(self.base_url, 'jobs/{}'.format(id_))
        )
        return self.parse_job_info(response.json())

    def get_list_of_jobs(self, limit=None, starting_after=None):
        """Get a list of jobs submitted within the last 30 days in reverse
        chronological order up to the provided limit number of jobs per call.
        Pagination is supported via passing the last job id from previous call into starting_after.
        :param limit: optional, limits the number of jobs returned,
                      if none, a default of 100 jobs is returned, max limit if 1000
        :param starting_after: optional, returns jobs created after the job with this id,
                               exclusive (job with this id is not included)
        :returns: list of jobs response data
        :raises: HTTPError
        """
        params = []
        if limit is not None:
            params.append('limit={}'.format(limit))
        if starting_after is not None:
            params.append('starting_after={}'.format(starting_after))
        # NOTE(review): with no params this yields a bare 'jobs?' url --
        # presumably harmless to the server; confirm.
        query = '?{}'.format('&'.join(params))
        response = self._make_http_request(
            "GET",
            urljoin(self.base_url, 'jobs{}'.format(query))
        )
        return [self.parse_job_info(job) for job in response.json()]

    def _get_result_json(self, id_, params):
        """Get the result of a job. This method is special in that it is intended to be hidden by
        the implementation this is done because python standard is to pass options individually
        instead of as an object and our true clients should match this standard
        :param id_: id of job to be requested
        :param params: dict of query parameter name -> value; entries whose
                       value is None are omitted from the query string
        :returns: job result data as raw json
        :raises: HTTPError
        """
        if not id_:
            raise ValueError('id_ must be provided')
        query_params = []
        for key, value in params.items():
            if value is not None:
                query_params.append('{0}={1}'.format(key, value))
        response = self._make_http_request(
            "GET",
            urljoin(self.base_url, 'jobs/{0}/result?{1}'.format(id_, '&'.join(query_params)))
        )
        return response.json()

    def _get_result_object(self, id_, params):
        """Get the result of a job. This method is special in that it is intended to be hidden by
        the implementation this is done because python standard is to pass options individually
        instead of as an object and our true clients should match this standard
        :param id_: id of job to be requested
        :param params: dict of query parameters, see _get_result_json
        :returns: job result data as object (parsed via parse_job_result)
        :raises: HTTPError
        """
        return self.parse_job_result(self._get_result_json(id_, params))

    def delete_job(self, id_):
        """Delete a specific job
        All data related to the job, such as input media and result, will be permanently
        deleted. A job can only by deleted once it's completed.
        :param id_: id of job to be deleted
        :returns: None if job was successfully deleted
        :raises: HTTPError
        """
        if not id_:
            raise ValueError('id_ must be provided')
        self._make_http_request(
            "DELETE",
            urljoin(self.base_url, 'jobs/{}'.format(id_)),
        )
        return

    def create_payload_with_source(self, media_url, source_config, metadata, callback_url,
                                   delete_after_seconds, notification_config):
        """Build a job payload from a media source plus the common job options.
        :param media_url: plain url of the source media (older style; subclasses
                          mark it deprecated in favor of source_config)
        :param source_config: CustomerUrlData describing the source media url and
                              optional auth headers; serialized via to_dict()
        :param metadata: info to associate with the job
        :param callback_url: plain callback url (older style, see notification_config)
        :param delete_after_seconds: seconds after job completion when the job is auto-deleted
        :param notification_config: CustomerUrlData for the completion webhook
        :returns: payload dict ready to be POSTed by _submit_job
        """
        payload = {}
        if media_url:
            payload['media_url'] = media_url
        if source_config:
            payload['source_config'] = source_config.to_dict()
        self._copy_options(payload, metadata, callback_url, delete_after_seconds,
                           notification_config)
        return payload

    def _enhance_payload(self, payload, metadata, callback_url, delete_after_seconds,
                         notification_config):
        """Return a copy of payload with the common job options merged in.
        The input dict is not modified.
        """
        enhanced = payload.copy()
        self._copy_options(enhanced, metadata, callback_url, delete_after_seconds,
                           notification_config)
        return enhanced

    @staticmethod
    def _copy_options(payload, metadata, callback_url, delete_after_seconds,
                      notification_config):
        """Copy the common optional job options into payload, in place.
        delete_after_seconds is compared against None explicitly so that an
        explicit 0 is still sent; the other options are skipped when falsy.
        """
        if metadata:
            payload['metadata'] = metadata
        if callback_url:
            payload['callback_url'] = callback_url
        if delete_after_seconds is not None:
            payload['delete_after_seconds'] = delete_after_seconds
        if notification_config:
            payload['notification_config'] = notification_config.to_dict()
"""Client used or interacting with out sentiment analysis api"""
from .generic_api_client import GenericApiClient
from .models import TopicExtractionJob, TopicExtractionResult
class TopicExtractionClient(GenericApiClient):
    """Client for interacting with the Rev AI topic extraction api"""

    # Version of the Rev AI topic extraction api targeted by this client
    api_version = 'v1'

    # Name under which the topic extraction api is exposed
    api_name = 'topic_extraction'

    def __init__(self, access_token):
        """Constructor

        :param access_token: access token which authorizes all requests and links them to your
                             account. Generated on the settings page of your account dashboard
                             on Rev AI.
        """
        GenericApiClient.__init__(
            self, access_token, self.api_name, self.api_version,
            TopicExtractionJob.from_json, TopicExtractionResult.from_json)

    def submit_job_from_text(self,
                             text=None,
                             metadata=None,
                             callback_url=None,
                             delete_after_seconds=None,
                             language=None,
                             notification_config=None):
        """Submit a plain text string to the Rev AI topic extraction api.

        :param text: plain text string to be run through topic extraction
        :param metadata: info to associate with the job
        :param callback_url: the callback url to invoke on job completion as a webhook

            .. deprecated:: 2.16.0
                Use notification_config instead
        :param delete_after_seconds: number of seconds after job completion when the job
            is auto-deleted
        :param language: one of the supported ISO 639-1 (2-letter) or ISO 639-3 (3-letter)
            language codes as defined in the API Reference
        :param notification_config: CustomerUrlData object containing the callback url to
            invoke on job completion as a webhook and optional authentication headers to
            use when calling the callback url
        :returns: TopicExtractionJob object
        :raises: HTTPError
        """
        options = {'text': text, 'language': language}
        payload = self._enhance_payload(options, metadata, callback_url,
                                        delete_after_seconds, notification_config)
        return self._submit_job(payload)

    def submit_job_from_transcript(self,
                                   transcript=None,
                                   metadata=None,
                                   callback_url=None,
                                   delete_after_seconds=None,
                                   language=None,
                                   notification_config=None):
        """Submit an async-transcription Transcript to the Rev AI topic extraction api.

        :param transcript: Transcript object from the Rev AI async transcription client to
            be run through topic extraction
        :param metadata: info to associate with the job
        :param callback_url: the callback url to invoke on job completion as a webhook

            .. deprecated:: 2.16.0
                Use notification_config instead
        :param delete_after_seconds: number of seconds after job completion when the job
            is auto-deleted
        :param language: one of the supported ISO 639-1 (2-letter) or ISO 639-3 (3-letter)
            language codes as defined in the API Reference
        :param notification_config: CustomerUrlData object containing the callback url to
            invoke on job completion as a webhook and optional authentication headers to
            use when calling the callback url
        :returns: TopicExtractionJob object
        :raises: HTTPError
        """
        options = {'json': transcript.to_dict(), 'language': language}
        payload = self._enhance_payload(options, metadata, callback_url,
                                        delete_after_seconds, notification_config)
        return self._submit_job(payload)

    def get_result_json(self, id_, threshold=None):
        """Get the result of a topic extraction job as raw json.

        :param id_: id of job to be requested
        :param threshold: score threshold for topics. No topics with scores under this
            threshold will be returned
        :returns: job result data as raw json
        :raises: HTTPError
        """
        return self._get_result_json(id_, {'threshold': threshold})

    def get_result_object(self, id_, threshold=None):
        """Get the result of a topic extraction job as a TopicExtractionResult object.

        :param id_: id of job to be requested
        :param threshold: score threshold for topics. No topics with scores under this
            threshold will be returned
        :returns: job result data as TopicExtractionResult object
        :raises: HTTPError
        """
        return self._get_result_object(id_, {'threshold': threshold})
"""Speech recognition tools for using Rev AI"""
import json
from .models import Account, CaptionType, Job, Transcript
from .baseclient import BaseClient
from . import utils
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class RevAiAPIClient(BaseClient):
"""Client which implements Rev AI API
Note that HTTPErrors can be thrown by methods of the API client. The HTTP
response payload attached to these error is a problem details. The problem
details information is represented as a JSON object with error specific
properties that help to troubleshoot the problem.
Problem details are defined at https://tools.ietf.org/html/rfc7807.
"""
# Default version of Rev AI
version = 'v1'
# Default base url for Rev AI
base_url = 'https://api.rev.ai/speechtotext/{}/'.format(version)
# Rev AI transcript format
rev_json_content_type = 'application/vnd.rev.transcript.v1.0+json'
def __init__(self, access_token):
"""Constructor
:param access_token: access token which authorizes all requests and links them to your
account. Generated on the settings page of your account dashboard
on Rev AI.
"""
BaseClient.__init__(self, access_token)
def submit_job_url(
self,
media_url=None,
metadata=None,
callback_url=None,
skip_diarization=False,
skip_punctuation=False,
speaker_channels_count=None,
custom_vocabularies=None,
filter_profanity=False,
remove_disfluencies=False,
delete_after_seconds=None,
language=None,
custom_vocabulary_id=None,
transcriber=None,
verbatim=None,
rush=None,
test_mode=None,
segments_to_transcribe=None,
speaker_names=None,
source_config=None,
notification_config=None,
skip_postprocessing=False,
remove_atmospherics=False,
speakers_count=None):
"""Submit media given a URL for transcription.
The audio data is downloaded from the URL
:param media_url: web location of the media file
.. deprecated:: 2.16.0
Use source_config instead
:param metadata: info to associate with the transcription job
:param callback_url: callback url to invoke on job completion as a webhook
.. deprecated:: 2.16.0
Use notification_config instead
:param skip_diarization: should Rev AI skip diarization when transcribing this file
:param skip_punctuation: should Rev AI skip punctuation when transcribing this file
:param speaker_channels_count: the number of speaker channels in the
audio. If provided the given audio will have each channel
transcribed separately and each channel will be treated as a single
speaker. Valid values are integers 1-8 inclusive.
:param custom_vocabularies: a collection of phrase dictionaries.
Including custom vocabulary will inform and bias the speech
recognition to find those phrases. Each dictionary should consist
of a key "phrases" which maps to a list of strings, each of which
represents a phrase you would like the speech recognition to bias
itself toward. Cannot be used with the custom_vocabulary_id parameter.
:param filter_profanity: whether to mask profane words
:param remove_disfluencies: whether to exclude filler words like "uh"
:param delete_after_seconds: number of seconds after job completion when job is auto-deleted
:param language: specify language using the one of the supported ISO 639-1 (2-letter) or
ISO 639-3 (3-letter) language codes as defined in the API Reference
:param custom_vocabulary_id: The id of a pre-completed custom vocabulary
submitted through the custom vocabularies api. Cannot be used with the
custom_vocabularies parameter.
:param transcriber: type of transcriber to use to transcribe the media file
:param verbatim: Only available with "human" transcriber.
Whether human transcriber transcribes every syllable.
:param rush: Only available with "human" transcriber.
Whether job is given higher priority to be worked on sooner for higher pricing.
:param test_mode: Only available with "human" transcriber.
Whether human transcription job is mocked and no transcription actually happens.
:param segments_to_transcribe: Only available with "human" transcriber.
Sections of transcript needed to be transcribed.
:param speaker_names: Only available with "human" transcriber.
Human readable names of speakers in the file.
:param source_config: CustomerUrlData object containing url of the source media and
optional authentication headers to use when accessing the source url
:param notification_config: CustomerUrlData object containing the callback url to
invoke on job completion as a webhook and optional authentication headers to use when
calling the callback url
:param skip_postprocessing: skip all text postprocessing (punctuation, capitalization, ITN)
:param remove_atmospherics: Atmospherics such as <laugh>, <affirmative>, etc. will not
appear in the transcript.
:param speakers_count: Use to specify the total number of unique speakers in the audio.
:returns: raw response data
:raises: HTTPError
"""
payload = self._create_job_options_payload(media_url, metadata, callback_url,
skip_diarization, skip_punctuation,
speaker_channels_count,
custom_vocabularies, filter_profanity,
remove_disfluencies, delete_after_seconds,
language, custom_vocabulary_id, transcriber,
verbatim, rush, test_mode,
segments_to_transcribe, speaker_names,
source_config, notification_config,
skip_postprocessing)
response = self._make_http_request(
"POST",
urljoin(self.base_url, 'jobs'),
json=payload
)
return Job.from_json(response.json())
def submit_job_local_file(
self,
filename,
metadata=None,
callback_url=None,
skip_diarization=False,
skip_punctuation=False,
speaker_channels_count=None,
custom_vocabularies=None,
filter_profanity=False,
remove_disfluencies=False,
delete_after_seconds=None,
language=None,
custom_vocabulary_id=None,
transcriber=None,
verbatim=None,
rush=None,
test_mode=None,
segments_to_transcribe=None,
speaker_names=None,
notification_config=None,
skip_postprocessing=False,
remove_atmospherics=False,
speakers_count=None):
"""Submit a local file for transcription.
Note that the content type is inferred if not provided.
:param filename: path to a local file on disk
:param metadata: info to associate with the transcription job
:param callback_url: callback url to invoke on job completion as a webhook
.. deprecated:: 2.16.0
Use notification_config instead
:param skip_diarization: should Rev AI skip diarization when transcribing this file
:param skip_punctuation: should Rev AI skip punctuation when transcribing this file
:param speaker_channels_count: the number of speaker channels in the
audio. If provided the given audio will have each channel
transcribed separately and each channel will be treated as a single
speaker. Valid values are integers 1-8 inclusive.
:param custom_vocabularies: a collection of phrase dictionaries.
Including custom vocabulary will inform and bias the speech
recognition to find those phrases. Each dictionary has the key
"phrases" which maps to a list of strings, each of which represents
a phrase you would like the speech recognition to bias itself toward.
Cannot be used with the custom_vocabulary_id parameter
:param filter_profanity: whether to mask profane words
:param remove_disfluencies: whether to exclude filler words like "uh"
:param delete_after_seconds: number of seconds after job completion when job is auto-deleted
:param language: specify language using the one of the supported ISO 639-1 (2-letter) or
ISO 639-3 (3-letter) language codes as defined in the API Reference
:param custom_vocabulary_id: The id of a pre-completed custom vocabulary
submitted through the custom vocabularies api. Cannot be used with the
custom_vocabularies parameter.
:param transcriber: type of transcriber to use to transcribe the media file
:param verbatim: Only available with "human" transcriber.
Whether human transcriber transcribes every syllable.
:param rush: Only available with "human" transcriber.
Whether job is given higher priority to be worked on sooner for higher pricing.
:param test_mode: Only available with "human" transcriber.
Whether human transcription job is mocked and no transcription actually happens.
:param segments_to_transcribe: Only available with "human" transcriber.
Sections of transcript needed to be transcribed.
:param speaker_names: Only available with "human" transcriber.
Human readable names of speakers in the file.
:param notification_config: CustomerUrlData object containing the callback url to
invoke on job completion as a webhook and optional authentication headers to use when
calling the callback url
:param skip_postprocessing: skip all text postprocessing (punctuation, capitalization, ITN)
:param remove_atmospherics: Atmospherics such as <laugh>, <affirmative>, etc. will not
appear in the transcript.
:param speakers_count: Use to specify the total number of unique speakers in the audio.
:returns: raw response data
:raises: HTTPError, ValueError
"""
if not filename:
raise ValueError('filename must be provided')
payload = self._create_job_options_payload(None, metadata, callback_url,
skip_diarization, skip_punctuation,
speaker_channels_count,
custom_vocabularies, filter_profanity,
remove_disfluencies, delete_after_seconds,
language, custom_vocabulary_id, transcriber,
verbatim, rush, test_mode,
segments_to_transcribe, speaker_names, None,
notification_config, skip_postprocessing)
with open(filename, 'rb') as f:
files = {
'media': (filename, f),
'options': (None, json.dumps(payload, sort_keys=True))
}
response = self._make_http_request(
"POST",
urljoin(self.base_url, 'jobs'),
files=files
)
return Job.from_json(response.json())
def get_job_details(self, id_):
"""View information about a specific job.
The server will respond with the status and creation date.
:param id_: id of the job to be requested
:returns: Job object if available
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}'.format(id_))
)
return Job.from_json(response.json())
def get_list_of_jobs(self, limit=None, starting_after=None):
"""Get a list of transcription jobs submitted within the last week in reverse
chronological order up to the provided limit number of jobs per call.
Pagination is supported via passing the last job id from previous call into starting_after.
:param limit: optional, limits the number of jobs returned,
if none, a default of 100 jobs is returned, max limit if 1000
:param starting_after: optional, returns jobs created after the job with this id,
exclusive (job with this id is not included)
:returns: list of jobs response data
:raises: HTTPError
"""
params = []
if limit is not None:
params.append('limit={}'.format(limit))
if starting_after is not None:
params.append('starting_after={}'.format(starting_after))
query = '?{}'.format('&'.join(params))
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs{}'.format(query))
)
return [Job.from_json(job) for job in response.json()]
def get_transcript_text(self, id_):
"""Get the transcript of a specific job as plain text.
:param id_: id of job to be requested
:returns: transcript data as text
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)),
headers={'Accept': 'text/plain'}
)
return response.text
def get_transcript_text_as_stream(self, id_):
"""Get the transcript of a specific job as a plain text stream.
:param id_: id of job to be requested
:returns: requests.models.Response HTTP response which can be used to stream
the payload of the response
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)),
headers={'Accept': 'text/plain'},
stream=True
)
return response
def get_transcript_json(self, id_):
"""Get the transcript of a specific job as json.
:param id_: id of job to be requested
:returns: transcript data as json
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)),
headers={'Accept': self.rev_json_content_type}
)
return response.json()
def get_transcript_json_as_stream(self, id_):
"""Get the transcript of a specific job as streamed json.
:param id_: id of job to be requested
:returns: requests.models.Response HTTP response which can be used to stream
the payload of the response
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)),
headers={'Accept': self.rev_json_content_type},
stream=True
)
return response
def get_transcript_object(self, id_):
"""Get the transcript of a specific job as a python object`.
:param id_: id of job to be requested
:returns: transcript data as a python object
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{}/transcript'.format(id_)),
headers={'Accept': self.rev_json_content_type}
)
return Transcript.from_json(response.json())
def get_captions(self, id_, content_type=CaptionType.SRT, channel_id=None):
"""Get the captions output of a specific job and return it as plain text
:param id_: id of job to be requested
:param content_type: caption type which should be returned. Defaults to SRT
:param channel_id: id of speaker channel to be captioned, only matters for multichannel jobs
:returns: caption data as text
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
query = self._create_captions_query(channel_id)
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{0}/captions{1}'.format(id_, query)),
headers={'Accept': content_type.value}
)
return response.text
def get_captions_as_stream(self, id_, content_type=CaptionType.SRT, channel_id=None):
"""Get the captions output of a specific job and return it as a plain text stream
:param id_: id of job to be requested
:param content_type: caption type which should be returned. Defaults to SRT
:param channel_id: id of speaker channel to be captioned, only matters for multichannel jobs
:returns: requests.models.Response HTTP response which can be used to stream
the payload of the response
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
query = self._create_captions_query(channel_id)
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'jobs/{0}/captions{1}'.format(id_, query)),
headers={'Accept': content_type.value},
stream=True
)
return response
def delete_job(self, id_):
"""Delete a specific transcription job
All data related to the job, such as input media and transcript, will be permanently
deleted. A job can only by deleted once it's completed.
:param id_: id of job to be deleted
:returns: None if job was successfully deleted
:raises: HTTPError
"""
if not id_:
raise ValueError('id_ must be provided')
self._make_http_request(
"DELETE",
urljoin(self.base_url, 'jobs/{}'.format(id_)),
)
return
def get_account(self):
"""Get account information, such as remaining credits.
:raises: HTTPError
"""
response = self._make_http_request(
"GET",
urljoin(self.base_url, 'account')
)
return Account.from_json(response.json())
def _create_job_options_payload(
self,
media_url=None,
metadata=None,
callback_url=None,
skip_diarization=None,
skip_punctuation=None,
speaker_channels_count=None,
custom_vocabularies=None,
filter_profanity=None,
remove_disfluencies=None,
delete_after_seconds=None,
language=None,
custom_vocabulary_id=None,
transcriber=None,
verbatim=None,
rush=None,
test_mode=None,
segments_to_transcribe=None,
speaker_names=None,
source_config=None,
notification_config=None,
skip_postprocessing=False,
remove_atmospherics=None,
speakers_count=None):
payload = {}
if media_url:
payload['media_url'] = media_url
if skip_diarization:
payload['skip_diarization'] = skip_diarization
if skip_punctuation:
payload['skip_punctuation'] = skip_punctuation
if metadata:
payload['metadata'] = metadata
if callback_url:
payload['callback_url'] = callback_url
if custom_vocabularies:
payload['custom_vocabularies'] = utils._process_vocabularies(custom_vocabularies)
if speaker_channels_count:
payload['speaker_channels_count'] = speaker_channels_count
if filter_profanity:
payload['filter_profanity'] = filter_profanity
if remove_disfluencies:
payload['remove_disfluencies'] = remove_disfluencies
if delete_after_seconds is not None:
payload['delete_after_seconds'] = delete_after_seconds
if language:
payload['language'] = language
if custom_vocabulary_id:
payload['custom_vocabulary_id'] = custom_vocabulary_id
if transcriber:
payload['transcriber'] = transcriber
if verbatim:
payload['verbatim'] = verbatim
if rush:
payload['rush'] = rush
if test_mode:
payload['test_mode'] = test_mode
if segments_to_transcribe:
payload['segments_to_transcribe'] = segments_to_transcribe
if speaker_names:
payload['speaker_names'] =\
utils._process_speaker_names(speaker_names)
if source_config:
payload['source_config'] = source_config.to_dict()
if notification_config:
payload['notification_config'] = notification_config.to_dict()
if skip_postprocessing:
payload['skip_postprocessing'] = skip_postprocessing
if remove_atmospherics:
payload['remove_atmospherics'] = remove_atmospherics
if speakers_count:
payload['speakers_count'] = speakers_count
return payload
def _create_captions_query(self, speaker_channel):
return '' if speaker_channel is None else '?speaker_channel={}'.format(speaker_channel) | /rev_ai-2.18.0.tar.gz/rev_ai-2.18.0/src/rev_ai/apiclient.py | 0.848031 | 0.300669 | apiclient.py | pypi |
"""Speech recognition tools for using Rev AI"""
from .baseclient import BaseClient
from . import utils
from .models.customer_url_data import CustomerUrlData
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
class RevAiCustomVocabulariesClient(BaseClient):
    """Client which implements Rev AI CustomVocabulary API

    See https://docs.rev.ai/api/custom-vocabulary/reference/
    """

    # Default version of Rev AI
    version = 'v1'

    # Default base url for Rev AI
    base_url = 'https://api.rev.ai/speechtotext/{}/'.format(version)

    def __init__(self, access_token):
        """Constructor

        :param access_token: access token which authorizes all requests and
                             links them to your account. Generated on the
                             settings page of your account dashboard
                             on Rev AI
        """
        BaseClient.__init__(self, access_token)
        # All custom-vocabulary endpoints live under the vocabularies/ path.
        self.base_url = urljoin(self.base_url, 'vocabularies/')

    def submit_custom_vocabularies(
            self,
            custom_vocabularies,
            callback_url=None,
            metadata=None,
            notification_config=None):
        """Submit custom vocabularies.
        See https://docs.rev.ai/api/custom-vocabulary/reference/#operation/SubmitCustomVocabulary

        :param custom_vocabularies: List of CustomVocabulary objects
        :param callback_url: callback url to invoke on job completion as a webhook
            .. deprecated:: 2.16.0
                Use notification_config instead
        :param metadata: info to associate with the transcription job
        :param notification_config: CustomerUrlData object containing the callback url to
            invoke on job completion as a webhook and optional authentication headers to use
            when calling the callback url
        """
        if not custom_vocabularies:
            raise ValueError('custom_vocabularies must be provided')
        body = self._create_custom_vocabularies_options_payload(
            custom_vocabularies, callback_url, metadata, notification_config)
        return self._make_http_request("POST", self.base_url, json=body).json()

    def get_custom_vocabularies_information(self, id):
        """ Get the custom vocabulary status
        See https://docs.rev.ai/api/custom-vocabulary/reference/#operation/GetCustomVocabulary

        :param id: string id of custom vocabulary submission
        """
        return self._make_http_request("GET", urljoin(self.base_url, id)).json()

    def get_list_of_custom_vocabularies(self, limit=None):
        """ Get a list of custom vocabularies
        See https://docs.rev.ai/api/custom-vocabulary/reference/#operation/GetCustomVocabularies

        :param limit: optional, limits the number of jobs returned
        """
        url = self.base_url
        if limit:
            url = '{}?limit={}'.format(url, limit)
        return self._make_http_request("GET", url).json()

    def delete_custom_vocabulary(self, id):
        """ Delete a custom vocabulary
        See https://docs.rev.ai/api/custom-vocabulary/reference/#operation/DeleteCustomVocabulary

        :param id: string id of custom vocabulary to be deleted
        :returns: None if job was successfully deleted
        :raises: HTTPError
        """
        self._make_http_request("DELETE", urljoin(self.base_url, id))

    def _create_custom_vocabularies_options_payload(
            self,
            custom_vocabularies,
            callback_url=None,
            metadata=None,
            notification_config=None):
        # Assemble only the fields the caller actually supplied.
        payload = {}
        if custom_vocabularies:
            payload['custom_vocabularies'] = utils._process_vocabularies(custom_vocabularies)
        for key, value in (('callback_url', callback_url), ('metadata', metadata)):
            if value:
                payload[key] = value
        if notification_config:
            payload['notification_config'] = notification_config.to_dict()
        return payload
"""Sentiment analysis result model"""
from .sentiment_value import SentimentValue
class SentimentAnalysisResult:
    """Ordered collection of sentiment-scored statements from one input."""

    def __init__(self, messages):
        """
        :param messages: list of sentimented statements from the input in order of how they
            appeared in the input.
        """
        self.messages = messages

    def __eq__(self, other):
        """Override default equality operator.

        BUG FIX: zip() alone stops at the shorter list, so a result whose
        messages were a strict prefix of another's compared equal. Lengths
        are now compared as well.
        """
        if isinstance(other, self.__class__):
            return len(self.messages) == len(other.messages) and \
                all(a == b for a, b in zip(self.messages, other.messages))
        return False

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls([SentimentMessage.from_json(message) for message in json.get('messages', [])])
class SentimentMessage:
    """One sentiment-scored span of the analyzed input."""

    def __init__(self, content, score, sentiment, timestamp=None, end_timestamp=None,
                 offset=None, length=None):
        """
        :param content: content of the informant, pulled from input
        :param score: Sentimental "score" of the content. Numbers less than 0 indicate a
            negative (sad, angry) sentiment. Numbers above 0 indicate positive
            (joyful, happy) sentiment
        :param sentiment: Overall detected sentiment of the content, based off of score
        :param timestamp: time at which this element starts if input was json
        :param end_timestamp: time at which this element ends if input was json
        :param offset: Character index at which the content started in the source transcript,
            excludes invisible characters
        :param length: Length of the content in characters, excludes invisible characters
        """
        self.content = content
        self.score = score
        self.sentiment = sentiment
        self.timestamp = timestamp
        self.end_timestamp = end_timestamp
        self.offset = offset
        self.length = length

    def __eq__(self, other):
        """Attribute-wise equality; False for non-SentimentMessage values."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(
            json['content'],
            json['score'],
            SentimentValue.from_string(json['sentiment']),
            timestamp=json.get('ts'),
            end_timestamp=json.get('end_ts'),
            offset=json.get('offset'),
            length=json.get('length'))
"""Job model"""
from ...asynchronous.job_status import JobStatus
class SentimentAnalysisJob:
    """Status record for a sentiment-analysis job."""

    def __init__(
            self, id_, created_on, status,
            completed_on=None,
            callback_url=None,
            metadata=None,
            failure=None,
            failure_detail=None,
            word_count=None,
            delete_after_seconds=None):
        """
        :param id_: unique id of job
        :param created_on: date and time at which this job was started
        :param status: current job status 'IN_PROGRESS', 'COMPLETED', or 'FAILED'
        :param completed_on: date and time at which this job finished being processed
        :param callback_url: callback_url if provided
        :param metadata: metadata if provided
        :param failure: type of failure if job has failed
        :param failure_detail: more detailed failure message if job has failed
        :param word_count: count of words in job
        :param delete_after_seconds: seconds before deletion if provided
        """
        self.id = id_
        self.created_on = created_on
        self.status = status
        self.completed_on = completed_on
        # BUG FIX: a stray trailing comma previously made this a 1-tuple
        # (self.callback_url = callback_url,), breaking equality and any
        # caller reading the url back.
        self.callback_url = callback_url
        self.metadata = metadata
        self.failure = failure
        self.failure_detail = failure_detail
        self.delete_after_seconds = delete_after_seconds
        self.word_count = word_count

    def __eq__(self, other):
        """Override default equality operator"""
        if isinstance(other, self.__class__):
            return self.__dict__ == other.__dict__
        return False

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(
            json['id'],
            json['created_on'],
            JobStatus.from_string(json['status']),
            completed_on=json.get('completed_on'),
            callback_url=json.get('callback_url'),
            metadata=json.get('metadata'),
            failure=json.get('failure'),
            failure_detail=json.get('failure_detail'),
            word_count=json.get('word_count'),
            delete_after_seconds=json.get('delete_after_seconds'),
        )
class TopicExtractionResult:
    """Container for the topics extracted from one input."""

    def __init__(self, topics):
        """
        :param topics: list of topics included in output
        """
        self.topics = topics

    def __eq__(self, other):
        """Override default equality operator.

        BUG FIX: zip() alone stops at the shorter list, so a result whose
        topics were a strict prefix of another's compared equal. Lengths
        are now compared as well.
        """
        if isinstance(other, self.__class__):
            return len(self.topics) == len(other.topics) and \
                all(a == b for a, b in zip(self.topics, other.topics))
        return False

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls([Topic.from_json(topic) for topic in json.get('topics', [])])
class Topic:
    """A detected topic together with the text fragments supporting it."""

    def __init__(self, topic_name, score, informants):
        """
        :param topic_name: name of the topic, pulled directly from somewhere in the input text
        :param score: score of the topic, between 0 and 1. Higher means it is more likely that
            this is truly a topic
        :param informants: pieces of the input text which informed this choice of topic
        """
        self.topic_name = topic_name
        self.score = score
        self.informants = informants

    def __eq__(self, other):
        """Override default equality operator.

        BUG FIX: zip() alone stops at the shorter informant list, so a topic
        whose informants were a strict prefix of another's compared equal.
        Lengths are now compared as well.
        """
        if isinstance(other, self.__class__):
            return self.topic_name == other.topic_name \
                and self.score == other.score \
                and len(self.informants) == len(other.informants) \
                and all(a == b for a, b in zip(self.informants, other.informants))
        return False

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(
            json['topic_name'],
            json['score'],
            [Informant.from_json(informant) for informant in json.get('informants', [])])
class Informant:
    """A fragment of the source text that supported a topic choice."""

    def __init__(self, content, timestamp=None, end_timestamp=None, offset=None, length=None):
        """
        :param content: content of the informant, pulled from input
        :param timestamp: time at which this element starts if input was json
        :param end_timestamp: time at which this element ends if input was json
        :param offset: Character index at which the content started in the source transcript,
            excludes invisible characters
        :param length: Length of the content in characters, excludes invisible characters
        """
        self.content = content
        self.timestamp = timestamp
        self.end_timestamp = end_timestamp
        self.offset = offset
        self.length = length

    def __eq__(self, other):
        """Attribute-wise equality; False for non-Informant values."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(
            json['content'],
            timestamp=json.get('ts'),
            end_timestamp=json.get('end_ts'),
            offset=json.get('offset'),
            length=json.get('length'))
"""Job model"""
from ...asynchronous.job_status import JobStatus
class TopicExtractionJob:
    """Status record for a topic-extraction job."""

    def __init__(
            self, id_, created_on, status,
            completed_on=None,
            metadata=None,
            failure=None,
            failure_detail=None,
            word_count=None,
            delete_after_seconds=None):
        """
        :param id_: unique id of job
        :param created_on: date and time at which this job was started
        :param status: current job status 'IN_PROGRESS', 'COMPLETED', or 'FAILED'
        :param completed_on: date and time at which this job finished being processed
        :param metadata: metadata if provided
        :param failure: type of failure if job has failed
        :param failure_detail: more detailed failure message if job has failed
        :param word_count: count of words in job
        :param delete_after_seconds: seconds before deletion if provided
        """
        self.id = id_
        self.created_on = created_on
        self.status = status
        self.completed_on = completed_on
        self.metadata = metadata
        self.failure = failure
        self.failure_detail = failure_detail
        self.delete_after_seconds = delete_after_seconds
        self.word_count = word_count

    def __eq__(self, other):
        """Attribute-wise equality; False for non-TopicExtractionJob values."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        # Optional fields default to None when absent from the payload.
        optional = {key: json.get(key) for key in (
            'completed_on', 'metadata', 'failure', 'failure_detail',
            'word_count', 'delete_after_seconds')}
        return cls(
            json['id'],
            json['created_on'],
            JobStatus.from_string(json['status']),
            **optional)
"""Job model"""
from ..asynchronous.job_status import JobStatus
class LanguageIdentificationJob:
    """Status record for a language-identification job."""

    def __init__(
            self, id_, created_on, status,
            completed_on=None,
            callback_url=None,
            metadata=None,
            media_url=None,
            failure=None,
            failure_detail=None,
            processed_duration_seconds=None,
            delete_after_seconds=None):
        """
        :param id_: unique id of job
        :param created_on: date and time at which this job was started
        :param status: current job status 'IN_PROGRESS', 'COMPLETED', or 'FAILED'
        :param completed_on: date and time at which this job finished being processed
        :param callback_url: callback_url if provided
        :param metadata: metadata if provided
        :param media_url: url of transcribed media if job was submitted this way
        :param failure: type of failure if job has failed
        :param failure_detail: more detailed failure message if job has failed
        :param processed_duration_seconds: duration of file processed in seconds
        :param delete_after_seconds: seconds before deletion if provided
        """
        self.id = id_
        self.created_on = created_on
        self.status = status
        self.completed_on = completed_on
        self.callback_url = callback_url
        self.metadata = metadata
        self.media_url = media_url
        self.failure = failure
        self.failure_detail = failure_detail
        self.processed_duration_seconds = processed_duration_seconds
        self.delete_after_seconds = delete_after_seconds

    def __eq__(self, other):
        """Attribute-wise equality; False for non-LanguageIdentificationJob values."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        # Optional fields default to None when absent from the payload.
        optional = {key: json.get(key) for key in (
            'completed_on', 'callback_url', 'metadata', 'media_url',
            'failure', 'failure_detail', 'processed_duration_seconds',
            'delete_after_seconds')}
        return cls(
            json['id'],
            json['created_on'],
            JobStatus.from_string(json['status']),
            **optional)
class Transcript:
    """Top-level transcript: an ordered list of speaker monologues."""

    def __init__(self, monologues):
        """
        :param monologues: list of monologues included in output
        """
        self.monologues = monologues

    def __eq__(self, other):
        """Override default equality operator.

        BUG FIX: zip() alone stops at the shorter list, so a transcript whose
        monologues were a strict prefix of another's compared equal. Lengths
        are now compared as well.
        """
        if isinstance(other, self.__class__):
            return len(self.monologues) == len(other.monologues) and \
                all(a == b for a, b in zip(self.monologues, other.monologues))
        return False

    def to_dict(self):
        """Returns the raw form of the transcript as the api returns them"""
        return {'monologues': [monologue.to_dict() for monologue in self.monologues]}

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls([Monologue.from_json(monologue) for monologue in json.get('monologues', [])])
class Monologue:
    """A run of speech attributed to a single speaker."""

    def __init__(self, speaker, elements, speaker_info=None):
        """
        :param speaker: speaker identified for this monologue
        :param elements: list of elements spoken in this monologue
        :param speaker_info: information about the speaker if available
        """
        self.speaker = speaker
        self.elements = elements
        self.speaker_info = speaker_info

    def __eq__(self, other):
        """Override default equality operator.

        BUG FIX: zip() alone stops at the shorter element list, so a
        monologue whose elements were a strict prefix of another's compared
        equal. Lengths are now compared as well.
        """
        if isinstance(other, self.__class__):
            return self.speaker == other.speaker \
                and self.speaker_info == other.speaker_info \
                and len(self.elements) == len(other.elements) \
                and all(a == b for a, b in zip(self.elements, other.elements))
        return False

    def to_dict(self):
        """Returns the raw form of the monologue as the api returns them"""
        json = {'speaker': self.speaker,
                'elements': [element.to_dict() for element in self.elements]}
        if self.speaker_info:
            json['speaker_info'] = self.speaker_info.to_dict()
        return json

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        speaker_info = None
        if json.get('speaker_info') is not None:
            speaker_info = SpeakerInfo.from_json(json['speaker_info'])
        return cls(
            json['speaker'],
            [Element.from_json(element) for element in json.get('elements', [])],
            speaker_info)
class SpeakerInfo:
    """Identity metadata for the speaker of a monologue."""

    def __init__(self, id_, display_name):
        """
        :param id_: speaker id identified for this monologue
        :param display_name: Human readable name of the speaker if available
        """
        self.id = id_
        self.display_name = display_name

    def __eq__(self, other):
        """Equal when both id and display name match; False otherwise."""
        if not isinstance(other, self.__class__):
            return False
        return (self.id, self.display_name) == (other.id, other.display_name)

    def to_dict(self):
        """Returns the raw form of the speaker info as the api returns them"""
        return {
            'id': self.id,
            'display_name': self.display_name,
        }

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(json['id'], json['display_name'])
class Element:
    """A single transcript token (word, punctuation, or unknown span)."""

    def __init__(self, type_, value, timestamp, end_timestamp, confidence):
        """
        :param type_: type of element: text, punct, or unknown
        :param value: value of the element
        :param timestamp: time at which this element starts in the audio
        :param end_timestamp: time at which this element ends in the audio
        :param confidence: confidence in this output
        """
        self.type_ = type_
        self.value = value
        self.timestamp = timestamp
        self.end_timestamp = end_timestamp
        self.confidence = confidence

    def __eq__(self, other):
        """Attribute-wise equality; False for non-Element values."""
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def to_dict(self):
        """Returns the raw form of the element as the api returns them"""
        return {
            'type': self.type_,
            'value': self.value,
            'ts': self.timestamp,
            'end_ts': self.end_timestamp,
            'confidence': self.confidence,
        }

    @classmethod
    def from_json(cls, json):
        """Alternate constructor used for parsing json"""
        return cls(
            json['type'],
            json['value'],
            json.get('ts'),
            json.get('end_ts'),
            json.get('confidence'))
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[0].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
    def __repr__(self):
        """Return a human-readable summary of the Gaussian instance.

        Returns:
            string: characteristics of the Gaussian, e.g.
            "mean 0, standard deviation 1"
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev) | /rev_distributions-0.1.tar.gz/rev_distributions-0.1/distributions/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi
import json
import sys
import logging
import pandas
import time
import os
# Module-level logger writing to stderr through a plain StreamHandler.
# NOTE(review): logging.getLogger(__file__) keys the logger by file path;
# the conventional key is __name__ -- confirm before changing.
log = logging.getLogger(__file__)
handler = logging.StreamHandler()
log.addHandler(handler)
log.setLevel(logging.WARNING)
def json_reader(input_file):
    """
    Read a Rev transcript JSON file from disk.

    Arguments:
        input_file (str, required) : Path to JSON file
    Returns:
        dictionary parsed from JSON
    """
    with open(input_file, "r") as jsonin:
        parsed = json.load(jsonin)
    log.debug("json_reader read file '{}' with length {}".format(input_file, len(parsed)))
    return parsed
def rev_json_parser(input_dict, additional_offset=0):
    """
    Convert the JSON created `input_dict` towards FAVE format
    Defined here: https://github.com/JoFrhwld/FAVE/wiki/Using-FAVE-align
    Args:
        input_dict (dict, required) : input_dict as created from Rev annotation transcript
        additional_offset (float, optional) : number to add to / subtract from each timestamp; defaults to 0
    Returns:
        list of dicts with keys required to make DataFrame for FAVE
    """
    fails = 0
    output_list = []
    # Map speaker ids to display names for the FAVE rows.
    speaker_dict = {dicti["id"]: dicti["name"] for dicti in input_dict["speakers"]}
    log.debug("Speakers: {}".format(speaker_dict))
    for monologue in input_dict["monologues"]:
        spans = monologue["spans"]
        # BUG FIX: compare against None explicitly so spans with a
        # legitimate timestamp of 0 (falsy) are not discarded as
        # unannotated.
        annotated_spans = [i for i in spans
                           if i.get("ts") is not None and i.get("endTs") is not None]
        fails += len(spans) - len(annotated_spans)
        if annotated_spans:
            speaker_id = monologue.get("speaker")
            speaker_name = speaker_dict.get(speaker_id)
            # Monologue spans first annotated start to last annotated end.
            onset, offset = annotated_spans[0]["ts"], annotated_spans[-1]["endTs"]
            transcription = " ".join([i.get("value") for i in annotated_spans])
            fave_dict = {
                "speaker_id": speaker_id,
                "speaker_name": speaker_name,
                "onset": onset + additional_offset,
                "offset": offset + additional_offset,
                "transcription": transcription
            }
            output_list.append(fave_dict)
    log.debug("{} spans output by rev_json_parser".format(len(output_list)))
    log.debug("{} spans did not have complete timestamp information".format(fails))
    return output_list
def fave_csv_writer(input_list, output_file):
    """
    Write a list of FAVE-style span dicts to a tab-separated file.

    Arguments:
        input_list (list, required) : a list of dicts, to be fed into DataFrame.from_records
        output_file (str, required) : path to write the CSV file to
    Returns:
        Nothing
    """
    # Fixed FAVE column order: speaker info, timestamps, transcription.
    ordered_cols = ["speaker_id", "speaker_name", "onset", "offset", "transcription"]
    frame = pandas.DataFrame.from_records(input_list)
    frame.to_csv(output_file,
                 columns=ordered_cols,
                 sep="\t",
                 index=False,
                 header=False)
    log.debug("Written fave_csv to '{}'".format(output_file))
if __name__ == "__main__":
"""
User needs to input (drag & drop) a folder containing json files with rev transcripts.
"""
if len(sys.argv) < 2:
raise IOError("Not enough inputs: 'folder_name' needed")
if len(sys.argv) > 3:
raise IOError("Too many inputs: only 'folder_name', 'additional_offset' needed. Inputs given: {}".format(",".join(sys.argv)))
if len(sys.argv) == 3:
additionaloffset = float(sys.argv[2])
else:
additionaloffset = 0
inputfolder = sys.argv[1]
for fili in [os.path.join(inputfolder, i) for i in os.listdir(inputfolder) if i.lower().endswith(".json")]:
print ("Working on input file '{}'".format(fili))
timestamp = time.strftime("%Y%m%d-%H%M")
outputfile = "_".join([os.path.split(fili)[1].lower().rstrip(".json"), timestamp, "faved.csv"])
outputpath = os.path.join(os.path.split(fili)[0], outputfile)
json_dict = json_reader(fili)
parsed_list = rev_json_parser(json_dict, additional_offset = additionaloffset)
fave_csv_writer(input_list = parsed_list, output_file = outputpath)
print ("CSV file written to '{}'".format(outputpath)) | /rev-reader-0.0.3.tar.gz/rev-reader-0.0.3/rev_reader/rev_reader.py | 0.5144 | 0.190366 | rev_reader.py | pypi |
#pylint: disable=C0413
import sys
import argparse
import itertools
import numpy as np
import pandas as pd
import pysam
#pylint: disable=E0611
from pysam import VariantFile
def make_no_change_dict(vcf_path):
    """
    Makes a no change list from a vcf file
    Parameters
    ----------
    vcf_path : str
        location to vcf file
    Returns
    -------
    no_change_dict: dictionary
        key is a tuple, Last entry is the mutation, other entries are positions
        where the mutation occurs
        value is a string detailing type of mutation. Singling out
        insert or other
    """
    no_change_dict = {}
    for rec in VariantFile(vcf_path).fetch():
        ref = rec.ref
        # Reference positions covered by this record.
        pos_key = tuple(range(rec.pos, rec.pos + len(ref)))
        # A record without ALT alleles is handled as a single empty variant.
        for alt in (rec.alts if rec.alts else ("",)):
            # Inserts are the alleles longer than the reference allele.
            var_type = "insert" if len(alt) > len(ref) else "other"
            # Right-pad shorter alleles with '-' so every key spans the ref.
            padded = alt + "-" * (len(ref) - len(alt))
            no_change_dict[pos_key + (padded,)] = var_type
    return no_change_dict
def parse_cigar_seq(cigar_tuple, raw_read):
    """
    Build the aligned sequence (inserts removed) from a cigar tuple and
    a raw read.
    Parameters
    ----------
    cigar_tuple : tuple of tuples
        cigar operations as (op, length) pairs
    raw_read : str
        raw sequence read
    Returns
    -------
    aligned_read: str
        aligned read with deletions shown as '-' and inserts dropped
    """
    pieces = []
    cursor = 0
    for op, length in cigar_tuple:
        if op in (0, 4):
            # Match/mismatch or soft clip: copy the bases through.
            pieces.append(raw_read[cursor:cursor + length])
            cursor += length
        elif op == 1:
            # Insertion: consume the inserted bases without emitting them.
            cursor += length
        elif op == 2:
            # Deletion: pad the alignment with gap characters.
            pieces.append('-' * length)
    return ''.join(pieces)
def parse_cigar_seq_inserts(cigar_tuple, raw_read):
    """
    Build the aligned sequence from a cigar tuple and a raw read,
    keeping inserted bases.
    Parameters
    ----------
    cigar_tuple : tuple of tuples
        cigar operations as (op, length) pairs
    raw_read : str
        raw sequence read
    Returns
    -------
    aligned_read: str
        aligned read with deletions shown as '-' and inserts kept
    """
    pieces = []
    cursor = 0
    for op, length in cigar_tuple:
        if op in (0, 1, 4):
            # Match/mismatch, insertion or soft clip: copy the bases.
            pieces.append(raw_read[cursor:cursor + length])
            cursor += length
        elif op == 2:
            # Deletion: pad the alignment with gap characters.
            pieces.append('-' * length)
    return ''.join(pieces)
def make_raw_read(cigar_tuple, inserted_read, no_inserted_read):
    """
    Reconstruct a raw read from the cigar plus the two aligned variants.
    Parameters
    ----------
    cigar_tuple : list of tuples
        cigar operations as (op, length) pairs
    inserted_read : str
        aligned read including inserts
    no_inserted_read : str
        aligned read without inserts
    Returns
    -------
    raw_read : str
        raw read: non-insert stretches taken from ``no_inserted_read``,
        insert stretches taken from ``inserted_read``
    """
    pieces = []
    ins_pos = 0
    plain_pos = 0
    for op, length in cigar_tuple:
        if op in (0, 2, 4):
            # Both aligned strings advance in lockstep outside inserts.
            pieces.append(no_inserted_read[plain_pos:plain_pos + length])
            plain_pos += length
            ins_pos += length
        elif op == 1:
            # Only the insert-bearing string contains these bases.
            pieces.append(inserted_read[ins_pos:ins_pos + length])
            ins_pos += length
    return ''.join(pieces)
def find_inserts(old_cigar, insert_variants_list):
    """
    Split insert positions into those to keep and those to remove.
    Parameters
    ----------
    old_cigar : list of tuples
        cigar operations as (op, length) pairs
    insert_variants_list : list
        read positions of inserts that are backed by a known variant
    Returns
    -------
    good_inserts : list
        insert positions present in ``insert_variants_list``
    bad_inserts : list
        insert positions not backed by a variant
    """
    insert_positions = []
    offset = 0
    # Walk the cigar, recording the read coordinates of every insert base.
    for op, length in old_cigar:
        if op == 1:
            insert_positions.extend(range(offset, offset + length))
        offset += length
    keep = set(insert_variants_list)
    good_inserts = [pos for pos in insert_positions if pos in keep]
    bad_inserts = [pos for pos in insert_positions if pos not in keep]
    return good_inserts, bad_inserts
def build_new_cigar(read, good_inserts, bad_inserts):
    """
    Build a new cigar and read with unsupported inserts removed.
    Parameters
    ----------
    read : str
        read with inserts and deletions ('-' marks deletions)
    good_inserts : list
        positions of inserts to keep (emitted as op 1)
    bad_inserts : list
        positions of inserts to drop from the read
    Returns
    -------
    cigar: list of tuples
        cigar sequence as (op, run_length) pairs
    finished_read: str
        new read with the bad insertions removed
    """
    drop = set(bad_inserts)
    keep_ins = set(good_inserts)
    ops = []
    kept_chars = []
    for pos, base in enumerate(read):
        if pos in drop:
            # Unsupported insert: remove from both read and cigar.
            continue
        kept_chars.append(base)
        if pos in keep_ins:
            ops.append(1)
        elif base in ('-', '2'):
            # Gap character ('2' mirrors the original implementation's
            # internal deletion marker) becomes a deletion op.
            ops.append(2)
        else:
            ops.append(0)
    # Collapse consecutive identical ops into (op, run_length) pairs.
    #pylint: disable=E1133
    cigar = [(op, sum(1 for _ in grp)) for op, grp in itertools.groupby(ops)]
    return cigar, ''.join(kept_chars)
def build_dataframe(samfile):
    """
    Collect per-read aligned sequences and alignment metadata.
    Parameters
    ----------
    samfile : pysam.AlignmentFile
        open alignment file
    Returns
    -------
    sam_file_dict : dict
        read name -> list of aligned-sequence characters (inserts removed)
    new_sam_file_list : list
        the read objects, in fetch order
    sam_file_ref_start : list
        reference start position of each read
    """
    sam_file_dict = {}
    new_sam_file_list = []
    sam_file_ref_start = []
    for read in samfile.fetch():
        aligned = parse_cigar_seq(read.cigartuples, read.query_sequence)
        sam_file_dict[read.query_name] = list(aligned)
        new_sam_file_list.append(read)
        sam_file_ref_start.append(read.reference_start)
    return sam_file_dict, new_sam_file_list, sam_file_ref_start
def revert_mutations(file_path, no_change_dict):
    """
    Reverts the mutations to the consensus in each position except those
    recorded in ``no_change_dict``.
    Parameters
    ----------
    file_path : str
        location of the indexed BAM file
    no_change_dict : dict
        variant positions/alleles to preserve, as built by
        ``make_no_change_dict``
    Returns
    -------
    None.
    Side Effects
    ------------
    Writes the reverted BAM records to "-" (stdout) and prints the names
    of samples carrying unsupported inserts to stderr.
    """
    #pylint: disable=E1101, R0914
    samfile = pysam.AlignmentFile(file_path, 'rb')
    sam_file_dict, new_sam_file_list,\
    sam_file_ref_start = build_dataframe(samfile)
    # Change consensus columns first
    # One row per read, one column per aligned position (inserts excluded).
    samfile_df = pd.DataFrame.from_dict(sam_file_dict, orient='index')
    # Per-column modes: the consensus base of every aligned position.
    mode_list = samfile_df.mode()
    col_num = len(list(samfile_df))
    # NOTE(review): only the first read's reference start is used -- assumes
    # all reads share the same start coordinate; confirm for this pipeline.
    sam_file_ref_start = sam_file_ref_start[0]
    no_change_list = []
    insert_variants_list = []
    # Split variant keys into substitution positions ('other') and the
    # position ranges covered by inserts.
    for item in no_change_dict:
        if no_change_dict[item] == 'other':
            no_change_list += list(item[:-1])
        else:
            insert_length = len(item[-1])
            insert_variants_list += list(range(item[0], item[0] + insert_length))
    # Convert 1-based reference coordinates to 0-based dataframe columns.
    no_change_list = set(no_change_list)
    no_change_list = [x - sam_file_ref_start - 1 for x in no_change_list]
    insert_variants_list = set(insert_variants_list)
    insert_variants_list = [int(x) - int(sam_file_ref_start) for x in insert_variants_list]
    # Overwrite every non-variant column with its consensus base.
    consensus = [x for x in range(col_num) if x not in no_change_list]
    con_mode = mode_list[consensus]
    samfile_df[consensus] = np.nan
    samfile_df[consensus] = samfile_df[consensus].fillna(con_mode.iloc[0])
    # next we need to find rows for variants,
    # then replace all other rows with consensus
    # REMBER TO SUBTRACT 1 because python starts at 0
    pairs = []
    # (row, column) pairs whose bases match a recorded variant allele and
    # must therefore be left untouched.
    for long_key, var_type in no_change_dict.items():
        if var_type == 'other':
            key = long_key[:-1]
            value = long_key[-1]
            key_list = [x - sam_file_ref_start - 1 for x in key]
            value = list(value)
            rows = samfile_df[key_list][(samfile_df[key_list] == value).all(1)].index
            pairs += list(itertools.product(rows, key_list))
    # Every remaining (row, column) pair in a variant column reverts to
    # the consensus base.
    change_pairs = [x for x in itertools.product(samfile_df.index.tolist(), list(no_change_list))\
    if x not in pairs]
    index_pos = dict(zip(samfile_df.index, range(len(samfile_df.index))))
    change_rows = [index_pos[x[0]] for x in change_pairs]
    change_cols = [x[1] for x in change_pairs]
    samfile_df.values[change_rows, change_cols] = np.nan
    samfile_df[no_change_list] = \
    samfile_df[no_change_list].fillna(mode_list[no_change_list].iloc[0])
    # Re-join the per-position characters into one string per read.
    samfile_df['read'] = samfile_df.sum(axis=1)
    new_reads = dict(zip(samfile_df.index,samfile_df['read']))
    new_cigars = {}
    print("Samples that have bad inserts", file=sys.stderr)
    for new_read in new_sam_file_list:
        header_id = new_read.query_name
        old_cigar = new_read.cigartuples
        # Leading soft clip shifts the reported mapping position.
        if old_cigar[0][0] == 4:
            new_pos = new_read.pos - old_cigar[0][1]
            new_read.pos = new_pos
        good_inserts, bad_inserts = find_inserts(old_cigar, insert_variants_list)
        aligned_inserts = parse_cigar_seq_inserts(old_cigar, new_read.query_sequence)
        aligned_no_inserts = new_reads[header_id]
        # Rebuild the raw read, keeping variant-supported inserts only.
        raw_read = make_raw_read(old_cigar, aligned_inserts, aligned_no_inserts)
        new_cigar, finished_read = build_new_cigar(raw_read, good_inserts, bad_inserts)
        new_cigars[header_id] = new_cigar
        new_read.cigartuples = new_cigar
        finished_read = finished_read.replace('-', '')
        new_read.query_sequence = finished_read
        if len(bad_inserts) > 0:
            print(f"{header_id}:{','.join([str(x) for x in bad_inserts])}", file=sys.stderr)
    # "-" streams the BAM output using the input file's header as template.
    outfile = pysam.AlignmentFile("-", "wb", template=samfile)
    for item in new_sam_file_list:
        outfile.write(item)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Revert sequences to consensus, but leave mutations positions unchanged',
        usage='revert_mutations_bam.py [-h] [-i,--input] [-v,--vcf] > <output_file>',
        formatter_class=argparse.RawDescriptionHelpFormatter
        )
    # The argument group only carries the combined help text; the actual
    # options below suppress their individual help strings.
    g = parser.add_argument_group(title='input options',
                                  description='''-in, --input <input> Input file, must in indexed bam file
-vp, --vcf <vcf-file> Path to vcf file''')
    parser.add_argument('-in', '--input', metavar='',
                        help=argparse.SUPPRESS)
    parser.add_argument('-vp', '--vcf', metavar='',
                        help=argparse.SUPPRESS)
    args=parser.parse_args()
    # Build the variant lookup from the VCF, then stream the reverted BAM
    # records (to stdout) from the input alignment file.
    NO_CHANGE_DICT = make_no_change_dict(args.vcf)
    revert_mutations(args.input, NO_CHANGE_DICT) | /rev-seqs-0.0.1.tar.gz/rev-seqs-0.0.1/revseqs/revseqs.py | 0.464416 | 0.247527 | revseqs.py | pypi
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from reval.best_nclust_cv import FindBestClustCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import zero_one_loss, adjusted_mutual_info_score
from reval.visualization import plot_metrics
import matplotlib.pyplot as plt
from reval.utils import kuhn_munkres_algorithm
# Generate 1,000 samples for 5 blobs
# ----------------------------------
data = make_blobs(1000, 2, 5, random_state=42)
# NOTE(review): successive plt.scatter/plt.title calls below reuse the
# current figure unless a new one is created -- later plots draw over
# earlier ones; confirm this is intended for interactive use.
plt.scatter(data[0][:, 0],
            data[0][:, 1],
            c=data[1], cmap='rainbow_r')
# Split them into training and test set (30% of data)
X_tr, X_ts, y_tr, y_ts = train_test_split(data[0],
                                          data[1],
                                          test_size=0.30,
                                          random_state=42,
                                          stratify=data[1])
# Apply relative clustering validation with KNN and Hierarchical clustering
classifier = KNeighborsClassifier()
clustering = AgglomerativeClustering()
findbestclust = FindBestClustCV(nfold=10,
                                nclust_range=[2, 7],
                                s=classifier,
                                c=clustering,
                                nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(X_tr, y_tr)
out = findbestclust.evaluate(X_tr, X_ts, nbest)
# Align predicted cluster labels with the true labels before scoring.
perm_lab = kuhn_munkres_algorithm(y_ts, out.test_cllab)
print(f"Best number of clusters: {nbest}")
print(f"Test set external ACC: "
      f"{1 - zero_one_loss(y_ts, perm_lab)}")
print(f'AMI = {adjusted_mutual_info_score(y_ts, out.test_cllab)}')
print(f"Validation set normalized stability (misclassification): {metrics['val'][nbest]}")
print(f'Test set ACC = {out.test_acc}')
plot_metrics(metrics, title="Reval performance")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=y_ts, cmap='rainbow_r')
plt.title("True labels for test set")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=perm_lab, cmap='rainbow_r')
plt.title("Clustering labels for test set")
# Create a noisy dataset with 5 clusters
# ----------------------------------------
data_noisy = make_blobs(1000, 10, 5, random_state=42, cluster_std=3)
plt.scatter(data_noisy[0][:, 0],
            data_noisy[0][:, 1],
            c=data_noisy[1],
            cmap='rainbow_r')
Xnoise_tr, Xnoise_ts, ynoise_tr, ynoise_ts = train_test_split(data_noisy[0],
                                                              data_noisy[1],
                                                              test_size=0.30,
                                                              random_state=42,
                                                              stratify=data_noisy[1])
# Rerun the same reval configuration on the noisy data.
metrics_noise, nbest_noise, _ = findbestclust.best_nclust(Xnoise_tr, ynoise_tr)
out_noise = findbestclust.evaluate(Xnoise_tr, Xnoise_ts, nbest_noise)
plot_metrics(metrics_noise, title="Reval performance")
perm_lab_noise = kuhn_munkres_algorithm(ynoise_ts, out_noise.test_cllab)
print(f"Best number of clusters: {nbest_noise}")
print(f"Test set external ACC: "
      f"{1 - zero_one_loss(ynoise_ts, perm_lab_noise)}")
print(f'AMI = {adjusted_mutual_info_score(ynoise_ts, out_noise.test_cllab)}')
print(f"Validation set normalized stability (misclassification): {metrics_noise['val'][nbest_noise]}")
print(f"Result accuracy (on test set): "
      f"{out_noise.test_acc}")
plt.scatter(Xnoise_ts[:, 0], Xnoise_ts[:, 1],
            c=ynoise_ts, cmap='rainbow_r')
plt.title("True labels")
plt.scatter(Xnoise_ts[:, 0], Xnoise_ts[:, 1],
            c=perm_lab_noise, cmap='rainbow_r')
plt.title("Clustering labels for test set")
# Pre-processing with UMAP
# NOTE(review): mid-file import -- PEP 8 prefers imports at the top of the
# module; kept in place to preserve the example's structure.
from umap import UMAP
transform = UMAP(n_components=10, n_neighbors=30, min_dist=0.0)
Xtr_umap = transform.fit_transform(Xnoise_tr)
Xts_umap = transform.transform(Xnoise_ts)
plt.scatter(Xtr_umap[:, 0], Xtr_umap[:, 1],
            c=ynoise_tr, cmap='rainbow_r')
plt.title("UMAP-transformed training set with true labels")
plt.scatter(Xts_umap[:, 0], Xts_umap[:, 1],
            c=ynoise_ts, cmap='rainbow_r')
plt.title("UMAP-transformed test set with true labels")
metrics, nbest, _ = findbestclust.best_nclust(Xtr_umap, ynoise_tr)
out = findbestclust.evaluate(Xtr_umap, Xts_umap, nbest)
plot_metrics(metrics, title='Reval performance of UMAP-transformed dataset')
perm_noise = kuhn_munkres_algorithm(ynoise_ts, out.test_cllab)
print(f"Best number of clusters: {nbest}")
print(f"Test set external ACC: "
      f"{1 - zero_one_loss(ynoise_ts, perm_noise)}")
print(f'AMI = {adjusted_mutual_info_score(ynoise_ts, out.test_cllab)}')
print(f"Validation set normalized stability (misclassification): {metrics['val'][nbest]}")
print(f"Result accuracy (on test set): "
      f"{out.test_acc}")
plt.scatter(Xts_umap[:, 0], Xts_umap[:, 1],
            c=perm_noise, cmap='rainbow_r')
plt.title("Predicted labels for UMAP-preprocessed test set") | /reval-1.1.0-py3-none-any.whl/working_examples/blobs.py | 0.894017 | 0.642292 | blobs.py | pypi
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from reval.best_nclust_cv import FindBestClustCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import zero_one_loss, adjusted_mutual_info_score
import matplotlib.pyplot as plt
from umap import UMAP
from reval.visualization import plot_metrics
from reval.relative_validation import _kuhn_munkres_algorithm
# MNIST dataset with 10 classes
mnist, label = fetch_openml('mnist_784', version=1, return_X_y=True)
transform = UMAP(n_neighbors=30, min_dist=0.0, n_components=10, random_state=42)
# Stratified subsets of 7000 elements for both training and test set
mnist_tr, mnist_ts, label_tr, label_ts = train_test_split(mnist, label,
                                                          train_size=0.1,
                                                          test_size=0.1,
                                                          random_state=42,
                                                          stratify=label)
# Dimensionality reduction with UMAP as pre-processing step
mnist_tr = transform.fit_transform(mnist_tr)
mnist_ts = transform.transform(mnist_ts)
plt.scatter(mnist_tr[:, 0],
            mnist_tr[:, 1],
            c=label_tr.astype(int),
            s=0.1,
            cmap='rainbow_r')
plt.title('UMAP-transformed training subsample of MNIST dataset (N=7,000)')
plt.scatter(mnist_ts[:, 0], mnist_ts[:, 1],
            c=label_ts.astype(int), s=0.1, cmap='rainbow_r')
plt.title('UMAP-transformed test subsample of MNIST dataset (N=7,000)')
# Run relative clustering validation
classifier = KNeighborsClassifier()
clustering = AgglomerativeClustering()
findbestclust = FindBestClustCV(nfold=10, nclust_range=[2, 12],
                                s=classifier, c=clustering, nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(mnist_tr, label_tr)
out = findbestclust.evaluate(mnist_tr, mnist_ts, nbest)
plot_metrics(metrics, "Relative clustering validation performance on MNIST dataset")
# Align predicted clusters with true digit labels before scoring.
# NOTE(review): _kuhn_munkres_algorithm is a private reval symbol; the
# public kuhn_munkres_algorithm in reval.utils is the supported API.
perm_lab = _kuhn_munkres_algorithm(label_ts.astype(int), out.test_cllab)
plt.scatter(mnist_ts[:, 0], mnist_ts[:, 1],
            c=perm_lab, s=0.1, cmap='rainbow_r')
plt.title("Predicted labels for MNIST test set")
print(f"Best number of clusters: {nbest}")
print(f"Test set external ACC: "
      f"{1 - zero_one_loss(label_ts.astype(int), perm_lab)}")
print(f'AMI = {adjusted_mutual_info_score(label_ts.astype(int), perm_lab)}')
print(f"Validation set normalized stability (misclassification): {metrics['val'][nbest]}")
print(f"Result accuracy (on test set): "
      f"{out.test_acc}") | /reval-1.1.0-py3-none-any.whl/working_examples/mnist.py | 0.896784 | 0.652767 | mnist.py | pypi
import time
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from reval.best_nclust_cv import FindBestClustCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import numpy as np
def blobs_performance():
    """
    Time reval on simulated 5-blob datasets while varying the number of
    samples and features (10 repetitions of 10-fold CV per configuration),
    then plot the execution time in seconds of every configuration and
    save the figure to ./performance_blobs.png.
    """
    feature_counts = [10, 100, 500, 1000]
    sample_counts = [100, 500, 1000, 1500, 2000]
    perftime = []
    for n_samples in sample_counts:
        timings = []
        for n_features in feature_counts:
            start = time.time()
            points, labels = make_blobs(n_samples, n_features, 5,
                                        center_box=(-20, 20),
                                        random_state=42)
            X_tr, X_ts, y_tr, y_ts = train_test_split(points,
                                                      labels,
                                                      test_size=0.30,
                                                      random_state=42,
                                                      stratify=labels)
            classifier = KNeighborsClassifier(n_neighbors=5)
            clustering = KMeans()
            findbestclust = FindBestClustCV(nfold=10,
                                            nclust_range=[2, 7],
                                            s=classifier,
                                            c=clustering,
                                            nrand=100)
            metrics, nbest, _ = findbestclust.best_nclust(X_tr, iter_cv=10,
                                                          strat_vect=y_tr)
            elapsed = time.time() - start
            timings.append(elapsed)
            print(f'Feat {n_features}, samples {n_samples}: N cluster {nbest}, time: {elapsed}')
        perftime.append(timings)
    perftime = np.array(perftime)
    # One curve per feature count, execution time against sample count.
    fig, ax = plt.subplots()
    ax.plot(sample_counts, perftime[:, 0], label='10 features', linestyle='--', color='black')
    ax.plot(sample_counts, perftime[:, 1], label='100 features', color='black')
    ax.plot(sample_counts, perftime[:, 2], label='500 features', linestyle='-.', color='black')
    ax.plot(sample_counts, perftime[:, 3], label='1000 features', linestyle=':', color='black')
    ax.set_xlabel('Number of samples')
    ax.set_ylabel('Execution time (s)')
    ax.set_title("")
    ax.legend()
    plt.savefig('./performance_blobs.png', dpi=300)
if __name__ == "__main__":
blobs_performance() | /reval-1.1.0-py3-none-any.whl/working_examples/blobs_performance.py | 0.710528 | 0.595669 | blobs_performance.py | pypi |
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
from reval.best_nclust_cv import FindBestClustCV
from reval.visualization import plot_metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import adjusted_mutual_info_score, zero_one_loss
from reval.relative_validation import _kuhn_munkres_algorithm
import matplotlib.pyplot as plt
import numpy as np
# NUMBER OF FEATURES
# ------------------
# The first example illustrates how results can be influenced by the number of dataset features.
# Increasing the number of features can fix the problem.
data1 = make_blobs(1000, 10, 5, cluster_std=5, random_state=42)
# Plot synthetic dataset
plt.scatter(data1[0][:, 0], data1[0][:, 1],
            c=data1[1], cmap='rainbow_r')
plt.title('True labels for 10-feature dataset')
X_tr, X_ts, y_tr, y_ts = train_test_split(data1[0],
                                          data1[1],
                                          test_size=0.30, random_state=42,
                                          stratify=data1[1])
# Apply relative clustering validation with KNN and Hierarchical clustering
classifier = KNeighborsClassifier()
clustering = AgglomerativeClustering()
findbestclust = FindBestClustCV(nfold=10, nclust_range=[2, 7],
                                s=classifier, c=clustering, nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(X_tr, y_tr)
out = findbestclust.evaluate(X_tr, X_ts, nbest)
plot_metrics(metrics, "Reval performance for synthetic dataset with 10 features")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=out.test_cllab, cmap='rainbow_r')
plt.title("Predicted labels for 10-feature dataset")
# Compare Reval solution to true labels
# NOTE(review): _kuhn_munkres_algorithm is a private reval symbol; the
# public kuhn_munkres_algorithm in reval.utils is the supported API.
print(f'AMI test set = {adjusted_mutual_info_score(y_ts, out.test_cllab)}')
relabeling = _kuhn_munkres_algorithm(y_ts, out.test_cllab)
print(f'ACC test set = {1 - zero_one_loss(y_ts, relabeling)}')
# Increase the number of features from 10 to 20
data2 = make_blobs(1000, 20, 5, cluster_std=5, random_state=42)
# Plot synthetic dataset
plt.scatter(data2[0][:, 0],
            data2[0][:, 1],
            c=data2[1],
            cmap='rainbow_r')
plt.title('True labels for 20-feature dataset')
X_tr, X_ts, y_tr, y_ts = train_test_split(data2[0],
                                          data2[1],
                                          test_size=0.30, random_state=42,
                                          stratify=data2[1])
findbestclust = FindBestClustCV(nfold=10, nclust_range=[2, 7],
                                s=classifier, c=clustering, nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(X_tr, y_tr)
out = findbestclust.evaluate(X_tr, X_ts, nbest)
plot_metrics(metrics, "Reval performance for synthetic dataset with 20 features")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=out.test_cllab, cmap='rainbow_r')
plt.title("Predicted labels for 20-feature dataset")
# Compare predicted labels with true labels (Adjusted Mutual Information, accuracy)
print(f'AMI test set = {adjusted_mutual_info_score(y_ts, out.test_cllab)}')
relabeling = _kuhn_munkres_algorithm(y_ts, out.test_cllab)
print(f'ACC test set = {1 - zero_one_loss(y_ts, relabeling)}')
# NUMBER OF SAMPLES
# -----------------
np.random.seed(42)
# We generate three random samples from normal distributions
data1 = np.random.normal(-5, size=(100, 2))
data2 = np.random.normal(12, 2.5, size=(50, 2))
data3 = np.random.normal(6, 2.5, size=(50, 2))
data = np.append(data1, data2, axis=0)
data = np.append(data, data3, axis=0)
label = [0] * 100 + [1] * 50 + [2] * 50
plt.scatter(data[:, 0], data[:, 1],
            c=label, cmap='rainbow_r')
plt.title('Random samples from normal distribution Ns=(100, 50, 50)')
classifier = KNeighborsClassifier()
clustering = AgglomerativeClustering()
X_tr, X_ts, y_tr, y_ts = train_test_split(data, label,
                                          test_size=0.30,
                                          random_state=42,
                                          stratify=label)
# Apply relative clustering validation with KNN and Hierarchical clustering
findbestclust = FindBestClustCV(nfold=10, nclust_range=[2, 7],
                                s=classifier, c=clustering, nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(X_tr, y_tr)
out = findbestclust.evaluate(X_tr, X_ts, nbest)
plot_metrics(metrics, "Reval performance for synthetic dataset with Ns=(100, 50, 50)")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=_kuhn_munkres_algorithm(np.array(y_ts),
                                      out.test_cllab),
            cmap='rainbow_r')
plt.title(f'Predicted labels for classes with Ns=(100, 50, 50)')
# We now increase the number of samples in groups 2 and 3 to 500
data1 = np.random.normal(-5, size=(100, 2))
data2 = np.random.normal(12, 2.5, size=(500, 2))
data3 = np.random.normal(6, 2.5, size=(500, 2))
data = np.append(data1, data2, axis=0)
data = np.append(data, data3, axis=0)
label = [0] * 100 + [1] * 500 + [2] * 500
plt.scatter(data[:, 0], data[:, 1],
            c=label, cmap='rainbow_r')
plt.title('Random samples from normal distribution Ns=(100, 500, 500)')
classifier = KNeighborsClassifier()
clustering = AgglomerativeClustering()
X_tr, X_ts, y_tr, y_ts = train_test_split(data, label,
                                          test_size=0.30,
                                          random_state=42,
                                          stratify=label)
# Apply relative clustering validation with KNN and Hierarchical clustering
findbestclust = FindBestClustCV(nfold=10, nclust_range=[2, 7],
                                s=classifier, c=clustering, nrand=100)
metrics, nbest, _ = findbestclust.best_nclust(X_tr, y_tr)
out = findbestclust.evaluate(X_tr, X_ts, nbest)
plot_metrics(metrics, "Reval performance for synthetic dataset with Ns=(100, 500, 500)")
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=y_ts,
            cmap='rainbow_r')
plt.title(f'Test set true labels for classes with Ns=(100, 500, 500)')
plt.scatter(X_ts[:, 0], X_ts[:, 1],
            c=_kuhn_munkres_algorithm(np.array(y_ts),
                                      out.test_cllab),
            cmap='rainbow_r')
plt.title(f'Predicted labels for classes with Ns=(100, 500, 500)')
# Performance scores
# Test set ACC
print(f'Test set external '
      f'ACC = {1 - zero_one_loss(y_ts, _kuhn_munkres_algorithm(np.array(y_ts), out.test_cllab))}')
print(f'AMI = {adjusted_mutual_info_score(y_ts, out.test_cllab)}')
print(f"Validation stability metrics: {metrics['val'][nbest]}")
print(f"Test set ACC = {out.test_acc}") | /reval-1.1.0-py3-none-any.whl/working_examples/data_dimensionality.py | 0.928389 | 0.742381 | data_dimensionality.py | pypi
from sklearn.datasets import make_blobs, load_digits
from sklearn.model_selection import train_test_split
from reval.best_nclust_cv import FindBestClustCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering
from sklearn.metrics import zero_one_loss, adjusted_mutual_info_score
from reval.visualization import plot_metrics
import matplotlib.pyplot as plt
from reval.utils import kuhn_munkres_algorithm
from umap import UMAP
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
import numpy as np
from datasets.manuscript_builddatasets import build_ucidatasets
import warnings
import logging
warnings.filterwarnings('ignore')
logging.basicConfig(filename='manuscript_examples.log',
filemode='a',
format='%(asctime)s, %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.INFO)
"""
Three example functions that can be run from shell (change main at the bottom).
Example 1: blobs dataset;
Example 2: real-world dataset (handwritten digits)
Example 3: ensamble learning with 18 datasets from UCI Machine Learning Repository.
Algorithm performances are saved to manuscript_example.log file.
"""
# EXAMPLE 1: Isotropic Gaussian blobs
def example_1():
    """Run reval on a 5-blob synthetic dataset using KNN + k-means and
    print/plot the resulting stability and accuracy metrics."""
    data = make_blobs(1000, 2, 5, center_box=(-20, 20),
                      random_state=42)
    plt.figure(figsize=(6, 4))
    plt.scatter(data[0][:, 0],
                data[0][:, 1],
                c=data[1], cmap='rainbow_r')
    plt.title("Blobs dataset (N=1000)")
    plt.show()
    # Hold out 30% of the samples, stratified by true blob label.
    X_tr, X_ts, y_tr, y_ts = train_test_split(data[0],
                                              data[1],
                                              test_size=0.30,
                                              random_state=42,
                                              stratify=data[1])
    classifier = KNeighborsClassifier(n_neighbors=5)
    clustering = KMeans()
    findbestclust = FindBestClustCV(nfold=10,
                                    nclust_range=[2, 7],
                                    s=classifier,
                                    c=clustering,
                                    nrand=100)
    metrics, nbest, _ = findbestclust.best_nclust(X_tr, iter_cv=10, strat_vect=y_tr)
    out = findbestclust.evaluate(X_tr, X_ts, nbest)
    # Align cluster labels with the true labels before scoring.
    perm_lab = kuhn_munkres_algorithm(y_ts, out.test_cllab)
    print(f"Best number of clusters: {nbest}")
    print(f"Test set prediction ACC: "
          f"{1 - zero_one_loss(y_ts, perm_lab)}")
    print(f'AMI (true labels vs predicted labels) = '
          f'{adjusted_mutual_info_score(y_ts, out.test_cllab)}')
    print(f"Validation set normalized stability (misclassification):"
          f"{metrics['val'][nbest]}")
    print(f'Test set ACC = {out.test_acc} '
          f'(true labels vs predicted labels)')
    plot_metrics(metrics, title="Reval performance blobs dataset",
                 legend_loc=2)
    plt.figure(figsize=(6, 4))
    plt.scatter(X_ts[:, 0], X_ts[:, 1],
                c=y_ts, cmap='rainbow_r')
    plt.title("Test set true labels (blobs dataset)")
    plt.show()
    plt.figure(figsize=(6, 4))
    plt.scatter(X_ts[:, 0], X_ts[:, 1],
                c=perm_lab, cmap='rainbow_r')
    plt.title("Test set clustering labels (blobs dataset)")
    plt.show()
# EXAMPLE 2: Handwritten digits dataset example
def example_2():
    """Run reval on the scikit-learn digits dataset after a 2-component
    UMAP reduction, using KNN + k-means, and print/plot the results."""
    digits_dataset = load_digits()
    digits_data = digits_dataset['data']
    digits_target = digits_dataset['target']
    # Hold out 40% of the samples, stratified by digit class.
    X_tr, X_ts, y_tr, y_ts = train_test_split(digits_data,
                                              digits_target,
                                              test_size=0.40,
                                              random_state=42,
                                              stratify=digits_target)
    # UMAP is fit on the training split only; the test split is projected.
    transform = UMAP(n_components=2,
                     random_state=42,
                     n_neighbors=30,
                     min_dist=0.0)
    X_tr = transform.fit_transform(X_tr)
    X_ts = transform.transform(X_ts)
    s = KNeighborsClassifier(n_neighbors=30)
    c = KMeans()
    reval = FindBestClustCV(s=s,
                            c=c,
                            nfold=5,
                            nclust_range=[2, 15],
                            nrand=100)
    metrics, nclustbest, _ = reval.best_nclust(X_tr, iter_cv=10, strat_vect=y_tr)
    plot_metrics(metrics, title='Reval performance digits dataset')
    out = reval.evaluate(X_tr, X_ts, nclust=nclustbest)
    # Align cluster labels with the true labels before scoring.
    perm_lab = kuhn_munkres_algorithm(y_ts, out.test_cllab)
    print(f"Best number of clusters: {nclustbest}")
    print(f"Test set prediction ACC: "
          f"{1 - zero_one_loss(y_ts, perm_lab)}")
    print(f'AMI (true labels vs predicted labels) = '
          f'{adjusted_mutual_info_score(y_ts, out.test_cllab)}')
    print(f"Validation set normalized stability (misclassification):"
          f"{metrics['val'][nclustbest]}")
    print(f'Test set ACC = {out.test_acc} '
          f'(true labels vs predicted labels)')
    plt.figure(figsize=(6, 4))
    plt.scatter(X_ts[:, 0],
                X_ts[:, 1],
                c=y_ts, cmap='rainbow_r')
    plt.title("Test set true labels (digits dataset)")
    plt.show()
    plt.figure(figsize=(6, 4))
    plt.scatter(X_ts[:, 0],
                X_ts[:, 1],
                c=perm_lab, cmap='rainbow_r')
    plt.title("Test set clustering labels (digits dataset)")
    plt.show()
# Example 3: ensemble learning
def example_3():
    """Benchmark reval over the UCI datasets with every classifier/clustering pair.

    For each dataset returned by ``build_ucidatasets`` the features are
    embedded in 2-D with UMAP, then every combination of classifier and
    clustering algorithm is evaluated with ``FindBestClustCV``; all results
    are appended to the module-level log file.
    """
    # Classifiers
    knn = KNeighborsClassifier(n_neighbors=1,
                               metric='euclidean')
    rf = RandomForestClassifier(n_estimators=100,
                                random_state=42)
    svm = SVC(C=1,
              random_state=42)
    logreg = LogisticRegression(solver='liblinear',
                                random_state=42)
    classifiers = [knn, logreg, svm, rf]
    # Clustering
    hc = AgglomerativeClustering()
    km = KMeans(random_state=42)
    sc = SpectralClustering(random_state=42)
    clustering = [hc, km, sc]
    transform = UMAP(n_neighbors=30, min_dist=0.0, random_state=42)
    # Import benchmark datasets
    uci_data = build_ucidatasets()
    # Run ensemble learning algorithm
    for data, name in zip(uci_data, uci_data._fields):
        nclass = len(np.unique(data['target']))
        logging.info(f"Processing dataset {name}")
        logging.info(f"Number of classes: {nclass}\n")
        X_tr, X_ts, y_tr, y_ts = train_test_split(data['data'],
                                                  data['target'],
                                                  test_size=0.40,
                                                  random_state=42,
                                                  stratify=data['target'])
        X_tr = transform.fit_transform(X_tr)
        X_ts = transform.transform(X_ts)
        for s in classifiers:
            # Re-scale the SVC kernel per dataset (isinstance instead of the
            # original `type(s) == type(svm)` identity-of-type comparison).
            if isinstance(s, SVC):
                # NOTE(review): this uses 1/n_samples; sklearn's 'auto'
                # default is 1/n_features — confirm which is intended.
                s.gamma = 1 / data['data'].shape[0]
            for c in clustering:
                logging.info(f"Clustering algorithm: {c} -- Classification algorithm {s}")
                reval = FindBestClustCV(s=s,
                                        c=c,
                                        nfold=5,
                                        nclust_range=[2, nclass + 3],
                                        nrand=100)
                metrics, nclustbest, _ = reval.best_nclust(X_tr, strat_vect=y_tr)
                out = reval.evaluate(X_tr, X_ts, nclust=nclustbest)
                # Align cluster indices with true label indices before scoring.
                perm_lab = kuhn_munkres_algorithm(y_ts, out.test_cllab)
                logging.info(f"Best number of clusters: {nclustbest}")
                logging.info(f"Test set prediction ACC: "
                             f"{1 - zero_one_loss(y_ts, perm_lab)}")
                logging.info(f'AMI (true labels vs predicted labels) = '
                             f'{adjusted_mutual_info_score(y_ts, out.test_cllab)}')
                logging.info(f"Validation set normalized stability (misclassification):"
                             f"{metrics['val'][nclustbest]}")
                logging.info(f'Test set ACC = {out.test_acc} '
                             f'(true labels vs predicted labels)\n')
        logging.info('*' * 100)
        logging.info('\n\n')
if __name__ == "__main__":
    # Only the two quick examples run by default; example_3 (the full UCI
    # benchmark) is long-running — add an example_3() call here to enable it.
    example_1()
    example_2()
import numpy as np
from scipy.optimize import curve_fit
import platform, subprocess
from datetime import datetime, date
def get_processor_info():
    """Return a short description of the host CPU as a string.

    On Linux the full ``lscpu`` output is returned; on Windows, and on any
    other platform (the original code raised ``UnboundLocalError`` there),
    the generic ``platform.processor()`` identifier is returned.
    """
    if platform.system() == "Windows":
        # platform.processor() already yields str; the original code then
        # called .decode() on it, which fails on Python 3.
        return platform.processor()
    if platform.system() == "Linux":
        try:
            return subprocess.check_output("lscpu", shell=True).strip().decode("utf-8")
        except (subprocess.CalledProcessError, OSError):
            # lscpu unavailable — fall back to the generic identifier.
            return platform.processor()
    # macOS and anything else: generic fallback instead of an unbound name.
    return platform.processor()
def write(n, t, fname, typ, notes=False):
    """
    Write results from a benchmarking test to a file, fitting the timing data
    to an equation of the form: time = k*(n)**a.

    The file handle is managed with a ``with`` block (the original left the
    file open, leaking the handle).

    Parameters
    ----------
    n : numpy array
        Array containing number of data points used for each test simulation
    t : numpy array
        Array containing wall time of each test simulation
    fname : str, path-like
        String or path object containing path and file name for test results
        to be printed
    typ : str
        "variogram" or "revarie", for which object is being benchmarked.
        Written verbatim to the test results file.
    notes : str, optional
        Any extra information to be printed in results file
    """
    with open(fname, "w") as out:
        # Write CPU information
        out.write("% --- CPU info\n")
        out.write(get_processor_info() + "\n\n")
        # Write testing information
        out.write("% --- Test Info\n")
        out.write("Test Date/Time:".ljust(24) + str(datetime.now()) + "\n")
        out.write("Test Type:".ljust(24) + typ + "\n")
        mn = "%.2E" % n.min()
        mx = "%.2E" % n.max()
        out.write("Nrange:".ljust(24) + mn + "-" + mx + "\n\n")
        if notes:
            out.write("Notes:".ljust(24) + notes + "\n")
        # Report fit to exponential of form t=k*n^a; seed the optimizer with
        # the slope/intercept of the first and last points in log-log space.
        out.write("% --- Results Summary\n")
        out.write("Total Runtime:".ljust(24) + str(t.sum()) + " s\n")
        a_est = np.log(t[0] / t[-1]) / np.log(n[0] / n[-1])
        k_est = t[-1] / n[-1] ** a_est
        (a, k), _ = curve_fit(lambda n, a, k: k * n ** a,
                              xdata=n,
                              ydata=t,
                              p0=(a_est, k_est))
        out.write("Fitted k:".ljust(24) + "%.4E\n" % k)
        out.write("Fitted a:".ljust(24) + "%.4E\n" % a)
        out.write("Predicted 1e5 Runtime:".ljust(24) + "%.4E s\n" % (k * 1e5 ** a))
        # Write raw timing results from the test
        out.write("\n\n% --- Timing Data\n")
        out.write("n Wall Time [s]\n")
        for x1, x2 in zip(n, t):
            out.write(("%.5E" % x1).ljust(14))
            out.write("%.10E\n" % x2)
from revarie import Revarie
from revarie import Variogram
from revarie import fvariogram
import numpy as np
import time
import datetime
from .output import write
from pathlib import Path
def bench_variogram(tlimit=15, path="."):
    """
    Run a timing benchmark for the Variogram class. Results are formatted
    and written to "variogram_timing###.dat", where ### is the first unused
    counter so earlier runs are never overwritten.

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results file to be printed to
    """
    run_start = time.time()
    sizes = []
    wall_times = []
    n = 50
    # Grow the problem size geometrically until the time budget is spent.
    while time.time() - run_start < tlimit:
        tic = time.time()
        sizes.append(n)
        locations = np.random.uniform(0, 1, n)
        field = np.random.uniform(0, 1, n)
        Variogram(locations, field)
        wall_times.append(time.time() - tic)
        n = int(n * 1.2)
    # Find the first counter for which no results file exists yet.
    counter = 0
    while True:
        fname = Path("variogram_timing" + str(counter).zfill(3) + ".dat")
        if not (Path(path) / fname).is_file():
            break
        counter += 1
    write(np.asarray(sizes), np.asarray(wall_times), Path(path) / fname, "Variogram", "1-D")
def bench_revarie(tlimit=15, path="."):
    """
    Run a timing benchmark for the Revarie class. Results are formatted
    and written to "revarie_timing###.dat", where ### is the first unused
    counter so earlier runs are never overwritten.

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results file to be printed to
    """
    run_start = time.time()
    # Spherical variogram model shared by every field generation.
    nug, sill, rang = 0, 1, 0.3
    v = fvariogram("func", "sph", [nug, sill, rang])
    mu = 1
    sizes = []
    wall_times = []
    n = 50
    # Grow the problem size geometrically until the time budget is spent.
    while time.time() - run_start < tlimit:
        tic = time.time()
        sizes.append(n)
        coords = np.random.uniform(0, 1, n)
        Revarie(coords, mu, sill, v).genf()
        wall_times.append(time.time() - tic)
        n = int(n * 1.2)
    # Find the first counter for which no results file exists yet.
    counter = 0
    while True:
        fname = Path("revarie_timing" + str(counter).zfill(3) + ".dat")
        if not (Path(path) / fname).is_file():
            break
        counter += 1
    write(np.asarray(sizes), np.asarray(wall_times), Path(path) / fname, "Revarie", "1-D")
def suite(tlimit=30, path="."):
    """
    Run the timing benchmarks for both the Variogram and Revarie classes,
    splitting the time budget equally between the two. Each benchmark writes
    its own "*_timing###.dat" results file.

    Parameters
    ----------
    tlimit : float
        Approximate wall time limit for tests to run.
    path : str, path-like
        Path for results files to be printed to
    """
    per_benchmark = tlimit / 2
    bench_variogram(per_benchmark, path=path)
    bench_revarie(per_benchmark, path=path)
RevCode
=======
A Roman encoding and mapping module for Indian languages.
*Languages Supported*
---------------------
* Hindi
* Punjabi
* Gujarati
* Marathi
* Kannada
* Telugu
* Tamil
* Malayalam
* Oriya
* Assamese
* Bengali
Installation
============
From PyPI:
```pip install revcode```
From source `tar.gz` bundle:
```pip install revcode-2.0.tar.gz```
Example Usage
=============
*1. Convert text to RevCode*
--------------------------------------------
```python
from revcode import revcode_conversion as rc
text = 'नमस्ते'
print(rc.to_revcode(text, 'hi')) # language as ISO-2 code
text = 'ನಮಸ್ಕಾರ'
print(rc.to_revcode(text, 'kannada')) # language as full name
text = 'నమస్కారం'
print(rc.to_revcode(text, 'te'))
```
*Output*
-------
```
namastE
namaskAra
namaskArx
```
*2. Convert text from RevCode*
-----------------------------------------------------
```python
from revcode import revcode_conversion as rc
text = 'namastE'
print(rc.from_revcode(text, 'hindi'))
text = 'namaskAra'
print(rc.from_revcode(text, 'kn'))
text = 'namaskArx'
print(rc.from_revcode(text, 'telugu'))
```
*Output*
------------
```
नमस्ते
ನಮಸ್ಕಾರ
నమస్కారం
```
RevCode Reference Table
=======================
| RevCode | Unicode-Hindi | Hindi | Unicode-Punjabi | Punjabi | Unicode-Gujarati | Gujarati | Unicode-Marathi | Marathi | Unicode-Kannada | Kannada | Unicode-Telugu | Telugu | Unicode-Tamil | Tamil | Unicode-Malayalam | Malayalam | Unicode-Oriya | Oriya | Unicode-Assamese | Assamese | Unicode-Bengali | Bengali |
|---------|---------------|-------|-----------------|---------|------------------|----------|-----------------|---------|-----------------|---------|----------------|--------|---------------|-------|-------------------|-----------|---------------|-------|------------------|----------|-----------------|---------|
| M | 0x901 | ँ | 0xA01 | ਁ | 0xA81 | ઁ | 0x901 | ँ | 0xC81 | - | 0xC01 | ఁ | 0xB81 | - | 0x0D01 | - | 0x0B01 | ଁ | 0x981 | ঁ | 0x981 | ঁ |
| x | 0x902 | ं | 0xA02 | ਂ | 0xA82 | ં | 0x902 | ं | 0xC82 | ಂ | 0xC02 | ం | 0xB82 | ஂ | 0x0D02 | ം | 0x0B02 | ଂ | 0x982 | ং | 0x982 | ং |
| X | 0x903 | ः | 0xA03 | ਃ | 0xA83 | ઃ | 0x903 | ः | 0xC83 | ಃ | 0xC03 | ః | 0xB83 | ஃ | 0x0D03 | ഃ | 0x0B03 | ଃ | 0x983 | ঃ | 0x983 | ঃ |
| a | 0x905 | अ | 0xA05 | ਅ | 0xA85 | અ | 0x905 | अ | 0xC85 | ಅ | 0xC05 | అ | 0xB85 | அ | 0x0D05 | അ | 0x0B05 | ଅ | 0x985 | অ | 0x985 | অ |
| A | 0x906 | आ | 0xA06 | ਆ | 0xA86 | આ | 0x906 | आ | 0xC86 | ಆ | 0xC06 | ఆ | 0xB86 | ஆ | 0x0D06 | ആ | 0x0B06 | ଆ | 0x986 | আ | 0x986 | আ |
| i | 0x907 | इ | 0xA07 | ਇ | 0xA87 | ઇ | 0x907 | इ | 0xC87 | ಇ | 0xC07 | ఇ | 0xB87 | இ | 0x0D07 | ഇ | 0x0B07 | ଇ | 0x987 | ই | 0x987 | ই |
| I | 0x908 | ई | 0xA08 | ਈ | 0xA88 | ઈ | 0x908 | ई | 0xC88 | ಈ | 0xC08 | ఈ | 0xB88 | ஈ | 0x0D08 | ഈ | 0x0B08 | ଈ | 0x988 | ঈ | 0x988 | ঈ |
| u | 0x909 | उ | 0xA09 | ਉ | 0xA89 | ઉ | 0x909 | उ | 0xC89 | ಉ | 0xC09 | ఉ | 0xB89 | உ | 0x0D09 | ഉ | 0x0B09 | ଉ | 0x989 | উ | 0x989 | উ |
| U | 0x90A | ऊ | 0xA0A | ਊ | 0xA8A | ઊ | 0x90A | ऊ | 0xC8A | ಊ | 0xC0A | ఊ | 0xB8A | ஊ | 0x0D0A | ഊ | 0x0B0A | ଊ | 0x98A | ঊ | 0x98A | ঊ |
| WR | 0x90B | ऋ | 0xA0B | - | 0xA8B | ઋ | 0x90B | ऋ | 0xC8B | ಋ | 0xC0B | ఋ | 0xB8B | - | 0x0D0B | ഋ | 0x0B0B | ଋ | 0x98B | ঋ | 0x98B | ঋ |
| WD | 0x90C | | 0xA0C | - | 0xA8C | ઌ | 0x90C | | 0xC8C | - | 0xC0C | - | 0xB8C | | | - | 0x0B0C | - | 0x98C | - | 0x98C | - |
| WA | 0x90D | ऍ | 0xA0D | - | 0xA8D | ઍ | 0x90D | ऍ | 0xC8D | - | 0xC0D | - | 0xB8D | - | | - | 0x0B0D | - | 0x98D | - | 0x98D | - |
| e | 0x90E | - | 0xA0E | - | 0xA8E | | 0x90E | | 0xC8E | ಎ | 0xC0E | ఎ | 0xB8E | எ | 0x0D0E | എ | 0x0B0E | - | 0x98E | - | 0x98E | - |
| E | 0x90F | ए | 0xA0F | ਏ | 0xA8F | એ | 0x90F | ए | 0xC8F | ಏ | 0xC0F | ఏ | 0xB8F | ஏ | 0x0D0F | ഏ | 0x0B0F | ଏ | 0x98F | এ | 0x98F | এ |
| YE | 0x910 | ऐ | 0xA10 | ਐ | 0xA90 | ઐ | 0x910 | ऐ | 0xC90 | ಐ | 0xC10 | ఐ | 0xB90 | ஐ | 0x0D10 | ഐ | 0x0B10 | ଐ | 0x990 | ঐ | 0x990 | ঐ |
| WO | 0x911 | ऑ | 0xA11 | | 0xA91 | ઑ | 0x911 | ऑ | 0xC91 | - | 0xC11 | - | 0xB91 | - | | - | 0x0B11 | - | 0x991 | - | 0x991 | - |
| o | 0x912 | - | 0xA12 | | 0xA92 | | 0x912 | | 0xC92 | ಒ | 0xC12 | ఒ | 0xB92 | ஒ | 0x0D12 | ഒ | 0x0B12 | - | 0x992 | - | 0x992 | - |
| O | 0x913 | ओ | 0xA13 | ਓ | 0xA93 | ઓ | 0x913 | ओ | 0xC93 | ಓ | 0xC13 | ఓ | 0xB93 | ஓ | 0x0D13 | ഓ | 0x0B13 | ଓ | 0x993 | ও | 0x993 | ও |
| YO | 0x914 | औ | 0xA14 | ਔ | 0xA94 | ઔ | 0x914 | औ | 0xC94 | ಔ | 0xC14 | ఔ | 0xB94 | ஒள | 0x0D14 | ഔ | 0x0B14 | ଔ | 0x994 | ঔ | 0x994 | ঔ |
| k | 0x915 | क | 0xA15 | ਕ | 0xA95 | ક | 0x915 | क | 0xC95 | ಕ | 0xC15 | క | 0xB95 | க | 0x0D15 | ക | 0x0B15 | କ | 0x995 | ক | 0x995 | ক |
| K | 0x916 | ख | 0xA16 | ਖ | 0xA96 | ખ | 0x916 | ख | 0xC96 | ಖ | 0xC16 | ఖ | 0xB96 | - | 0x0D16 | ഖ | 0x0B16 | ଖ | 0x996 | খ | 0x996 | খ |
| g | 0x917 | ग | 0xA17 | ਗ | 0xA97 | ગ | 0x917 | ग | 0xC97 | ಗ | 0xC17 | గ | 0xB97 | - | 0x0D17 | ഗ | 0x0B17 | ଗ | 0x997 | গ | 0x997 | গ |
| G | 0x918 | घ | 0xA18 | ਘ | 0xA98 | ઘ | 0x918 | घ | 0xC98 | ಘ | 0xC18 | ఘ | 0xB98 | - | 0x0D18 | ഘ | 0x0B18 | ଘ | 0x998 | ঘ | 0x998 | ঘ |
| z | 0x919 | ङ | 0xA19 | ਙ | 0xA99 | ઙ | 0x919 | ङ | 0xC99 | ಙ | 0xC19 | ఙ | 0xB99 | ங | 0x0D19 | ങ | 0x0B19 | ଙ | 0x999 | ঙ | 0x999 | ঙ |
| c | 0x91A | च | 0xA1A | ਚ | 0xA9A | ચ | 0x91A | च | 0xC9A | ಚ | 0xC1A | చ | 0xB9A | ச | 0x0D1A | ച | 0x0B1A | ଚ | 0x99A | চ | 0x99A | চ |
| C | 0x91B | छ | 0xA1B | ਛ | 0xA9B | છ | 0x91B | छ | 0xC9B | ಛ | 0xC1B | ఛ | 0xB9B | - | 0x0D1B | ഛ | 0x0B1B | ଛ | 0x99B | ছ | 0x99B | ছ |
| j | 0x91C | ज | 0xA1C | ਜ | 0xA9C | જ | 0x91C | ज | 0xC9C | ಜ | 0xC1C | జ | 0xB9C | ஜ | 0x0D1C | ജ | 0x0B1C | ଜ | 0x99C | জ | 0x99C | জ |
| J | 0x91D | झ | 0xA1D | ਝ | 0xA9D | ઝ | 0x91D | झ | 0xC9D | ಝ | 0xC1D | ఝ | 0xB9D | - | 0x0D1D | ഝ | 0x0B1D | ଝ | 0x99D | ঝ | 0x99D | ঝ |
| Z | 0x91E | ञ | 0xA1E | ਞ | 0xA9E | ઞ | 0x91E | ञ | 0xC9E | ಞ | 0xC1E | ఞ | 0xB9E | ஞ | 0x0D1E | ഞ | 0x0B1E | ଞ | 0x99E | ঞ | 0x99E | ঞ |
| T | 0x91F | ट | 0xA1F | ਟ | 0xA9F | ટ | 0x91F | ट | 0xC9F | ಟ | 0xC1F | ట | 0xB9F | ட | 0x0D1F | ട | 0x0B1F | ଟ | 0x99F | ট | 0x99F | ট |
| HT | 0x920 | ठ | 0xA20 | ਠ | 0xAA0 | ઠ | 0x920 | ठ | 0xCA0 | ಠ | 0xC20 | ఠ | 0xBA0 | - | 0x0D20 | ഠ | 0x0B20 | ଠ | 0x9A0 | ঠ | 0x9A0 | ঠ |
| D | 0x921 | ड | 0xA21 | ਡ | 0xAA1 | ડ | 0x921 | ड | 0xCA1 | ಡ | 0xC21 | డ | 0xBA1 | - | 0x0D21 | ഡ | 0x0B21 | ଡ | 0x9A1 | ড | 0x9A1 | ড |
| HD | 0x922 | ढ | 0xA22 | ਢ | 0xAA2 | ઢ | 0x922 | ढ | 0xCA2 | ಢ | 0xC22 | ఢ | 0xBA2 | - | 0x0D22 | ഢ | 0x0B22 | ଢ | 0x9A2 | ঢ | 0x9A2 | ঢ |
| N | 0x923 | ण | 0xA23 | ਣ | 0xAA3 | ણ | 0x923 | ण | 0xCA3 | ಣ | 0xC23 | ణ | 0xBA3 | ண | 0x0D23 | ണ | 0x0B23 | ଣ | 0x9A3 | ণ | 0x9A3 | ণ |
| t | 0x924 | त | 0xA24 | ਤ | 0xAA4 | ત | 0x924 | त | 0xCA4 | ತ | 0xC24 | త | 0xBA4 | த | 0x0D24 | ത | 0x0B24 | ତ | 0x9A4 | ত | 0x9A4 | ত |
| Ht | 0x925 | थ | 0xA25 | ਥ | 0xAA5 | થ | 0x925 | थ | 0xCA5 | ಥ | 0xC25 | థ | 0xBA5 | - | 0x0D25 | ഥ | 0x0B25 | ଥ | 0x9A5 | থ | 0x9A5 | থ |
| d | 0x926 | द | 0xA26 | ਦ | 0xAA6 | દ | 0x926 | द | 0xCA6 | ದ | 0xC26 | ద | 0xBA6 | - | 0x0D26 | ദ | 0x0B26 | ଦ | 0x9A6 | দ | 0x9A6 | দ |
| Hd | 0x927 | ध | 0xA27 | ਧ | 0xAA7 | ધ | 0x927 | ध | 0xCA7 | ಧ | 0xC27 | ధ | 0xBA7 | - | 0x0D27 | ധ | 0x0B27 | ଧ | 0x9A7 | ধ | 0x9A7 | ধ |
| n | 0x928 | न | 0xA28 | ਨ | 0xAA8 | ન | 0x928 | न | 0xCA8 | ನ | 0xC28 | న | 0xBA8 | ந | 0x0D28 | ന | 0x0B28 | ନ | 0x9A8 | ন | 0x9A8 | ন |
| Q | 0x929 | ऩ | 0xA29 | - | | | 0x929 | ऩ | 0xCA9 | - | 0xC29 | - | 0xBA9 | ன | | - | 0x0B29 | - | 0x9A9 | - | 0x9A9 | - |
| p | 0x92A | प | 0xA2A | ਪ | 0xAAA | પ | 0x92A | प | 0xCAA | ಪ | 0xC2A | ప | 0xBAA | ப | 0x0D2A | പ | 0x0B2A | ପ | 0x9AA | প | 0x9AA | প |
| P | 0x92B | फ | 0xA2B | ਫ | 0xAAB | ફ | 0x92B | फ | 0xCAB | ಫ | 0xC2B | ఫ | 0xBAB | - | 0x0D2B | ഫ | 0x0B2B | ଫ | 0x9AB | ফ | 0x9AB | ফ |
| b | 0x92C | ब | 0xA2C | ਬ | 0xAAC | બ | 0x92C | ब | 0xCAC | ಬ | 0xC2C | బ | 0xBAC | - | 0x0D2C | ബ | 0x0B2C | ବ | 0x9AC | ব | 0x9AC | ব |
| B | 0x92D | भ | 0xA2D | ਭ | 0xAAD | ભ | 0x92D | भ | 0xCAD | ಭ | 0xC2D | భ | 0xBAD | - | 0x0D2D | ഭ | 0x0B2D | ଭ | 0x9AD | ভ | 0x9AD | ভ |
| m | 0x92E | म | 0xA2E | ਮ | 0xAAE | મ | 0x92E | म | 0xCAE | ಮ | 0xC2E | మ | 0xBAE | ம | 0x0D2E | മ | 0x0B2E | ମ | 0x9AE | ম | 0x9AE | ম |
| y | 0x92F | य | 0xA2F | ਯ | 0xAAF | ય | 0x92F | य | 0xCAF | ಯ | 0xC2F | య | 0xBAF | ய | 0x0D2F | യ | 0x0B5F | ୟ | 0x9AF | য | 0x9AF | য |
| r | 0x930 | र | 0xA30 | ਰ | 0xAB0 | ર | 0x930 | र | 0xCB0 | ರ | 0xC30 | ర | 0xBB0 | ர | 0x0D30 | ര | 0x0B30 | ର | 0x9F0 | ৰ | 0x9B0 | র |
| R | 0x931 | ऱ | 0xA31 | | | | 0x931 | | 0xCB1 | ಱ | 0xC31 | ఱ | 0xBB1 | - | 0x0D31 | റ | 0x0B31 | - | 0x9B1 | - | 0x9B1 | - |
| l | 0x932 | ल | 0xA32 | ਲ | 0xAB2 | લ | 0x932 | ल | 0xCB2 | ಲ | 0xC32 | ల | 0xBB2 | ல | 0x0D32 | ല | 0x0B32 | ଲ | 0x9B2 | ল | 0x9B2 | ল |
| L | 0x933 | ळ | 0xA33 | ਲ਼ | 0xAB3 | ળ | 0x933 | ळ | 0xCB3 | ಳ | 0xC33 | ళ | 0xBB3 | ள | 0x0D33 | ള | 0x0B33 | ଳ | 0x9B3 | - | 0x9B3 | - |
| Hz | 0x934 | ऴ | 0xA34 | | | | 0x934 | | 0xCB4 | - | 0xC34 | ఴ | 0xBB4 | ழ | 0x0D34 | ഴ | 0x0B34 | - | 0x9B4 | - | 0x9B4 | - |
| v | 0x935 | व | 0xA35 | ਵ | 0xAB5 | વ | 0x935 | व | 0xCB5 | ವ | 0xC35 | వ | 0xBB5 | வ | 0x0D35 | വ | 0x0B71 | ୱ | 0x9F1 | ৱ | 0x9F1 | ৱ |
| S | 0x936 | श | 0xA36 | ਸ਼ | 0xAB6 | શ | 0x936 | श | 0xCB6 | ಶ | 0xC36 | శ | 0xBB6 | ஶ | 0x0D36 | ശ | 0x0B36 | ଶ | 0x9B6 | শ | 0x9B6 | শ |
| Hs | 0x937 | ष | 0xA37 | - | 0xAB7 | ષ | 0x937 | ष | 0xCB7 | ಷ | 0xC37 | ష | 0xBB7 | ஷ | 0x0D37 | ഷ | 0x0B37 | ଷ | 0x9B7 | ষ | 0x9B7 | ষ |
| s | 0x938 | स | 0xA38 | ਸ | 0xAB8 | સ | 0x938 | स | 0xCB8 | ಸ | 0xC38 | స | 0xBB8 | ஸ | 0x0D38 | സ | 0x0B38 | ସ | 0x9B8 | স | 0x9B8 | স |
| h | 0x939 | ह | 0xA39 | ਹ | 0xAB9 | હ | 0x939 | ह | 0xCB9 | ಹ | 0xC39 | హ | 0xBB9 | ஹ | 0x0D39 | ഹ | 0x0B39 | ହ | 0x9B9 | হ | 0x9B9 | হ |
| | 0x93C | ़ | 0xA3C | | 0xABC | ઼ | 0x93C | ़ | 0xCBC | - | 0xC3C | | 0xBBC | | | | 0x0B3C | ଼ | 0x9BC | ় | 0x9BC | ় |
| | 0x93D | | 0xA3D | | 0xABD | ઽ | 0x93D | | 0xCBD | ಽ | 0xC3D | ఽ | 0xBBD | | | | 0x0B3D | - | 0x9BD | - | 0x9BD | - |
| A | 0x93E | ा | 0xA3E | ਾ | 0xABE | ા | 0x93E | ा | 0xCBE | ಾ | 0xC3E | ా | 0xBBE | ா | 0x0D3E | ാ | 0x0B3E | ା | 0x9BE | া | 0x9BE | া |
| i | 0x93F | ि | 0xA3F | ਿ | 0xABF | િ | 0x93F | ि | 0xCBF | ಿ | 0xC3F | ి | 0xBBF | ி | 0x0D3F | ി | 0x0B3F | ି | 0x9BF | ি | 0x9BF | ি |
| I | 0x940 | ी | 0xA40 | ੀ | 0xAC0 | ી | 0x940 | ी | 0xCC0 | ೀ | 0xC40 | ీ | 0xBC0 | ீ | 0x0D40 | ീ | 0x0B40 | ୀ | 0x9C0 | ী | 0x9C0 | ী |
| u | 0x941 | ु | 0xA41 | ੁ | 0xAC1 | ુ | 0x941 | ु | 0xCC1 | ು | 0xC41 | ు | 0xBC1 | ு | 0x0D41 | ു | 0x0B41 | ୁ | 0x9C1 | ু | 0x9C1 | ু |
| U | 0x942 | ू | 0xA42 | ੂ | 0xAC2 | ૂ | 0x942 | ू | 0xCC2 | ೂ | 0xC42 | ూ | 0xBC2 | ூ | 0x0D42 | ൂ | 0x0B42 | ୂ | 0x9C2 | ু | 0x9C2 | ু |
| WR | 0x943 | ृ | 0xA43 | | 0xAC3 | ૃ | 0x943 | ृ | 0xCC3 | ೃ | 0xC43 | ృ | 0xBC3 | - | 0x0D7C | ര് | 0x0B43 | ୃ | 0x9C3 | ৃ | 0x9C3 | ৃ |
| WA | 0x945 | ॅ | 0xA45 | | 0xAC4 | ૄ | 0x945 | ॅ | 0xCC5 | - | 0xC45 | - | 0xBC5 | - | | | 0x0B45 | - | 0x9C5 | - | 0x9C5 | - |
| e | 0x946 | ॆ | 0xA46 | | 0xAC5 | ૅ | 0x946 | | 0xCC6 | ೆ | 0xC46 | ె | 0xBC6 | ெ | OD46 | െ | 0x0B46 | - | 0x9C6 | - | 0x9C6 | - |
| E | 0x947 | े | 0xA47 | ੇ | 0xAC7 | ે | 0x947 | े | 0xCC7 | ೇ | 0xC47 | ే | 0xBC7 | ே | OD47 | േ | 0x0B47 | େ | 0x9C7 | ে | 0x9C7 | ে |
| YE | 0x948 | ै | 0xA48 | ੈ | 0xAC8 | ૈ | 0x948 | ै | 0xCC8 | ೈ | 0xC48 | ై | 0xBC8 | ை | | ൈ | 0x0B48 | ୈ | 0x9C8 | ৈ | 0x9C8 | ৈ |
| WO | 0x949 | ॉ | 0xA49 | | | | 0x949 | ॉ | 0xCC9 | - | 0xC49 | - | 0xBC9 | - | | | 0x0B49 | - | 0x9C9 | - | 0x9C9 | - |
| o | 0x94A | ॊ | 0xA4A | | 0xAC9 | ૉ | 0x94A | | 0xCCA | ೊ | 0xC4A | ొ | 0xBCA | ொ | 0x0D4A | ൊ | 0x0B4A | - | 0x2019 | ’ | 0x2019 | ’ |
| O | 0x94B | ो | 0xA4B | ੋ | 0xACB | ો | 0x94B | ो | 0xCCB | ೋ | 0xC4B | ో | 0xBCB | ோ | 0x0D4B | ോ | 0x0B4B | ୋ | 0x9CB | ো | 0x9CB | ো |
| YO | 0x94C | ौ | 0xA4C | ੌ | 0xACC | ૌ | 0x94C | ौ | 0xCCC | ೌ | 0xC4C | ౌ | 0xBCC | ௌ | | | 0x0B4C | ୌ | 0x9CC | ৌ | 0x9CC | ৌ |
| q | 0x94D | ् | 0xA4D | | 0xACD | ્ | 0x94D | ् | 0xCCD | ್ | 0xC4D | ్ | 0xBCD | ் | | ് | 0x0B4D | ୍ | 0x9CD | ্ | 0x9CD | ্ |
| Fk | 0x958 | क़ | 0xA58 | - | | | 0x958 | | 0xCD8 | - | 0xC58 | ౘ | 0xBD8 | | 0x0D7F | ൿ | 0x0B58 | - | 0x9D8 | - | 0x9D8 | - |
| FK | 0x959 | ख़ | 0xA59 | ਖ਼ | | | 0x959 | | 0xCD9 | - | 0xC59 | ౙ | 0xBD9 | | | | 0x0B59 | - | 0x9D9 | - | 0x9D9 | - |
| Fg | 0x95A | ग़ | 0xA5A | ਗ਼ | | | 0x95A | | 0xCDA | - | 0xC5A | ౚ | 0xBDA | | | | 0x0B5A | - | 0x9DA | - | 0x9DA | - |
| Fj | 0x95B | ज़ | 0xA5B | ਜ਼ | | | 0x95B | ज़ | 0xCDB | - | 0xC5B | - | 0xBDB | | | | 0x0B5B | - | 0x9DB | - | 0x9DB | - |
| Fd | 0x95C | ड़ | 0xA5C | ੜ | | | 0x95C | ड़ | 0xCDC | - | 0xC5C | - | 0xBDC | | | | 0x0B5C | ଡ଼ | 0x9DC | ড় | 0x9DC | ড় |
| HR | 0x95D | ढ़ | 0xA5D | | | | 0x95D | ढ़ | 0xCDD | - | 0xC5D | - | 0xBDD | | 0x0D43 | ൃ | 0x0B5D | ଢ଼ | 0x9DD | ঢ় | 0x9DD | ঢ় |
| FP | 0x95E | फ़ | 0xA5E | ਫ਼ | | | 0x95E | फ़ | 0xCDE | ೞ | 0xC5E | - | 0xBDE | | | | 0x0B5E | - | 0x9DE | - | 0x9DE | - |
| Fy | 0x95F | य़ | 0xA5F | - | | | 0x95F | | 0xCDF | - | 0xC5F | - | 0xBDF | | | | 0x0B2F | ଯ | 0x9DF | য় | 0x9DF | য় |
| YN | 0x970 | | 0xA70 | ੰ | | | | | | | | | | | 0x0D7A | ണ് | | | | - | | - |
| HH | 0x971 | | 0xA71 | ੱ | | | | | | | | | | | | | | | | - | | - |
| Yt | | | | | | | | | | | | | | | | | | | 0x9CE | ৎ | | ৎ |
| Yn | | | | | | | | | | | | | | | 0x0D7B | ന് | | | | | | |
| Yl | | | | | | | 0x093D | | | | | | | | 0x0D7D | ല് | | | | | | |
| YL | | | | | | | 0x0952 | | | | | | | | 0x0D7E | ള് | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| Yr | | | | | | | 0x0960 | ॠ | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| | | | | | | | | | | | | | | | | | | | | | | |
| /revcode-2.1-py3-none-any.whl/revcode-2.1.dist-info/DESCRIPTION.rst | 0.642657 | 0.842475 | DESCRIPTION.rst | pypi |
from boto3.dynamodb.conditions import Key, ConditionExpressionBuilder, Attr
import re
from boto3.dynamodb.types import TypeDeserializer, TypeSerializer
ATTR_NAME_REGEX = re.compile(r'[^.\[\]]+(?![^\[]*\])')
class ExpressionBuilder(ConditionExpressionBuilder):
    """Condition-expression builder that serializes placeholder values.

    Extends boto3's ``ConditionExpressionBuilder`` so that values registered
    for the low-level client API are converted to DynamoDB wire format with
    ``TypeSerializer``.
    """

    def _build_value_placeholder(self, value, attribute_value_placeholders, is_resource=True,
                                 has_grouped_values=False):
        """Register ``value`` under fresh placeholder(s) and return the placeholder text.

        ``attribute_value_placeholders`` is updated in place with
        placeholder -> value mappings. When ``has_grouped_values`` is true,
        ``value`` is an iterable and one placeholder is created per element.
        """
        # If the values are grouped, we need to add a placeholder for
        # each element inside of the actual value.
        serializer = TypeSerializer()
        if has_grouped_values:
            placeholder_list = []
            for v in value:
                value_placeholder = self._get_value_placeholder()
                self._value_count += 1
                placeholder_list.append(value_placeholder)
                if is_resource:
                    attribute_value_placeholders[value_placeholder] = v
                else:
                    attribute_value_placeholders[value_placeholder] = serializer.serialize(v)
            # Assuming the values are grouped by parenthesis.
            # IN is currently the only operator that uses this, so it may
            # need to be changed in future.
            return '(' + ', '.join(placeholder_list) + ')'
        # Otherwise, treat the value as a single value that needs only
        # one placeholder.
        # NOTE(review): this branch serializes unconditionally, ignoring
        # ``is_resource`` (unlike the grouped branch) — confirm intended.
        else:
            value_placeholder = self._get_value_placeholder()
            self._value_count += 1
            attribute_value_placeholders[value_placeholder] = serializer.serialize(value)
            return value_placeholder
class ConditionHandler:
    """Translate Django-style ``field__op`` keyword arguments into DynamoDB
    condition expressions.

    Supported suffixes: ``__lte``, ``__gte`` and ``__startswith`` (mapped to
    DynamoDB's ``begins_with``); any other keyword is an equality test.
    Multiple keywords are AND-ed together.
    """

    builder_class = ExpressionBuilder

    # Keyword suffix -> boto3 condition method name.
    _OP_SUFFIXES = (
        ('__lte', 'lte'),
        ('__gte', 'gte'),
        ('__startswith', 'begins_with'),
    )

    def __init__(self):
        self.builder = self.builder_class()

    def handle_condition(self, condition, is_resource=True, is_key=True):
        """Return ``condition`` unchanged for the resource API, or build the
        low-level expression dict for the client API.

        NOTE(review): the result is always keyed ``KeyConditionExpression``,
        even when ``is_key`` is False — confirm callers expect this.
        """
        if is_resource:
            return condition
        built = self.builder.build_expression(condition, is_key_condition=is_key, is_resource=False)
        return {
            'KeyConditionExpression': built.condition_expression,
            'ExpressionAttributeNames': built.attribute_name_placeholders,
            'ExpressionAttributeValues': built.attribute_value_placeholders,
        }

    def build_condition(self, is_key=True, is_resource=True, **kwargs):
        """Build a combined condition from ``field__op=value`` keyword arguments.

        ``is_key`` selects ``Key`` vs ``Attr`` conditions; ``is_resource``
        is forwarded to :meth:`handle_condition`.
        """
        method = Key if is_key else Attr
        condition = None
        for raw_key, value in kwargs.items():
            op = 'eq'
            key = raw_key
            # Bug fix: the original compared raw_key[-5:] against
            # '__startswith' (12 chars), so begins_with could never match
            # and the key would have been truncated by the wrong amount.
            for suffix, op_name in self._OP_SUFFIXES:
                if raw_key.endswith(suffix):
                    op = op_name
                    key = raw_key[:-len(suffix)]
                    break
            term = getattr(method(key), op)(value)
            condition = term if condition is None else condition & term
        return self.handle_condition(condition, is_resource)
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
def pagerank_limit_push(s, r, w_i, a_i, push_node, rho):
    """
    Perform one limit push (a random step without a self-loop) from ``push_node``.

    A ``rho`` fraction of the node's residual probability is absorbed into the
    approximate PageRank vector ``s``; the remaining ``1 - rho`` fraction is
    distributed to the adjacent nodes' residuals according to the transition
    weights ``w_i``. Both ``s`` and ``r`` are modified in place.
    """
    residual = r[push_node]
    # Keep the restart fraction at the push node ...
    s[push_node] += rho * residual
    r[push_node] = 0.0
    # ... and spread the rest over the out-neighbors.
    r[a_i] += ((1 - rho) * residual) * w_i
def pagerank_lazy_push(s, r, w_i, a_i, push_node, rho, lazy):
    """
    Perform one lazy push (a random step with a self-loop) from ``push_node``.

    A ``rho`` fraction of the residual is absorbed into the approximate
    PageRank vector ``s``; of the remainder, a ``lazy`` fraction stays on the
    push node (the self-loop) and the rest is spread over the adjacent nodes.
    Both ``s`` and ``r`` are modified in place.

    Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
    Local graph partitioning using pagerank vectors.
    In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
    """
    residual = r[push_node]
    s[push_node] += rho * residual
    # Self-loop: a lazy share of the non-restart mass remains as residual.
    r[push_node] = (1 - rho) * lazy * residual
    r[a_i] += ((1 - rho) * (1 - lazy) * residual) * w_i
def cumulative_pagerank_difference_limit_push(s, r, w_i, a_i, push_node, rho):
    """
    Perform one limit push (without a self-loop) for the cumulative PageRank
    difference walk.

    Inputs: - s: A NumPy array that contains the approximate absorbing random walk cumulative probabilities.
            - r: A NumPy array that contains the residual probability distribution.
            - w_i: A NumPy array of probability transition weights from the seed nodes to its adjacent nodes.
            - a_i: A NumPy array of the nodes adjacent to the push node.
            - push_node: The node from which the residual probability is pushed to its adjacent nodes.
            - rho: The restart probability.
    Outputs: - s in 1xn: A NumPy array that contains the approximate absorbing random walk cumulative probabilities.
             - r in 1xn: A NumPy array that contains the residual probability distribution.
    """
    # The (1 - rho) fraction of the residual commutes to the neighbors; the
    # push node's residual is cleared and the same mass is accumulated into
    # both the cumulative and the residual vectors of the adjacent nodes.
    spread = (1 - rho) * r[push_node]
    r[push_node] = 0.0
    delta = spread * w_i
    s[a_i] += delta
    r[a_i] += delta
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
from collections import deque
import numpy as np
from reveal_graph_embedding.eps_randomwalk.push import pagerank_limit_push
from reveal_graph_embedding.eps_randomwalk.push import pagerank_lazy_push
from reveal_graph_embedding.eps_randomwalk.push import cumulative_pagerank_difference_limit_push
def fast_approximate_personalized_pagerank(s,
                                           r,
                                           w_i,
                                           a_i,
                                           out_degree,
                                           in_degree,
                                           seed_node,
                                           rho=0.2,
                                           epsilon=0.00001):
    """
    Calculates the approximate personalized PageRank starting from a seed node without self-loops.

    Inputs: - s: A NumPy array holding the approximate PageRank vector; updated in place.
            - r: A NumPy array holding the residual probability distribution; updated in place.
            - w_i: A NumPy array of arrays of probability transition weights from each node to its adjacent nodes.
            - a_i: A NumPy array of arrays of the nodes adjacent to each node.
            - out_degree: A NumPy array of node out-degrees.
              NOTE(review): unused in this function; presumably kept for a
              signature parallel to the sibling functions — confirm.
            - in_degree: A NumPy array of node in-degrees; normalizes the push threshold.
            - seed_node: The seed for the node-centric personalized PageRank.
            - rho: The restart probability.
            - epsilon: The error threshold for degree-normalized residuals.
    Output:  - The number of limit probability push operations performed.
    """
    # Initialize approximate PageRank and residual distributions
    # s = np.zeros(number_of_nodes, dtype=np.float64)
    # r = np.zeros(number_of_nodes, dtype=np.float64)
    r[seed_node] = 1.0
    # Initialize queue of nodes to be pushed
    pushable = deque()
    pushable.append(seed_node)
    # Do one push anyway
    push_node = pushable.popleft()
    pagerank_limit_push(s,
                        r,
                        w_i[push_node],
                        a_i[push_node],
                        push_node,
                        rho)
    number_of_push_operations = 1
    # Enqueue the neighbors whose degree-normalized residual exceeds epsilon.
    i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
    if i.size > 0:
        pushable.extend(a_i[push_node][i])
    while len(pushable) > 0:
        # While there are nodes with large residual probabilities, push
        push_node = pushable.popleft()
        # The queue may contain duplicates, so re-check the threshold here.
        if r[push_node]/in_degree[push_node] >= epsilon:
            pagerank_limit_push(s,
                                r,
                                w_i[push_node],
                                a_i[push_node],
                                push_node,
                                rho)
            number_of_push_operations += 1
            i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
            if i.size > 0:
                pushable.extend(a_i[push_node][i])
    return number_of_push_operations
def lazy_approximate_personalized_pagerank(s,
                                           r,
                                           w_i,
                                           a_i,
                                           out_degree,
                                           in_degree,
                                           seed_node,
                                           rho=0.2,
                                           epsilon=0.00001,
                                           laziness_factor=0.5):
    """
    Calculates the approximate personalized PageRank starting from a seed node with self-loops.

    Inputs: - s: A NumPy array holding the approximate PageRank vector; updated in place.
            - r: A NumPy array holding the residual probability distribution; updated in place.
            - w_i: A NumPy array of arrays of probability transition weights from each node to its adjacent nodes.
            - a_i: A NumPy array of arrays of the nodes adjacent to each node.
            - out_degree: A NumPy array of node out-degrees.
              NOTE(review): unused in this function — confirm.
            - in_degree: A NumPy array of node in-degrees; normalizes the push threshold.
            - seed_node: The seed for the node-centric personalized PageRank.
            - rho: The restart probability.
            - epsilon: The error threshold for degree-normalized residuals.
            - laziness_factor: The self-loop probability of the lazy walk.
    Output:  - The number of lazy probability push operations performed.

    Introduced in: Andersen, R., Chung, F., & Lang, K. (2006, October).
    Local graph partitioning using pagerank vectors.
    In Foundations of Computer Science, 2006. FOCS'06. 47th Annual IEEE Symposium on (pp. 475-486). IEEE.
    """
    # Initialize approximate PageRank and residual distributions
    # s = np.zeros(number_of_nodes, dtype=np.float64)
    # r = np.zeros(number_of_nodes, dtype=np.float64)
    r[seed_node] = 1.0
    # Initialize queue of nodes to be pushed
    pushable = deque()
    pushable.append(seed_node)
    # Do one push anyway
    push_node = pushable.popleft()
    pagerank_lazy_push(s,
                       r,
                       w_i[push_node],
                       a_i[push_node],
                       push_node,
                       rho,
                       laziness_factor)
    number_of_push_operations = 1
    i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
    if i.size > 0:
        pushable.extend(a_i[push_node][i])
    # A lazy push leaves residual on the pushed node itself (self-loop), so
    # keep pushing the same node until its residual falls below the threshold.
    while r[push_node]/in_degree[push_node] >= epsilon:
        pagerank_lazy_push(s,
                           r,
                           w_i[push_node],
                           a_i[push_node],
                           push_node,
                           rho,
                           laziness_factor)
        number_of_push_operations += 1
    # While there are nodes with large residual probabilities, push
    while len(pushable) > 0:
        push_node = pushable.popleft()
        # The queue may contain duplicates, so re-check the threshold here.
        if r[push_node]/in_degree[push_node] >= epsilon:
            pagerank_lazy_push(s,
                               r,
                               w_i[push_node],
                               a_i[push_node],
                               push_node,
                               rho,
                               laziness_factor)
            number_of_push_operations += 1
            i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
            if i.size > 0:
                pushable.extend(a_i[push_node][i])
            # Drain the self-loop residual of this node before moving on.
            while r[push_node]/in_degree[push_node] >= epsilon:
                pagerank_lazy_push(s,
                                   r,
                                   w_i[push_node],
                                   a_i[push_node],
                                   push_node,
                                   rho,
                                   laziness_factor)
                number_of_push_operations += 1
    return number_of_push_operations
def fast_approximate_cumulative_pagerank_difference(s,
                                                    r,
                                                    w_i,
                                                    a_i,
                                                    out_degree,
                                                    in_degree,
                                                    seed_node,
                                                    rho=0.2,
                                                    epsilon=0.00001):
    """
    Calculates cumulative PageRank difference probability starting from a seed node without self-loops.

    Inputs: - s: A NumPy array holding the approximate absorbing random walk cumulative probabilities; updated in place.
            - r: A NumPy array holding the residual probability distribution; updated in place.
            - w_i: A NumPy array of arrays of probability transition weights from the seed nodes to its adjacent nodes.
            - a_i: A NumPy array of arrays of the nodes adjacent to the seed node.
            - out_degree: A NumPy array of node out_degrees.
              NOTE(review): unused in this function — confirm.
            - in_degree: A NumPy array of node in_degrees.
            - seed_node: The seed for the node-centric personalized PageRank.
            - rho: The restart probability. Usually set in [0.1, 0.2].
            - epsilon: The error threshold.
    Outputs: - s in 1xn: A sparse vector that contains the approximate absorbing random walk cumulative probabilities.
             - r in 1xn: A sparse vector that contains the residual probability distribution.
             - nop: The number of limit probability push operations performed.
    """
    # Initialize the similarity matrix slice and the residual distribution.
    # Unlike the PageRank variants, the seed's cumulative entry starts at 1.
    # s = np.zeros(number_of_nodes, dtype=np.float64)
    # r = np.zeros(number_of_nodes, dtype=np.float64)
    s[seed_node] = 1.0
    r[seed_node] = 1.0
    # Initialize double-ended queue of nodes to be pushed
    pushable = deque()
    pushable.append(seed_node)
    # Do one push for free
    push_node = pushable.popleft()
    cumulative_pagerank_difference_limit_push(s,
                                              r,
                                              w_i[push_node],
                                              a_i[push_node],
                                              push_node,
                                              rho)
    number_of_push_operations = 1
    # Enqueue the neighbors whose degree-normalized residual exceeds epsilon.
    i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
    if i.size > 0:
        pushable.extend(a_i[push_node][i])
    # While there are nodes with large residual probabilities, push
    while len(pushable) > 0:
        push_node = pushable.popleft()
        # If the threshold is not satisfied, perform a push operation
        # Both this and the later check are needed, since the pushable queue may contain duplicates.
        if r[push_node]/in_degree[push_node] >= epsilon:
            cumulative_pagerank_difference_limit_push(s,
                                                      r,
                                                      w_i[push_node],
                                                      a_i[push_node],
                                                      push_node,
                                                      rho)
            number_of_push_operations += 1
            # Update pushable double-ended queue
            i = np.where(np.divide(r[a_i[push_node]], in_degree[a_i[push_node]]) >= epsilon)[0]
            if i.size > 0:
                pushable.extend(a_i[push_node][i])
    # Sparsify and return.
    # s_sparse = sparse.csr_matrix(s, shape=(1, number_of_nodes))
    # r_sparse = sparse.csr_matrix(r, shape=(1, number_of_nodes))
    return number_of_push_operations
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
import ctypes as c
import multiprocessing as mp
def get_label_based_random_walk_matrix(adjacency_matrix, labelled_nodes, label_absorption_probability):
    """
    Returns the label-absorbing random walk transition probability matrix.

    When the walker stands on a labelled node it is absorbed there with
    probability label_absorption_probability; otherwise it follows the
    natural (degree-normalized) random walk.

    Inputs:  - adjacency_matrix: A sparse matrix containing the adjacency matrix of the graph.
             - labelled_nodes: Array-like of node indices that carry labels.
             - label_absorption_probability: Probability in [0, 1] of absorption on a labelled node.
    Outputs: - rw_transition: Sparse CSR label-absorbing transition probability matrix.
             - out_degree: 1-D float64 array of (weighted) out-degrees.
             - in_degree: 1-D float64 array of (weighted) in-degrees.
    """
    # Turn to sparse.csr_matrix format for faster row access.
    # NOTE(review): no copy is forced here, so if the input is already CSR/float64
    # the in-place normalization below may mutate the caller's data -- confirm
    # whether that is intended (the natural-walk variant passes copy=True).
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64)
    # Sum along the two axes to get out-degree and in-degree, respectively
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)
    # Form the inverse of the diagonal matrix containing the out-degree
    # (row-normalize in place: each stored row is divided by its out-degree;
    # zero-degree rows store no entries, so no division occurs for them).
    for i in np.arange(rw_transition.shape[0]):
        rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]] =\
            rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]]/out_degree[i]
    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)
    # When the random walk agent encounters a labelled node, there is a probability that it will be absorbed.
    # Build a diagonal indicator matrix for the labelled nodes ...
    diag = np.zeros_like(out_degree)
    diag[labelled_nodes] = 1.0
    diag = sparse.dia_matrix((diag, [0]), shape=(in_degree.size, in_degree.size))
    diag = sparse.csr_matrix(diag)
    # ... and mix the natural transition rows of the labelled nodes with the
    # self-absorption diagonal.
    rw_transition[labelled_nodes, :] = (1-label_absorption_probability)*rw_transition[labelled_nodes, :] + label_absorption_probability*diag[labelled_nodes, :]
    return rw_transition, out_degree, in_degree
def get_natural_random_walk_matrix(adjacency_matrix, make_shared=False):
    """
    Returns the natural random walk transition probability matrix given the adjacency matrix.

    Inputs:  - adjacency_matrix: A sparse matrix that contains the adjacency matrix of the graph.
             - make_shared: If True, the CSR arrays and the degree vectors are backed by
               multiprocessing shared memory so that worker processes can read them
               without pickling/copying.
    Outputs: - rw_transition: A sparse CSR matrix W; each row of A divided by its out-degree.
             - out_degree: 1-D float64 array of (weighted) out-degrees.
             - in_degree: 1-D float64 array of (weighted) in-degrees.
    """
    # Turn to sparse.csr_matrix format for faster row access; copy so the
    # in-place normalization below cannot touch the caller's data.
    rw_transition = sparse.csr_matrix(adjacency_matrix, dtype=np.float64, copy=True)
    # Sum along the two axes to get out-degree and in-degree, respectively.
    out_degree = rw_transition.sum(axis=1)
    in_degree = rw_transition.sum(axis=0)
    # Row-normalize in place: divide each stored row by its out-degree.
    # (Zero-degree rows store no entries, so no division by zero occurs.)
    for i in np.arange(rw_transition.shape[0]):
        rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]] =\
            rw_transition.data[rw_transition.indptr[i]: rw_transition.indptr[i + 1]]/out_degree[i]
    rw_transition.sort_indices()
    out_degree = np.array(out_degree).astype(np.float64).reshape(out_degree.size)
    in_degree = np.array(in_degree).astype(np.float64).reshape(in_degree.size)
    if make_shared:
        number_of_nodes = adjacency_matrix.shape[0]
        # Shared buffers for the degree vectors.
        out_degree_c = mp.Array(c.c_double, number_of_nodes)
        in_degree_c = mp.Array(c.c_double, number_of_nodes)
        out_degree_shared = np.frombuffer(out_degree_c.get_obj(), dtype=np.float64, count=number_of_nodes)
        in_degree_shared = np.frombuffer(in_degree_c.get_obj(), dtype=np.float64, count=number_of_nodes)
        out_degree_shared[:] = out_degree[:]
        in_degree_shared[:] = in_degree[:]
        # Bug fix: return the shared-memory views. Previously the private
        # arrays were returned and the shared buffers were silently discarded,
        # so the degree vectors were never actually shared with workers.
        out_degree = out_degree_shared
        in_degree = in_degree_shared
        # Shared buffers for the CSR structure of the transition matrix.
        indices_c = mp.Array(c.c_int64, rw_transition.indices.size)
        indptr_c = mp.Array(c.c_int64, rw_transition.indptr.size)
        data_c = mp.Array(c.c_double, rw_transition.data.size)
        indices_shared = np.frombuffer(indices_c.get_obj(), dtype=np.int64, count=rw_transition.indices.size)
        indptr_shared = np.frombuffer(indptr_c.get_obj(), dtype=np.int64, count=rw_transition.indptr.size)
        data_shared = np.frombuffer(data_c.get_obj(), dtype=np.float64, count=rw_transition.data.size)
        indices_shared[:] = rw_transition.indices[:]
        indptr_shared[:] = rw_transition.indptr[:]
        data_shared[:] = rw_transition.data[:]
        rw_transition = sparse.csr_matrix((data_shared,
                                           indices_shared,
                                           indptr_shared),
                                          shape=rw_transition.shape)
    return rw_transition, out_degree, in_degree
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import f1_score
def form_node_label_prediction_matrix(y_pred, y_test):
    """
    Given the discriminator scores, this function forms the node-label prediction matrix.
    It is assumed that the number of true labels is known.

    Inputs:  - y_pred: A NumPy array with one score per label per test node.
             - y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
    Outputs: - prediction: The node-label prediction for the test set as a SciPy sparse COO matrix;
               for each node the top-k scored labels are set, with k its true label count.
    """
    test_node_count = y_pred.shape[0]
    # Number of ground-truth labels per node (a (1, n) matrix after squeezing).
    label_counts = np.squeeze(y_test.sum(axis=1))
    # Rank the labels per node by ascending score; the best ones sit at the end.
    ranking = np.argsort(y_pred, axis=1)
    nnz = y_test.getnnz()
    row = np.empty(nnz, dtype=np.int64)
    col = np.empty(nnz, dtype=np.int64)
    cursor = 0
    for node in np.arange(test_node_count):
        nxt = cursor + label_counts[0, node]
        row[cursor:nxt] = node
        # Top-k labels for this node, highest score first.
        col[cursor:nxt] = ranking[node, -1:-label_counts[0, node]-1:-1]
        cursor = nxt
    data = np.ones_like(row, dtype=np.int8)
    return sparse.coo_matrix((data, (row, col)), shape=y_test.shape)
def calculate_measures(y_pred, y_test):
    """
    Calculates the F-scores and F-score averages given a classification result and a ground truth.

    Inputs:  - y_pred: The node-label prediction for the test set in a SciPy sparse CSR matrix format.
             - y_test: The node-label ground truth for the test set in a SciPy sparse CSR matrix format.
    Outputs: - measures: A list
               [macro_recall, micro_recall, macro_precision, micro_precision, macro_F1, micro_F1, F1]
               where F1 is the per-label F1 array.
    """
    dense_pred = y_pred.toarray()
    dense_true = y_test.toarray()
    # Macro- and micro-averaged precision/recall/F1 over all labels.
    macro_precision, macro_recall, macro_F1, _ = precision_recall_fscore_support(
        dense_true, dense_pred, beta=1.0, average="macro")
    micro_precision, micro_recall, micro_F1, _ = precision_recall_fscore_support(
        dense_true, dense_pred, beta=1.0, average="micro")
    # Per-label F1 scores.
    per_label_F1 = f1_score(dense_true, dense_pred, average=None)
    return [macro_recall, micro_recall,
            macro_precision, micro_precision,
            macro_F1, micro_F1,
            per_label_F1]
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from reveal_graph_embedding.embedding.implicit import get_adjacency_matrix_via_combinatorial_laplacian,\
get_adjacency_matrix_via_directed_laplacian
def get_unnormalized_laplacian(adjacency_matrix):
    """
    Returns the combinatorial (unnormalized) graph Laplacian L = D - A.
    """
    # Diagonal matrix of node degrees.
    degree_diagonal = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])),
                                      shape=adjacency_matrix.shape).tocsr()
    # Sparse Laplacian L = D - A.
    return spsp.csr_matrix(degree_diagonal - adjacency_matrix, dtype=np.float64)
def get_normalized_laplacian(adjacency_matrix):
    """
    Returns the symmetric normalized graph Laplacian D^{-1/2} (D - A) D^{-1/2}.
    """
    # Diagonal matrix of node degrees.
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])),
                             shape=adjacency_matrix.shape).tocsr()
    # Sparse combinatorial Laplacian L = D - A.
    laplacian = spsp.csr_matrix(degree - adjacency_matrix, dtype=np.float64)
    # Turn the degree matrix into D^{-1/2} by rewriting its stored values.
    degree.data = np.real(1/np.sqrt(degree.data))
    # Symmetric normalization.
    return degree*laplacian*degree
def get_random_walk_laplacian(adjacency_matrix):
    """
    Returns the random walk graph Laplacian D^{-1} (D - A) = I - D^{-1} A.
    """
    # Diagonal matrix of node degrees.
    degree = spsp.dia_matrix((adjacency_matrix.sum(axis=0), np.array([0])),
                             shape=adjacency_matrix.shape).tocsr()
    # Sparse combinatorial Laplacian L = D - A.
    laplacian = spsp.csr_matrix(degree - adjacency_matrix, dtype=np.float64)
    # Turn the degree matrix into D^{-1} by rewriting its stored values.
    degree.data = np.real(1/degree.data)
    # Row normalization.
    return degree*laplacian
def get_directed_laplacian(adjacency_matrix, rho=0.2):
    """
    Returns the directed-graph Laplacian Theta = Pi - W_eff, where W_eff and the
    random walk distribution Pi come from the directed-Laplacian implicit helper.
    """
    node_count = adjacency_matrix.shape[0]
    effective_adjacency, rw_distribution = get_adjacency_matrix_via_directed_laplacian(adjacency_matrix, rho)
    # Diagonal matrix holding the random walk distribution.
    distribution_diagonal = spsp.spdiags(rw_distribution, [0], node_count, node_count)
    return distribution_diagonal - effective_adjacency
def get_combinatorial_laplacian(adjacency_matrix, rho=0.2):
    """
    Returns the combinatorial Laplacian Theta = Pi - W_eff, where W_eff and the
    random walk distribution Pi come from the combinatorial-Laplacian implicit helper.
    """
    node_count = adjacency_matrix.shape[0]
    effective_adjacency, rw_distribution = get_adjacency_matrix_via_combinatorial_laplacian(adjacency_matrix, rho)
    # Diagonal matrix holding the random walk distribution.
    distribution_diagonal = spsp.spdiags(rw_distribution, [0], node_count, node_count)
    return distribution_diagonal - effective_adjacency
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import multiprocessing as mp
import itertools
import numpy as np
import scipy.sparse as sparse
from reveal_graph_embedding.common import get_threads_number
from reveal_graph_embedding.eps_randomwalk.transition import get_natural_random_walk_matrix
from reveal_graph_embedding.eps_randomwalk.similarity import fast_approximate_cumulative_pagerank_difference,\
fast_approximate_personalized_pagerank, lazy_approximate_personalized_pagerank
def parallel_chunks(l, n):
    """
    Yields n round-robin chunks of sequence l, one chunk per worker.
    """
    for worker_id in range(n):
        yield roundrobin_chunks(l, n, worker_id)
def roundrobin_chunks(l, n, id):
    """
    Returns the id-th of n round-robin chunks of sequence l, i.e. the elements
    at positions id, id + n, id + 2n, ...

    Inputs:  - l: The sequence to be chunked.
             - n: The number of chunks.
             - id: The chunk index in [0, n).
    Outputs: - chunk: A list with every n-th element of l starting at offset id
               (possibly empty).
    """
    iterator = iter(l)
    chunk = list(itertools.islice(iterator, id, None, n))
    # Bug fix: an empty chunk previously fell through to an implicit
    # `return None`, which crashed callers doing len(list(chunk)) or
    # np.array(chunk); always return the (possibly empty) list.
    return chunk
def calculate_epsilon_effective(rho, epsilon, seed_degree, neighbor_degrees, mean_degree):
    """
    Semi-automatic effective epsilon threshold calculation.

    Scales epsilon by the ratio of the seed's log-degree to its neighborhood's
    average log-degree, then clamps it against the push-feasibility bounds.
    Note: rho and mean_degree are currently unused (kept for interface parity).
    """
    # Average degree in the seed's immediate neighborhood.
    neighborhood_degree = neighbor_degrees.mean()
    # Seed-neighborhood-normalized effective epsilon.
    effective = (epsilon*np.log(1 + seed_degree))/np.log(1 + neighborhood_degree)
    # Upper bound: the largest epsilon that still allows at least one push on a
    # neighboring node. Lower bound: the epsilon that would push on all of them.
    bounds = 1/(seed_degree*neighbor_degrees)
    upper = np.max(bounds)
    lower = np.min(bounds)
    # The upper bound is hard, whereas the lower bound is softened by averaging.
    if effective > upper:
        return upper
    if effective < lower:
        return (lower + effective)/2
    return effective
def arcte_with_lazy_pagerank_worker(iterate_nodes,
                                    indices_c,
                                    indptr_c,
                                    data_c,
                                    out_degree,
                                    in_degree,
                                    rho,
                                    epsilon):
    """
    Worker that extracts local community features for a set of seed nodes using
    the lazy approximate personalized PageRank similarity.

    Inputs:  - iterate_nodes: Seed node ids this worker is responsible for.
             - indices_c, indptr_c, data_c: CSR arrays of the random walk
               transition matrix (possibly shared-memory backed).
             - out_degree, in_degree: 1-D degree arrays of the graph.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
    Outputs: - features: Sparse CSR (n x n) matrix; column n holds the local
               community found around seed node n (all-ones indicator values).
    """
    iterate_nodes = np.array(iterate_nodes, dtype=np.int64)
    number_of_nodes = out_degree.size
    mean_degree = np.mean(out_degree)
    rw_transition = sparse.csr_matrix((data_c, indices_c, indptr_c))
    # Store adjacent nodes and corresponding transition weights in array of arrays form.
    adjacent_nodes = np.ndarray(number_of_nodes, dtype=np.ndarray)
    base_transitions = np.ndarray(number_of_nodes, dtype=np.ndarray)
    for n in range(number_of_nodes):
        adjacent_nodes[n] = rw_transition.indices[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
        base_transitions[n] = rw_transition.data[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
    # Separate nodes to iterate in smaller chunks in order to compress the feature batches.
    features = sparse.dok_matrix((number_of_nodes, number_of_nodes), dtype=np.float64)
    features = sparse.csr_matrix(features)
    if iterate_nodes.size > 2000:
        node_chunks = list(parallel_chunks(iterate_nodes, iterate_nodes.size//2000))
    else:
        node_chunks = list()
        node_chunks.append(iterate_nodes)
    for node_chunk in node_chunks:
        node_chunk = np.array(node_chunk, dtype=np.int64)
        # Calculate local communities for all nodes in the chunk.
        row_list = list()
        col_list = list()
        extend_row = row_list.extend
        extend_col = col_list.extend
        # Workspace vectors reused across seeds (reset before each seed).
        s = np.zeros(number_of_nodes, dtype=np.float64)
        r = np.zeros(number_of_nodes, dtype=np.float64)
        for n_index in range(node_chunk.size):
            # Bug fix: removed leftover debug print(n_index) from the hot loop
            # (the other worker variants have it commented out).
            n = node_chunk[n_index]
            # Calculate similarity matrix slice.
            s[:] = 0.0
            r[:] = 0.0
            epsilon_eff = calculate_epsilon_effective(rho, epsilon, out_degree[n], out_degree[adjacent_nodes[n]], mean_degree)
            # Convert the restart probability to its lazy-walk equivalent.
            lazy_rho = (rho*(0.5))/(1-(0.5*rho))
            nop = lazy_approximate_personalized_pagerank(s,
                                                         r,
                                                         base_transitions[:],
                                                         adjacent_nodes[:],
                                                         out_degree,
                                                         in_degree,
                                                         n,
                                                         lazy_rho,
                                                         epsilon_eff)
            s_sparse = sparse.csr_matrix(s)
            # Perform degree normalization of approximate similarity matrix slice.
            relevant_degrees = in_degree[s_sparse.indices]
            s_sparse.data = np.divide(s_sparse.data, relevant_degrees)
            base_community = np.append(adjacent_nodes[n], n)
            # If the base community is not strictly non-zero, skip this seed.
            intersection = np.intersect1d(base_community, s_sparse.indices)
            if intersection.size < base_community.size:
                continue
            base_community_rankings = np.searchsorted(s_sparse.indices, base_community)
            min_similarity = np.min(s_sparse.data[base_community_rankings])
            # Sort the degree normalized approximate similarity matrix slice.
            sorted_indices = np.argsort(s_sparse.data, axis=0)
            s_sparse.data = s_sparse.data[sorted_indices]
            s_sparse.indices = s_sparse.indices[sorted_indices]
            most_unlikely_index = s_sparse.indices.size - np.searchsorted(s_sparse.data, min_similarity)
            # Save feature matrix coordinates, but only when the community
            # extends beyond the trivial base community.
            if most_unlikely_index > base_community.size:
                new_rows = s_sparse.indices[-1:-most_unlikely_index-1:-1]
                extend_row(new_rows)
                extend_col(n*np.ones_like(new_rows))
        # Form the local community feature matrix for this chunk and accumulate.
        row = np.array(row_list, dtype=np.int64)
        col = np.array(col_list, dtype=np.int64)
        data = np.ones_like(row, dtype=np.float64)
        chunk_features = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
        chunk_features = sparse.csr_matrix(chunk_features)
        features += chunk_features
    return features
def arcte_with_pagerank_worker(iterate_nodes,
                               indices_c,
                               indptr_c,
                               data_c,
                               out_degree,
                               in_degree,
                               rho,
                               epsilon):
    """
    Worker that extracts local community features for a set of seed nodes using
    the fast approximate personalized PageRank similarity.

    Inputs:  - iterate_nodes: Seed node ids this worker is responsible for.
             - indices_c, indptr_c, data_c: CSR arrays of the random walk
               transition matrix (possibly shared-memory backed).
             - out_degree, in_degree: 1-D degree arrays of the graph.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
    Outputs: - features: Sparse CSR (n x n) matrix; column n holds the local
               community found around seed node n (all-ones indicator values).
    """
    iterate_nodes = np.array(iterate_nodes, dtype=np.int64)
    number_of_nodes = out_degree.size
    mean_degree = np.mean(out_degree)
    rw_transition = sparse.csr_matrix((data_c, indices_c, indptr_c))
    # Store adjacent nodes and corresponding transition weights in array of arrays form.
    adjacent_nodes = np.ndarray(number_of_nodes, dtype=np.ndarray)
    base_transitions = np.ndarray(number_of_nodes, dtype=np.ndarray)
    for n in range(number_of_nodes):
        adjacent_nodes[n] = rw_transition.indices[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
        base_transitions[n] = rw_transition.data[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
    # Separate nodes to iterate in smaller chunks in order to compress the feature batches.
    features = sparse.dok_matrix((number_of_nodes, number_of_nodes), dtype=np.float64)
    features = sparse.csr_matrix(features)
    if iterate_nodes.size > 2000:
        node_chunks = list(parallel_chunks(iterate_nodes, iterate_nodes.size//2000))
    else:
        node_chunks = list()
        node_chunks.append(iterate_nodes)
    for node_chunk in node_chunks:
        node_chunk = np.array(node_chunk, dtype=np.int64)
        # Calculate local communities for all nodes in the chunk.
        row_list = list()
        col_list = list()
        extend_row = row_list.extend
        extend_col = col_list.extend
        # Workspace vectors reused across seeds (reset before each seed).
        s = np.zeros(number_of_nodes, dtype=np.float64)
        r = np.zeros(number_of_nodes, dtype=np.float64)
        for n_index in range(node_chunk.size):
            # Bug fix: removed leftover debug print(n_index) from the hot loop
            # (the other worker variants have it commented out).
            n = node_chunk[n_index]
            # Calculate similarity matrix slice.
            s[:] = 0.0
            r[:] = 0.0
            epsilon_eff = calculate_epsilon_effective(rho, epsilon, out_degree[n], out_degree[adjacent_nodes[n]], mean_degree)
            nop = fast_approximate_personalized_pagerank(s,
                                                         r,
                                                         base_transitions[:],
                                                         adjacent_nodes[:],
                                                         out_degree,
                                                         in_degree,
                                                         n,
                                                         rho,
                                                         epsilon_eff)
            s_sparse = sparse.csr_matrix(s)
            # Perform degree normalization of approximate similarity matrix slice.
            relevant_degrees = in_degree[s_sparse.indices]
            s_sparse.data = np.divide(s_sparse.data, relevant_degrees)
            base_community = np.append(adjacent_nodes[n], n)
            # If the base community is not strictly non-zero, skip this seed.
            intersection = np.intersect1d(base_community, s_sparse.indices)
            if intersection.size < base_community.size:
                continue
            base_community_rankings = np.searchsorted(s_sparse.indices, base_community)
            min_similarity = np.min(s_sparse.data[base_community_rankings])
            # Sort the degree normalized approximate similarity matrix slice.
            sorted_indices = np.argsort(s_sparse.data, axis=0)
            s_sparse.data = s_sparse.data[sorted_indices]
            s_sparse.indices = s_sparse.indices[sorted_indices]
            most_unlikely_index = s_sparse.indices.size - np.searchsorted(s_sparse.data, min_similarity)
            # Save feature matrix coordinates, but only when the community
            # extends beyond the trivial base community.
            if most_unlikely_index > base_community.size:
                new_rows = s_sparse.indices[-1:-most_unlikely_index-1:-1]
                extend_row(new_rows)
                extend_col(n*np.ones_like(new_rows))
        # Form the local community feature matrix for this chunk and accumulate.
        row = np.array(row_list, dtype=np.int64)
        col = np.array(col_list, dtype=np.int64)
        data = np.ones_like(row, dtype=np.float64)
        chunk_features = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
        chunk_features = sparse.csr_matrix(chunk_features)
        features += chunk_features
    return features
def arcte_worker(iterate_nodes,
                 indices_c,
                 indptr_c,
                 data_c,
                 out_degree,
                 in_degree,
                 rho,
                 epsilon):
    """
    Worker that extracts local community features for a set of seed nodes using
    the approximate cumulative PageRank-difference similarity.

    Inputs:  - iterate_nodes: Seed node ids this worker is responsible for.
             - indices_c, indptr_c, data_c: CSR arrays of the random walk
               transition matrix (possibly shared-memory backed).
             - out_degree, in_degree: 1-D degree arrays of the graph.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
    Outputs: - features: Sparse CSR (n x n) matrix; column n holds the local
               community found around seed node n (all-ones indicator values).
    """
    iterate_nodes = np.array(iterate_nodes, dtype=np.int64)
    number_of_nodes = out_degree.size
    mean_degree = np.mean(out_degree)
    rw_transition = sparse.csr_matrix((data_c, indices_c, indptr_c))
    # Store adjacent nodes and corresponding transition weights in array of arrays form.
    adjacent_nodes = np.ndarray(number_of_nodes, dtype=np.ndarray)
    base_transitions = np.ndarray(number_of_nodes, dtype=np.ndarray)
    for n in range(number_of_nodes):
        adjacent_nodes[n] = rw_transition.indices[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
        base_transitions[n] = rw_transition.data[rw_transition.indptr[n]: rw_transition.indptr[n + 1]]
    # Separate nodes to iterate in smaller chunks in order to compress the feature batches.
    features = sparse.dok_matrix((number_of_nodes, number_of_nodes), dtype=np.float64)
    features = sparse.csr_matrix(features)
    if iterate_nodes.size > 2000:
        node_chunks = list(parallel_chunks(iterate_nodes, iterate_nodes.size//2000))
    else:
        node_chunks = list()
        node_chunks.append(iterate_nodes)
    for node_chunk in node_chunks:
        node_chunk = np.array(node_chunk, dtype=np.int64)
        # Calculate local communities for all nodes.
        row_list = list()
        col_list = list()
        extend_row = row_list.extend
        extend_col = col_list.extend
        # number_of_local_communities = 0
        # Workspace vectors reused across seeds (reset before each seed).
        s = np.zeros(number_of_nodes, dtype=np.float64) #TODO: What if it is only one?
        r = np.zeros(number_of_nodes, dtype=np.float64)
        for n_index in range(node_chunk.size):
            # print(n_index)
            n = node_chunk[n_index]
            # Calculate similarity matrix slice.
            s[:] = 0.0
            r[:] = 0.0
            epsilon_eff = calculate_epsilon_effective(rho, epsilon, out_degree[n], out_degree[adjacent_nodes[n]], mean_degree)
            nop = fast_approximate_cumulative_pagerank_difference(s,
                                                                  r,
                                                                  base_transitions[:],
                                                                  adjacent_nodes[:],
                                                                  out_degree,
                                                                  in_degree,
                                                                  n,
                                                                  rho,
                                                                  epsilon_eff)
            s_sparse = sparse.csr_matrix(s)
            # Perform degree normalization of approximate similarity matrix slice.
            relevant_degrees = in_degree[s_sparse.indices]
            s_sparse.data = np.divide(s_sparse.data, relevant_degrees)
            base_community = np.append(adjacent_nodes[n], n)
            # NOTE(review): unlike the personalized-pagerank worker variants,
            # this one does not verify that every base-community node received
            # non-zero similarity before the searchsorted below -- confirm that
            # fast_approximate_cumulative_pagerank_difference guarantees this.
            base_community_rankings = np.searchsorted(s_sparse.indices, base_community)
            min_similarity = np.min(s_sparse.data[base_community_rankings])
            # Sort the degree normalized approximate similarity matrix slice.
            sorted_indices = np.argsort(s_sparse.data, axis=0)
            s_sparse.data = s_sparse.data[sorted_indices]
            s_sparse.indices = s_sparse.indices[sorted_indices]
            most_unlikely_index = s_sparse.indices.size - np.searchsorted(s_sparse.data, min_similarity)
            # Save feature matrix coordinates (only communities that extend
            # beyond the trivial base community are recorded).
            if most_unlikely_index > base_community.size:
                # print(n_index, out_degree[n], epsilon_eff)
                new_rows = s_sparse.indices[-1:-most_unlikely_index-1:-1]
                extend_row(new_rows)
                # extend_col(number_of_local_communities*np.ones_like(new_rows))
                # number_of_local_communities += 1
                extend_col(n*np.ones_like(new_rows))
        # Form local community feature matrix.
        row = np.array(row_list, dtype=np.int64)
        col = np.array(col_list, dtype=np.int64)
        data = np.ones_like(row, dtype=np.float64)
        # features = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_local_communities))
        chunk_features = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
        chunk_features = sparse.csr_matrix(chunk_features)
        features += chunk_features
    return features
def arcte_with_lazy_pagerank(adjacency_matrix, rho, epsilon, number_of_threads=None):
    """
    Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors,
    using the lazy personalized PageRank similarity.

    Inputs:  - adjacency_matrix in R^(nxn): Adjacency matrix of an undirected network
               represented as a SciPy sparse matrix.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
             - number_of_threads: Worker process count; defaults to get_threads_number().
    Outputs: - features in R^(nxC_n): The latent space embedding represented as a SciPy
               sparse CSR matrix (base communities stacked with the local ones).
    """
    adjacency_matrix = sparse.csr_matrix(adjacency_matrix)
    number_of_nodes = adjacency_matrix.shape[0]
    if number_of_threads is None:
        number_of_threads = get_threads_number()
    if number_of_threads == 1:
        # Calculate natural random walk transition probability matrix.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=False)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        local_features = arcte_with_lazy_pagerank_worker(iterate_nodes,
                                                         rw_transition.indices,
                                                         rw_transition.indptr,
                                                         rw_transition.data,
                                                         out_degree,
                                                         in_degree,
                                                         rho,
                                                         epsilon)
    else:
        # Calculate natural random walk transition probability matrix backed by
        # shared memory so that worker processes can read it without copying.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=True)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        # Distribute the seed nodes round-robin over the worker processes.
        pool = mp.Pool(number_of_threads)
        node_chunks = list(parallel_chunks(iterate_nodes, number_of_threads))
        results = list()
        for chunk_no in range(len(pool._pool)):
            pool.apply_async(arcte_with_lazy_pagerank_worker,
                             args=(node_chunks[chunk_no],
                                   rw_transition.indices,
                                   rw_transition.indptr,
                                   rw_transition.data,
                                   out_degree,
                                   in_degree,
                                   rho,
                                   epsilon),
                             callback=results.append)
        pool.close()
        pool.join()
        # Sum the per-worker feature matrices (all are n x n and column-aligned).
        local_features = results[0]
        for additive_features in results[1:]:
            local_features += additive_features
        local_features = sparse.csr_matrix(local_features)
    # Form base community feature matrix: self plus immediate neighbors.
    identity_matrix = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64))
    adjacency_matrix_ones = adjacency_matrix
    adjacency_matrix_ones.data = np.ones_like(adjacency_matrix.data)
    base_community_features = identity_matrix + adjacency_matrix_ones
    # Stack horizontally matrices to form feature matrix.
    try:
        features = sparse.hstack([base_community_features, local_features]).tocsr()
    except ValueError:
        # Fall back to the base communities alone when stacking fails.
        print("Failure with horizontal feature stacking.")
        features = base_community_features
    return features
def arcte_with_pagerank(adjacency_matrix, rho, epsilon, number_of_threads=None):
    """
    Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors,
    using the fast approximate personalized PageRank similarity.

    Inputs:  - adjacency_matrix in R^(nxn): Adjacency matrix of an undirected network
               represented as a SciPy sparse matrix.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
             - number_of_threads: Worker process count; defaults to get_threads_number().
    Outputs: - features in R^(nxC_n): The latent space embedding represented as a SciPy
               sparse CSR matrix (base communities stacked with the local ones).
    """
    adjacency_matrix = sparse.csr_matrix(adjacency_matrix)
    number_of_nodes = adjacency_matrix.shape[0]
    if number_of_threads is None:
        number_of_threads = get_threads_number()
    if number_of_threads == 1:
        # Calculate natural random walk transition probability matrix.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=False)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        local_features = arcte_with_pagerank_worker(iterate_nodes,
                                                    rw_transition.indices,
                                                    rw_transition.indptr,
                                                    rw_transition.data,
                                                    out_degree,
                                                    in_degree,
                                                    rho,
                                                    epsilon)
    else:
        # Calculate natural random walk transition probability matrix backed by
        # shared memory so that worker processes can read it without copying.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=True)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        # Distribute the seed nodes round-robin over the worker processes.
        pool = mp.Pool(number_of_threads)
        node_chunks = list(parallel_chunks(iterate_nodes, number_of_threads))
        results = list()
        for chunk_no in range(len(pool._pool)):
            pool.apply_async(arcte_with_pagerank_worker,
                             args=(node_chunks[chunk_no],
                                   rw_transition.indices,
                                   rw_transition.indptr,
                                   rw_transition.data,
                                   out_degree,
                                   in_degree,
                                   rho,
                                   epsilon),
                             callback=results.append)
        pool.close()
        pool.join()
        # Sum the per-worker feature matrices (all are n x n and column-aligned).
        local_features = results[0]
        for additive_features in results[1:]:
            local_features += additive_features
        local_features = sparse.csr_matrix(local_features)
    # Form base community feature matrix: self plus immediate neighbors.
    identity_matrix = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64))
    adjacency_matrix_ones = adjacency_matrix
    adjacency_matrix_ones.data = np.ones_like(adjacency_matrix.data)
    base_community_features = identity_matrix + adjacency_matrix_ones
    # Stack horizontally matrices to form feature matrix.
    try:
        features = sparse.hstack([base_community_features, local_features]).tocsr()
    except ValueError:
        # Fall back to the base communities alone when stacking fails.
        print("Failure with horizontal feature stacking.")
        features = base_community_features
    return features
def arcte(adjacency_matrix, rho, epsilon, number_of_threads=None):
    """
    Extracts local community features for all graph nodes based on the partitioning of node-centric similarity vectors,
    using the approximate cumulative PageRank-difference similarity.

    Inputs:  - adjacency_matrix in R^(nxn): Adjacency matrix of an undirected network
               represented as a SciPy sparse matrix.
             - rho: Restart probability.
             - epsilon: Approximation threshold.
             - number_of_threads: Worker process count; defaults to get_threads_number().
    Outputs: - features in R^(nxC_n): The latent space embedding represented as a SciPy
               sparse CSR matrix (base communities stacked with the local ones).
    """
    adjacency_matrix = sparse.csr_matrix(adjacency_matrix)
    number_of_nodes = adjacency_matrix.shape[0]
    if number_of_threads is None:
        number_of_threads = get_threads_number()
    if number_of_threads == 1:
        # Calculate natural random walk transition probability matrix.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=False)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        local_features = arcte_worker(iterate_nodes,
                                      rw_transition.indices,
                                      rw_transition.indptr,
                                      rw_transition.data,
                                      out_degree,
                                      in_degree,
                                      rho,
                                      epsilon)
    else:
        # Calculate natural random walk transition probability matrix backed by
        # shared memory so that worker processes can read it without copying.
        rw_transition, out_degree, in_degree = get_natural_random_walk_matrix(adjacency_matrix, make_shared=True)
        # Seeds are the nodes with more than one incident edge, most connected first.
        a = adjacency_matrix.copy()
        a.data = np.ones_like(a.data)
        edge_count_vector = np.squeeze(np.asarray(a.sum(axis=0), dtype=np.int64))
        iterate_nodes = np.where(edge_count_vector != 0)[0]
        argsort_indices = np.argsort(edge_count_vector[iterate_nodes])
        iterate_nodes = iterate_nodes[argsort_indices][::-1]
        iterate_nodes = iterate_nodes[np.where(edge_count_vector[iterate_nodes] > 1.0)[0]]
        # Distribute the seed nodes round-robin over the worker processes.
        pool = mp.Pool(number_of_threads)
        node_chunks = list(parallel_chunks(iterate_nodes, number_of_threads))
        results = list()
        for chunk_no in range(len(pool._pool)):
            pool.apply_async(arcte_worker,
                             args=(node_chunks[chunk_no],
                                   rw_transition.indices,
                                   rw_transition.indptr,
                                   rw_transition.data,
                                   out_degree,
                                   in_degree,
                                   rho,
                                   epsilon),
                             callback=results.append)
        pool.close()
        pool.join()
        # Sum the per-worker feature matrices (all are n x n and column-aligned).
        local_features = results[0]
        for additive_features in results[1:]:
            local_features += additive_features
        local_features = sparse.csr_matrix(local_features)
    # Form base community feature matrix: self plus immediate neighbors.
    identity_matrix = sparse.csr_matrix(sparse.eye(number_of_nodes, number_of_nodes, dtype=np.float64))
    adjacency_matrix_ones = adjacency_matrix
    adjacency_matrix_ones.data = np.ones_like(adjacency_matrix.data)
    base_community_features = identity_matrix + adjacency_matrix_ones
    # Stack horizontally matrices to form feature matrix.
    try:
        features = sparse.hstack([base_community_features, local_features]).tocsr()
    except ValueError:
        # Fall back to the base communities alone when stacking fails.
        print("Failure with horizontal feature stacking.")
        features = base_community_features
    return features
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import os
import numpy as np
from reveal_graph_embedding.common import get_file_row_generator
def write_results(performance_measures, target_file_path):
    """
    Write a short report containing the macro and micro F1 average scores.

    Inputs: - performance_measures: Sequence whose items 4 and 5 hold the
                                    macro and micro F1 score arrays.
            - target_file_path: Path of the report file to be written.
    """
    header_lines = ["*** Percentages:" + "\n",
                    "1\t2\t3\t4\t5\t6\t7\t8\t9\t10" + "\n"]
    with open(target_file_path, "w") as fp:
        fp.writelines(header_lines)
        fp.write("\n\n")
        write_average_score_row(fp, "Macro F1", performance_measures[4])
        fp.write("\n\n")
        write_average_score_row(fp, "Micro F1", performance_measures[5])
def store_performace_measures(performance_measures, memory_path, experiment_string):
    """
    Persist average scores and per-category F scores to text files.

    Existing output files are left untouched.

    Inputs: - performance_measures: Sequence of (mean, std) pairs for macro/micro
                                    precision, recall and F1 (indices 0-5),
                                    followed by the per-category F1 array at index 6.
            - memory_path: Base folder that contains a "scores" subfolder.
            - experiment_string: Identifier used to build the output file names.
    """
    # Per-category F1 scores; one column per category.
    F1 = performance_measures[6]
    number_of_categories = F1.shape[1]

    # Store average scores, unless a result file is already present.
    average_scores_path = memory_path + "/scores/" + experiment_string + "_average_scores.txt"
    if not os.path.exists(average_scores_path):
        score_names = ("Macro Precision", "Micro Precision",
                       "Macro Recall", "Micro Recall",
                       "Macro F1", "Micro F1")
        with open(average_scores_path, "w") as fp:
            for measure_index, score_name in enumerate(score_names):
                if measure_index > 0:
                    # Blank-line separator between consecutive score blocks.
                    fp.write("\n\n")
                write_average_score_row(fp, score_name, performance_measures[measure_index])

    # Store category-specific F scores, unless a result file is already present.
    f_scores_path = memory_path + "/scores/" + experiment_string + "_F_scores.txt"
    if not os.path.exists(f_scores_path):
        with open(f_scores_path, "w") as fp:
            for category in range(number_of_categories):
                scores = [str(score) for score in F1[:, category]]
                fp.write("\t".join(scores) + "\n")
def write_average_score_row(fp, score_name, scores):
    """
    Write a titled block of average score rows to an open file.

    Inputs: - fp: A writable file-like object.
            - score_name: Title of the score block.
            - scores: An iterable of vectors; each vector holds the average
                      score values for each of the training set percentages.
    """
    fp.write("--" + score_name + "--")
    for vector in scores:
        fp.write("\n" + "\t".join(str(score) for score in vector))
def read_performance_measures(file_path, number=10):
    """
    Read mean/std macro and micro F1 scores from a stored score file.

    Inputs:  - file_path: Path of a tab-separated score file.
             - number: Number of training set percentages (columns per row).

    Outputs: - F1_macro_mean: NumPy array of size `number`.
             - F1_macro_std: NumPy array of size `number`.
             - F1_micro_mean: NumPy array of size `number`.
             - F1_micro_std: NumPy array of size `number`.

    NOTE(review): the fixed row counts (18 and 3) encode the layout of one
    specific score-file format, and every consumed row must parse as floats --
    verify against the writer if the file format changes.
    """
    file_row_gen = get_file_row_generator(file_path, "\t")
    F1_macro_mean = np.zeros(number, dtype=np.float64)
    F1_macro_std = np.zeros(number, dtype=np.float64)
    F1_micro_mean = np.zeros(number, dtype=np.float64)
    F1_micro_std = np.zeros(number, dtype=np.float64)
    # Each iteration overwrites the buffer in place; only the values of the
    # 18th consumed row are kept as the macro F1 means.
    for r in range(18):
        file_row = next(file_row_gen)
        file_row = [float(score) for score in file_row]
        F1_macro_mean[:] = file_row
    file_row = next(file_row_gen)
    file_row = [float(score) for score in file_row]
    F1_macro_std[:] = file_row
    # Same pattern: only the 3rd consumed row is kept as the micro F1 means.
    for r in range(3):
        file_row = next(file_row_gen)
        file_row = [float(score) for score in file_row]
        F1_micro_mean[:] = file_row
    file_row = next(file_row_gen)
    file_row = [float(score) for score in file_row]
    F1_micro_std[:] = file_row
    return F1_macro_mean, F1_macro_std, F1_micro_mean, F1_micro_std
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from reveal_user_annotation.common.datarw import get_file_row_generator
def read_oslom_features(oslom_folder, number_of_nodes):
    """
    Read OSLOM community detection output as a sparse node-community matrix.

    The base partition is read from "<oslom_folder>/tp" and each additional
    hierarchy level from "tp1", "tp2", ... as long as such files exist.

    Inputs: - oslom_folder: Folder that contains the OSLOM "tp" output files.
            - number_of_nodes: Number of rows of the resulting matrix.

    Output: - features: Node-community indicator matrix in SciPy Sparse
                        COOrdinate format.
    """
    oslom_path = oslom_folder + "/tp"
    # Count hierarchy level files tp1, tp2, ... until one is missing.
    # NOTE(review): the counter is incremented on the failed open as well, so
    # number_of_levels ends up as <existing level files> + 1; the loops below
    # compensate by iterating np.arange(1, number_of_levels).
    number_of_levels = 0
    while True:
        try:
            with open(oslom_path + str(number_of_levels + 1)):
                number_of_levels += 1
        except EnvironmentError:
            number_of_levels += 1
            print('OSLOM hierarchy level: ', number_of_levels)
            break
    # First pass: count communities (non-comment lines) over the base file
    # and all level files, so the matrix can be sized up front.
    number_of_communities = 0
    with open(oslom_path) as fp:
        lines = fp.readlines()
        for line in lines:
            if line[0] == '#':
                continue
            else:
                number_of_communities += 1
    for i in np.arange(1, number_of_levels):
        with open(oslom_path + str(i)) as fp:
            lines = fp.readlines()
            for line in lines:
                if line[0] == '#':
                    continue
                else:
                    number_of_communities += 1
    # Second pass: fill node-community memberships; column j corresponds to
    # the j-th non-comment line across all files.
    features = spsp.dok_matrix((number_of_nodes, number_of_communities), dtype=np.float64)
    j = 0
    with open(oslom_path) as fp:
        lines = fp.readlines()
        for line in lines:
            if line[0] == '#':
                continue
            else:
                words = line.strip().split(' ')
                for word in words:
                    # OSLOM node ids are assumed to be 1-based -- TODO confirm.
                    features[int(word) - 1, j] = 1
                j += 1
    for i in np.arange(1, number_of_levels):
        with open(oslom_path + str(i)) as fp:
            lines = fp.readlines()
            for line in lines:
                if line[0] == '#':
                    continue
                else:
                    words = line.strip().split(' ')
                    for word in words:
                        features[int(word) - 1, j] = 1
                    j += 1
    features = features.tocoo()
    return features
def read_bigclam_features(path, number_of_nodes):
    """
    Read BigClam community detection output as a sparse feature matrix.

    The input file holds one community per line: a tab-separated list of the
    node ids that belong to that community.

    Inputs: - path: Path of the BigClam crisp community file.
            - number_of_nodes: Number of rows of the resulting matrix.

    Output: - features: Node-community indicator matrix in SciPy Sparse
                        COOrdinate format, of shape
                        (number_of_nodes, number_of_communities).
    """
    # Use a context manager so the file is always closed.
    with open(path, "r") as fp:
        lines_crisp = fp.readlines()
    number_of_communities = len(lines_crisp)

    # First pass: count the nonzero elements so the triplet arrays can be
    # preallocated at the right size.
    nnz = 0
    for c in np.arange(number_of_communities):
        words = lines_crisp[c].strip().split('\t')
        nnz += len(words)

    # Second pass: fill in the (data, row, col) triplets.
    data_crisp = np.zeros(nnz, dtype=np.float64)
    row_crisp = np.zeros(nnz, dtype=np.int32)
    col_crisp = np.zeros(nnz, dtype=np.int32)
    nnz = 0
    for c in np.arange(number_of_communities):
        words_crisp = lines_crisp[c].strip().split('\t')
        for i in np.arange(len(words_crisp)):
            data_crisp[nnz] = 1.0
            row_crisp[nnz] = int(words_crisp[i])
            col_crisp[nnz] = c
            nnz += 1

    # The matrix is constructed directly in COO format; the redundant
    # features.tocoo() call of the original implementation was removed.
    features = spsp.coo_matrix((data_crisp, (row_crisp, col_crisp)),
                               shape=(number_of_nodes, number_of_communities))
    return features
def _read_matlab_vector(array_path):
    """Read one single-column, tab-separated array file into a list of floats."""
    file_row_gen = get_file_row_generator(array_path, "\t")
    return [float(file_row[0]) for file_row in file_row_gen]


def read_matlab_features(array_paths, number_of_nodes, dimensionality):
    """
    Returns a sparse feature matrix as calculated by a Matlab routine.

    Inputs: - array_paths: Triple of paths holding the data, row and column
                           arrays of the matrix in COOrdinate triplet form.
            - number_of_nodes: Number of rows of the resulting matrix.
            - dimensionality: Number of columns of the resulting matrix.

    Output: - features: The feature matrix in SciPy Sparse COOrdinate format.
    """
    # The three single-column files were previously read with triplicated
    # code and leftover debug prints; both were consolidated/removed.
    data = np.array(_read_matlab_vector(array_paths[0]), dtype=np.float64)
    # Subtract 1 to translate Matlab (1-based) numbering to C (0-based).
    row = np.array(_read_matlab_vector(array_paths[1]), dtype=np.float64).astype(np.int64) - 1
    col = np.array(_read_matlab_vector(array_paths[2]), dtype=np.float64).astype(np.int64) - 1
    features = spsp.coo_matrix((data, (row, col)), shape=(number_of_nodes, dimensionality))
    return features
def read_deepwalk_features(deepwalk_folder, number_of_nodes=None):
    """
    Read a DeepWalk embedding file into a dense feature matrix.

    Inputs: - deepwalk_folder: Folder that contains a "deepwalk.txt" file whose
                               header row declares node count and dimensionality.
            - number_of_nodes: Optional number of rows; when None, the node
                               count declared in the file header is used.

    Output: - features: A dense NumPy array of shape (nodes, dimensions).
    """
    file_row_gen = get_file_row_generator(deepwalk_folder + "/deepwalk.txt", " ")
    header = next(file_row_gen)
    dimensionality = int(header[1])
    row_count = int(header[0]) if number_of_nodes is None else number_of_nodes
    features = np.zeros((row_count, dimensionality), dtype=np.float64)
    for file_row in file_row_gen:
        # DeepWalk numbers nodes from 1; shift to zero-based row indices.
        node = int(file_row[0]) - 1
        features[node, :] = np.array([np.float64(coordinate) for coordinate in file_row[1:]])
    return features
def read_dense_separated_value_file(file_path, number_of_nodes, separator=","):
    """
    Read a dense separated-value file into a NumPy feature matrix.

    Inputs: - file_path: Path of the separated-value file; one node per row.
            - number_of_nodes: Number of rows to preallocate.
            - separator: The delimiter among values (e.g. ",", "\\t", " ").

    Output: - features: A dense NumPy array of shape (number_of_nodes, dims).
    """
    file_row_gen = get_file_row_generator(file_path=file_path, separator=separator)
    # The first row also determines the dimensionality of the matrix.
    first_file_row = next(file_row_gen)
    features = np.empty((number_of_nodes, len(first_file_row)), dtype=np.float64)
    features[0, :] = np.array(first_file_row)
    for row_index, file_row in enumerate(file_row_gen, start=1):
        features[row_index, :] = np.array(file_row)
    return features
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
import scipy.sparse as spsp
def get_file_row_generator(file_path, separator, encoding=None):
    """
    Reads a separated value file row by row.

    Inputs: - file_path: The path of the separated value format file.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
            - encoding: The encoding used in the stored text.

    Yields: - words: A list of strings corresponding to each of the file's rows.
    """
    with open(file_path, encoding=encoding) as file_object:
        for line in file_object:
            yield line.strip().split(separator)
def store_pickle(file_path, data):
    """
    Pickle some data to a given path.

    Inputs: - file_path: Target file path.
            - data: The python object to be serialized via pickle.
    """
    # A context manager guarantees the file is closed even if pickling fails;
    # the original implementation leaked the handle on exceptions.
    with open(file_path, 'wb') as pkl_file:
        pickle.dump(data, pkl_file)
def load_pickle(file_path):
    """
    Unpickle some data from a given path.

    Input:  - file_path: Target file path.

    Output: - data: The python object that was serialized and stored in disk.
    """
    # A context manager guarantees the file is closed even if loading fails;
    # the original implementation leaked the handle on exceptions.
    with open(file_path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
def read_adjacency_matrix(file_path, separator, undirected):
    """
    Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.

    Inputs:  - file_path: The path where the adjacency matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\\t", " ")
             - undirected: If True, create the reciprocal edge for each edge in edge list.

    Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.
             - node_to_id: A dictionary that maps anonymized node ids to the original node ids.
    """
    file_row_generator = get_file_row_generator(file_path, separator)

    # Sparse matrix triplets.
    row = list()
    col = list()
    data = list()

    # Maps original node ids to consecutive anonymized node indices.
    id_to_node = dict()

    for file_row in file_row_generator:
        # Skip comment lines.
        if file_row[0][0] == "#":
            continue

        source_node_id = int(file_row[0])
        target_node_id = int(file_row[1])
        # setdefault assigns the next consecutive index to unseen nodes.
        source_node = id_to_node.setdefault(source_node_id, len(id_to_node))
        target_node = id_to_node.setdefault(target_node_id, len(id_to_node))
        edge_weight = float(file_row[2])

        row.append(source_node)
        col.append(target_node)
        data.append(edge_weight)

        # For undirected graphs also add the reciprocal edge, except for self-loops.
        if undirected and (source_node != target_node):
            row.append(target_node)
            col.append(source_node)
            data.append(edge_weight)

    number_of_nodes = len(id_to_node)
    # Invert the anonymization map.
    node_to_id = {node: node_id for node_id, node in id_to_node.items()}

    adjacency_matrix = spsp.coo_matrix((np.array(data, dtype=np.float64),
                                        (np.array(row, dtype=np.int64),
                                         np.array(col, dtype=np.int64))),
                                       shape=(number_of_nodes, number_of_nodes))
    return adjacency_matrix, node_to_id
def write_features(file_path,
                   features,
                   separator,
                   node_to_id):
    """
    Write a sparse feature matrix in separated value format, one nonzero element per row.

    Inputs: - file_path: Target file path.
            - features: The feature matrix; converted to COOrdinate format before writing.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
            - node_to_id: A dictionary that maps anonymized node ids to the original node ids.
    """
    features = spsp.coo_matrix(features)

    with open(file_path, "w") as f:
        for node, community_id, value in zip(features.row, features.col, features.data):
            # De-anonymize the node id and store the value as an integer.
            f.write(str(node_to_id[node]) + separator +
                    str(community_id) + separator +
                    str(int(value)) + "\n")
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator):
    """
    Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.

    The edge list is treated as an undirected, unweighted graph: the
    reciprocal of every edge is added explicitly and all weights are 1.0.

    Inputs:  - file_path: The path where the adjacency matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")

    Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.
    """
    # Open file
    file_row_generator = get_file_row_generator(file_path, separator)
    # Initialize lists for row and column sparse matrix arguments
    row = list()
    col = list()
    append_row = row.append
    append_col = col.append
    # Read all file rows
    for file_row in file_row_generator:
        source_node = np.int64(file_row[0])
        target_node = np.int64(file_row[1])
        # Add edge
        append_row(source_node)
        append_col(target_node)
        # Since this is an undirected network also add the reciprocal edge
        append_row(target_node)
        append_col(source_node)
    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    # Unweighted graph: every stored edge gets weight 1.0.
    data = np.ones_like(row, dtype=np.float64)
    # NOTE(review): assumes 1-based node ids with the largest id appearing in
    # the file; self-loops double-count and sparse trailing ids would shrink
    # the matrix -- verify against the data source.
    number_of_nodes = np.max(row)  # I assume that there are no missing nodes at the end.
    # Array count should start from 0.
    row -= 1
    col -= 1
    # Form sparse adjacency matrix
    adjacency_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
    return adjacency_matrix
def read_node_label_matrix(file_path, separator, number_of_nodes):
    """
    Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.

    Inputs:  - file_path: The path where the node-label matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\\t", " ")
             - number_of_nodes: The number of nodes of the full graph. It is possible that not all nodes are labelled.

    Outputs: - node_label_matrix: The node-label associations in SciPy Sparse CSR format.
             - number_of_categories: The number of categories/classes the nodes may belong to.
             - labelled_node_indices: A NumPy array containing the labelled node indices.
    """
    file_row_generator = get_file_row_generator(file_path, separator)

    node_list = list()
    label_list = list()
    for file_row in file_row_generator:
        node_list.append(np.int64(file_row[0]))
        label_list.append(np.int64(file_row[1]))

    # I assume that there are no missing labels. There may be missing nodes.
    number_of_categories = len(set(label_list))
    labelled_node_indices = np.array(list(set(node_list)))

    row = np.array(node_list, dtype=np.int64)
    col = np.array(label_list, dtype=np.int64)
    data = np.ones_like(row, dtype=np.float64)

    # Shift to zero-based array numbering.
    row -= 1
    col -= 1
    labelled_node_indices -= 1

    node_label_matrix = sparse.coo_matrix((data, (row, col)),
                                          shape=(number_of_nodes, number_of_categories)).tocsr()
    return node_label_matrix, number_of_categories, labelled_node_indices
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator, numbering="matlab"):
    """
    Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.

    The first file row is the metadata header written by scipy_sparse_to_csv,
    holding the matrix shape and directedness.

    Inputs:  - file_path: The path where the adjacency matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")
             - numbering: Array numbering style: * "matlab"
                                                 * "c"

    Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.

    Raises:  - RuntimeError: On invalid metadata or numbering style.
    """
    # Open file
    file_row_generator = get_file_row_generator(file_path, separator)

    # Parse the metadata header.
    file_row = next(file_row_generator)
    # BUGFIX: the shape fields must be integers; the raw header strings are
    # not valid arguments for the coo_matrix shape.
    number_of_rows = int(file_row[1])
    number_of_columns = int(file_row[3])
    directed = file_row[7]
    if directed == "True":
        directed = True
    elif directed == "False":
        directed = False
    else:
        print("Invalid metadata.")
        raise RuntimeError
    # Initialize lists for row and column sparse matrix arguments
    row = list()
    col = list()
    data = list()
    append_row = row.append
    append_col = col.append
    append_data = data.append
    # Read all file rows
    for file_row in file_row_generator:
        source_node = np.int64(file_row[0])
        target_node = np.int64(file_row[1])
        edge_weight = np.float64(file_row[2])
        # Add edge
        append_row(source_node)
        append_col(target_node)
        append_data(edge_weight)
        # Since this is an undirected network also add the reciprocal edge
        if not directed:
            if source_node != target_node:
                append_row(target_node)
                append_col(source_node)
                append_data(edge_weight)
    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.array(data, dtype=np.float64)
    # Shift 1-based (Matlab) indices to zero-based array numbering.
    if numbering == "matlab":
        row -= 1
        col -= 1
    elif numbering == "c":
        pass
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    # Form sparse adjacency matrix
    adjacency_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_columns))
    return adjacency_matrix
def read_node_label_matrix(file_path, separator, numbering="matlab"):
    """
    Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.

    The first file row is a metadata header holding the matrix shape.

    Inputs:  - file_path: The path where the node-label matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")
             - numbering: Array numbering style: * "matlab"
                                                 * "c"

    Outputs: - node_label_matrix: The node-label associations in SciPy Sparse CSR format.
             - number_of_categories: The number of categories/classes the nodes may belong to.
             - labelled_node_indices: A NumPy array containing the labelled node indices.

    Raises:  - RuntimeError: On invalid numbering style.
    """
    # Open file
    file_row_generator = get_file_row_generator(file_path, separator)

    # Parse the metadata header.
    file_row = next(file_row_generator)
    # BUGFIX: cast the row count to int, consistently with the category count;
    # the raw header string is not a valid coo_matrix shape value.
    number_of_rows = int(file_row[1])
    number_of_categories = int(file_row[3])
    # Initialize lists for row and column sparse matrix arguments
    row = list()
    col = list()
    append_row = row.append
    append_col = col.append
    # Populate the arrays
    for file_row in file_row_generator:
        node = np.int64(file_row[0])
        label = np.int64(file_row[1])
        # Add label
        append_row(node)
        append_col(label)
    labelled_node_indices = np.array(list(set(row)))
    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.ones_like(row, dtype=np.float64)
    # Shift 1-based (Matlab) indices to zero-based array numbering.
    if numbering == "matlab":
        row -= 1
        col -= 1
        labelled_node_indices -= 1
    elif numbering == "c":
        pass
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    # Form sparse adjacency matrix
    node_label_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_categories))
    node_label_matrix = node_label_matrix.tocsr()
    return node_label_matrix, number_of_categories, labelled_node_indices
def scipy_sparse_to_csv(filepath, matrix, separator=",", directed=False, numbering="matlab"):
    """
    Writes sparse matrix in separated value format.

    A metadata header row (shape, nnz, directedness) precedes the edge rows.

    Inputs: - filepath: Target file path.
            - matrix: The sparse matrix to be stored; converted to COOrdinate format.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
            - directed: If False, only edges in the upper triangle are stored.
            - numbering: Array numbering style: * "matlab"
                                                * "c"

    Raises: - RuntimeError: On invalid numbering style.
    """
    matrix = spsp.coo_matrix(matrix)
    number_of_rows, number_of_columns = matrix.shape
    nnz = matrix.getnnz()

    if numbering == "matlab":
        # Shift to 1-based indices.
        offset = 1
    elif numbering == "c":
        offset = 0
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    row = matrix.row + offset
    col = matrix.col + offset
    data = matrix.data

    with open(filepath, "w") as f:
        # Write metadata.
        header = separator.join(["n_rows:", str(number_of_rows),
                                 "n_cols:", str(number_of_columns),
                                 "nnz:", str(nnz),
                                 "directed:", str(directed)]) + "\n"
        f.write(header)
        for edge in range(row.size):
            if directed is False:
                # For undirected graphs store each edge only once.
                if col[edge] < row[edge]:
                    continue
            f.write(str(row[edge]) + separator + str(col[edge]) + separator + str(data[edge]) + "\n")
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as spsp
from collections import defaultdict
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator, numbering="matlab"):
    """
    Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.

    The first file row is the metadata header written by scipy_sparse_to_csv,
    holding the matrix shape and directedness.

    Inputs:  - file_path: The path where the adjacency matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")
             - numbering: Array numbering style: * "matlab"
                                                 * "c"

    Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.

    Raises:  - RuntimeError: On invalid metadata or numbering style.
    """
    # Open file
    file_row_generator = get_file_row_generator(file_path, separator)

    # Parse the metadata header.
    file_row = next(file_row_generator)
    # BUGFIX: the shape fields must be integers; the raw header strings are
    # not valid arguments for the coo_matrix shape.
    number_of_rows = int(file_row[1])
    number_of_columns = int(file_row[3])
    directed = file_row[7]
    if directed == "True":
        directed = True
    elif directed == "False":
        directed = False
    else:
        print("Invalid metadata.")
        raise RuntimeError
    # Initialize lists for row and column sparse matrix arguments
    row = list()
    col = list()
    data = list()
    append_row = row.append
    append_col = col.append
    append_data = data.append
    # Read all file rows
    for file_row in file_row_generator:
        source_node = np.int64(file_row[0])
        target_node = np.int64(file_row[1])
        edge_weight = np.float64(file_row[2])
        # Add edge
        append_row(source_node)
        append_col(target_node)
        append_data(edge_weight)
        # Since this is an undirected network also add the reciprocal edge
        if not directed:
            if source_node != target_node:
                append_row(target_node)
                append_col(source_node)
                append_data(edge_weight)
    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.array(data, dtype=np.float64)
    # Shift 1-based (Matlab) indices to zero-based array numbering.
    if numbering == "matlab":
        row -= 1
        col -= 1
    elif numbering == "c":
        pass
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    # Form sparse adjacency matrix
    adjacency_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_columns))
    return adjacency_matrix
def read_node_label_matrix(file_path, separator, numbering="matlab"):
    """
    Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.

    The first file row is a metadata header holding the matrix shape.

    Inputs:  - file_path: The path where the node-label matrix is stored.
             - separator: The delimiter among values (e.g. ",", "\t", " ")
             - numbering: Array numbering style: * "matlab"
                                                 * "c"

    Outputs: - node_label_matrix: The node-label associations in SciPy Sparse CSR format.
             - number_of_categories: The number of categories/classes the nodes may belong to.
             - labelled_node_indices: A NumPy array containing the labelled node indices.

    Raises:  - RuntimeError: On invalid numbering style.
    """
    # Open file
    file_row_generator = get_file_row_generator(file_path, separator)

    # Parse the metadata header.
    file_row = next(file_row_generator)
    # BUGFIX: cast the row count to int, consistently with the category count;
    # the raw header string is not a valid coo_matrix shape value.
    number_of_rows = int(file_row[1])
    number_of_categories = int(file_row[3])
    # Initialize lists for row and column sparse matrix arguments
    row = list()
    col = list()
    append_row = row.append
    append_col = col.append
    # Populate the arrays
    for file_row in file_row_generator:
        node = np.int64(file_row[0])
        label = np.int64(file_row[1])
        # Add label
        append_row(node)
        append_col(label)
    labelled_node_indices = np.array(list(set(row)))
    row = np.array(row, dtype=np.int64)
    col = np.array(col, dtype=np.int64)
    data = np.ones_like(row, dtype=np.float64)
    # Shift 1-based (Matlab) indices to zero-based array numbering.
    if numbering == "matlab":
        row -= 1
        col -= 1
        labelled_node_indices -= 1
    elif numbering == "c":
        pass
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    # Form sparse adjacency matrix
    node_label_matrix = spsp.coo_matrix((data, (row, col)), shape=(number_of_rows, number_of_categories))
    node_label_matrix = node_label_matrix.tocsr()
    return node_label_matrix, number_of_categories, labelled_node_indices
def scipy_sparse_to_csv(filepath, matrix, separator=",", directed=False, numbering="matlab"):
    """
    Writes sparse matrix in separated value format.

    A metadata header row (shape, nnz, directedness) precedes the edge rows.

    Inputs: - filepath: Target file path.
            - matrix: The sparse matrix to be stored; converted to COOrdinate format.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
            - directed: If False, only edges in the upper triangle are stored.
            - numbering: Array numbering style: * "matlab"
                                                * "c"

    Raises: - RuntimeError: On invalid numbering style.
    """
    coo = spsp.coo_matrix(matrix)
    if numbering == "matlab":
        # Shift to 1-based indices.
        row_indices = coo.row + 1
        col_indices = coo.col + 1
    elif numbering == "c":
        row_indices = coo.row
        col_indices = coo.col
    else:
        print("Invalid numbering style.")
        raise RuntimeError
    values = coo.data

    with open(filepath, "w") as f:
        # Write metadata.
        f.write("n_rows:" + separator + str(coo.shape[0]) + separator +
                "n_cols:" + separator + str(coo.shape[1]) + separator +
                "nnz:" + separator + str(coo.getnnz()) + separator +
                "directed:" + separator + str(directed) +
                "\n")
        for edge in range(row_indices.size):
            # For undirected graphs store each edge only once.
            if directed is False and col_indices[edge] < row_indices[edge]:
                continue
            f.write(str(row_indices[edge]) + separator +
                    str(col_indices[edge]) + separator +
                    str(values[edge]) + "\n")
def write_screen_name_to_topics(filepath, user_label_matrix, node_to_id, id_to_name, label_to_lemma, lemma_to_keyword, separator=","):
    """
    Writes one user screen name and the associated topic lemmas per row.

    Inputs: - filepath: Target file path.
            - user_label_matrix: Sparse user-label matrix; converted to COOrdinate format.
            - node_to_id: A dictionary that maps anonymized node ids to Twitter user ids.
            - id_to_name: A dictionary that maps Twitter user ids to screen names.
            - label_to_lemma: A dictionary that maps label indices to lemmas.
            - lemma_to_keyword: Currently unused; kept for interface compatibility.
            - separator: The delimiter among values (e.g. ",", "\\t", " ")
    """
    user_label_matrix = spsp.coo_matrix(user_label_matrix)
    number_of_rows = user_label_matrix.shape[0]
    nnz = user_label_matrix.getnnz()

    # Group the topic lemmas of each screen name.
    name_to_topic_set = defaultdict(set)
    for node, label in zip(user_label_matrix.row, user_label_matrix.col):
        screen_name = id_to_name[node_to_id[node]]
        name_to_topic_set[screen_name].add(label_to_lemma[label])

    with open(filepath, "w") as f:
        # Write metadata.
        f.write("n_rows:" + separator + str(number_of_rows) + separator +
                "nnz:" + separator + str(nnz) + separator +
                "\n")
        for screen_name, topic_set in name_to_topic_set.items():
            f.write(separator.join([screen_name] + list(topic_set)) + "\n")
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import twython
from twython import Twython
import time
from urllib.error import URLError
from http.client import BadStatusLine
from reveal_user_annotation.common.config_package import get_package_path
from reveal_user_annotation.common.datarw import get_file_row_generator
def login(twitter_app_key,
          twitter_app_secret):
    """
    This is a shortcut for logging in the Twitter app described in the config_app_credentials.txt file.

    Inputs: - twitter_app_key: The Twitter application key.
            - twitter_app_secret: The Twitter application secret.

    Output: - twitter: A twython twitter object, containing wrappers for the Twitter API.
    """
    # Log into my application using app-only credentials.
    return Twython(twitter_app_key, twitter_app_secret)
def safe_twitter_request_handler(twitter_api_func,
                                 call_rate_limit,
                                 call_counter,
                                 time_window_start,
                                 max_retries,
                                 wait_period,
                                 *args, **kw):
    """
    This is a safe function handler for any twitter request.

    Inputs: - twitter_api_func: The twython function object to be safely called.
            - call_rate_limit: THe call rate limit for this specific Twitter API function.
            - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
            - time_window_start: The timestamp of the current 15-minute window.
            - max_retries: Number of call retries allowed before abandoning the effort.
            - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.
            - *args, **kw: The parameters of the twython function to be called.

    Outputs: - twitter_api_function_result: The results of the Twitter function.
             - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
             - time_window_start: The timestamp of the current 15-minute window.

    Raises: - twython.TwythonError
            - urllib.error.URLError
            - http.client.BadStatusLine
    """
    # Number of consecutive failed attempts; 429 handling adds 0.5 per hit.
    error_count = 0
    while True:
        try:
            # If we have reached the call rate limit for this function:
            if call_counter >= call_rate_limit:
                # Reset counter.
                call_counter = 0
                # Sleep for the appropriate time.
                # Twitter enforces rate limits per 15-minute window; sleep out
                # whatever remains of the current window (at least 0.1s).
                elapsed_time = time.perf_counter() - time_window_start
                sleep_time = 15*60 - elapsed_time
                if sleep_time < 0.1:
                    sleep_time = 0.1
                time.sleep(sleep_time)
                # Initialize new 15-minute time window.
                # NOTE(review): the call made right after a window reset is not
                # counted (the counter stays 0) -- confirm this is intended.
                time_window_start = time.perf_counter()
            else:
                call_counter += 1
            twitter_api_function_result = twitter_api_func(*args, **kw)
            return twitter_api_function_result, call_counter, time_window_start
        except twython.TwythonError as e:
            # If it is a Twitter error, handle it.
            # Non-retryable codes (401, 404, unknown) re-raise from inside the
            # handler; retryable ones update the retry bookkeeping and loop.
            error_count, call_counter, time_window_start, wait_period = handle_twitter_http_error(e,
                                                                                                  error_count,
                                                                                                  call_counter,
                                                                                                  time_window_start,
                                                                                                  wait_period)
            if error_count > max_retries:
                print("Max error count reached. Abandoning effort.")
                raise e
        except URLError as e:
            # Network-level failure: count it and retry until max_retries.
            error_count += 1
            if error_count > max_retries:
                print("Max error count reached. Abandoning effort.")
                raise e
        except BadStatusLine as e:
            # Malformed HTTP response: count it and retry until max_retries.
            error_count += 1
            if error_count > max_retries:
                print("Max error count reached. Abandoning effort.")
                raise e
def handle_twitter_http_error(e, error_count, call_counter, time_window_start, wait_period):
    """
    This function handles the twitter request in case of an HTTP error.

    Inputs: - e: A twython.TwythonError instance to be handled.
            - error_count: Number of failed retries of the call until now.
            - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
            - time_window_start: The timestamp of the current 15-minute window.
            - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.

    Outputs: - error_count: Updated number of failed retries.
             - call_counter: A counter that keeps track of the number of function calls in the current 15-minute window.
             - time_window_start: The timestamp of the current 15-minute window.
             - wait_period: For certain Twitter errors (i.e. server overload), we wait and call again.

    Raises: - twython.TwythonError: For errors that cannot be retried (401, 404, or unknown codes).
    """
    code = e.error_code
    if code in (401, 404):
        # 401 (Not Authorized) and 404 (Not Found) cannot be retried.
        raise e
    if code == 429:
        # 429 (Rate Limit Exceeded): sleep out a full 15-minute window and
        # start a new one; such hits only count as half a retry.
        error_count += 0.5
        call_counter = 0
        wait_period = 2
        time.sleep(60*15 + 5)
        time_window_start = time.perf_counter()
        return error_count, call_counter, time_window_start, wait_period
    if code in (500, 502, 503, 504):
        # Transient server-side errors: wait and back off exponentially.
        error_count += 1
        time.sleep(wait_period)
        wait_period *= 1.5
        return error_count, call_counter, time_window_start, wait_period
    # Any other error code is unexpected; propagate it.
    raise e
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
from collections import defaultdict
from reveal_user_annotation.text.clean_text import clean_document
from reveal_user_annotation.text.text_util import reduce_list_of_bags_of_words
def clean_twitter_list(twitter_list, lemmatizing="wordnet"):
    """
    Extracts the *set* of keywords found in a Twitter list (name + description).

    Inputs: - twitter_list: A Twitter list in json format.
            - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".

    Output: - keyword_set: A set of keywords (i.e. not a bag-of-words) in python set format.
            - lemma_to_keywordbag: A python dictionary that maps stems/lemmas to original topic keywords.
    """
    lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
    all_lemmas = list()

    # Both the list name and its description contribute keywords; underscores
    # and dashes are treated as word separators before cleaning.
    for field in ("name", "description"):
        text = twitter_list[field].replace("_", " ").replace("-", " ")
        lemmas, field_lemma_to_keywordbag = clean_document(text, lemmatizing=lemmatizing)
        all_lemmas.extend(lemmas)
        # Merge this field's lemma -> keyword multiplicities into the total.
        for lemma, keywordbag in field_lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag[lemma][keyword] += multiplicity

    keyword_set = set(all_lemmas)
    return keyword_set, lemma_to_keywordbag
def clean_list_of_twitter_list(list_of_twitter_lists, lemmatizing="wordnet"):
    """
    Extracts the sets of keywords for each Twitter list.

    Inputs: - list_of_twitter_lists: A python list of Twitter lists in json format.
            - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".

    Output: - list_of_keyword_sets: A list of sets of keywords (i.e. not a bag-of-words) in python set format.
            - list_of_lemma_to_keywordbags: List of python dicts that map stems/lemmas to original topic keywords.
    """
    list_of_keyword_sets = list()
    list_of_lemma_to_keywordbags = list()
    if list_of_twitter_lists is None:
        # No lists at all: both outputs stay empty.
        return list_of_keyword_sets, list_of_lemma_to_keywordbags
    for twitter_list in list_of_twitter_lists:
        if twitter_list is None:
            continue  # Missing lists contribute nothing.
        keyword_set, lemma_to_keywordbag = clean_twitter_list(twitter_list=twitter_list, lemmatizing=lemmatizing)
        list_of_keyword_sets.append(keyword_set)
        list_of_lemma_to_keywordbags.append(lemma_to_keywordbag)
    return list_of_keyword_sets, list_of_lemma_to_keywordbags
def user_twitter_list_bag_of_words(twitter_list_corpus, lemmatizing="wordnet"):
    """
    Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.

    Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
            - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".

    Output: - bag_of_words: A bag-of-words in python dictionary format.
            - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
    """
    # Keyword extraction may yield empty sets for lists with no usable text.
    list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus, lemmatizing)
    # Fold all keyword sets into a single multiset.
    bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets)
    # Aggregate the per-list lemma -> keyword multiplicity maps into one total map.
    totals = defaultdict(lambda: defaultdict(int))
    for partial_map in list_of_lemma_to_keywordbags:
        for lemma, keyword_counts in partial_map.items():
            target = totals[lemma]
            for keyword, multiplicity in keyword_counts.items():
                target[keyword] += multiplicity
    return bag_of_words, totals
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
from reveal_user_annotation.common.config_package import get_package_path
from reveal_user_annotation.common.datarw import get_file_row_generator
def get_topic_set(file_path):
    """
    Opens one of the topic set resource files and returns a set of topics.

    - Input: - file_path: The path pointing to the topic set resource file.

    - Output: - topic_set: A python set of strings.
    """
    # Only the first field of each row holds a topic, so the separator is irrelevant.
    return {file_row[0] for file_row in get_file_row_generator(file_path, ",")}
def get_story_set():
    """
    Returns a set of all the story-related topics found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/story_set.txt")
def get_theme_set():
    """
    Returns a set of all the thematic-related keywords found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/theme_set.txt")
def get_attribute_set():
    """
    Returns a set of all the user-related attributes (e.g. occupation) found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/attribute_set.txt")
def get_stance_set():
    """
    Returns a set of all the user-related stances (e.g. political affiliations, religious view) found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/stance_set.txt")
def get_geographical_set():
    """
    Returns a set of all the geographical attributes (e.g. uk, sweden) found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/geographical_set.txt")
def get_sport_set():
    """
    Returns a set of all the sport-related attributes (e.g. football, golf) found in the SNOW dataset.
    """
    return get_topic_set(get_package_path() + "/twitter/res/topics/sport_set.txt")
def get_reveal_set():
    """
    Returns a set of all the topics that are interesting for REVEAL use-cases.

    Reuses the dedicated per-category getters so each resource path is defined
    in exactly one place.

    NOTE(review): sport-related topics were not part of the original union and
    remain deliberately excluded here.
    """
    return (get_story_set()
            | get_theme_set()
            | get_attribute_set()
            | get_stance_set()
            | get_geographical_set())
def get_topic_keyword_dictionary():
    """
    Opens the topic-keyword map resource file and returns the corresponding python dictionary.

    - Output: - topic_keyword_dictionary: A topic to keyword-set python dictionary.
    """
    file_row_gen = get_file_row_generator(get_package_path() + "/twitter/res/topics/topic_keyword_mapping.txt",
                                          ",",
                                          "utf-8")
    # The first field of each row is the topic; the remaining fields are its keywords.
    return {file_row[0]: set(file_row[1:]) for file_row in file_row_gen}
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import requests
import xml.etree.cElementTree as etree
from io import StringIO
def get_user_list(host_name, client_name, client_pass):
    """
    Pulls the list of users in a client.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.

    Output: - user_id_list: A python list of user ids (one per <usr> node in the XML response).
    """
    # Construct request. "whr=*" selects all users of the client.
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="getusrs",
                                values="whr=*")
    # Make request.
    request_result = send_request(host_name, request)
    # Extract a python list from xml object.
    user_id_list = list()
    append_user_id = user_id_list.append
    if request_result is not None:
        user_list_xml = request_result.text
        tree = etree.parse(StringIO(user_list_xml))
        root = tree.getroot()
        # Each user id lives in a <usr> node under result/row.
        xml_rows = root.findall("./result/row/usr")
        for xml_row in xml_rows:
            append_user_id(xml_row.text)
    return user_id_list
def add_features(host_name, client_name, client_pass, feature_names):
    """
    Add a number of numerical features in the client.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.
            - feature_names: A python list of feature names.
    """
    # Every new feature starts with an initial value of zero (e.g. "a=0&b=0").
    init_feats = "&".join("%s=0" % name for name in feature_names)
    features_req = construct_request("pers",
                                     client_name,
                                     client_pass,
                                     "addftr",
                                     init_feats)
    send_request(host_name, features_req)
def delete_features(host_name, client_name, client_pass, feature_names=None):
    """
    Remove a number of numerical features in the client. If a list is not provided, remove all features.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.
            - feature_names: A python list of feature names.
    """
    if feature_names is None:
        # No explicit selection given: remove every feature defined in the client.
        feature_names = get_feature_names(host_name,
                                          client_name,
                                          client_pass)
    # One "ftr=<name>" pair per feature to be removed.
    feature_to_be_removed = "&".join("ftr=%s" % name for name in feature_names)
    features_req = construct_request("pers",
                                     client_name,
                                     client_pass,
                                     'remftr',
                                     feature_to_be_removed)
    send_request(host_name, features_req)
def get_feature_names(host_name, client_name, client_pass):
    """
    Get the names of all features in a PServer client.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.

    Output: - feature_names: A python list of feature names (text of each <ftr> node in the XML response).
    """
    # Construct request. "ftr=*" asks for the definition of every feature.
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="getftrdef",
                                values="ftr=*")
    # Send request.
    request_result = send_request(host_name,
                                  request)
    # Extract a python list from xml object.
    feature_names = list()
    append_feature_name = feature_names.append
    if request_result is not None:
        feature_names_xml = request_result.text
        tree = etree.parse(StringIO(feature_names_xml))
        root = tree.getroot()
        xml_rows = root.findall("row/ftr")
        for xml_row in xml_rows:
            append_feature_name(xml_row.text)
    return feature_names
def insert_user_data(host_name, client_name, client_pass, user_twitter_id, topic_to_score):
    """
    Inserts topic/score data for a user to a PServer client.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.
            - user_twitter_id: A Twitter user identifier.
            - topic_to_score: A python dictionary that maps from topic to score.
    """
    # Build the query string: the user id followed by one "type.<topic>" score
    # per topic, serialized with two decimal places.
    value_parts = ["usr=" + str(user_twitter_id)]
    for topic, score in topic_to_score.items():
        value_parts.append("type." + topic + "=" + ("%.2f" % score))
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="setusr",
                                values="&".join(value_parts))
    send_request(host_name, request)
def construct_request(model_type, client_name, client_pass, command, values):
    """
    Construct the request url.

    Inputs: - model_type: PServer usage mode type.
            - client_name: The PServer client name.
            - client_pass: The PServer client's password.
            - command: A PServer command.
            - values: PServer command arguments.

    Output: - base_request: The base request string.
    """
    # Layout: <model>?clnt=<name>|<pass>&com=<command>&<values>
    return "%s?clnt=%s|%s&com=%s&%s" % (model_type, client_name, client_pass,
                                        command, values)
def send_request(host_name, request):
    """
    Sends a PServer url request.

    Inputs: - host_name: A string containing the address of the machine where the PServer instance is hosted.
            - request: The url request (path + query string, appended verbatim to host_name).

    Output: - result: The requests response object, on HTTP status 200.

    Raises: - Exception: If the server answers with any status code other than 200.
            Connection-level errors from requests propagate unchanged.
    """
    url = "%s%s" % (host_name, request)
    result = requests.get(url)
    if result.status_code == 200:
        return result
    # The original raised a bare Exception with no context; keep the exception
    # type (callers may catch Exception) but include diagnostic detail.
    raise Exception("PServer request failed with status code %d: %s"
                    % (result.status_code, url))
def update_feature_value(host_name, client_name, client_pass, user_twitter_id, feature_name, feature_score):
    """
    Updates a single topic score, for a single user.
    """
    # Scores are serialized with two decimal places; feature keys carry the "ftr_" prefix.
    values = "usr={0}&ftr_{1}={2:.2f}".format(user_twitter_id,
                                              feature_name,
                                              feature_score)
    request = construct_request(model_type="pers",
                                client_name=client_name,
                                client_pass=client_pass,
                                command="setusr",
                                values=values)
    send_request(host_name, request)
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import nltk
def combine_word_list(word_list):
    """
    Combine word list into a bag-of-words.

    Input: - word_list: This is a python list of strings.

    Output: - bag_of_words: This is the corresponding multi-set or bag-of-words, in the form of a python dictionary.
    """
    bag_of_words = dict()
    for word in word_list:
        # dict.get avoids the original's explicit membership test on .keys().
        bag_of_words[word] = bag_of_words.get(word, 0) + 1
    return bag_of_words
def reduce_list_of_bags_of_words(list_of_keyword_sets):
    """
    Reduces a number of keyword sets to a bag-of-words.

    Input: - list_of_keyword_sets: This is a python list of sets of strings.

    Output: - bag_of_words: This is the corresponding multi-set or bag-of-words, in the form of a python dictionary.
    """
    bag_of_words = dict()
    for keyword_set in list_of_keyword_sets:
        for keyword in keyword_set:
            # dict.get replaces the original's per-element call to .keys().
            bag_of_words[keyword] = bag_of_words.get(keyword, 0) + 1
    return bag_of_words
def augmented_tf_idf(attribute_matrix):
    """
    Performs augmented TF-IDF normalization on a bag-of-words vector representation of data.
    Augmented TF-IDF introduced in: Manning, C. D., Raghavan, P., & Schütze, H. (2008).
    Introduction to information retrieval (Vol. 1, p. 6).
    Cambridge: Cambridge university press.

    Input: - attribute_matrix: A bag-of-words vector representation in SciPy sparse matrix format
                               (rows are documents/users, columns are terms).

    Output: - attribute_matrix: The same matrix after augmented tf-idf normalization (CSR format).

    NOTE(review): the final expression divides the term frequency by the idf
    factor rather than multiplying by it, which differs from the textbook
    augmented tf-idf formula — confirm this is intended.
    """
    number_of_documents = attribute_matrix.shape[0]
    # Defaults of 1.0 mean "no scaling" for empty rows / single-document terms.
    max_term_frequencies = np.ones(number_of_documents, dtype=np.float64)
    idf_array = np.ones(attribute_matrix.shape[1], dtype=np.float64)
    # Calculate inverse document frequency. CSC format gives cheap column access.
    attribute_matrix = attribute_matrix.tocsc()
    for j in range(attribute_matrix.shape[1]):
        # Number of stored (nonzero) entries in the column == document frequency.
        document_frequency = attribute_matrix.getcol(j).data.size
        if document_frequency > 1:
            idf_array[j] = np.log(number_of_documents/document_frequency)
    # Calculate maximum term frequencies for a user. CSR format gives cheap row access.
    attribute_matrix = attribute_matrix.tocsr()
    for i in range(attribute_matrix.shape[0]):
        max_term_frequency = attribute_matrix.getrow(i).data
        if max_term_frequency.size > 0:
            max_term_frequency = max_term_frequency.max()
            if max_term_frequency > 0.0:
                max_term_frequencies[i] = max_term_frequency
    # Do augmented tf-idf normalization on the nonzero entries only (COO exposes
    # parallel row/col/data arrays for vectorized elementwise work).
    attribute_matrix = attribute_matrix.tocoo()
    attribute_matrix.data = 0.5 + np.divide(0.5*attribute_matrix.data, np.multiply((max_term_frequencies[attribute_matrix.row]), (idf_array[attribute_matrix.col])))
    attribute_matrix = attribute_matrix.tocsr()
    return attribute_matrix
def query_list_of_words(target_word, list_of_words, edit_distance=1):
    """
    Checks whether a target word is within editing distance of any one in a set of keywords.

    Inputs: - target_word: A string containing the word we want to search in a list.
            - list_of_words: A python list of words.
            - edit_distance: For larger words, we also check for similar words based on edit_distance.

    Outputs: - new_list_of_words: This is the input list of words minus any found keywords.
             - found_list_of_words: This is the list of words that are within edit distance of the target word.
    """
    new_list_of_words = list()
    found_list_of_words = list()
    for word in list_of_words:
        # Fuzzy matching is only allowed for words longer than six characters;
        # shorter words must match exactly (tolerance 0).
        tolerance = edit_distance if len(word) > 6 else 0
        # Cheap length difference test first; only then pay for the edit-distance computation.
        is_match = (abs(len(word) - len(target_word)) <= tolerance
                    and nltk.edit_distance(word, target_word) <= tolerance)
        if is_match:
            found_list_of_words.append(word)
        else:
            new_list_of_words.append(word)
    return new_list_of_words, found_list_of_words
def simple_word_query(target_word, list_of_words, edit_distance=1):
    """
    Returns only the words that are within edit distance of the target word.

    Inputs: - target_word: A string containing the word we want to search in a list.
            - list_of_words: A python list of words.
            - edit_distance: For larger words, we also check for similar words based on edit_distance.

    Output: - found_list_of_words: The list of words within edit distance of the target word.
    """
    # The matching logic is identical to query_list_of_words; reuse it and
    # discard the list of non-matching words (removes the duplicated loop and
    # the dead "else: pass" branches of the original).
    _, found_list_of_words = query_list_of_words(target_word, list_of_words, edit_distance)
    return found_list_of_words
__author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import argparse
from reveal_user_classification.preprocess.snow.snow_2014_graph_dataset_util import process_tweet_collection,\
make_directory_tree, weakly_connected_graph, make_implicit_graphs, make_annotation
from reveal_user_annotation.mongo.store_snow_data import extract_all_snow_tweets_from_disk_generator
from reveal_user_annotation.mongo.preprocess_data import get_collection_documents_generator
from reveal_user_annotation.mongo.mongo_util import establish_mongo_connection
from reveal_user_annotation.mongo.store_snow_data import store_snow_tweets_from_disk_to_mongodb
def main():
    """Parse command-line arguments and run the SNOW 2014 graph dataset extraction."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-stf", "--snow-tweets-folder", dest="snow_tweets_folder",
                        help="This is the folder with the SNOW tweets.",
                        type=str, required=True)
    parser.add_argument("-gdf", "--graph-dataset-folder", dest="graph_dataset_folder",
                        help="This is the root directory that the graph dataset will be extracted.",
                        type=str, required=True)
    parser.add_argument("-tlf", "--twitter-lists-folder", dest="twitter_lists_folder",
                        help="This is the folder where the twitter lists for each user are stored.",
                        type=str, required=True)
    parser.add_argument("-tlkf", "--twitter-lists-keywords-folder", dest="twitter_lists_keywords_folder",
                        help="This is the folder where the extracted keywords from the lists for each user are stored.",
                        type=str, required=True)
    args = parser.parse_args()
    # Forward the parsed paths directly instead of binding intermediate locals.
    make_dataset(args.snow_tweets_folder,
                 args.graph_dataset_folder,
                 args.twitter_lists_folder,
                 args.twitter_lists_keywords_folder)
def make_dataset(snow_tweets_folder, graph_dataset_folder, twitter_lists_folder, twitter_lists_keywords_folder):
    """
    Build the SNOW 2014 graph dataset directory tree and extract the graphs.

    Inputs: - snow_tweets_folder: The folder with the SNOW tweets.
            - graph_dataset_folder: The root directory that the graph dataset will be extracted to.
            - twitter_lists_folder: The folder where the twitter lists for each user are stored.
            - twitter_lists_keywords_folder: The folder where the keywords extracted from the lists are stored.

    NOTE(review): several pipeline stages below are commented out (MongoDB tweet
    storage, full-graph extraction, annotation); only directory creation, the
    weakly-connected-graph step and the implicit-graph step are currently active,
    so the two twitter-lists arguments are presently unused.
    NOTE(review): the commented-out MongoDB URI embeds hard-coded credentials —
    these should not be committed, even in comments.
    """
    # Get a generator of the SNOW 2014 Data Challenge tweets.
    # store_snow_tweets_from_disk_to_mongodb(snow_tweets_folder)
    # Write all tweets on MongoDB.
    # mongo_client = establish_mongo_connection("mongodb://admin:123456@160.40.51.29:27017")
    # Read all tweets in ascending timestamp order.
    # tweet_generator = get_collection_documents_generator(client=mongo_client,
    #                                                      database_name="snow_tweet_storage",
    #                                                      collection_name="tweets",
    #                                                      spec=None,
    #                                                      latest_n=None,
    #                                                      sort_key="created_at")
    # Make sub-folders for the graph dataset.
    full_graph_folder, weakly_connected_graph_folder, weakly_connected_label_folder, implicit_graph_folder,\
        simple_undirected_graph_folder, combinatorial_implicit_graph_folder,\
        directed_implicit_graph_folder = make_directory_tree(graph_dataset_folder)
    # Extract the full graphs from the tweet collection. This is a quadratic complexity process.
    # tweet_generator = extract_all_snow_tweets_from_disk_generator(snow_tweets_folder)
    # process_tweet_collection(tweet_generator, full_graph_folder)
    # Extract weakly connected mention graph and corresponding retweet graph and user_lemma_matrix.
    weakly_connected_graph(full_graph_folder, weakly_connected_graph_folder)
    # Make combinatorial and directed implicit graphs for the mention and retweet graphs.
    make_implicit_graphs(weakly_connected_graph_folder,
                         simple_undirected_graph_folder,
                         combinatorial_implicit_graph_folder,
                         directed_implicit_graph_folder)
    # Make annotation for weakly connected mention graph.
    # make_annotation(twitter_lists_folder,
    #                 twitter_lists_keywords_folder,
    #                 weakly_connected_graph_folder,
    #                 weakly_connected_label_folder,
    #                 full_graph_folder)
# make_dataset(snow_tweets_folder="/home/georgerizos/Documents/LocalStorage/raw_data/SNOW/snow_tweets_folder",
# graph_dataset_folder="/home/georgerizos/Documents/LocalStorage/raw_data/SNOW",
# twitter_lists_folder="/home/georgerizos/Documents/LocalStorage/raw_data/SNOW/twitter_lists",
# twitter_lists_keywords_folder="/home/georgerizos/Documents/LocalStorage/raw_data/SNOW/twitter_lists_keywords") | /reveal-user-classification-0.2.8.tar.gz/reveal-user-classification-0.2.8/reveal_user_classification/preprocess/snow/make_snow_2014_graph_dataset.py | 0.427038 | 0.163012 | make_snow_2014_graph_dataset.py | pypi |
__author__ = "Yuan Chang"
__copyright__ = "Copyright (C) 2019-2020"
__license__ = "MIT"
__email__ = "pyslvs@gmail.com"
from typing import (
cast, get_type_hints, overload, TypeVar, Tuple, List, Sequence, Dict,
Mapping, OrderedDict, Iterator, ItemsView, Union, Type, Any,
)
from abc import ABCMeta
from dataclasses import dataclass, field, is_dataclass, asdict
from os.path import isfile, join, relpath, dirname, sep
from distutils.dir_util import copy_tree, mkpath
from shutil import rmtree
from yaml import safe_load
from json import loads
from jsonschema import validate
from flask import Flask, render_template, url_for
from .utility import is_url, valid_config, load_file, dl, rm, ROOT
# Type aliases used throughout the module.
# NOTE(review): _Opt appears unused in this module — confirm before removing.
_Opt = Mapping[str, str]
_Data = Dict[str, Any]
_YamlValue = Union[bool, int, float, str, list, dict]
# Path of the currently loaded project file; set by find_project().
_PROJECT = ""
# T also admits TypeChecker subclasses; U is restricted to plain YAML values.
T = TypeVar('T', bound=Union[_YamlValue, 'TypeChecker'])
U = TypeVar('U', bound=_YamlValue)
def load_yaml() -> _Data:
    """Load the current project file and validate it against the bundled JSON schema."""
    schema = loads(load_file(join(ROOT, 'schema.json')))
    config = valid_config(safe_load(load_file(_PROJECT)))
    validate(config, schema)
    return config
@overload
def cast_to(key: str, t: Type[List[T]], value: _YamlValue) -> List[T]:
    pass
@overload
def cast_to(key: str, t: Type[T], value: _YamlValue) -> T:
    pass
def cast_to(key, t, value):
    """Check that *value* matches the annotated type *t* of field *key*.

    Recursively converts mappings into TypeChecker dataclasses and validates
    list values element-wise; raises TypeError on any mismatch.
    """
    if hasattr(t, '__origin__') and t.__origin__ is list:
        # List[...] annotation: validate each element against the item type.
        t = t.__args__[0]
        if issubclass(t, TypeChecker) and is_dataclass(t):
            return t.as_list(value)
        else:
            return [cast_to(key, t, v) for v in value]
    elif isinstance(value, t):
        # Already the right runtime type; accept unchanged.
        return value
    elif (
        issubclass(t, TypeChecker)
        and is_dataclass(t)
        and isinstance(value, dict)
    ):
        # A plain mapping destined for a dataclass field: build the dataclass.
        return t.from_dict(value)
    raise TypeError(f"'{key}' expect type: {t}, got: {type(value)}")
def pixel(value: Union[int, str]) -> str:
    """Support pure number input of the size.

    Strings pass through untouched; bare numbers get a "pt" unit suffix.
    """
    return value if isinstance(value, str) else f"{value}pt"
class TypeChecker(metaclass=ABCMeta):
    """Base class that validates every attribute assignment against type hints."""
    # Bound type variable so class-method constructors return the concrete subclass.
    Self = TypeVar('Self', bound='TypeChecker')
    # Inputs accepted by the constructors below: raw dicts or already-built instances.
    MaybeDict = Union[_Data, Self]
    MaybeList = Union[_Data, Sequence[_Data], Self, Sequence[Self]]
    @classmethod
    def from_dict(cls: Type[Self], data: MaybeDict) -> Self:
        """Generate a data class from dict object."""
        if isinstance(data, cls):
            # Already constructed; pass through unchanged.
            return data
        if not isinstance(data, Mapping):
            raise TypeError(f"expect type: {cls}, wrong type: {type(data)}")
        if not data:
            raise TypeError(f"expect type: {cls}, the field cannot be empty")
        return cls(**data)  # type: ignore
    @classmethod
    def as_list(cls: Type[Self], data: MaybeList) -> List[Self]:
        """Generate a list of Self from dict object."""
        if isinstance(data, cls):
            return [data]
        if not isinstance(data, Sequence):
            # A single mapping: wrap so single and multiple items share one path.
            data = [cast(_Data, data)]
        return [cls.from_dict(d) for d in data]
    def __setattr__(self, key, value):
        # Route every assignment through cast_to(), enforcing the class-level
        # type hints at runtime (including dataclass field assignments).
        super(TypeChecker, self).__setattr__(key, cast_to(
            key, get_type_hints(self.__class__).get(key, None), value))
@dataclass(repr=False, eq=False)
class Size(TypeChecker):
    """The block has size attributes (a source URI plus width/height strings)."""
    src: str = ""  # source URI of the embedded resource
    width: str = ""  # size string; bare numbers are normalized to "<n>pt"
    height: str = ""  # size string; bare numbers are normalized to "<n>pt"
    def __post_init__(self):
        # Normalize numeric sizes into unit-suffixed strings.
        self.width = pixel(self.width)
        self.height = pixel(self.height)
@dataclass(repr=False, eq=False)
class Img(Size):
    """Image class: a sized source plus an optional caption label."""
    label: str = ""  # caption text shown with the image
@dataclass(repr=False, eq=False)
class Footer(Img):
    """Footer class: a labelled image with an optional hyperlink."""
    link: str = ""  # URL the footer points to
@dataclass(repr=False, eq=False)
class Fragment(TypeChecker):
    """Fragment option: one source string per embeddable content kind.

    NOTE(review): how these strings are consumed is not visible in this module —
    presumably they control reveal.js fragment behavior per content type; confirm
    against the template.
    """
    img: str = ""
    math: str = ""
    youtube: str = ""
    embed: str = ""
@dataclass(repr=False, eq=False)
class Slide(TypeChecker):
    """Slide class: one slide's content fields."""
    id: str = ""  # optional anchor id
    title: str = ""
    doc: str = ""  # inline (markdown) body text
    include: str = ""  # path of a text file to include
    math: str = ""  # math content; its presence auto-enables the math plugin (see Config)
    img: List[Img] = field(default_factory=list)
    youtube: Size = field(default_factory=Size)
    embed: Size = field(default_factory=Size)
    fragment: Fragment = field(default_factory=Fragment)
    def __post_init__(self):
        # Embedded frames get a default size when none is specified.
        if not self.embed.width:
            self.embed.width = '1000px'
        if not self.embed.height:
            self.embed.height = '450px'
    @property
    def is_article(self) -> bool:
        """Return True if the slide has a title and at least one content field."""
        return bool(self.title) and any((self.doc, self.include, self.math,
                                         self.img, self.youtube.src,
                                         self.embed.src))
@dataclass(repr=False, eq=False)
class HSlide(Slide):
    """Root slide class: a top-level slide that may carry nested sub-slides."""
    sub: List[Slide] = field(default_factory=list)  # nested sub-slides under this slide
@dataclass(repr=False, eq=False)
class Plugin(TypeChecker):
    """Plugin enable / disable options."""
    zoom: bool = False
    notes: bool = True
    search: bool = False
    markdown: bool = field(default=True, init=False)  # always on; not user-configurable
    highlight: bool = True
    math: bool = False  # auto-enabled by Config.__post_init__ when any slide has math
    def as_dict(self) -> ItemsView[str, bool]:
        """Return self as mapping."""
        # typing.OrderedDict[str, bool] is used here as a callable dict factory;
        # it builds a collections.OrderedDict, preserving field declaration order.
        return asdict(self, dict_factory=OrderedDict[str, bool]).items()
@dataclass(repr=False, eq=False)
class Config(TypeChecker):
    """Config overview: the root object of a project file."""
    lang: str = "en"
    title: str = ""  # falls back to the first slide's title (see __post_init__)
    description: str = ""
    author: str = ""
    cdn: str = ""  # base URL used for assets that are missing locally
    theme: str = "serif"
    code_theme: str = "zenburn"
    icon: str = "img/icon.png"
    outline: int = 2  # outline page level: 0 disables, 1 or 2 enable (validated below)
    default_style: bool = True
    extra_style: str = ""  # path of an extra stylesheet (removed from packed output)
    watermark: str = ""
    watermark_size: str = ""  # bare numbers normalized to "<n>pt"
    nav_mode: str = "default"
    show_arrows: bool = True
    center: bool = True
    loop: bool = False
    history: bool = True  # when true, outline entries link to "#/i/j" anchors
    slide_num: str = "c/t"
    progress: bool = True
    mouse_wheel: bool = False
    preview_links: bool = False
    transition: str = "slide"
    footer: Footer = field(default_factory=Footer)
    nav: List[HSlide] = field(default_factory=list)
    plugin: Plugin = field(default_factory=Plugin)
    def __post_init__(self):
        """Check arguments after assigned."""
        # Fall back to the first slide's title when no document title was given.
        if not self.title and self.nav:
            self.title = self.nav[0].title
        self.cdn = self.cdn.rstrip('/')
        self.watermark_size = pixel(self.watermark_size)
        if self.outline not in {0, 1, 2}:
            raise ValueError(f"outline level should be 0, 1 or 2, "
                             f"not {self.outline}")
        # Make an outline page
        doc = []
        for i, j, n in self.slides:
            # Any slide with math content switches the math plugin on.
            if not self.plugin.math and n.math:
                self.plugin.math = True
            # Skip the first top-level slide, untitled slides, and everything
            # when the outline is disabled.
            if i < 1 or not n.title or self.outline < 1:
                continue
            # Sub-slides (j > 0) are indented; with history enabled each entry
            # becomes a link to its "#/i/j" anchor.
            doc.append(" " * (2 if j > 0 else 0) + (
                f"+ [{n.title}](#/{f'{i}/{j}' if j > 0 else f'{i}'})"
                if self.history else
                f"+ {n.title}"
            ))
        if doc:
            # Attach the generated outline as a sub-slide of the first slide.
            self.nav[0].sub.append(Slide(title="Outline", doc='\n'.join(doc)))
    @property
    def slides(self) -> Iterator[Tuple[int, int, Slide]]:
        """Traverse all slides as (top-level index, sub index, slide); sub index 0 is the top-level slide itself."""
        for i, n in enumerate(self.nav):
            yield i, 0, n
            for j, sn in enumerate(n.sub):
                yield i, j + 1, sn
def render_slides(config: Config, *, rel_url: bool = False) -> str:
    """Render the slides template to an HTML string.

    With rel_url=True (used when packing a static site), asset URLs are emitted
    as paths relative to ROOT instead of Flask routes.
    """
    if rel_url:
        def url_func(endpoint: str, *, filename: str) -> str:
            """Generate relative internal path."""
            path = join(ROOT, endpoint, filename).replace('/', sep)
            return relpath(path, ROOT).replace(sep, '/')
    else:
        url_func = url_for
    project_dir = dirname(_PROJECT)
    def uri(path: str) -> str:
        """Handle the relative path and URIs."""
        if not path:
            return ""
        if is_url(path):
            # Absolute URLs pass through untouched.
            return path
        if (
            not rel_url
            and config.cdn
            # Prefer to load local files
            # Check files when reloading
            and not isfile(join(project_dir, 'static', path))
        ):
            # Asset missing locally: serve it from the configured CDN.
            return f"{config.cdn}/{path}"
        return url_func('static', filename=path)
    def include(path: str) -> str:
        """Include text file."""
        return load_file(join(project_dir, uri(path).strip('/')))
    return render_template("slides.html", config=config, url_for=url_func,
                           uri=uri, include=include)
def find_project(flask_app: Flask, pwd: str) -> str:
    """Get project name from the current path.

    Returns the project file path (and records it in the module-level
    _PROJECT plus the app's static folder), or "" when no project exists.
    """
    global _PROJECT
    # Accept either of the two supported project file names.
    for candidate in ("reveal.yaml", "reveal.yml"):
        project = join(pwd, candidate)
        if isfile(project):
            flask_app.config['STATIC_FOLDER'] = join(pwd, 'static')
            _PROJECT = project
            return _PROJECT
    return ""
def pack(root: str, build_path: str, app: Flask) -> None:
    """Pack into a static project.

    Loads and validates the project file, then copies/renders everything into
    build_path. Runs inside an application context because rendering uses
    Flask's template machinery.
    """
    with app.app_context():
        copy_project(Config(**load_yaml()), root, build_path)
def copy_project(config: Config, root: str, build_path: str) -> None:
    """Copy project assets into build_path and render a static index.html.

    Steps: copy the static tree, fetch CDN-only assets locally, render the
    slides with relative URLs, then strip files that are inlined or unused.
    """
    mkpath(build_path)
    copy_tree(join(root, 'static'), join(build_path, 'static'))
    # Download from CDN
    def cdn(src: str) -> None:
        """Download from source path (relative paths only; URLs are left remote)."""
        if src and not is_url(src):
            dl(f"{config.cdn}/{src}", join(build_path, 'static', src))
    cdn(config.icon)
    cdn(config.watermark)
    for _, _, n in config.slides:
        for img in n.img:
            cdn(img.src)
        cdn(n.embed.src)
    # Render index.html with relative asset URLs (no Flask routes in the output).
    with open(join(build_path, "index.html"), 'w+', encoding='utf-8') as f:
        f.write(render_slides(config, rel_url=True))
    # Remove include files: their content is already inlined into index.html.
    rm(join(build_path, 'static', config.extra_style))
    for _, _, n in config.slides:
        rm(join(build_path, 'static', n.include))
    # Remove unused js module
    rmtree(join(build_path, 'static', 'ace'), ignore_errors=True)
    # Drop the directories of plugins that are disabled in the config.
    for name, enabled in config.plugin.as_dict():
        if not enabled:
            rmtree(join(build_path, 'static', 'plugin', name))
<p align="center">
<img src="docs/images/logo.png" alt="Logo" width="600"><br/>
<b>Retrospective Extraction of Visual and Logical Insights for Ontology-based interpretation of Neural Networks</b>
</p>
<b>RevelioNN</b> is an open-source library of post-hoc algorithms for explaining predictions of deep convolutional
neural networks of binary classification using ontologies. The algorithms are based on the construction of mapping
networks linking the internal representations of a convolutional neural network with ontology concepts.
The inspiration for the development of this library was a paper in which this approach to the interpretation of
neural networks was proposed:
* M. de Sousa Ribeiro and J. Leite, “Aligning Artificial Neural Networks and Ontologies towards Explainable AI,” in 35th AAAI Conference on Artificial Intelligence, AAAI 2021, May 2021, vol. 6A, no. 6, pp. 4932–4940. doi: [10.1609/aaai.v35i6.16626](https://doi.org/10.1609/aaai.v35i6.16626).
## How the library works
The convolutional neural network, whose predictions need to be explained, is called “main” network. When an image is
passed through it, the output is a probability of some target class, which at the same time is a concept of ontology.
The activations that the “main” network produces while an image passes through it serve as the input data for the mapping networks.
The outputs of the mapping networks are the probabilities of each of the concepts relevant to the target class, that is,
the concepts that are involved in its definition. Knowing the probabilities of each of the concepts, it becomes possible
to form logical and visual explanations.
<p align="center">
<img src="docs/images/scheme.jpg" alt="General scheme" width="600"/>
</p>
### Logical Explanations
By extracting relevant concepts, it is possible to form logical explanations about the belonging of the sample to the
target concept, accompanied by a set of axioms of ontology.
The input image presented in the scheme was taken from the [SCDB dataset](https://github.com/adriano-lucieri/SCDB), with which the “main” network and mapping networks were trained.
This image belongs to class <i>C1</i>. The image is classified as <i>C1</i> if the concepts <i>Hexagon</i> ⊓ <i>Star</i> or
<i>Ellipse</i> ⊓ <i>Star</i> or <i>Triangle</i> ⊓ <i>Ellipse</i> ⊓ <i>Starmarker</i> are present. An example of a logical
explanation by ontological inference for this sample is given below.
```console
The image is classified as ['C1'].
The following concepts were extracted from the image:
['HexStar', 'EllStar', 'NotTEStarmarker', 'Hexagon', 'Star', 'Ellipse', 'NotTriangle', 'NotStarmarker']
with the following probabilities:
[0.99938893, 0.99976605, 0.9937676684930921, 0.99947304, 0.9999995, 0.99962604, 0.9861229043453932, 0.9810010809451342]
Justification for '__input__ Type C1': (Degree of Belief: 0.99963)
__input__ Type has some Star ("0.9999995")
__input__ Type has some Ellipse ("0.99962604")
(has some Ellipse) and (has some Star) SubClassOf EllStar
C1 EquivalentTo EllStar or HexStar or TEStarmarker
```
Each of the extracted concepts corresponds to a certain probability, which is then used to calculate the degree of
confidence of the justifications. The list of possible justifications is ranked by this degree of confidence.
If any concept has not been extracted, then we can say that the opposite concept has been extracted, the name of which
is automatically formed by adding the prefix 'Not'.
The above example shows one of the explanations from the list of possible explanations. It can be interpreted as
follows. The concepts of <i>Star</i> and <i>Ellipse</i> were extracted from the image. Therefore, based on the axiom of ontology that
the conjunction of the concepts <i>Star</i> and <i>Ellipse</i> is a subclass of <i>EllStar</i>, we can conclude that the image also
represents <i>EllStar</i>. And according to another axiom, the <i>C1</i> target concept is equivalent to <i>EllStar</i>. Thus, the
prediction of the neural network was confirmed by ontological reasoning.
### Visual Explanations
Visual explanations mean highlighting positively extracted concepts in the image. Currently, visual explanations are
formed using the occlusion method. Its essence lies in the fact that the input image is systematically overlapped by a
square of a given size with a given step. At each step, the overlapped image is run through the “main” network, and its
activations are run through the mapping network. Thus, by obtaining output probabilities at each step, a saliency map can
be formed. An example of visual explanations for our image is presented below.
<p align="center">
<img src="docs/images/visual_example.png" alt="Example of a visual explanation" width="600"/>
</p>
## RevelioNN Features
### Mapping Networks
The library implements two types of mapping networks whose parameters can be flexibly customized by the user.
| Type of mapping network | Features |
|-------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| Single mapping network | It is a full-connected neural network, the number of input neurons of which is determined by the number of activations of neurons of the specified convolutional network layers. It has a ReLU activation function in its hidden layers and a sigmoid in its output.<br/>It is reasonable to use it to extract only one concept from one or more given convolutional network layers.<br/>The user can vary the number of layers and the number of neurons in each layer of this mapping network. |
| Simultaneous mapping network | Due to the features of its architecture, it allows you to extract many concepts simultaneously, receiving activations of all specified layers of the convolutional network at once.<br/>It takes into account the features of the 2D image structure and is less prone to overfitting compared to single mapping networks. <br/>It also shows good results in semi-supervised learning using semantic loss, which strengthens the relationship between concepts. |
### Extraction Algorithms
| Extraction algorithm | Type of mapping network | What it does |
|-------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------|
| Exhaustive search | Single mapping network | Trains and evaluates mapping networks based on the activations of each of the specified layers of the convolutional network |
| Heuristic search | Single mapping network | Due to the heuristic reduction of the set of specified layers, mapping networks are not trained for every combination of layer-concept |
| Simultaneous extraction | Simultaneous mapping network | Trains a mapping network that can simultaneously extract a set of relevant concepts from the entire set of layers of specified types |
## How to Use
RevelioNN can interpret convolutional binary classification networks that have already been trained without using this
library. It is worth noting that the network class must inherit from the nn.Module class, that is, your network
must be implemented using PyTorch. The specified model must be converted to RevelioNN format.
To use the API, follow these steps:
1. In order to convert your model to RevelioNN format, your network class must be described in a separate file in which
the following variables must also be declared:
* variable storing the number of channels of the image fed to the network;
* variable storing the size of the image fed to the network;
* the ``torchvision.transforms`` module object, which represents a transformation over images.
Examples of network descriptions are given in the main_net_classes directory.
2. Next, you need to initialize your convolutional neural network model.
```
from main_net_classes.resnet18_scdb import ResNet18, NUM_CHANNELS, IMG_SIDE_SIZE, transformation
main_net = ResNet18()
main_net.load_state_dict(torch.load('SCDB_ResNet18_C1.pt'))
```
3. Import ``convert_to_rvl_format()`` function:
```
from revelionn.utils.model import convert_to_rvl_format
```
Call this function by passing the data of the previously declared network model as parameters:
```
convert_to_rvl_format(main_net, 'SCDB_ResNet18_C1', 'C1', 'resnet18_scdb', 'ResNet18', 'transformation', IMG_SIDE_SIZE, NUM_CHANNELS)
```
4. After the main network has been successfully converted to the RevelioNN format, mapping networks can be trained.
Here is an example for training a simultaneous extraction network. Here activations were extracted from all batch normalization layers (the value is 'bn').
```
from revelionn.mapping_trainer import MappingTrainer
device = torch.device('cuda')
trainer = MappingTrainer('SCDB_ResNet18_C1.rvl', 'main_net_classes', ['bn'], 20, 100,
os.path.join(root_path, 'trained_models', 'mapping_models'),
device, os.path.join(root_path, 'data', 'scdb_custom', 'images'),
'C1_mapping_train.csv', 'C1_mapping_val.csv', 'name', 100, 6, None)
trainer.train_simultaneous_model(['HexStar', 'EllStar', 'TEStarmarker', 'Hexagon',
'Star', 'Ellipse', 'Triangle', 'Starmarker'],
20, [160, 80, 40, 20], [20, 1])
```
5. Once the mapping network is trained, you can form logical and visual explanations. To do this, you must first load
the trained network model via ``load_mapping_model()``.
```
from revelionn.utils.model import load_mapping_model
main_module, mapping_module, activation_extractor, transformation, img_size = load_mapping_model(
os.path.join(root_path, 'trained_models', 'mapping_models', 'C1_20_[160, 80, 40, 20]_[20, 1].rvl'),
cur_path, 'main_net_classes', device)
```
6. To form logical explanations using an ontology, one must first extract the concepts relevant to the target concept
from the image, and then transfer the extracted concepts and their probabilities to the reasoning module along with the
ontology. This can be done as follows:
```
from revelionn.utils.explanation import extract_concepts_from_img, explain_target_concept
from ontologies.scdb_ontology import concepts_map
from PIL import Image
image_path = os.path.join(root_path, 'data', 'scdb_custom', 'images', '001236.png')
image = Image.open(image_path)
main_concepts, extracted_concepts, mapping_probabilities = extract_concepts_from_img(main_module,
mapping_module,
image,
transformation)
print(f'\nThe image is classified as {main_concepts}.')
print('\nThe following concepts were extracted from the image:')
print(extracted_concepts)
print('with the following probabilities:')
print(f'{mapping_probabilities}\n')
justifications = explain_target_concept(extracted_concepts, mapping_probabilities, concepts_map, 'C1',
os.path.join(root_path, 'ontologies', 'SCDB.owl'),
os.path.join(root_path, 'temp'))
print(justifications)
```
7. Visual explanations can be formed as follows:
```
import matplotlib.pyplot as plt
from revelionn.occlusion import perform_occlusion
perform_occlusion(main_module, mapping_module, activation_extractor, transformation, img_size,
image_path, window_size=20, stride=5, threads=0)
plt.show()
```
The execution of the listed steps is shown in basic_example.ipynb.
RevelioNN also supports a command line-based interface, i.e. interaction through scripts. A detailed description of how to use each of the scripts can be found in the documentation.
## Installation
The required Python version is 3.9.
This requirement is due to the fact that the ``xaitk-saliency`` package used to generate visual explanations in RevelioNN requires a Python version lower than 3.10.
The simplest way to install RevelioNN is using ``pip``:
```bash
pip install revelionn
pip install git+https://github.com/lucadiliello/semantic-loss-pytorch.git
```
You can view a list of required dependencies in the requirements.txt file. You can also install them as follows:
```bash
pip install -r requirements.txt
```
It is also worth noting that [Java SE 8](https://www.java.com/en/download/manual.jsp) must be installed to form logical explanations.
## Project Structure
The repository includes the following directories:
* Package `main_net_classes` contains various convolutional neural network architectures that can serve as examples for initializing your network in RevelioNN;
* Package `ontologies` contains examples of ontology files in OWL format, as well as examples of the dictionary of relations of dataset attributes to ontology concepts and examples of the class representing the ontology as a graph;
* Package `examples` includes notebooks that contain practical examples of RevelioNN use;
* Package `trained_models` is designed to save models of main and mapping networks, as well as their training logs;
* All unit and integration tests can be observed in the `tests` directory;
* The sources of the documentation are in the `docs` directory.
## Documentation
A detailed RevelioNN description is available in [Read the Docs](https://revelionn.readthedocs.io/en/latest/).
## Tests
To run tests, you can use:
```bash
pytest tests
```
## Publications
The library was used in the following publications:
* Agafonov A., Ponomarev A. An Experiment on Localization of Ontology Concepts in Deep Convolutional Neural Networks // In the *11th International Symposium on Information and Communication Technology (SoICT 2022)*, 82–87. DOI: [10.1145/3568562.3568602](http://doi.org/10.1145/3568562.3568602)
* Ponomarev A., Agafonov A. Ontology Concept Extraction Algorithm for Deep Neural Networks // *Proceedings of the 32nd Conference of Open Innovations Association FRUCT*, 221-226. DOI: [10.23919/FRUCT56874.2022.9953838](http://doi.org/10.23919/FRUCT56874.2022.9953838)
* Agafonov A., Ponomarev A. Localization of Ontology Concepts in Deep Convolutional Neural Networks // *2022 IEEE International Multi-Conference on Engineering, Computer and Information Sciences (SIBIRCON)*, 160-165. DOI: [10.1109/SIBIRCON56155.2022.10016932](http://doi.org/10.1109/SIBIRCON56155.2022.10016932)
## Funding
The RevelioNN library was developed in the scope of the project 22-11-00214, funded by the Russian Science Foundation (RSF).
<img src="docs/images/RSF_2019_logo.png" width="250" />
## Acknowledgements
We thank the developers of [xaitk-saliency](https://github.com/XAITK/xaitk-saliency), [semantic-loss-pytorch](https://github.com/lucadiliello/semantic-loss-pytorch),
[nxontology](https://github.com/related-sciences/nxontology) and [BUNDLE](https://ml.unife.it/bundle/), thanks to whom the development of RevelioNN became possible!
Special thanks to the creators of the [XTRAINS dataset](https://bitbucket.org/xtrains/dataset/src/master/) for providing the ontology and for inspiring the development of this library!
| /revelionn-1.0.1.tar.gz/revelionn-1.0.1/README.md | 0.925297 | 0.983327 | README.md | pypi |
import base64
import binascii
import json
import secrets
import string
import struct
from functools import reduce
from operator import concat
from typing import Any, Tuple, Union
from urllib.parse import parse_qsl, urlencode as urlencode_
from fulldict import FullDict
def to_bytes(data: Any, encoding: str = "utf-8") -> bytes:
    """
    Returns the bytes representation of the provided data.

    ``None`` and ``bytes`` pass through unchanged; strings are encoded,
    numbers are stringified first, and anything else is handed to the
    ``bytes`` constructor.

    :param data: Data to be transformed.
    :type data: Any
    :param encoding: Encoding of the bytes encoding, defaults to "utf-8".
    :type encoding: str, optional
    :return: Bytes representation of the provided data.
    :rtype: bytes
    """
    if data is None or isinstance(data, bytes):
        return data
    if isinstance(data, str):
        return data.encode(encoding)
    if isinstance(data, (int, float)):
        return str(data).encode(encoding)
    return bytes(data, encoding=encoding)
def to_string(data: Any, encoding: str = "utf-8") -> str:
    """
    Returns the string representation of the provided data.

    ``None`` and ``str`` pass through unchanged. Bytes are decoded with
    *encoding*; bytes that cannot be decoded fall back to a URL-safe
    Base64 representation.

    :param data: Data to be transformed.
    :type data: Any
    :param encoding: Encoding of the string encoding, defaults to "utf-8".
    :type encoding: str, optional
    :return: String representation of the provided data.
    :rtype: str
    """
    if data is None or isinstance(data, str):
        return data
    if isinstance(data, bytes):
        try:
            return data.decode(encoding)
        except (TypeError, UnicodeDecodeError):
            # Undecodable payloads are still representable via Base64.
            return base64url_encode(data).decode(encoding)
    return str(data, encoding=encoding)
def base64url_decode(data: bytes) -> bytes:
    """
    Decodes a URL Safe Base64 encoded bytes string into its original contents.

    :param data: Data to be decoded.
    :type data: bytes
    :return: Original contents of the provided encoded data.
    :rtype: bytes
    """
    # Restore stripped padding: a Base64 string's length must be a multiple
    # of 4, so (-len) % 4 is the number of '=' chars that were removed.
    # (The previous `len(data) % 4` appended 3 pad chars when only 1 was
    # needed, so inputs of length ≡ 3 (mod 4) raised binascii.Error.)
    data += b"=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(data)
def base64url_encode(data: Any) -> bytes:
    """
    Returns a URL Safe Base64 encoding representation of the provided data.

    The trailing '=' padding is stripped, per the common JOSE convention.

    :param data: Data to be encoded.
    :type data: Any
    :return: URL Safe Base64 encoded representation of the provided data.
    :rtype: bytes
    """
    encoded = base64.urlsafe_b64encode(to_bytes(data))
    return encoded.rstrip(b"=")
def b64_to_int(data: Union[bytes, str]) -> int:
    """
    Decodes a URL Safe Base64 representation of an integer.

    :param data: Data to be decoded.
    :type data: Union[bytes, str]
    :raises TypeError: The provided data is not a valid URL Safe Base64 string.
    :return: Decoded Integer.
    :rtype: int
    """
    if data is None:
        return data
    if not isinstance(data, (bytes, str)):
        raise TypeError('The argument "data" MUST be a bytes or str object.')
    decoded = base64url_decode(to_bytes(data, "ascii"))
    # int.from_bytes replaces the former struct-unpack/hex-join round-trip;
    # it is the exact inverse of int_to_b64 below and, unlike int('', 16),
    # returns 0 for an empty payload instead of raising ValueError.
    return int.from_bytes(decoded, "big")
def int_to_b64(data: int) -> bytes:
    """
    Encodes an integer into a URL Safe Base64 bytes string.

    :param data: Integer to be encoded.
    :type data: int
    :raises ValueError: The data is not a non-negative integer.
    :return: URL Safe Base64 encoded version of the integer.
    :rtype: bytes
    """
    if not isinstance(data, int) or data < 0:
        raise ValueError("Must be a natural number.")
    # Minimal big-endian byte representation (0 encodes as b"").
    num_bytes = (data.bit_length() + 7) // 8
    return base64url_encode(data.to_bytes(num_bytes, "big", signed=False))
def json_dumps(data: dict) -> str:
    """
    Serializes a dictionary into a formatted JSON string.

    Non-ASCII characters are emitted verbatim rather than escaped.

    :param data: Dictionary to be formatted.
    :type data: dict
    :return: JSON formatted string of the data.
    :rtype: str
    """
    return json.dumps(data, ensure_ascii=False)
def json_loads(data: Union[bytes, str]) -> dict:
    """
    Loads a JSON string into a dictionary.

    :param data: JSON string to be loaded.
    :type data: Union[bytes, str]
    :raises TypeError: The data is not a valid string.
    :return: Dictionary of the loaded JSON string.
    :rtype: dict
    """
    if isinstance(data, (bytes, str)):
        return json.loads(to_string(data))
    raise TypeError("The data MUST be either a string or a bytes object.")
def secret_token(size: int = 32) -> str:
    """
    Generates a cryptographically secure, urlsafe random token based on the size.

    :param size: Size of the token, defaults to 32.
    :type size: int, optional
    :return: Token generated.
    :rtype: str
    """
    alphabet = f"{string.ascii_letters}{string.digits}-_"
    # str.join is linear and, unlike reduce(concat, ...), does not raise
    # TypeError on an empty iterable when size == 0.
    return "".join(secrets.choice(alphabet) for _ in range(size))
def urldecode(query_string: str) -> dict:
    """
    Decodes a x-www-form-urlencoded string into a dictionary.

    :param query_string: Query string to be decoded.
    :type query_string: str
    :return: Dictionary representation of the query string.
    :rtype: dict
    """
    pairs = parse_qsl(query_string)
    return {name: value for name, value in pairs}
def urlencode(url: str, **params) -> str:
    """
    Encodes keyword arguments into a x-www-form-urlencoded
    string and concatenates it into the provided url.

    :param url: Base URL.
    :type url: str
    :return: Encoded URL containing the parameters of the dictionary as a query string.
    :rtype: str
    """
    query_string = urlencode_(FullDict(params))
    return url + "?" + query_string
def get_basic_authorization(headers: dict) -> Tuple[str, str]:
    """
    Extracts the authentication credentials from a Basic authentication scheme.

    Returns ``(None, None)`` on any malformed or non-Basic header.

    :param headers: Dictionary of the headers.
    :type headers: dict
    :return: Authentication credentials.
    :rtype: Tuple[str, str]
    """
    header: str = headers.get("authorization") or headers.get("Authorization")
    if not header or not isinstance(header, str) or " " not in header:
        return None, None
    scheme, encoded = header.split(None, 1)
    if scheme.lower() != "basic":
        return None, None
    try:
        decoded = to_string(base64.b64decode(encoded))
    except (binascii.Error, TypeError):
        return None, None
    if ":" not in decoded:
        return None, None
    try:
        username, password = decoded.split(":", 1)
    except ValueError:
        return None, None
    return username, password
def get_bearer_authorization(headers: dict) -> str:
    """
    Extracts a Bearer token from a Bearer authorization scheme.

    Returns ``None`` on any malformed or non-Bearer header.

    :param headers: Dictionary of the headers.
    :type headers: dict
    :return: Bearer token.
    :rtype: str
    """
    header: str = headers.get("authorization") or headers.get("Authorization")
    if not header or not isinstance(header, str) or " " not in header:
        return None
    scheme, token = header.split(None, 1)
    return token if scheme.lower() == "bearer" else None
from copy import copy
from typing import Tuple, List, Dict, Union
from revenue_maximization_ranking.cascade\
.fixed_attention import optimal_rankings
from revenue_maximization_ranking._types import DistributionLike
__all__ = ["best_x_full_capacity", "best_x"]
def best_x(products: Dict, g: DistributionLike, capacity: int,
           offset: int = 0) -> Tuple[int, List]:
    """Find the fixed attention span "x" maximizing the revenue lower bound.

    Every candidate span x in 1..min(len(products), capacity) has an
    optimal ranking whose expected revenue, weighted by the probability
    that the customer's attention reaches at least x items, is a lower
    bound on the true expected revenue. This function returns the span
    with the largest such bound.

    Parameters
    ----------
    products: dict
        Set of products; keys are product ids, values are dicts with
        the revenue and probability of each product.
    g: DistributionLike
        Distribution of attention spans.
    capacity: int
        Maximum number of items that the retailer can display.
    offset: optional, int, default 0
        An offset to be used when calling the distribution g.

    Returns
    -------
    tuple[int, list]
        The best x and its optimal ranking (0 and [] when no span
        yields a positive bound).
    """
    rankings, revenues = optimal_rankings(products=products,
                                          capacity=capacity)
    chosen_x = 0
    best_bound = 0.0
    for span, assortment_revenue in revenues.items():
        # P(attention >= span), shifted by the items already placed.
        attention_mass = g.sf(span + offset) + g.pmf(span + offset)
        bound = assortment_revenue * attention_mass
        if bound > best_bound:
            best_bound = bound
            chosen_x = span
    if chosen_x == 0:
        # Possible depending on the distribution g.
        return 0, []
    return chosen_x, rankings[chosen_x]
def best_x_full_capacity(products: Dict, g: DistributionLike, capacity: int,
                         show_xs: bool = True) -> Union[List,
                                                        Tuple[List, List]]:
    """Iterate the best-x strategy until the display capacity is filled.

    The "best" x can be much smaller than the capacity M, so this
    adaptation repeats the choice: pick the best x and its ranking for
    the next positions, remove those products from the pool, and solve
    again with the remaining capacity and an offset equal to the number
    of items already placed, until no span improves the bound or the
    capacity is exhausted.

    Parameters
    ----------
    products: dict
        Set of products; keys are product ids, values are dicts with
        the revenue and probability of each product.
    g: DistributionLike
        Distribution of attention spans.
    capacity: int
        Maximum number of items that the retailer can display.
    show_xs: bool, optional, default: True
        Should the function also return the list of chosen x values?

    Returns
    -------
    full_ranking: list
        Ranking of products produced by the iterative best-x strategy.
    best_xs: list, optional
        The x values chosen at each iteration (only when show_xs).
    """
    total = min(len(products), capacity)
    remaining = copy(products)
    ranked = []
    chosen_xs = []
    placed = 0
    while placed < total:
        x, ranking = best_x(products=remaining, g=g,
                            capacity=total - placed, offset=placed)
        if x == 0:
            break
        taken = [name for name, _ in ranking]
        remaining = {pid: info for pid, info in remaining.items()
                     if pid not in taken}
        placed += x
        ranked += ranking
        chosen_xs.append(x)
    if show_xs:
        return ranked, chosen_xs
    return ranked
from copy import copy
from typing import Dict, Tuple
from collections import defaultdict
__all__ = ["optimal_rankings"]
def key(product_: Tuple[str, Dict]) -> Tuple[float, float]:
    """Lemma 1 sorting key for the fixed attention problem.

    An optimal ranking for the fixed attention problem is sorted
    by (revenue, probability), both descending.

    Parameters
    ----------
    product_: tuple[str, dict]
        A (product_id, info) tuple where info holds the product's
        revenue if purchased and its purchase probability.

    Returns
    -------
    tuple:
        (revenue, probability) of the product.
    """
    _, info = product_
    return info["revenue"], info["probability"]
def optimal_rankings(products: Dict, capacity: int) -> Tuple[Dict, Dict]:
    """Optimal rankings for every fixed attention span up to capacity.

    Solves, by dynamic programming over the revenue-sorted product list,
    the fixed attention span problem for each span 1..min(len(products),
    capacity). Inspired by "Algorithm 1" of
    https://arxiv.org/abs/2012.03800, but keeps all H[0, k] entries so
    the separate "AssortOpt" step is unnecessary.

    Parameters
    ----------
    products: dict
        Keys are product ids; values are dicts with the revenue and
        probability of each product.
    capacity: int
        Maximum capacity of items to be displayed by the retailer.

    Returns
    -------
    rankings, revenues: Tuple[dict, dict]
        Optimal ranking and its expected revenue for every span,
        keyed by the span value.
    """
    value = defaultdict(lambda: 0.0)     # value[j, k]: best revenue using products j.. with k slots
    chosen = defaultdict(lambda: [])     # chosen[j, k]: assortment achieving value[j, k]
    limit = min(len(products), capacity)
    for span in range(1, limit + 1):
        ordered = sorted(products.items(), key=key, reverse=True)
        # Backward pass: decide for each product whether to take a slot.
        for j, product in reversed(list(enumerate(ordered))):
            skip = value[j + 1, span]
            take = (value[j + 1, span - 1]
                    + product[1]["probability"] * (product[1]["revenue"]
                                                   - value[j + 1, span - 1]))
            if take >= skip:
                value[j, span] = take
                picks = copy(chosen[j + 1, span - 1])
                picks.append(product)
                chosen[j, span] = picks
            else:
                value[j, span] = skip
                chosen[j, span] = chosen[j + 1, span]
    rankings = {k: sorted(chosen[0, k], key=key, reverse=True)
                for k in range(1, limit + 1)}
    revenues = {k: value[0, k] for k in range(1, limit + 1)}
    return rankings, revenues
import pandas as pd
from typing import Dict, List, Union, Tuple
from revenue_maximization_ranking.cascade.best_x import best_x_full_capacity
from revenue_maximization_ranking.cascade\
.revenue import expected_revenue as exp_rev
from revenue_maximization_ranking._types import DistributionLike
__all__ = ["full_best_x", "expected_revenue"]
def load_dataframe(df: pd.DataFrame, revenue_col: str,
                   probability_col: str) -> Dict:
    """Load products' data from a dataframe into a plain dictionary.

    Parameters
    ----------
    df: pandas.DataFrame
        The dataframe with the products' data.
    revenue_col: str
        Name of the column with the revenue for each product.
    probability_col: str
        Name of the column with the conditional probabilities of
        purchasing the product once it is seen by the user.

    Returns
    -------
    products: dict
        Keys are the dataframe index; values are dicts with the
        "revenue" and "probability" of each product, as required by the
        other functions in revenue_maximization_ranking.cascade.
    """
    renamed = df[[revenue_col, probability_col]].copy().rename(
        columns={revenue_col: "revenue", probability_col: "probability"})
    return renamed.transpose().to_dict()
def ranking_as_column(ranking: List) -> pd.Series:
    """Convert an ordered ranking list into a pandas.Series of positions.

    Parameters
    ----------
    ranking: List
        List of (product_id, info) tuples, best product first; info is a
        dict with the revenue and probability of the product.

    Returns
    -------
    rank_column: pandas.Series
        Series indexed by product_id whose values are the 1-based
        positions (1, 2, 3, ...) of each product.
    """
    product_ids = [product_id for product_id, _ in ranking]
    positions = list(range(1, len(product_ids) + 1))
    return pd.Series(positions, index=product_ids)
def full_best_x(df: pd.DataFrame, revenue_col: str, probability_col: str,
                g: DistributionLike, capacity: int = 0,
                show_xs: bool = False) -> Union[pd.Series,
                                                Tuple[pd.Series, List]]:
    """Run the iterative best-x ranking on a dataframe of products.

    Parameters
    ----------
    df: pandas.DataFrame
        Dataframe storing the products' data.
    revenue_col: str
        Name of the column with the revenue of each product.
    probability_col: str
        Name of the column with the conditional probability of each
        product.
    g: DistributionLike
        Distribution of attention spans.
    capacity: int, default 0
        Maximum number of products to display; non-positive values mean
        "use every row of df".
    show_xs: bool, default False
        Also return the list of x values chosen by the algorithm?

    Returns
    -------
    rank_column: pandas.Series
        Ranking position per product, indexed like df.
    best_xs: list, optional
        The x values chosen by the algorithm (only when show_xs).
    """
    if capacity < 1:
        capacity = df.shape[0]
    products = load_dataframe(df, revenue_col, probability_col)
    result = best_x_full_capacity(products, g, capacity, show_xs=show_xs)
    if not show_xs:
        return ranking_as_column(result)
    ranking, chosen_xs = result
    return ranking_as_column(ranking), chosen_xs
def expected_revenue(df: pd.DataFrame, revenue_col: str, probability_col: str,
                     ranking_col: str, g: DistributionLike) -> float:
    """It calculates the expected revenue for the cascade model.

    Parameters
    ----------
    df: pandas.DataFrame
        Dataframe storing the products' data.
    revenue_col: str
        Name of the column with the revenue of each product.
    probability_col: str
        Name of the column with the conditional probability of each
        product.
    ranking_col: str
        Name of the column with the ranking to be evaluated; rows with
        a missing ranking are ignored.
    g: DistributionLike
        Distribution of attention spans.

    Returns
    -------
    revenue: float
        The expected revenue of the ranking according to the
        cascade model.
    """
    df_ = df[[revenue_col, probability_col, ranking_col]].copy()
    # Boolean-mask filtering instead of DataFrame.query: query() parses the
    # column name as a Python expression, which fails for names that are not
    # valid identifiers (spaces, dashes, leading digits, reserved words).
    df_ = df_[df_[ranking_col].notna()]
    df_.rename(columns={revenue_col: "revenue",
                        probability_col: "probability"}, inplace=True)
    df_.sort_values(by=ranking_col, inplace=True)
    df_.drop(columns=ranking_col, inplace=True)
    revenue = exp_rev(ranked_products=df_.iterrows(), g=g)
    return revenue
import re
def _prioritize(a, b, priority):
a = '(?:%s)' % a.text if a.priority < priority else a.text
b = '(?:%s)' % b.text if b.priority < priority else b.text
return a, b
class Raw(object):
    """A regular-expression fragment with an attached binding priority.

    Instances compose via operators (|, +, &, -, unary -) into larger
    fragments; non-capturing parentheses are added automatically when a
    looser-binding sub-expression is embedded in a tighter context.
    """
    text = None  # basic Regexp
    priority = 99  # priority (controls auto-added parentheses):
    # 0 = | (least binding)
    # 1 = concatenation (including ^, $)
    # 2 = repetition (*, +, ?)
    # 99 = atomic (charsets, parentheses, etc.)
    negated = None  # negated form of Regexp, if meaningful
    nongreedy = None  # nongreedy form
    charset = None  # form usable in character set, if any

    def __init__(self, text, priority, **kw):
        self.text = text
        self.priority = priority
        self.__dict__.update(kw)
        if "nongreedy" in kw:
            # `minimal` is a public alias for the nongreedy variant.
            self.minimal = self.nongreedy
        if kw.get('charset') == 1:
            # charset=1 means "the text itself is usable inside [...]".
            self.charset = text

    def __or__(self, right):
        return Raw(self.text + '|' + Text(right).text, 0)

    def __ror__(self, left):
        return Raw(Text(left).text + '|' + self.text, 0)

    def __add__(self, right):
        # NOTE: _prioritize rebinds both names to plain strings here.
        self, right = _prioritize(self, Text(right), 1)
        return Raw(self + right, 1)
    __and__ = __add__

    def __radd__(self, left):
        self, left = _prioritize(self, Text(left), 1)
        return Raw(left + self, 1)
    __rand__ = __radd__

    def __neg__(self):
        if self.negated:
            return Raw(self.negated, self.priority,
                       negated = self.text)
        # Call form of raise: valid in both Python 2 and 3 (the original
        # `raise TypeError, '...'` is a SyntaxError on Python 3).
        raise TypeError('Regexp cannot be negated')

    def __sub__(self, right):
        self, right = _prioritize(self, -Text(right), 1)
        return Raw(self + right, 1)

    def __rsub__(self, left):
        self, left = _prioritize(-self, Text(left), 1)
        return Raw(left + self, 1)
def Repeated(expr, min, max = None):
    """Return *expr* repeated between *min* and *max* times.

    max=None means unbounded; the common cases map onto the *, +, ?
    and {m,n} regexp suffixes. The result's `nongreedy` attribute holds
    the minimal-match variant (suffix + '?').
    """
    expr = Text(expr)
    expr = '(?:%s)' % expr.text if expr.priority < 2 else expr.text
    # `is None` identity tests instead of `== None` equality (PEP 8).
    if min is None:
        min = 0
    if min == 0 and max is None:
        suffix = '*'
    elif min == 1 and max is None:
        suffix = '+'
    elif min == 0 and max == 1:
        suffix = '?'
    elif max is None:
        suffix = '{%d,}' % min
    else:
        suffix = '{%d,%d}' % (min, max)
    return Raw(expr + suffix, 2, nongreedy = Raw(expr + suffix + '?', 2))
def Optional(expr, max = None):
    """Match *expr* zero or more times (at most *max* when given)."""
    return Repeated(expr, 0, max)
def Required(expr, max = None):
    """Match *expr* one or more times (at most *max* when given)."""
    return Repeated(expr, 1, max)
def Maybe(expr):
    """Match *expr* zero or one time (the ? suffix)."""
    return Repeated(expr, 0, 1)
def Group(expr, name = None):
    """Wrap *expr* in a capturing group; (?P<name>...) when *name* is given."""
    expr = Text(expr)
    if name:
        return Raw('(?P<%s>%s)' % (name, expr.text), 99)
    else:
        return Raw('(%s)' % expr.text, 99)
def Match(name):
    """Backreference to a previously captured named group: (?P=name)."""
    return Raw('(?P=%s)' % name, 99)
def FollowedBy(expr):
    """Zero-width lookahead (?=...); its negation is the negative lookahead (?!...)."""
    expr = Text(expr).text
    return Raw('(?=%s)' % expr, 99, negated = '(?!%s)' % expr)
def _ischar(c):
    """Raise TypeError unless *c* is a single-character string.

    Uses the Python 2 `basestring` check, matching the rest of this
    module; only the raise statement is modernized to the call form,
    which is valid syntax on both Python 2 and 3.
    """
    if not isinstance(c, basestring) or len(c) != 1:
        raise TypeError('single character required')
class Char:
    """Builder for character ranges: Char('a').to('z') yields [a-z]."""
    def __init__(self, start):
        _ischar(start)
        self.start = start
    def to(self, end):
        """Return a Raw charset matching the inclusive range start-end."""
        _ischar(end)
        temp = '%c-%c' % (self.start, end)
        return Raw('[%s]' % temp, 99, negated = '[^%s]'%temp, charset = temp)
def Set(*args):
    """Build a character-set regexp ([...]) from the given items.

    Accepts charset-capable Raw fragments, literal strings, and integer
    character codes; anything else raises TypeError. Both raise
    statements use the call form, valid on Python 2 and 3.
    """
    rv = []
    for item in args:
        if isinstance(item, Raw):
            if item.charset:
                rv.append(item.charset)
            else:
                raise TypeError("can't be used in charset")
        elif isinstance(item, str):
            rv.append(item)
        elif isinstance(item, int):
            rv.append(chr(item))
        else:
            raise TypeError('unsuitable value for charset')
    rv = "".join(rv)
    return Raw('[%s]' % rv, 99, negated = '[^%s]' % rv)
def Text(text):
    """Coerce *text* into a Raw regexp fragment.

    Raw instances pass through unchanged; strings are regex-escaped
    (single characters become atomic, longer strings concatenation-
    priority). The raise statement uses the call form, valid on both
    Python 2 and 3.
    """
    if isinstance(text, Raw):
        return text
    if isinstance(text, str):
        if len(text) == 1:
            return Raw(re.escape(text), 99)
        else:
            return Raw(re.escape(text), 1)
    raise TypeError('unsuitable value for Regexp')
def Matched(name):
    """Return a replacement-template backreference to the named group *name*."""
    return '\\g<{0}>'.format(name)
# Prebuilt atoms mirroring the standard regex escapes.  ``negated`` holds the
# complementary class; ``charset`` marks atoms that may be used inside Set().
Alphanum = Raw('\\w', 99, negated = '\\W', charset = 1)
Any = Raw('.', 99)
Digit = Raw('\\d', 99, negated = '\\D', charset = 1)
Digits = Digit
End = Raw('$', 99)
EndString = Raw('\\Z', 99)
Start = Raw('^', 99)
StartString = Raw('\\A', 99)
Whitespace = Raw('\\s', 99, negated = '\\S', charset = 1)
Wordbreak = Raw('\\b', 99, negated = '\\B')
# re-constants
# Re-exported so callers need not import ``re`` themselves.
IGNORECASE = re.IGNORECASE
LOCALE = re.LOCALE
MULTILINE = re.MULTILINE
DOTALL = re.DOTALL
def Re(pat, flags=0):
    """Compile *pat* (a Raw expression or plain string) into a regex object."""
    # Fixed: the original return line had dataset-dump residue
    # ("| /reverb-2.0.1.zip/... | pypi") fused onto it; removed.
    return re.compile(Text(pat).text, flags)
import re
import sys
from argparse import SUPPRESS, Action, ArgumentParser, Namespace
from typing import List, Sequence
class ReverseArgumentParser:
    """
    Argument parsing in reverse.

    Whereas :class:`argparse.ArgumentParser` is concerned with taking a
    bunch of command line arguments and parsing them into a
    :class:`argparse.Namespace`, this class is intended to do the
    opposite; that is, it'll take the parsed arguments and create the
    effective command line invocation of the script that generated them.
    The motivation is to be able to tell users exactly what was used for
    all of the options, taking into consideration any defaults and other
    transformations that might've been applied in the midst of parsing,
    such that they're able to reproduce a prior run of a script exactly.

    Attributes:
        _args (List[str]): The list of arguments corresponding to each
            :class:`argparse.Action` in the given parser, which is built
            up as the arguments are unparsed.
        _indent (int): The number of spaces with which to indent
            subsequent lines when pretty-printing the effective command
            line invocation.
        _namespace (Namespace): The parsed arguments.
        _parsers (List[argparse.ArgumentParser]): The parser that was
            used to generate the parsed arguments.  This is a ``list``
            (conceptually a stack) to allow for sub-parsers, so the
            outer-most parser is the first item in the list, and
            sub-parsers are pushed onto and popped off of the stack as
            they are processed.
        _unparsed (List[bool]): A list in which the elements indicate
            whether the corresponding parser in :attr:`parsers` has been
            unparsed.
    """

    def __init__(
        self, parser: ArgumentParser, namespace: Namespace, indent: int = 4
    ):
        """
        Initialize the object.

        Args:
            parser: The :class:`argparse.ArgumentParser` used to
                construct the given ``namespace``.
            namespace: The parsed arguments.
            indent: How many spaces to use for each indentation level.
                (See :func:`get_pretty_command_line_invocation`.)
        """
        self._unparsed = [False]
        self._args = [parser.prog]
        self._indent = indent
        self._parsers = [parser]
        self._namespace = namespace

    def _unparse_args(self) -> None:
        """
        Unparse all the arguments.

        Loop over the positional and then optional actions, generating
        the command line arguments associated with each, and appending
        them to the list of arguments.  Idempotent: a second call for the
        same parser is a no-op.
        """
        if self._unparsed[-1]:
            return
        psr = self._parsers[-1]
        actions = (
            psr._get_optional_actions()  # pylint: disable=protected-access
            + psr._get_positional_actions()  # pylint: disable=protected-access
        )
        for action in actions:
            self._unparse_action(action)
        self._unparsed[-1] = True

    def _unparse_action(self, action: Action) -> None:
        """
        Unparse a single action.

        Generate the command line arguments associated with the given
        ``action``, and append them to the list of arguments.

        Args:
            action: The :class:`argparse.Action` to unparse.

        Raises:
            NotImplementedError: If there is not currently an
                implementation for unparsing the given action.
        """
        action_type = type(action).__name__
        if action_type != "_SubParsersAction" and (
            not hasattr(self._namespace, action.dest)
            or self._arg_is_default_and_help_is_suppressed(action)
        ):
            return
        if action_type == "_AppendAction":
            self._unparse_append_action(action)
        elif action_type == "_AppendConstAction":
            self._unparse_append_const_action(action)
        elif action_type == "_CountAction":
            self._unparse_count_action(action)
        elif action_type == "_ExtendAction":
            self._unparse_extend_action(action)
        elif action_type == "_HelpAction":  # pragma: no cover
            return
        elif action_type == "_StoreAction":
            self._unparse_store_action(action)
        elif action_type == "_StoreConstAction":
            self._unparse_store_const_action(action)
        elif action_type == "_StoreFalseAction":
            self._unparse_store_false_action(action)
        elif action_type == "_StoreTrueAction":
            self._unparse_store_true_action(action)
        elif action_type == "_SubParsersAction":
            self._unparse_sub_parsers_action(action)
        elif action_type == "_VersionAction":  # pragma: no cover
            return
        elif (
            action_type == "BooleanOptionalAction"
            # Fixed: compare the full version tuple; the original tested only
            # ``sys.version_info.minor >= 9``, which is wrong for any future
            # major version (e.g. 4.0).
            and sys.version_info >= (3, 9)
        ):
            self._unparse_boolean_optional_action(action)
        else:  # pragma: no cover
            raise NotImplementedError(
                f"{self.__class__.__name__} does not yet support the "
                f"unparsing of {action_type} objects."
            )

    def _arg_is_default_and_help_is_suppressed(self, action: Action) -> bool:
        """
        See if the argument should be skipped.

        Determine whether the argument matches the default value and the
        corresponding help text has been suppressed.  Such cases
        indicate that a parser author has hidden an argument from users,
        and the user hasn't modified the value on the command line, so
        to match the author's intent, we should omit the argument from
        the effective command line invocation.

        Args:
            action: The command line argument in question.

        Returns:
            ``True`` if the argument should be omitted; ``False``
            otherwise.
        """
        value = getattr(self._namespace, action.dest)
        return value == action.default and action.help == SUPPRESS

    def get_effective_command_line_invocation(self) -> str:
        """
        Get the effective command line invocation of a script.

        This takes into account what was passed into the script on the
        command line, along with any default values, etc., such that
        there is no ambiguity in what exactly was run.

        Returns:
            What you would need to run on the command line to reproduce
            what was run before.
        """
        self._unparse_args()
        return " ".join(_.strip() for _ in self._args if _.strip())

    def get_pretty_command_line_invocation(self) -> str:
        """
        Get a "pretty" version of the command that was run.

        Similar to :func:`get_effective_command_line_invocation`, but
        generate a string ready for "pretty-printing", with escaped
        newlines between each of the arguments, and appropriate
        indentation.

        Returns:
            What you would need to run on the command line to reproduce
            what was run before.
        """
        self._unparse_args()
        return " \\\n".join(_ for _ in self._args if _.strip())

    def _get_long_option_strings(
        self, option_strings: Sequence[str]
    ) -> List[str]:
        """
        Get the long options from a list of options strings.

        Args:
            option_strings: The list of options strings.

        Returns:
            A list containing only the long options (e.g., ``"--foo"``),
            and not the short ones (e.g., ``"-f"``).  Note that the list
            will be empty if there are no long options.
        """
        return [
            option
            for option in option_strings
            if len(option) > 2
            and option[0] in self._parsers[-1].prefix_chars
            and option[1] in self._parsers[-1].prefix_chars
        ]

    def _get_short_option_strings(
        self, option_strings: Sequence[str]
    ) -> List[str]:
        """
        Get the short options from a list of options strings.

        Args:
            option_strings: The list of options strings.

        Returns:
            A list containing only the short options (e.g., ``"-f"``),
            and not the long ones (e.g., ``"--foo"``).  Note that the
            list will be empty if there are no short options.
        """
        return [
            option
            for option in option_strings
            if len(option) == 2 and option[0] in self._parsers[-1].prefix_chars
        ]

    def _get_option_string(
        self, action: Action, prefer_short: bool = False
    ) -> str:
        """
        Get the option string for the `action`.

        Get the first of the long options corresponding to a given
        :class:`argparse.Action`.  If no long options are available, get
        the first of the short options.  If ``prefer_short`` is
        ``True``, search the short options first, and fall back to the
        long ones if necessary.

        Args:
            action: The :class:`argparse.Action` in question.
            prefer_short: Whether to prefer the short options over the
                long ones.

        Returns:
            The option string, or the empty string, if no options string
            exists (e.g., for positional arguments).
        """
        short_options = self._get_short_option_strings(action.option_strings)
        long_options = self._get_long_option_strings(action.option_strings)
        if prefer_short:
            if short_options:
                return short_options[0]
            if long_options:
                return long_options[0]
        else:
            if long_options:
                return long_options[0]
            if short_options:
                return short_options[0]
        return ""

    def _append_list_of_list_of_args(self, args: List[List[str]]) -> None:
        """
        Append to the list of unparsed arguments.

        Given a list of lists of command line arguments corresponding to
        a particular action, append them to the list of arguments,
        taking into account indentation and the sub-parser nesting
        level.

        Args:
            args: The command line arguments to be appended.
        """
        for line in args:
            self._args.append(self._indent_str + " ".join(line))

    def _append_list_of_args(self, args: List[str]) -> None:
        """
        Append to the list of unparsed arguments.

        Given a list of command line arguments corresponding to a
        particular action, append them to the list of arguments, taking
        into account indentation and the sub-parser nesting level.

        Args:
            args: The command line arguments to be appended.
        """
        self._args.append(self._indent_str + " ".join(args))

    def _append_arg(self, arg: str) -> None:
        """
        Append to the list of unparsed arguments.

        Given a command line argument corresponding to a particular
        action, append it to the list of arguments, taking into account
        indentation and the sub-parser nesting level.

        Args:
            arg: The command line argument to be appended.
        """
        self._args.append(self._indent_str + arg)

    @property
    def _indent_str(self) -> str:
        """
        The indentation level.

        Returns:
            A string of spaces corresponding to the indentation level
            (one level per parser on the stack).
        """
        return " " * self._indent * len(self._parsers)

    def _unparse_store_action(self, action: Action) -> None:
        """
        Generate the list of arguments for a ``store`` action.

        Args:
            action: The :class:`_StoreAction` in question.
        """
        values = getattr(self._namespace, action.dest)
        if values is None:
            return
        flag = self._get_option_string(action)
        result = []
        if flag:
            result.append(flag)
        if not isinstance(values, list):
            values = [values]
        needs_quotes_regex = re.compile(r"(.*\s.*)")
        for value in values:
            value = str(value)
            if needs_quotes_regex.search(value):
                value = needs_quotes_regex.sub(r"'\1'", value)
            result.append(value)
        self._append_list_of_args(result)

    def _unparse_store_const_action(self, action: Action) -> None:
        """
        Generate the argument for a ``store_const`` action.

        Args:
            action: The :class:`_StoreConstAction` in question.
        """
        value = getattr(self._namespace, action.dest)
        if value == action.const:
            self._append_arg(self._get_option_string(action))

    def _unparse_store_true_action(self, action: Action) -> None:
        """
        Generate the argument for a ``store_true`` action.

        Args:
            action: The :class:`_StoreTrueAction` in question.
        """
        value = getattr(self._namespace, action.dest)
        if value is True:
            self._append_arg(self._get_option_string(action))

    def _unparse_store_false_action(self, action: Action) -> None:
        """
        Generate the argument for a ``store_false`` action.

        Args:
            action: The :class:`_StoreFalseAction` in question.
        """
        value = getattr(self._namespace, action.dest)
        if value is False:
            self._append_arg(self._get_option_string(action))

    def _unparse_append_action(self, action: Action) -> None:
        """
        Generate the list of arguments for an ``append`` action.

        Args:
            action: The :class:`_AppendAction` in question.
        """
        values = getattr(self._namespace, action.dest)
        if values is None:
            return
        flag = self._get_option_string(action)
        if not isinstance(values, list):
            values = [values]
        result = []
        if isinstance(values[0], list):
            for entry in values:
                tmp = [flag]
                for value in entry:
                    value = quote_arg_if_necessary(str(value))
                    tmp.append(value)
                result.append(tmp)
        else:
            for value in values:
                value = quote_arg_if_necessary(str(value))
                result.append([flag, value])
        self._append_list_of_list_of_args(result)

    def _unparse_append_const_action(self, action: Action) -> None:
        """
        Generate the argument for an ``append_const`` action.

        Args:
            action: The :class:`_AppendConstAction` in question.
        """
        values = getattr(self._namespace, action.dest)
        if values is not None and action.const in values:
            self._append_arg(self._get_option_string(action))

    def _unparse_count_action(self, action: Action) -> None:
        """
        Generate the list of arguments for a ``count`` action.

        Args:
            action: The :class:`_CountAction` in question.
        """
        value = getattr(self._namespace, action.dest)
        # Fixed: a count option that never appeared on the command line (and
        # has no default) leaves ``value`` as None, which previously crashed
        # on ``flag[1] * count``.  Emit nothing in that case.
        if value is None:
            return
        count = value if action.default is None else (value - action.default)
        # Fixed: a zero count previously emitted a bare prefix character
        # (e.g. "-"); emit nothing instead.
        if count <= 0:
            return
        flag = self._get_option_string(action, prefer_short=True)
        if len(flag) == 2 and flag[0] in self._parsers[-1].prefix_chars:
            self._append_arg(flag[0] + flag[1] * count)
        else:
            self._append_list_of_args([flag for _ in range(count)])

    def _unparse_sub_parsers_action(self, action: Action) -> None:
        """
        Generate the list of arguments for a subparser action.

        This is done by:

        * looping over the commands and corresponding parsers in the
          given subparser action,
        * recursively unparsing the subparser, and
        * if the subparser wasn't used to parse the command line
          arguments, removing it before continuing with the next
          subcommand-subparser pair.

        Args:
            action: The :class:`_SubParsersAction` in question.

        Raises:
            RuntimeError: If a subparser action is somehow missing its
                dictionary of choices.
        """
        if action.choices is None or not isinstance(
            action.choices, dict
        ):  # pragma: no cover
            raise RuntimeError(
                "This subparser action is missing its dictionary of "
                f"choices: {action}"
            )
        for subcommand, subparser in action.choices.items():
            self._parsers.append(subparser)
            self._unparsed.append(False)
            self._args.append(
                " " * self._indent * (len(self._parsers) - 1) + subcommand
            )
            args_before = self._args.copy()
            self._unparse_args()
            # If unparsing added nothing, this subparser was not the one used
            # on the command line; pop it back off the stack.
            if self._args == args_before:
                self._parsers.pop()
                self._unparsed.pop()
                self._args.pop()

    def _unparse_extend_action(self, action: Action) -> None:
        """
        Generate the list of arguments for an ``extend`` action.

        Args:
            action: The :class:`_ExtendAction` in question.
        """
        values = getattr(self._namespace, action.dest)
        if values is not None:
            self._append_list_of_args(
                [self._get_option_string(action)] + values
            )

    def _unparse_boolean_optional_action(self, action: Action) -> None:
        """
        Generate the list of arguments for a ``BooleanOptionalAction``.

        Args:
            action: The :class:`BooleanOptionalAction` in question.
        """
        value = getattr(self._namespace, action.dest)
        if value is not None:
            # option_strings is ["--foo", "--no-foo"]; pick by truthiness.
            flag_index = 0 if getattr(self._namespace, action.dest) else 1
            self._append_arg(action.option_strings[flag_index])
def quote_arg_if_necessary(arg: str) -> str:
    """
    Quote an argument, if necessary.

    Wrap a command line argument in single quotes when it contains any
    whitespace; otherwise return it unchanged.

    Args:
        arg: The command line argument.

    Returns:
        The (possibly) quoted argument.
    """
    whitespace = re.compile(r"(.*\s.*)")
    if whitespace.search(arg) is None:
        return arg
    return whitespace.sub(r"'\1'", arg)
__author__ = 'Ajay Thampi'
import numpy as np
import multiprocessing as mp
import ctypes
from scipy.spatial import cKDTree
def shmem_as_nparray(shmem_array):
    """
    Expose a multiprocessing.Array of doubles as a numpy array view
    (no copy -- both refer to the same shared buffer).
    """
    raw = shmem_array.get_obj()
    return np.frombuffer(raw)
def _pquery(scheduler, data, ndata, ndim, leafsize,
            x, nx, d, i, k, eps, p, dub, ierr):
    """
    Worker that queries the K-D tree over the chunks handed out by *scheduler*.

    Results are written in place into the shared ``d`` (distances) and ``i``
    (indices) arrays; any failure is reported by incrementing the shared
    ``ierr`` counter rather than raising across the process boundary.
    """
    try:
        _data = shmem_as_nparray(data).reshape((ndata, ndim))
        _x = shmem_as_nparray(x).reshape((nx, ndim))
        _d = shmem_as_nparray(d).reshape((nx, k))
        _i = shmem_as_nparray(i).reshape((nx, k))
        # Each worker rebuilds its own tree over the shared data buffer.
        kdtree = cKDTree(_data, leafsize=leafsize)
        for s in scheduler:
            d_out, i_out = kdtree.query(_x[s, :], k=k, eps=eps, p=p, distance_upper_bound=dub)
            m_d = d_out.shape[0]
            m_i = i_out.shape[0]
            _d[s, :], _i[s, :] = d_out.reshape(m_d, 1), i_out.reshape(m_i, 1)
    except Exception:
        # Fixed: narrowed from a bare ``except`` -- still best-effort error
        # counting, but no longer swallows SystemExit/KeyboardInterrupt.
        ierr.value += 1
def num_cpus():
    """
    Return the number of CPUs / cores, used to size the worker pool.
    Falls back to 2 when the platform cannot report a count.
    """
    try:
        count = mp.cpu_count()
    except NotImplementedError:
        count = 2
    return count
class cKDTree_MP(cKDTree):
    """
    The parallelised cKDTree class.  Keeps the point data in shared memory
    so worker processes can query without pickling/copying the points.
    """
    def __init__(self, data_list, leafsize=30):
        """ Class Instantiation
        Arguments are based on scipy.spatial.cKDTree class
        """
        data = np.array(data_list)
        n, m = data.shape
        # Shared buffer holding the points; workers rebuild their own trees
        # over this same memory in _pquery.
        self.shmem_data = mp.Array(ctypes.c_double, n * m)
        _data = shmem_as_nparray(self.shmem_data).reshape((n, m))
        _data[:, :] = data
        self._leafsize = leafsize
        super(cKDTree_MP, self).__init__(_data, leafsize=leafsize)

    def pquery(self, x_list, k=1, eps=0, p=2,
               distance_upper_bound=np.inf):
        """
        Query the K-D tree in parallel, one worker process per CPU.

        Returns (distances, indices) arrays, mirroring cKDTree.query.
        """
        x = np.array(x_list)
        nx, mx = x.shape
        shmem_x = mp.Array(ctypes.c_double, nx * mx)
        shmem_d = mp.Array(ctypes.c_double, nx * k)
        # Indices are stored as doubles in shared memory and converted to
        # int on return.
        shmem_i = mp.Array(ctypes.c_double, nx * k)
        _x = shmem_as_nparray(shmem_x).reshape((nx, mx))
        _d = shmem_as_nparray(shmem_d).reshape((nx, k))
        _i = shmem_as_nparray(shmem_i)
        if k != 1:
            _i = _i.reshape((nx, k))
        _x[:, :] = x
        nprocs = num_cpus()
        scheduler = Scheduler(nx, nprocs)
        ierr = mp.Value(ctypes.c_int, 0)
        query_args = (scheduler,
                      self.shmem_data, self.n, self.m, self.leafsize,
                      shmem_x, nx, shmem_d, shmem_i,
                      k, eps, p, distance_upper_bound,
                      ierr)
        pool = [mp.Process(target=_pquery, args=query_args) for _ in range(nprocs)]
        # Fixed: the loop variable was named ``p``, shadowing the
        # Minkowski-norm parameter ``p`` of this method.
        for proc in pool:
            proc.start()
        for proc in pool:
            proc.join()
        if ierr.value != 0:
            raise RuntimeError('%d errors in worker processes' % (ierr.value))
        return _d.copy(), _i.astype(int).copy()
class Scheduler:
    """
    Scheduler that returns chunks of data to be queried on the K-D Tree.
    The number of chunks is determined by the number of processes.

    Thread/process safe: all mutation of the shared counters happens
    under ``_lock``.
    """
    def __init__(self, ndata, nprocs):
        # RawValues are unsynchronized; _lock guards every access.
        self._ndata = mp.RawValue(ctypes.c_int, ndata)
        self._start = mp.RawValue(ctypes.c_int, 0)
        self._lock = mp.Lock()
        min_chunk = ndata // nprocs
        # For tiny inputs hand out everything as a single chunk.
        min_chunk = ndata if min_chunk <= 2 else min_chunk
        self._chunk = min_chunk

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next ``slice`` of work, or raise StopIteration when done."""
        # Fixed: the original duplicated this entire body in both ``next``
        # (Python 2) and ``__next__`` (Python 3); deduplicated via the
        # aliasing below.  The explicit acquire/release pair is replaced by a
        # ``with`` block, which also releases the lock on any exception.
        with self._lock:
            ndata = self._ndata.value
            start = self._start.value
            chunk = self._chunk
            if not ndata:
                raise StopIteration
            if chunk > ndata:
                # Final (short) chunk: hand out whatever remains.
                stop = start + ndata
                self._ndata.value = 0
            else:
                stop = start + chunk
                self._ndata.value = ndata - chunk
                self._start.value = start + chunk
            return slice(start, stop)

    # Python 2 iterator protocol: same implementation.
    next = __next__
from __future__ import print_function
__author__ = 'Ajay Thampi'
import os
import sys
import csv
if sys.platform == 'win32':
    # Windows C long is 32 bits, and the Python int is too large to fit inside.
    # Use the limit appropriate for a 32-bit integer as the max file size
    csv.field_size_limit(2**31-1)
else:
    csv.field_size_limit(sys.maxsize)
import zipfile
from scipy.spatial import cKDTree as KDTree
from reverse_geocoder_whl import cKDTree_MP as KDTree_MP
import numpy as np
# Base URL of the GeoNames data dumps, and the files fetched from it.
GN_URL = 'http://download.geonames.org/export/dump/'
GN_CITIES1000 = 'cities1000'
GN_ADMIN1 = 'admin1CodesASCII.txt'
GN_ADMIN2 = 'admin2Codes.txt'
# Schema of the GeoNames Cities with Population > 1000
# (column name -> index into each tab-separated row of cities1000.txt)
GN_COLUMNS = {
    'geoNameId': 0,
    'name': 1,
    'asciiName': 2,
    'alternateNames': 3,
    'latitude': 4,
    'longitude': 5,
    'featureClass': 6,
    'featureCode': 7,
    'countryCode': 8,
    'cc2': 9,
    'admin1Code': 10,
    'admin2Code': 11,
    'admin3Code': 12,
    'admin4Code': 13,
    'population': 14,
    'elevation': 15,
    'dem': 16,
    'timezone': 17,
    'modificationDate': 18
}
# Schema of the GeoNames Admin 1/2 Codes
ADMIN_COLUMNS = {
    'concatCodes': 0,
    'name': 1,
    'asciiName': 2,
    'geoNameId': 3
}
# Schema of the cities file created by this library
RG_COLUMNS = [
    'lat',
    'lon',
    'name',
    'admin1',
    'admin2',
    'cc'
]
# Name of cities file created by this library
RG_FILE = 'rg_cities1000.csv'
# WGS-84 major axis in kms
A = 6378.137
# WGS-84 eccentricity squared
E2 = 0.00669437999014
def singleton(cls):
    """
    Class decorator: cache and reuse a single instance of the decorated
    class.  Keyword arguments are honored only on the first construction;
    later calls return the cached instance unchanged.
    """
    instances = {}
    def getinstance(**kwargs):
        """Create the instance on first call; return the cached one after."""
        try:
            return instances[cls]
        except KeyError:
            instance = cls(**kwargs)
            instances[cls] = instance
            return instance
    return getinstance
@singleton
class RGeocoder(object):
    """
    The main reverse geocoder class.

    Wraps a K-D tree built over the GeoNames "cities with population > 1000"
    data set (downloaded and reformatted on first use), or over a custom
    in-memory data source supplied via ``stream``.  Decorated with
    ``@singleton``: construction arguments are honored only on first use.
    """
    def __init__(self, mode=2, verbose=True, stream=None):
        """ Class Instantiation
        Args:
            mode (int): Library supports the following two modes:
                        - 1 = Single-threaded K-D Tree
                        - 2 = Multi-threaded K-D Tree (Default)
            verbose (bool): For verbose output, set to True
            stream (io.StringIO): An in-memory stream of a custom data source
        """
        self.mode = mode
        self.verbose = verbose
        if stream:
            coordinates, self.locations = self.load(stream)
        else:
            coordinates, self.locations = self.extract(rel_path(RG_FILE))
        if mode == 1: # Single-process
            self.tree = KDTree(coordinates)
        else: # Multi-process
            self.tree = KDTree_MP.cKDTree_MP(coordinates)
    def query(self, coordinates):
        """
        Function to query the K-D tree to find the nearest city
        Args:
            coordinates (list): List of tuple coordinates, i.e. [(latitude, longitude)]
        Returns:
            A list of location dicts (one per query point), each with the
            RG_COLUMNS keys.
        """
        if self.mode == 1:
            _, indices = self.tree.query(coordinates, k=1)
        else:
            _, indices = self.tree.pquery(coordinates, k=1)
        return [self.locations[index] for index in indices]
    def load(self, stream):
        """
        Function that loads a custom data source
        Args:
            stream (io.StringIO): An in-memory stream of a custom data source.
                                  The format of the stream must be a comma-separated file
                                  with header containing the columns defined in RG_COLUMNS.
        """
        stream_reader = csv.DictReader(stream, delimiter=',')
        header = stream_reader.fieldnames
        if header != RG_COLUMNS:
            raise csv.Error('Input must be a comma-separated file with header containing ' + \
                            'the following columns - %s. For more help, visit: ' % (','.join(RG_COLUMNS)) + \
                            'https://github.com/thampiman/reverse-geocoder')
        # Load all the coordinates and locations
        # NOTE: coordinates stay as strings here -- they are fed to the
        # KD-tree as-is; verify numeric conversion happens downstream.
        geo_coords, locations = [], []
        for row in stream_reader:
            geo_coords.append((row['lat'], row['lon']))
            locations.append(row)
        return geo_coords, locations
    def extract(self, local_filename):
        """
        Function loads the already extracted GeoNames cities file or downloads and extracts it if
        it doesn't exist locally
        Args:
            local_filename (str): Path to local RG_FILE
        """
        if os.path.exists(local_filename):
            if self.verbose:
                print('Loading formatted geocoded file...')
            # NOTE(review): file handle is never closed explicitly; the
            # reader is consumed lazily at the bottom of this method.
            rows = csv.DictReader(open(local_filename, 'rt'))
        else:
            gn_cities1000_url = GN_URL + GN_CITIES1000 + '.zip'
            gn_admin1_url = GN_URL + GN_ADMIN1
            gn_admin2_url = GN_URL + GN_ADMIN2
            cities1000_zipfilename = GN_CITIES1000 + '.zip'
            cities1000_filename = GN_CITIES1000 + '.txt'
            if not os.path.exists(cities1000_zipfilename):
                if self.verbose:
                    print('Downloading files from Geoname...')
                # urllib moved in Python 3; fall back to the Python 2 API.
                try: # Python 3
                    import urllib.request
                    urllib.request.urlretrieve(gn_cities1000_url, cities1000_zipfilename)
                    urllib.request.urlretrieve(gn_admin1_url, GN_ADMIN1)
                    urllib.request.urlretrieve(gn_admin2_url, GN_ADMIN2)
                except ImportError: # Python 2
                    import urllib
                    urllib.urlretrieve(gn_cities1000_url, cities1000_zipfilename)
                    urllib.urlretrieve(gn_admin1_url, GN_ADMIN1)
                    urllib.urlretrieve(gn_admin2_url, GN_ADMIN2)
            if self.verbose:
                print('Extracting cities1000...')
            # NOTE(review): neither the zip nor the output handle is closed.
            _z = zipfile.ZipFile(open(cities1000_zipfilename, 'rb'))
            open(cities1000_filename, 'wb').write(_z.read(cities1000_filename))
            if self.verbose:
                print('Loading admin1 codes...')
            # Map "CC.ADMIN1" -> human-readable region name.
            admin1_map = {}
            t_rows = csv.reader(open(GN_ADMIN1, 'rt'), delimiter='\t')
            for row in t_rows:
                admin1_map[row[ADMIN_COLUMNS['concatCodes']]] = row[ADMIN_COLUMNS['asciiName']]
            if self.verbose:
                print('Loading admin2 codes...')
            # Map "CC.ADMIN1.ADMIN2" -> human-readable sub-region name.
            admin2_map = {}
            for row in csv.reader(open(GN_ADMIN2, 'rt'), delimiter='\t'):
                admin2_map[row[ADMIN_COLUMNS['concatCodes']]] = row[ADMIN_COLUMNS['asciiName']]
            if self.verbose:
                print('Creating formatted geocoded file...')
            writer = csv.DictWriter(open(local_filename, 'wt'), fieldnames=RG_COLUMNS)
            rows = []
            for row in csv.reader(open(cities1000_filename, 'rt'), \
                                  delimiter='\t', quoting=csv.QUOTE_NONE):
                lat = row[GN_COLUMNS['latitude']]
                lon = row[GN_COLUMNS['longitude']]
                name = row[GN_COLUMNS['asciiName']]
                cc = row[GN_COLUMNS['countryCode']]
                admin1_c = row[GN_COLUMNS['admin1Code']]
                admin2_c = row[GN_COLUMNS['admin2Code']]
                # Resolve the admin codes to names where known; blank otherwise.
                cc_admin1 = cc+'.'+admin1_c
                cc_admin2 = cc+'.'+admin1_c+'.'+admin2_c
                admin1 = ''
                admin2 = ''
                if cc_admin1 in admin1_map:
                    admin1 = admin1_map[cc_admin1]
                if cc_admin2 in admin2_map:
                    admin2 = admin2_map[cc_admin2]
                write_row = {'lat':lat,
                             'lon':lon,
                             'name':name,
                             'admin1':admin1,
                             'admin2':admin2,
                             'cc':cc}
                rows.append(write_row)
            writer.writeheader()
            writer.writerows(rows)
            if self.verbose:
                print('Removing extracted cities1000 to save space...')
            os.remove(cities1000_filename)
        # Load all the coordinates and locations
        geo_coords, locations = [], []
        for row in rows:
            geo_coords.append((row['lat'], row['lon']))
            locations.append(row)
        return geo_coords, locations
def geodetic_in_ecef(geo_coords):
    """Convert (lat, lon) pairs in degrees to ECEF (x, y, z) in km (WGS-84).

    Altitude is assumed to be zero, i.e. points lie on the ellipsoid surface.
    Uses the module-level WGS-84 constants ``A`` (major axis, km) and ``E2``
    (eccentricity squared).
    """
    # Fixed: ``np.float`` was removed in NumPy 1.24; plain ``float`` is the
    # documented equivalent.
    geo_coords = np.asarray(geo_coords).astype(float)
    lat = geo_coords[:, 0]
    lon = geo_coords[:, 1]
    lat_r = np.radians(lat)
    lon_r = np.radians(lon)
    # Prime-vertical radius of curvature at each latitude.
    normal = A / (np.sqrt(1 - E2 * (np.sin(lat_r) ** 2)))
    x = normal * np.cos(lat_r) * np.cos(lon_r)
    y = normal * np.cos(lat_r) * np.sin(lon_r)
    # Fixed: z previously used ``np.sin(lat)`` (degrees) while x and y used
    # radians; all three must use the latitude in radians.
    z = normal * (1 - E2) * np.sin(lat_r)
    return np.column_stack([x, y, z])
def rel_path(filename):
    """
    Return the path to *filename* alongside this module's source file.
    """
    module_dir = os.path.dirname(__file__)
    return os.path.join(os.getcwd(), module_dir, filename)
def get(geo_coord, mode=2, verbose=True):
    """
    Function to query for a single coordinate

    Args:
        geo_coord: A ``(latitude, longitude)`` tuple of numbers.
        mode (int): 1 = single-threaded K-D tree, 2 = multi-threaded (default).
        verbose (bool): Print progress information when True.

    Raises:
        TypeError: If *geo_coord* is not a tuple of numbers.
    """
    if not isinstance(geo_coord, tuple):
        raise TypeError('Expecting a tuple')
    # Fixed: a tuple with a non-float first element used to raise the
    # misleading 'Expecting a tuple'.  Also generalized to accept ints,
    # e.g. ``(37, -122)`` (backward compatible -- floats still work).
    if not isinstance(geo_coord[0], (int, float)):
        raise TypeError('Expecting a tuple of numeric coordinates')
    _rg = RGeocoder(mode=mode, verbose=verbose)
    return _rg.query([geo_coord])[0]
def search(geo_coords, mode=2, verbose=True):
    """
    Function to query for a list of coordinates

    Args:
        geo_coords: A ``(lat, lon)`` tuple, or a list/tuple of such tuples.
        mode (int): 1 = single-threaded K-D tree, 2 = multi-threaded (default).
        verbose (bool): Print progress information when True.

    Raises:
        TypeError: If *geo_coords* is neither a tuple nor a list.
    """
    if not isinstance(geo_coords, tuple) and not isinstance(geo_coords, list):
        raise TypeError('Expecting a tuple or a tuple/list of tuples')
    # Fixed: an empty list previously crashed with IndexError on
    # ``geo_coords[0]``; no coordinates simply means no results.
    if not geo_coords:
        return []
    if not isinstance(geo_coords[0], tuple):
        # A single bare (lat, lon) pair was passed; wrap it.
        geo_coords = [geo_coords]
    _rg = RGeocoder(mode=mode, verbose=verbose)
    return _rg.query(geo_coords)
if __name__ == '__main__':
    # Smoke test / usage demo; performs real geocoding (and possibly a
    # download) when run directly.
    # Fixed: the final print line had dataset-dump residue fused onto it.
    print('Testing single coordinate through get...')
    city = (37.78674, -122.39222)
    print('Reverse geocoding 1 city...')
    result = get(city)
    print(result)
    print('Testing coordinates...')
    cities = [(51.5214588, -0.1729636), (9.936033, 76.259952), (37.38605, -122.08385)]
    print('Reverse geocoding %d cities...' % len(cities))
    results = search(cities)
    print(results)
from pathlib import Path
from typing import Iterator, List, Tuple
import cv2
import numpy as np
from joblib import Parallel, delayed
from skimage.metrics import structural_similarity
from reverse_image_search import config
def load_image(path: Path, grayscale=True) -> np.ndarray:
    """Load the image at *path*; returns None when OpenCV cannot read it."""
    if grayscale:
        img = cv2.imread(str(path), cv2.IMREAD_GRAYSCALE)
    else:
        # Fixed: the original passed the Path CLASS (``str(Path)``) instead of
        # the ``path`` argument, so color loading could never work.
        img = cv2.imread(str(path))
    return img
def resize_image(
    img: np.ndarray, dim: Tuple = (config.RESIZE_HEIGHT, config.RESIZE_WIDTH)
) -> np.ndarray:
    """Resize *img* to *dim* using area interpolation (suited to shrinking)."""
    return cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
def walk(path: Path) -> Iterator[Path]:
    """Recursively yield every file under *path*, without following symlinks.

    If *path* is itself a file it is yielded directly.  Entries whose
    metadata cannot be read are skipped silently.
    """
    if path.is_file():
        # Fixed: ``return path`` inside a generator discards the value and
        # yields nothing; the file itself must be yielded.
        yield path
        return
    for p in path.iterdir():
        if p.is_symlink():
            # Do not follow symlinks, or you might end up recursing indefinitely
            continue
        try:
            is_dir = p.is_dir()
        except OSError:  # Occurs when you can't "stat" a file
            continue
        if is_dir:
            try:
                yield from walk(p)
            except PermissionError:
                # Ignore files we can't access
                pass
            continue
        yield p
def get_similarity(src: np.ndarray, filepath: Path) -> Tuple[Path, float]:
    """Return ``(filepath, SSIM score)`` comparing *src* to the image at *filepath*.

    The score is 0 when the target image cannot be loaded.
    """
    candidate = load_image(filepath)
    if candidate is None:  # unable to load image
        return filepath, 0
    score = structural_similarity(src, resize_image(candidate))
    return filepath, score
def search_similar_images(
    src_path: Path,
    search_path: Path,
    threshold: float,
    # Fixed: the original wrote ``allowed_filetypes=List[str]`` -- assigning
    # the typing alias as a DEFAULT VALUE instead of an annotation, so calls
    # omitting the argument crashed on the membership test below.  It is now
    # a properly annotated (required) parameter.
    allowed_filetypes: List[str],
) -> List[Tuple[Path, float]]:
    """Find images under *search_path* at least *threshold*-similar to *src_path*.

    Args:
        src_path: Image to compare against.
        search_path: Directory tree (or single file) to scan.
        threshold: Minimum SSIM score to keep a match.
        allowed_filetypes: Lower-case suffixes (e.g. ``[".jpg", ".png"]``).

    Returns:
        ``(filepath, similarity)`` pairs with similarity >= threshold.
    """
    # load first image only once
    src = load_image(src_path)
    src = resize_image(src)
    # Fan the per-file comparisons out across all cores.
    results = Parallel(n_jobs=-1)(
        delayed(get_similarity)(src, filepath)
        for filepath in walk(search_path)
        if filepath.suffix.lower() in allowed_filetypes
    )
    results = [
        (filepath, similarity)
        for filepath, similarity in results
        if similarity >= threshold
    ]
    return results
import re
from collections import defaultdict
from multiprocessing import Process
from kafka import TopicPartition
from logger import kafka_factory
from logger.constant import BATCH_SIZE
def search_messages_in_parallel(topic, brokers, regex):
    """
    Messages will be searched in parallel by spawning a process per partition.

    :param topic: topic name to search
    :param brokers: list of broker host:port strings
    :param regex: pattern matched against each message value
    :return:
    """
    n_partition = _get_n_partition(brokers, topic)
    kafka_consumer = kafka_factory.generate_kafka_consumer(brokers)
    partition_id_to_start_end_offset = _get_partition_info(kafka_consumer, topic, n_partition)
    processes = []
    for partition in xrange(n_partition):
        p = Process(
            target=_reverse_search_log_per_partition,
            args=(brokers, topic, partition, partition_id_to_start_end_offset, regex),
        )
        p.start()
        processes.append(p)
    # Fixed: the original called ``p.join()`` immediately after each
    # ``p.start()`` inside the loop, which waits for every worker before
    # spawning the next one -- serializing the whole search.  Start all
    # workers first, then join them, so partitions are searched concurrently.
    for p in processes:
        p.join()
def _get_partition_info(kafka_consumer, topic, n_partition):
    """
    Return a mapping of partition id -> {'start_offset': int, 'end_offset': int}
    for the first *n_partition* partitions of *topic*.
    """
    offsets = defaultdict(dict)
    partitions = [TopicPartition(topic, idx) for idx in xrange(n_partition)]
    for topic_partition, offset in kafka_consumer.beginning_offsets(partitions).items():
        offsets[topic_partition.partition]['start_offset'] = offset
    for topic_partition, offset in kafka_consumer.end_offsets(partitions).items():
        offsets[topic_partition.partition]['end_offset'] = offset
    return offsets
def _reverse_search_log_per_partition(
    brokers,
    topic,
    partition,
    partition_id_to_start_end_offset,
    regex,
):
    """
    Search one partition's messages from newest to oldest.

    This works by using a sliding window mechanism::

        ---------------------------
        1 2 3 4 5 6 7 8 9 10 11 12
        ^

    Normal Kafka reading starts from the beginning offset to the end.
    We could seek offsets one by one, but each seek pays a network round
    trip to the broker, so the idea is to fetch messages in batches of
    BATCH_SIZE, walking the window backwards from the end offset.

    :param list[str] brokers:
    :param str topic:
    :param int partition:
    :param dict partition_id_to_start_end_offset: partition id ->
        {'start_offset': ..., 'end_offset': ...}
    :param str regex:
    :return:
    """
    """
    Kafka consumer can only be instantiated when the sub-process is spawned otherwise the socket is closed
    """
    kafka_consumer = kafka_factory.generate_kafka_consumer(brokers, is_singleton=False)
    start_offset = partition_id_to_start_end_offset[partition]['start_offset']
    end_offset = partition_id_to_start_end_offset[partition]['end_offset']
    # NOTE: Python 2 print statements (this module targets Python 2).
    print 'start_offset: {}, end_offset: {}'.format(start_offset, end_offset)
    kafka_consumer.assign([TopicPartition(topic, partition)])
    # Walk backwards from the newest offset in BATCH_SIZE steps; each
    # iteration reads one window [start_read_offset, end_read_offset).
    for offset in range(end_offset, start_offset - 1, -BATCH_SIZE):
        start_read_offset, end_read_offset = _get_start_end_offset(offset, start_offset)
        # assign partition and offset to the kafka consumer
        print 'start_read_offset: {}, end_read_offset: {}, assigned_offset: {}'.format(start_read_offset, end_read_offset, offset)
        kafka_consumer.seek(
            partition=TopicPartition(topic, partition),
            offset=start_read_offset,
        )
        grep_messages_in_batch(kafka_consumer, regex, start_read_offset, end_read_offset)
def _get_start_end_offset(offset, start_offset):
    """
    Clamp one batch window so it never begins before the partition's first
    readable offset.

    The start offset might be less than the offset that can actually be read:
    depending on the broker configuration, messages are retained only for a
    particular time period.

    :param int offset: exclusive upper bound of the batch window
    :param int start_offset: earliest offset available in the partition
    :return: (start_read_offset, end_read_offset) tuple
    """
    return max(offset - BATCH_SIZE, start_offset), offset
def grep_messages_in_batch(kafka_consumer, regex, start_offset, end_offset):
    """
    Read ``end_offset - start_offset`` messages from the consumer and print
    every one whose value matches ``regex`` (anchored at the start of the
    value, as ``re.match`` does).

    The consumer is only used as an iterator, so any iterable of objects
    exposing a ``.value`` attribute works.

    :param KafkaConsumer kafka_consumer: consumer already seeked to
        ``start_offset``
    :param str regex:
    :param int start_offset:
    :param int end_offset:
    :return: None; matches are written to stdout
    """
    for _ in range(start_offset, end_offset):
        message = next(kafka_consumer)
        if re.match(regex, message.value):
            # Call-form print keeps this runnable on Python 3 as well;
            # the old `print '...'` statement form is py2-only.
            print('message: {}'.format(message))
def _get_n_partition(brokers, topic):
    """
    Return the number of partitions of *topic*.

    :param list[str] brokers: broker host:port strings used to build a consumer
    :param str topic: topic name
    :return: int: partition count for the topic
    """
    kafka_consumer = kafka_factory.generate_kafka_consumer(brokers, is_singleton=False)
    kafka_consumer.subscribe(topics=[topic])
    # NOTE(review): topics() presumably forces a cluster-metadata refresh so
    # that partitions_for_topic below is populated — confirm against the
    # kafka-python docs; the return value is deliberately discarded.
    kafka_consumer.topics()
    # `unicode` is the Python 2 builtin — this module is Python-2-only code.
    return len(kafka_consumer.partitions_for_topic(unicode(topic)))
from json import loads, JSONDecodeError
import re
from ipaddress import IPv4Address, AddressValueError
from .net.http import ApiRequester
from .models.response import Response
from .exceptions.error import ParameterError, EmptyApiKeyError, \
UnparsableApiResponseError
class Client:
    """Client for the WhoisXML API Reverse MX endpoint.

    Validates parameters locally and delegates HTTP calls to
    :class:`ApiRequester`. ``data``/``raw_data`` fetch a single page of
    domains that use a given MX server; ``iterate_pages``/``next_page``
    implement pagination via the ``search_from`` offset (the last domain
    name of the previous page).
    """
    # Production endpoint; overridable via the `base_url` kwarg/property.
    __default_url = "https://reverse-mx.whoisxmlapi.com/api/v1"
    _api_requester: ApiRequester or None
    _api_key: str
    # API keys look like 'at_' followed by 29 alphanumeric characters.
    _re_api_key = re.compile(r'^at_[a-z0-9]{29}$', re.IGNORECASE)
    # Loose domain-name shape check (dotted labels + TLD); IPs are validated
    # separately via ipaddress.IPv4Address in _validate_mx_server.
    _domain_name = re.compile(r'^([a-z0-9_][-_a-z0-9]{0,62}\.){1,32}[0-9a-z][-0-9a-z]{1,62}')
    _SUPPORTED_FORMATS = ['json', 'xml']
    _PARSABLE_FORMAT = 'json'
    JSON_FORMAT = 'JSON'
    XML_FORMAT = 'XML'
    def __init__(self, api_key: str, **kwargs):
        """
        :param api_key: str: Your API key.
        :key base_url: str: (optional) API endpoint URL.
        :key timeout: float: (optional) API call timeout in seconds
        """
        self._api_key = ''
        self.api_key = api_key
        if 'base_url' not in kwargs:
            kwargs['base_url'] = Client.__default_url
        self.api_requester = ApiRequester(**kwargs)
    @property
    def api_key(self) -> str:
        return self._api_key
    @api_key.setter
    def api_key(self, value: str):
        # Setter validates the format and raises ParameterError on mismatch.
        self._api_key = Client._validate_api_key(value)
    @property
    def api_requester(self) -> ApiRequester or None:
        return self._api_requester
    @api_requester.setter
    def api_requester(self, value: ApiRequester):
        self._api_requester = value
    @property
    def base_url(self) -> str:
        # base_url and timeout simply delegate to the underlying requester.
        return self._api_requester.base_url
    @base_url.setter
    def base_url(self, value: str or None):
        if value is None:
            self._api_requester.base_url = Client.__default_url
        else:
            self._api_requester.base_url = value
    @property
    def timeout(self) -> float:
        return self._api_requester.timeout
    @timeout.setter
    def timeout(self, value: float):
        self._api_requester.timeout = value
    def iterate_pages(self, mx: str or IPv4Address):
        """
        Iterate over all pages of domains related to given MX
        :param str|IPv4Address mx: MX server name
        :yields Response: Instance of `Response` with a page.
        :raises ConnectionError:
        :raises ReverseMxApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        resp = self.data(mx)
        yield resp
        while resp.has_next():
            resp = self.next_page(mx, resp)
            yield resp
    def next_page(self, mx: str or IPv4Address, current_page: Response) \
            -> Response:
        """
        Get the next page if available, otherwise returns the given one
        :param str|IPv4Address mx: MX server name
        :param Response current_page: The current page.
        :return: Instance of `Response` with a next page.
        :raises ConnectionError:
        :raises ReverseMxApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        if current_page.size > 0:
            # The API paginates by domain name: the last domain of the
            # current page becomes the `search_from` offset of the next one.
            last_domain = current_page.result[-1].name
            return self.data(mx, search_from=last_domain)
        return current_page
    def data(self, mx: str or IPv4Address, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.
        :param str|IPv4Address mx: MX server (domain or IP address).
        :key search_from: Optional. The domain name which is used as an
                        offset for the results returned.
        :return: `Response` instance
        :raises ConnectionError:
        :raises ReverseMxApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        # Force the parsable (JSON) format regardless of caller preference;
        # raw_data is the escape hatch for XML.
        kwargs['response_format'] = Client._PARSABLE_FORMAT
        response = self.raw_data(mx, **kwargs)
        try:
            parsed = loads(str(response))
            # 'result' is the root element of a successful JSON response.
            if 'result' in parsed:
                return Response(parsed)
            raise UnparsableApiResponseError(
                "Could not find the correct root element.", None)
        except JSONDecodeError as error:
            raise UnparsableApiResponseError("Could not parse API response", error)
    def raw_data(self, mx: str or IPv4Address, **kwargs) -> str:
        """
        Get raw API response.
        :param str|IPv4Address mx: MX server (domain or IP address).
        :key search_from: Optional. The domain name which is used as an
                        offset for the results returned.
        :key response_format: Optional. use constants
                        JSON_FORMAT and XML_FORMAT
        :return: str
        :raises ConnectionError:
        :raises ReverseMxApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        if self.api_key == '':
            raise EmptyApiKeyError('')
        mx = Client._validate_mx_server(mx, True)
        # 'output_format' is accepted as a legacy alias of 'response_format'.
        if 'output_format' in kwargs:
            kwargs['response_format'] = kwargs['output_format']
        if 'response_format' in kwargs:
            response_format = Client._validate_response_format(
                kwargs['response_format'])
        else:
            response_format = Client._PARSABLE_FORMAT
        if 'search_from' in kwargs:
            search_from = Client._validate_search_from(kwargs['search_from'])
        else:
            # NOTE(review): "0" presumably means "start from the beginning"
            # for the API's `from` parameter — confirm against the API docs.
            search_from = "0"
        return self._api_requester.get(self._build_payload(
            mx,
            search_from,
            response_format
        ))
    @staticmethod
    def _validate_api_key(api_key) -> str:
        if Client._re_api_key.search(
                str(api_key)
        ) is not None:
            return str(api_key)
        else:
            raise ParameterError("Invalid API key format.")
    @staticmethod
    def _validate_mx_server(value: str or IPv4Address, required=False) -> str:
        # Accepts an IPv4Address instance, a domain-shaped string, or a
        # string that parses as an IPv4 address; anything else raises.
        if value is None:
            raise ParameterError("MX could be a None value")
        if isinstance(value, IPv4Address):
            return str(value)
        if type(value) is not str:
            raise ParameterError('MX should be a string or IPv4Address')
        if Client._domain_name.search(value) is not None:
            return value
        try:
            IPv4Address(value)
            return value
        except AddressValueError:
            pass
        raise ParameterError("Invalid MX name parameter")
    @staticmethod
    def _validate_search_from(value):
        if type(value) is str and len(value) > 0:
            return value
        raise ParameterError("Invalid search_from parameter")
    @staticmethod
    def _validate_response_format(_format: str):
        if _format.lower() in Client._SUPPORTED_FORMATS:
            return str(_format)
        else:
            raise ParameterError(
                "Output format should be either JSON or XML.")
    def _build_payload(self, mx, search_from, response_format):
        # Query-string parameters expected by the Reverse MX endpoint.
        return {
            'apiKey': self.api_key,
            'mx': mx,
            'from': search_from,
            'outputFormat': response_format
        }
from pathlib import PurePath
import pandas as pd, numpy as np
import werkzeug, json
from .utils import get_feature_ind, get_json
from sklearn.impute import SimpleImputer
class ReadWebFile:
    """Parse an uploaded (werkzeug FileStorage) file into a DataFrame.

    Supports Excel (.xlsx/.xls), tab-separated .txt and comma-separated
    .csv uploads; anything else yields an empty DataFrame.
    """
    def __init__(self, filepath):
        # filepath: the uploaded file object (werkzeug FileStorage).
        self.filepath = filepath
        if isinstance(filepath, werkzeug.datastructures.FileStorage):
            # NOTE(review): `ext` is only set for FileStorage inputs, so
            # read() would raise AttributeError for anything else — callers
            # (read_data) only construct this with FileStorage.
            self.ext = self.extension = PurePath(filepath.filename).suffix
    def read(self):
        """Return the parsed contents as a DataFrame (empty on failure)."""
        if self.ext in [".xlsx", ".xls"]:
            data = pd.read_excel(self.filepath)
        elif self.ext in [".txt", ".csv"]:
            # Cell delimiter depends on the extension: tab vs comma.
            if self.ext == ".txt":
                spliter = "\t"
            elif self.ext == ".csv":
                spliter = ","
            buffer = self.filepath.read()
            # Try a few common encodings; on success `error` is cleared and
            # `file_read` becomes the list of lines, otherwise `error` keeps
            # the last decode exception.
            for encoding in ["utf-8", "gbk", "utf-16"]:
                try:
                    file_read = buffer.decode(encoding)
                    if "\r\n" in file_read:
                        line_spliter = "\r\n"
                    else:
                        line_spliter = "\n"
                    file_read = file_read.split(line_spliter)
                    error = ""
                    break
                except Exception as e:
                    error = e
                    continue
            if error:
                # All decodings failed: treat as an empty file.
                file_read = ""
            data = []
            if len(file_read) > 0:
                for index, line in enumerate(file_read):
                    row = []
                    for cell in line.split(spliter):
                        row.append(cell)
                    if index == 0:
                        # First row is the header.
                        columns = row
                    elif index == len(file_read) - 1:
                        # NOTE(review): the last row is always skipped —
                        # assumes the upload ends with a trailing newline
                        # (empty final split); a file without one loses its
                        # last data row. TODO confirm intended.
                        continue
                    else:
                        data.append(row)
                data = pd.DataFrame(data, columns=columns)
            else:
                data = pd.DataFrame([])
        else:
            # Unsupported extension.
            data = pd.DataFrame([])
        return data
def read_data(filepath: werkzeug.datastructures.FileStorage) -> pd.DataFrame:
    """Parse an uploaded file into a DataFrame via ReadWebFile.

    NOTE(review): implicitly returns None when *filepath* is not a
    FileStorage instance (no else branch).
    """
    if isinstance(filepath, werkzeug.datastructures.FileStorage):
        return ReadWebFile(filepath).read()
def to_numeric(dataframe: pd.DataFrame, errors="ignore") -> pd.DataFrame:
    """Convert every column of *dataframe* to a numeric dtype where possible.

    The *errors* policy is forwarded to :func:`pandas.to_numeric` for each
    column.
    """
    return dataframe.apply(lambda column: pd.to_numeric(column, errors=errors))
def round_series(series: pd.Series, decimal=3) -> pd.Series:
    """Round *series* to *decimal* places; object-dtype series pass through
    unchanged (rounding strings would raise)."""
    if series.dtype != object:
        return series.round(decimal)
    return series
def round_data(dataframe: pd.DataFrame, decimal=3) -> pd.DataFrame:
    """Round every non-object column of *dataframe* to *decimal* places,
    leaving object-dtype (string) columns untouched."""
    return dataframe.apply(
        lambda column: column if column.dtype == object else column.round(decimal)
    )
def std_del_features_f(dataset, na_feature_names, number_ind, target_ind):
    """Drop zero-variance feature columns from *dataset*.

    :param pd.DataFrame dataset: table holding a sample-number column, the
        target columns and the feature columns
    :param list na_feature_names: names of already-removed columns; extended
        **in place** with the zero-variance column names found here
    :param int number_ind: index of the sample-number column
    :param list target_ind: indices of the target columns
    :return: (new_dataset, na_feature_names, number_ind, target_ind)
    """
    feature_ind = get_feature_ind(dataset.shape[1], number_ind, target_ind)
    feature_df = dataset.iloc[:, feature_ind]
    # Compute the per-column std once (it was previously recomputed for both
    # the "drop" mask and the "keep" mask).
    stds = feature_df.std().round(2)
    std_del_f = feature_df.columns[stds == 0].tolist()
    na_feature_names += std_del_f
    # NOTE(review): columns whose std is NaN fail both `== 0` and `> 0`, so
    # they are dropped without being recorded — same as the original code.
    feature_df = feature_df.loc[:, stds > 0]
    new_dataset = pd.concat([
        dataset.iloc[:, [number_ind]+target_ind],
        feature_df
    ], axis=1)
    return new_dataset, na_feature_names, number_ind, target_ind
def preprocess_dataset(dataset: pd.DataFrame, number_ind: int, target_ind: list, strategy: str="mean"):
    """Clean the feature columns of *dataset* for modelling.

    Steps: coerce features to numeric (non-parsable cells become NaN),
    record columns containing any NaN, drop all-NaN columns, impute the
    remaining NaNs with sklearn's SimpleImputer (*strategy*), rebuild the
    table as [number, targets..., features...], then remove zero-variance
    features via std_del_features_f.

    :param pd.DataFrame dataset: raw uploaded table
    :param int number_ind: index of the sample-number column
    :param list target_ind: indices of the target columns
    :param str strategy: SimpleImputer strategy, e.g. "mean"
    :return: (new_dataset, na_feature_names, new_number_ind, new_target_ind)
    """
    feature_ind = get_feature_ind(dataset.shape[1], number_ind, target_ind)
    feature_df = dataset.iloc[:, feature_ind]
    feature_df = feature_df.apply(pd.to_numeric, errors="coerce")
    # Columns with at least one NaN after coercion are reported to the user.
    na_feature_mask = feature_df.isna().sum(axis=0).astype(bool)
    na_feature_names = feature_df.columns[na_feature_mask].tolist()
    feature_df.dropna(axis="columns", how="all", inplace=True)
    # Impute remaining NaNs in place, preserving column labels.
    feature_df.iloc[:, :] = SimpleImputer(strategy=strategy).fit_transform(feature_df)
    new_dataset = pd.concat([
        dataset.iloc[:, [number_ind]+target_ind],
        feature_df
    ], axis=1)
    # After the concat, the number column is first and targets follow it.
    new_number_ind = 0
    new_target_ind = [i for i in range(1, len(target_ind)+1)]
    new_dataset, na_feature_names, new_number_ind, new_target_ind = std_del_features_f(new_dataset, na_feature_names, new_number_ind, new_target_ind)
    return new_dataset, na_feature_names, new_number_ind, new_target_ind
def get_jsons_for_dataset() -> dict:
    """Collect dataset parameters from the request's JSON body.

    Maps the camelCase request keys onto the snake_case names used
    internally, reading each value via ``get_json``.
    """
    key_map = {
        "datasetName": "name",
        "targetInd": "target_ind",
        "numberInd": "number_ind",
        "uid": "uid",
    }
    return {internal: get_json(external) for external, internal in key_map.items()}
def get_dataset_from_jsons(name: str, dataset: pd.DataFrame, number_ind: int, target_ind: list) -> dict:
    """Preprocess *dataset* and bundle it with its metadata.

    :param str name: display name of the dataset
    :param pd.DataFrame dataset: raw uploaded table
    :param int number_ind: index of the sample-number column
    :param list target_ind: indices of the target columns
    :return: dict with keys name/dataset/number_ind/target_ind/feature_ind/
        na_feature_names describing the cleaned dataset
    """
    # preprocess_dataset re-orders columns, so the indices it returns
    # supersede the ones passed in.
    dataset, na_feature_names, number_ind, target_ind = preprocess_dataset(dataset, number_ind, target_ind)
    feature_ind = get_feature_ind(dataset.shape[1], number_ind, target_ind, [])
    dataset_p = dict()
    dataset_p.update({
        "name": name,
        "dataset": dataset,
        "number_ind": number_ind,
        "target_ind": target_ind,
        "feature_ind": feature_ind,
        "na_feature_names": na_feature_names,
    })
    return dataset_p
from hyperopt import hp, tpe, STATUS_OK, Trials, fmin
import numpy as np
def searching_fn(params, point, transformer, axises):
    """Project *params* through *transformer* and score it against *point*.

    :param params: candidate feature values (1-D sequence)
    :param point: target point in the projected space
    :param transformer: object with a ``transform`` method
    :param axises: indices of the projected axes to compare
    :return: (sum_of_squared_errors, projected_point)
    """
    target = np.array(point)
    candidate = np.array(params).reshape(1, -1)
    projected = transformer.transform(candidate).reshape(-1)[axises]
    diff = projected - target
    return (diff ** 2).sum(), projected
def default_early_stop_fn(criterion=0.00001):
    """Build an early-stop callback for hyperopt's ``fmin``.

    The returned callback reports ``(stop, info)`` where *stop* is True once
    the best trial's loss has dropped below *criterion*, and *info* always
    carries the latest trial's loss.
    """
    def early_stop_fn(trials, *args, **wargs):
        latest = dict(loss=trials.losses()[-1])
        return trials.best_trial["result"]["loss"] < criterion, latest
    return early_stop_fn
def hpopt(feature_ranges, target_point, transformer, iteration, verbose, early_stop_fn, axises, choices, **kwargs):
    """
    Search feature values whose projection matches ``target_point`` using
    hyperopt's TPE optimizer.

    ``feature_ranges`` holds, per feature, either a bound pair or a set of
    discrete choices:
      * bound pair: ``[start, end]``, both floats;
      * choices: ``[choice1, choice2, ...]``, all ints.
    ``feature_ranges`` itself is a dict, e.g.::

        feature_ranges = dict(
            X1=[start, end],
            X2=[start, end],
            X3=[choice1, choice2],
        )

    ``target_point`` is a 2-D point such as ``[3.14, 1.45]``.
    ``transformer`` is any object with a ``transform`` method that maps a
    point generated from ``feature_ranges`` to a 2-D point.

    ``choices`` is a parallel sequence of booleans: True selects
    ``hp.choice`` (discrete), False selects ``hp.uniform`` (continuous).
    NOTE(review): relies on dict insertion order to pair ``choices`` with
    ``feature_ranges`` entries; a non-bool entry in ``choices`` would leave
    ``hpobj`` unbound (or stale) — TODO confirm callers only pass bools.
    """
    # Iterate over feature_ranges to build the search space hyperopt needs.
    hpspace = []
    for choice, (fname, frange) in zip(choices, feature_ranges.items()):
        if choice is True:
            hpobj = hp.choice(fname, frange)
        elif choice is False:
            hpobj = hp.uniform(fname, frange[0], frange[1])
        hpspace.append(hpobj)
    def f(params):
        # Objective: squared distance between the projection and the target.
        error = searching_fn(params, target_point, transformer, axises)
        return {'loss': error[0], 'status': STATUS_OK}
    trials = Trials()
    best = fmin(fn=f, space=hpspace, algo=tpe.suggest, max_evals=iteration, verbose=verbose, trials=trials, early_stop_fn=early_stop_fn)
    # best = np.array([ best[i] for i in feature_ranges.keys()]).reshape(1, -1)
    # hp.choice reports the *index* of the winning option, so discrete
    # features must be mapped back to their actual value.
    best_fvalues = []
    for choice, (i, j) in zip(choices, feature_ranges.items()):
        if choice is True:
            best_fvalues.append(j[best[i]])
        elif choice is False:
            best_fvalues.append(best[i])
    return np.array(best_fvalues).reshape(1, -1)
def scipy_minimizer(x, *args):
    """Objective for ``scipy.optimize.minimize``.

    Returns the Euclidean distance between the transformed candidate (taken
    on the selected axes) and the target point.

    ``args`` is ``(target, transformer, axises)`` as passed through
    ``minimize(..., args=...)``.
    """
    target, transformer, axes = args[0], args[1], args[2]
    projected = transformer.transform(x.reshape(1, -1))[0, axes]
    return ((projected - target) ** 2).sum() ** 0.5
from scipy.optimize import minimize, Bounds
def scipy_minimize(feature_ranges, target_point, transformer, iteration, verbose, criterion, axises, **kwargs):
    """Search feature values whose projection matches ``target_point`` with
    scipy's trust-constr optimizer inside the given box bounds.

    Each ``feature_ranges`` value is a ``[low, high]`` pair; the starting
    point is drawn uniformly at random within those bounds.
    """
    fnames, lows, highs, x0 = [], [], [], []
    for name, bound_pair in feature_ranges.items():
        fnames.append(name)
        lows.append(bound_pair[0])
        highs.append(bound_pair[1])
        x0.append(np.random.uniform(bound_pair[0], bound_pair[1]))
    result = minimize(
        scipy_minimizer,
        x0,
        bounds=Bounds(lows, highs),
        tol=criterion,
        options={"verbose": verbose, 'maxiter': iteration},
        args=(target_point, transformer, axises),
        method="trust-constr",
    )
    return result.x.reshape(1, -1)
import datetime
from json import loads, JSONDecodeError
import re
from .net.http import ApiRequester
from .models.response import Response
from .models.request import Fields
from .exceptions.error import ParameterError, EmptyApiKeyError, \
UnparsableApiResponseError
class Client:
    """Client for the WhoisXML API Reverse WHOIS endpoint.

    Validates search terms and options locally and delegates HTTP calls to
    :class:`ApiRequester`. ``preview``/``purchase``/``data`` fetch a single
    page; ``iterate_pages``/``next_page`` implement pagination via the
    ``searchAfter`` cursor returned by the API.
    """
    # Production endpoint; overridable via the `base_url` kwarg/property.
    __default_url = "https://reverse-whois.whoisxmlapi.com/api/v2"
    _api_requester: ApiRequester or None
    _api_key: str
    # API keys look like 'at_' followed by 29 alphanumeric characters.
    _re_api_key = re.compile(r'^at_[a-z0-9]{29}$', re.IGNORECASE)
    _SUPPORTED_FORMATS = ['json', 'xml']
    _PARSABLE_FORMAT = 'json'
    JSON_FORMAT = 'json'
    XML_FORMAT = 'xml'
    PREVIEW_MODE = 'preview'
    PURCHASE_MODE = 'purchase'
    CURRENT = 'current'
    HISTORIC = 'historic'
    __DATETIME_OR_NONE_MSG = 'Value should be None or an instance of ' \
                             'datetime.date'
    def __init__(self, api_key: str, **kwargs):
        """
        :param api_key: str: Your API key.
        :key base_url: str: (optional) API endpoint URL.
        :key timeout: float: (optional) API call timeout in seconds
        """
        self._api_key = ''
        self.api_key = api_key
        if 'base_url' not in kwargs:
            kwargs['base_url'] = Client.__default_url
        self.api_requester = ApiRequester(**kwargs)
    @property
    def api_key(self) -> str:
        return self._api_key
    @api_key.setter
    def api_key(self, value: str):
        # Setter validates the format and raises ParameterError on mismatch.
        self._api_key = Client._validate_api_key(value)
    @property
    def api_requester(self) -> ApiRequester or None:
        return self._api_requester
    @api_requester.setter
    def api_requester(self, value: ApiRequester):
        self._api_requester = value
    @property
    def base_url(self) -> str:
        # base_url and timeout simply delegate to the underlying requester.
        return self._api_requester.base_url
    @base_url.setter
    def base_url(self, value: str or None):
        if value is None:
            self._api_requester.base_url = Client.__default_url
        else:
            self._api_requester.base_url = value
    @property
    def timeout(self) -> float:
        return self._api_requester.timeout
    @timeout.setter
    def timeout(self, value: float):
        self._api_requester.timeout = value
    def iterate_pages(self, **kwargs):
        """
        Iterate over all pages of domains matching the given search terms
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key search_after: Optional. Integer.
        :yields Response: Instance of `Response` with a page.
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        resp = self.purchase(**kwargs)
        yield resp
        while resp.has_next():
            resp = self.next_page(resp, **kwargs)
            yield resp
    def next_page(self, current_page: Response, **kwargs) \
            -> Response:
        """
        Get the next page if available, otherwise returns the given one
        :param Response current_page: The current page.
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key search_after: Optional. Integer.
        :return: Instance of `Response` with a next page.
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        if current_page.has_next():
            # Pagination cursor supplied by the API with each page.
            search_after = current_page.next_page_search_after
            kwargs['search_after'] = search_after
            return self.purchase(**kwargs)
        return current_page
    def preview(self, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.
        Mode = `preview`
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key search_after: Optional. Integer.
        :return: `Response` instance
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        kwargs['mode'] = Client.PREVIEW_MODE
        return self.data(**kwargs)
    def purchase(self, **kwargs):
        """
        Get parsed API response as a `Response` instance.
        Mode = `purchase`
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key search_after: Optional. Integer.
        :return: `Response` instance
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        kwargs['mode'] = Client.PURCHASE_MODE
        return self.data(**kwargs)
    def data(self, **kwargs) -> Response:
        """
        Get parsed API response as a `Response` instance.
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key mode: Optional. Supported options - `Client.PREVIEW_MODE` and
                `Client.PURCHASE_MODE`. Default is `Client.PREVIEW_MODE`
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key search_after: Optional. Integer.
        :return: `Response` instance
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        # Force the parsable (JSON) format; raw_data is the XML escape hatch.
        kwargs['response_format'] = Client._PARSABLE_FORMAT
        response = self.raw_data(**kwargs)
        try:
            parsed = loads(str(response))
            # 'domainsCount' is the root element of a successful response.
            if 'domainsCount' in parsed:
                return Response(parsed)
            raise UnparsableApiResponseError(
                "Could not find the correct root element.", None)
        except JSONDecodeError as error:
            # Chain the original decode error for a useful traceback.
            raise UnparsableApiResponseError(
                "Could not parse API response", error) from error
    def raw_data(self, **kwargs) -> str:
        """
        Get raw API response.
        :key basic_terms: Required if advanced_terms aren't specified.
                Dictionary. Take a look at API documentation for the format
        :key advanced_terms: Required if basic_terms aren't specified
                List. Take a look at API documentation for the format
        :key mode: Optional. Supported options - `Client.PREVIEW_MODE` and
                `Client.PURCHASE_MODE`. Default is `Client.PREVIEW_MODE`
        :key search_type: Optional. Supported options - `Client.CURRENT`
                and `Client.HISTORIC`. Default is `Client.CURRENT`
        :key punycode: Optional. Boolean. Default value is `True`
        :key include_audit_dates: Optional. Boolean. Default value is `False`
        :key created_date_from: Optional. datetime.date.
        :key created_date_to: Optional. datetime.date.
        :key updated_date_from: Optional. datetime.date.
        :key updated_date_to: Optional. datetime.date.
        :key expired_date_from: Optional. datetime.date.
        :key expired_date_to: Optional. datetime.date.
        :key response_format: Optional. use constants
                JSON_FORMAT and XML_FORMAT
        :key search_after: Optional. Integer.
        :return: str
        :raises ConnectionError:
        :raises ReverseWhoisApiError: Base class for all errors below
        :raises ResponseError: response contains an error message
        :raises ApiAuthError: Server returned 401, 402 or 403 HTTP code
        :raises BadRequestError: Server returned 400 or 422 HTTP code
        :raises HttpApiError: HTTP code >= 300 and not equal to above codes
        :raises ParameterError: invalid parameter's value
        """
        if self.api_key == '':
            raise EmptyApiKeyError('')
        if 'basic_terms' in kwargs:
            basic_terms = Client._validate_basic_terms(kwargs['basic_terms'])
        else:
            basic_terms = None
        if 'advanced_terms' in kwargs:
            advanced_terms = Client._validate_advanced_terms(
                kwargs['advanced_terms'])
        else:
            advanced_terms = None
        # Exactly one of the two term styles must be supplied.
        if not advanced_terms and not basic_terms:
            raise ParameterError(
                "Required one from basic_terms and advanced_terms")
        # 'output_format' is accepted as a legacy alias of 'response_format'.
        if 'output_format' in kwargs:
            kwargs['response_format'] = kwargs['output_format']
        if 'response_format' in kwargs:
            response_format = Client._validate_response_format(
                kwargs['response_format'])
        else:
            response_format = Client._PARSABLE_FORMAT
        if 'search_type' in kwargs:
            search_type = Client._validate_search_type(kwargs['search_type'])
        else:
            search_type = Client.CURRENT
        if 'search_after' in kwargs:
            search_after = Client._validate_search_after(
                kwargs['search_after'])
        else:
            search_after = None
        if 'punycode' in kwargs:
            punycode = Client._validate_punycode(kwargs['punycode'])
        else:
            punycode = True
        if 'include_audit_dates' in kwargs:
            include_audit_dates = Client._validate_include_audit_dates(
                kwargs['include_audit_dates'])
        else:
            include_audit_dates = False
        if 'mode' in kwargs:
            mode = Client._validate_mode(kwargs['mode'])
        else:
            mode = Client.PREVIEW_MODE
        if 'created_date_from' in kwargs:
            created_date_from = Client._validate_date(
                kwargs['created_date_from']
            )
        else:
            created_date_from = None
        if 'created_date_to' in kwargs:
            created_date_to = Client._validate_date(
                kwargs['created_date_to']
            )
        else:
            created_date_to = None
        if 'updated_date_from' in kwargs:
            updated_date_from = Client._validate_date(
                kwargs['updated_date_from']
            )
        else:
            updated_date_from = None
        if 'updated_date_to' in kwargs:
            updated_date_to = Client._validate_date(
                kwargs['updated_date_to']
            )
        else:
            updated_date_to = None
        if 'expired_date_from' in kwargs:
            expired_date_from = Client._validate_date(
                kwargs['expired_date_from']
            )
        else:
            expired_date_from = None
        if 'expired_date_to' in kwargs:
            expired_date_to = Client._validate_date(
                kwargs['expired_date_to']
            )
        else:
            expired_date_to = None
        return self._api_requester.post(self._build_payload(
            self.api_key,
            basic_terms,
            advanced_terms,
            mode,
            punycode,
            search_type,
            search_after,
            include_audit_dates,
            response_format,
            created_date_from,
            created_date_to,
            updated_date_from,
            updated_date_to,
            expired_date_from,
            expired_date_to,
        ))
    @staticmethod
    def _validate_api_key(api_key) -> str:
        if Client._re_api_key.search(
                str(api_key)
        ) is not None:
            return str(api_key)
        else:
            raise ParameterError("Invalid API key format.")
    @staticmethod
    def _validate_basic_terms(value) -> dict:
        include, exclude = [], []
        if value is None:
            raise ParameterError("Terms list cannot be None.")
        elif type(value) is dict:
            if 'include' in value:
                include = list(map(lambda s: str(s), value['include']))
                include = list(
                    filter(lambda s: s is not None and len(s) > 0, include))
                # Bug fix: the old check `4 <= len(include) <= 1` could never
                # be true, so invalid term counts were silently accepted.
                if not 1 <= len(include) <= 4:
                    raise ParameterError("Include terms list must include "
                                         "from 1 to 4 terms.")
            if 'exclude' in value:
                exclude = list(map(lambda s: str(s), value['exclude']))
                exclude = list(
                    filter(lambda s: s is not None and len(s) > 0, exclude))
                # Bug fix: same impossible range (`4 <= len <= 0`); only the
                # upper bound is meaningful for the optional exclude list.
                if len(exclude) > 4:
                    raise ParameterError("Exclude terms list must include "
                                         "from 0 to 4 terms.")
            if include:
                return {'include': include, 'exclude': exclude}
        raise ParameterError("Expected a dict with 2 lists of strings.")
    @staticmethod
    def _validate_advanced_terms(value) -> list:
        if value is None:
            raise ParameterError("Terms list cannot be None.")
        elif type(value) is list:
            # Bug fix: the old check `4 <= len(value) < 1` could never be
            # true; enforce the documented 1..4 range.
            if not 1 <= len(value) <= 4:
                raise ParameterError(
                    "Terms list must include from 1 to 4 items.")
            for item in value:
                if 'field' not in item or 'term' not in item:
                    raise ParameterError(
                        "Invalid advanced search terms format.")
                if item['field'] not in Fields.values():
                    raise ParameterError("Unknown field name.")
                if item['term'] is None or type(item['term']) is not str \
                        or len(item['term']) < 2:
                    raise ParameterError("Term should be non-empty string.")
            return value
        raise ParameterError("Expected a list of pairs field <-> term.")
    @staticmethod
    def _validate_search_after(value):
        if value is not None and int(value) > 0:
            return int(value)
        raise ParameterError(
            "Search after parameter value must be an integer greater "
            "than zero or None")
    @staticmethod
    def _validate_response_format(value: str):
        if value.lower() in [Client.JSON_FORMAT, Client.XML_FORMAT]:
            return value.lower()
        raise ParameterError(
            f"Response format must be {Client.JSON_FORMAT} "
            f"or {Client.XML_FORMAT}")
    @staticmethod
    def _validate_include_audit_dates(value: bool):
        if value in [True, False]:
            return value
        raise ParameterError("Value must be True or False")
    @staticmethod
    def _validate_mode(value: str):
        if value.lower() in [Client.PREVIEW_MODE, Client.PURCHASE_MODE]:
            return value.lower()
        raise ParameterError(
            f"Mode must be {Client.PREVIEW_MODE} or {Client.PURCHASE_MODE}")
    @staticmethod
    def _validate_punycode(value: bool):
        if value in [True, False]:
            return value
        raise ParameterError(
            "Punycode parameter value must be True or False")
    @staticmethod
    def _validate_search_type(value: str):
        if value.lower() in [Client.CURRENT, Client.HISTORIC]:
            return value.lower()
        raise ParameterError(
            f"Search type must be {Client.CURRENT} or {Client.HISTORIC}")
    @staticmethod
    def _validate_date(value: datetime.date or None):
        # Bug fix: previously a None input fell into `str(value)` and the
        # literal string "None" ended up in the request payload; now None
        # passes through so _build_payload can drop the field entirely.
        if value is None:
            return None
        if isinstance(value, datetime.date):
            return str(value)
        raise ParameterError(Client.__DATETIME_OR_NONE_MSG)
    @staticmethod
    def _build_payload(
            api_key,
            basic_terms,
            advanced_terms,
            mode,
            punycode,
            search_type,
            search_after,
            include_audit_dates,
            response_format,
            created_date_from,
            created_date_to,
            updated_date_from,
            updated_date_to,
            expired_date_from,
            expired_date_to,
    ) -> dict:
        tmp = {
            'apiKey': api_key,
            'mode': mode,
            'punycode': punycode,
            'searchType': search_type,
            'includeAuditDates': include_audit_dates,
            'responseFormat': response_format,
            'createdDateFrom': created_date_from,
            'createdDateTo': created_date_to,
            'updatedDateFrom': updated_date_from,
            'updatedDateTo': updated_date_to,
            'expiredDateFrom': expired_date_from,
            'expiredDateTo': expired_date_to,
            'searchAfter': search_after,
            'basicSearchTerms': basic_terms,
            'advancedSearchTerms': advanced_terms
        }
        # Drop unset (None) fields so they are omitted from the request body.
        payload = {}
        for k, v in tmp.items():
            if v is not None:
                payload[k] = v
        return payload
from cms.models import Placeholder
from cms.plugin_rendering import ContentRenderer
from sekizai.context import SekizaiContext
import diff_match_patch as dmp
def revert_escape(txt, transform=True):
    """Undo the HTML escaping in diff_match_patch's pretty-printed output.

    Restores "&lt;"/"&gt;"/"&amp;" to their literal characters and turns the
    pilcrow line markers ("¶<br>") emitted by diff_prettyHtml back into
    newlines.

    BUG FIX: the replace() calls had degenerated into no-ops such as
    ``replace("&", "&")`` (the entity names were lost); the actual entity
    names are restored here. "&amp;" is deliberately unescaped LAST so an
    escaped entity like "&amp;lt;" correctly reverts to "&lt;" instead of
    being unescaped twice down to "<".

    transform replaces the '<ins ' or '<del ' with '<div '
    :type transform: bool
    """
    html = txt.replace("&lt;", "<").replace("&gt;", ">").replace("&amp;", "&").replace("¶<br>", "\n")
    if transform:
        html = html.replace('<ins ', '<div ').replace('<del ', '<div ').replace('</ins>', '</div>')\
            .replace('</del>', '</div>')
    return html
def create_placeholder_contents(left_page, right_page, request, language):
    """Render every placeholder of two pages and diff them slot by slot.

    Returns a dict mapping slot name to
    ``{'left': html, 'right': html, 'diff_right_to_left': pretty diff}``.
    """
    # persist rendered html content for each placeholder for later use in diff
    placeholders_a = Placeholder.objects.filter(page=left_page.pk).order_by('slot')
    placeholders_b = Placeholder.objects.filter(page=right_page.pk).order_by('slot')
    slots = set()
    # NOTE(review): ``a or b`` iterates only placeholders_a whenever it is
    # non-empty, so a slot existing solely on the right page would be missed
    # in that case -- confirm whether both querysets should be scanned.
    for p in placeholders_a or placeholders_b:
        slots.add(p.slot)
    # For each slot, pair up the matching placeholder from each page (None
    # when a page has no placeholder for that slot).
    # NOTE(review): .filter(slot=x).get(slot=x) repeats the slot condition;
    # .get() alone on the filtered queryset would suffice.
    placeholders = {x: [placeholders_a.filter(slot=x).get(slot=x)
                        if placeholders_a.filter(slot=x).count() > 0 else None,
                        placeholders_b.filter(slot=x).get(slot=x)
                        if placeholders_b.filter(slot=x).count() > 0 else None]
                    for x in slots}
    diffs = {}
    for key, (p1, p2) in placeholders.items():
        body1 = placeholder_html(p1, request, language)
        body2 = placeholder_html(p2, request, language)
        # Diff direction: right page's rendering against the left page's.
        diff = diff_texts(body2, body1)
        diffs[key] = {'left': body1, 'right': body2,
                      'diff_right_to_left': diff}
    return diffs
def placeholder_html(placeholder, request, language):
    """Render a single django-cms placeholder to an HTML string.

    Returns '' for a falsy placeholder (e.g. a slot missing on one page).
    """
    if not placeholder:
        return ''
    # Drop django-cms' cached plugin list so rendering reflects current state.
    if hasattr(placeholder, '_plugins_cache'):
        del placeholder._plugins_cache
    renderer = ContentRenderer(request)
    context = SekizaiContext({
        'request': request,
        'cms_content_renderer': renderer,
        # NOTE(review): get_template is passed uncalled -- presumably the
        # rendering machinery invokes the callable; confirm.
        'CMS_TEMPLATE': placeholder.page.get_template
    })
    return renderer.render_placeholder(placeholder, context, language=language).strip()
def diff_texts(text1, text2):
    """Return a human-readable HTML diff transforming *text1* into *text2*.

    The pretty-printed diff_match_patch output is passed through
    revert_escape() so escaped entities are restored.
    """
    engine = dmp.diff_match_patch()
    raw_diffs = engine.diff_main(text1, text2)
    engine.diff_cleanupEfficiency(raw_diffs)
    diffs = revert_escape(engine.diff_prettyHtml(raw_diffs))
    return diffs
return diffs | /reversion2-0.3.tar.gz/reversion2-0.3/djangocms_reversion2/diff.py | 0.618089 | 0.249002 | diff.py | pypi |
import re
from reverso_context_api.misc import BASE_URL
from reverso_context_api.session import ReversoSession
FAVORITES_PAGE_SIZE = 50
class Client(object):
    # Thin HTTP client for the (unofficial) Reverso Context endpoints.
    # All public methods are generators, so results are fetched lazily,
    # page by page, via the private *_pager helpers below.
    def __init__(self, source_lang, target_lang, credentials=None, user_agent=None):
        """
        Simple client for Reverso Context
        Language can be redefined in api calls
        :param source_lang: Default source language
        :param target_lang: Default target language
        :param credentials: Optional login information: pair of (email, password)
        :param user_agent: Optional user agent string that will be used during API calls
        """
        self._source_lang, self._target_lang = source_lang, target_lang
        self._session = ReversoSession(credentials=credentials, user_agent=user_agent)
    def get_translations(self, text, source_lang=None, target_lang=None):
        """Yields found translations of word (without context)
        For example:
        >>> list(Client("de", "en").get_translations("braucht"))
        ['needed', 'required', 'need', 'takes', 'requires', 'take', 'necessary'...]
        """
        r = self._request_translations(text, source_lang, target_lang)
        contents = r.json()
        for entry in contents["dictionary_entry_list"]:
            yield entry["term"]
    def get_translation_samples(self, text, target_text=None, source_lang=None, target_lang=None, cleanup=True):
        """Yields pairs (source_text, translation) of context for passed text
        >>> import itertools # just like other methods, this one returns iterator
        >>> c = Client("en", "de")
        >>> list(itertools.islice(c.get_translation_samples("cellar door", cleanup=False), 3)) # take first three samples
        [("And Dad still hasn't fixed the <em>cellar door</em>.",
        'Und Dad hat die <em>Kellertür</em> immer noch nicht repariert.'),
        ("Casey, I'm outside the <em>cellar door</em>.",
        'Casey, ich bin vor der <em>Kellertür</em>.'),
        ('The ridge walk and mountain bike trails are accessible from the <em>cellar door</em>.',
        'Der Ridge Walk und verschiedene Mountainbikestrecken sind von der <em>Weinkellerei</em> aus zugänglich.')]
        :param target_text: if there are many translations of passed text (see get_translations), with this parameter
        you can narrow the sample search down to one passed translation
        :param cleanup: Remove <em>...</em> around requested part and its translation
        Based on example from get_translations: get first sample where 'braucht' was translated as 'required':
        >>> next(c.get_translation_samples("braucht", "required"))
        ('Für einen wirksamen Verbraucherschutz braucht man internationale Vorschriften.',
        'In order to achieve good consumer protection, international rules are required.')
        """
        for page in self._translations_pager(text, target_text, source_lang, target_lang):
            for entry in page["list"]:
                source_text, translation = entry["s_text"], entry["t_text"]
                if cleanup:
                    # Strip the <em>...</em> markers highlighting the match.
                    source_text = self._cleanup_html_tags(source_text)
                    translation = self._cleanup_html_tags(translation)
                yield source_text, translation
    def get_favorites(self, source_lang=None, target_lang=None, cleanup=True):
        """
        Yields context samples saved by you as favorites (you have to provide login credentials to Client to use it)
        :param source_lang: string of lang abbreviations separated by comma
        :param target_lang: same as source_lang
        :param cleanup: remove <em>...</em> tags marking occurance of source_text
        :return: dict of sample attrs (source/target lang/context/text)
        """
        for page in self._favorites_pager(source_lang, target_lang):
            for entry in page["results"]:
                yield self._process_fav_entry(entry, cleanup)
    def get_search_suggestions(self, text, source_lang=None, target_lang=None, fuzzy_search=False, cleanup=True):
        """
        Yields search suggestions for passed text
        >>> list(Client("de", "en").get_search_suggestions("bew"))
        ['Bewertung', 'Bewegung', 'bewegen', 'bewegt', 'bewusst', 'bewirkt', 'bewertet'...]
        :param fuzzy_search: Allow fuzzy search (can find suggestions for words with typos: entzwickl -> Entwickler)
        :param cleanup: Remove <b>...</b> around requested part in each suggestion
        """
        r = self._request_suggestions(text, source_lang, target_lang)
        # Exact matches live under "suggestions"; typo-tolerant matches are
        # returned in the separate "fuzzy1"/"fuzzy2" sections.
        parts = ["suggestions"]
        if fuzzy_search:
            parts += ["fuzzy1", "fuzzy2"]
        contents = r.json()
        for part in parts:
            for term in contents[part]:
                suggestion = term["suggestion"]
                if cleanup:
                    suggestion = self._cleanup_html_tags(suggestion)
                yield suggestion
    def _translations_pager(self, text, target_text=None, source_lang=None, target_lang=None):
        # Yields one decoded JSON page at a time. pages_total starts as None,
        # so the first request is always issued; afterwards the
        # server-reported "npages" bounds the loop.
        page_num, pages_total = 1, None
        while page_num != pages_total:
            r = self._request_translations(text, source_lang, target_lang, target_text, page_num=page_num)
            contents = r.json()
            pages_total = contents["npages"]
            yield contents
            page_num += 1
    def _favorites_pager(self, source_lang=None, target_lang=None):
        # Favorites require an authenticated session, hence the login() call.
        source_lang = source_lang or self._source_lang
        target_lang = target_lang or self._target_lang
        self._session.login()
        # NOTE(review): ``total`` initialised here is immediately overwritten
        # inside the loop; only the loop-local value is used.
        start, total = 0, None
        while True:
            r = self._request_favorites(source_lang, target_lang, start)
            contents = r.json()
            total = contents["numTotalResults"]
            yield contents
            start += FAVORITES_PAGE_SIZE
            if start >= total:
                break
    def _request_translations(self, text, source_lang, target_lang, target_text=None, page_num=None):
        # defaults are set here because this method can be called both directly and via pager
        target_text = target_text or ""
        page_num = page_num or 1
        # NOTE(review): the ``target_text or ""`` in the dict below repeats
        # the default already applied above; harmless but redundant.
        data = {
            "source_lang": source_lang or self._source_lang,
            "target_lang": target_lang or self._target_lang,
            "mode": 0,
            "npage": page_num,
            "source_text": text,
            "target_text": target_text or "",
        }
        r = self._session.json_request("POST", BASE_URL + "bst-query-service", data)
        return r
    def _request_favorites(self, source_lang, target_lang, start):
        params = {
            "sourceLang": source_lang,
            "targetLang": target_lang,
            "start": start,
            "length": FAVORITES_PAGE_SIZE,
            "order": 10 # don't know yet what this value means, but it works
        }
        r = self._session.json_request("GET", BASE_URL + "bst-web-user/user/favourites", params=params)
        return r
    def _request_suggestions(self, text, source_lang, target_lang):
        data = {
            "search": text,
            "source_lang": source_lang or self._source_lang,
            "target_lang": target_lang or self._target_lang
        }
        r = self._session.json_request("POST", BASE_URL + "bst-suggest-service", data)
        return r
    def _process_fav_entry(self, entry, cleanup):
        # Rename the API's camelCase keys to this library's snake_case ones,
        # optionally stripping the highlight tags from the context fields.
        entry_fields_map = {
            "srcLang": "source_lang",
            "srcText": "source_text",
            "srcContext": "source_context",
            "trgLang": "target_lang",
            "trgText": "target_text",
            "trgContext": "target_context"
        }
        fields_to_clean = {"srcContext", "trgContext"}
        processed_entry = {}
        for field_from, field_to in entry_fields_map.items():
            val = entry[field_from]
            if cleanup and field_from in fields_to_clean:
                val = self._cleanup_html_tags(val)
            processed_entry[field_to] = val
        return processed_entry
    @staticmethod
    def _cleanup_html_tags(text):
        """Remove html tags like <b>...</b> or <em>...</em> from text
        I'm well aware that generally it's a felony, but in this case tags cannot even overlap
        """
return re.sub(r"<.*?>", "", text) | /reverso_context_api-0.5.tar.gz/reverso_context_api-0.5/reverso_context_api/client.py | 0.71423 | 0.195153 | client.py | pypi |
import logging
from typing import Optional, get_type_hints, List
from bson import ObjectId
from pymongo import MongoClient
from pymongo.collection import Collection
not_fields = ('_client', '_db', '_collection', '_indexes')
class Document:
    """Minimal ODM-style wrapper around a MongoDB document.

    Subclasses declare their fields as class-level type hints; every hint
    except the connection attributes listed in ``not_fields`` is treated as
    a document field and proxied to the underlying ``_document`` dict.
    ``connect()`` must be called (or ``_client``/``_db`` set on the class)
    before instances are created, since ``__init__`` reads those attributes.
    """

    _id: Optional[ObjectId]
    _client: MongoClient
    _db: str
    _collection: str
    _indexes: List

    @property
    def id(self) -> Optional[ObjectId]:
        """ObjectId of the stored document, or None if not yet inserted."""
        return self._document.get('_id')

    @property
    def document(self) -> dict:
        """The raw underlying document dict."""
        return self._document

    def __init__(self, **kwargs):
        hints = get_type_hints(self.__class__)
        # Every annotated attribute except the connection plumbing is a field.
        self.__fields = list(filter(lambda x: x not in not_fields, hints))
        self.connect(self._client, self._db, self._collection)
        self._document = kwargs
        self._ensure_indexes()

    def __getattr__(self, item):
        # Only called when normal lookup fails: proxy declared fields to the
        # document dict (missing fields read as None).
        if item in self.__fields:
            return self._document.get(item)
        raise AttributeError(item)

    def __setattr__(self, key, value):
        if key == '_Document__fields':
            # The (name-mangled) field list itself must live on the instance
            # so the field check below can work for subsequent assignments.
            # BUG FIX: previously this fell through and stored the attribute
            # a second time via the else-branch; return early instead.
            super(Document, self).__setattr__(key, value)
            return
        if key in self.__fields:
            self._document[key] = value
        else:
            super(Document, self).__setattr__(key, value)

    def _ensure_indexes(self):
        """Create the indexes declared in ``_indexes``, if any."""
        if not hasattr(self, '_indexes') or not self._indexes:
            return
        self._get_collection().create_indexes(self._indexes)

    @classmethod
    def connect(cls, client: Optional[MongoClient] = None,
                db: str = '', collection: str = ''):
        """Configure the class-level MongoDB connection.

        Any argument left falsy keeps the previously configured value; the
        collection name defaults to the lowercased class name.
        """
        if client:
            cls._client = client
        if db:
            cls._db = db
        if collection:
            cls._collection = collection
        if not hasattr(cls, '_collection'):
            cls._collection = cls.__name__.lower()

    @classmethod
    def _get_collection(cls) -> Collection:
        """Return the pymongo Collection this class is bound to."""
        return cls._client[cls._db][cls._collection]

    def insert(self):
        """Insert this document; returns pymongo's InsertOneResult."""
        self._ensure_indexes()
        return self._get_collection().insert_one(self._document)

    @classmethod
    def one(cls, filter_=None) -> Optional['Document']:
        """Return the first document matching *filter_*, or None."""
        doc = cls._get_collection().find_one(filter_)
        return cls(**doc) if doc else None

    @classmethod
    def many(cls, filter_=None) -> List['Document']:
        """Return all documents matching *filter_*."""
        docs = cls._get_collection().find(filter_)
        return [cls(**doc) for doc in docs]

    @classmethod
    def by_id(cls, id: ObjectId) -> Optional['Document']:
        """Return the document with the given ObjectId, or None.

        BUG FIX: the query used ``{'_id': {'$in': id}}``, but ``$in``
        requires an array operand -- a bare ObjectId makes the server
        reject the query. A single-id lookup is plain equality.
        """
        doc = cls._get_collection().find_one({'_id': id})
        return cls(**doc) if doc else None

    @classmethod
    def by_ids(cls, ids: List[ObjectId]) -> List['Document']:
        """Return all documents whose _id is in *ids*."""
        docs = cls._get_collection().find({'_id': {'$in': ids}})
        return [cls(**doc) for doc in docs]

    @classmethod
    def sample(cls, size: int, filter_=None, projection=None) -> List['Document']:
        """Return up to *size* random documents matching *filter_*.

        Samples ids via an aggregation pipeline, then re-fetches the full
        documents so the caller's *projection* applies to the result.
        """
        pipeline = []
        if filter_:
            pipeline.append({'$match': filter_})
        pipeline.append({'$sample': {'size': size}})
        pipeline.append({'$project': {'_id': True}})
        docs = cls._get_collection().aggregate(pipeline)
        docs = cls._get_collection().find(
            {'_id': {'$in': [d['_id'] for d in docs]}},
            projection=projection
        )
        return [cls(**doc) for doc in docs]

    def update(self):
        """Replace the stored document with the in-memory one.

        Returns the document as it was *before* the replacement
        (find_one_and_replace's default behavior).
        """
        self._ensure_indexes()
        return self._get_collection().find_one_and_replace({'_id': self.id}, self._document)

    def delete(self):
        """Delete this document; returns the deleted document or None."""
        return self._get_collection().find_one_and_delete({'_id': self.id})
return self._get_collection().find_one_and_delete({'_id': self.id}) | /revfyawo.mongo-0.0.6-py3-none-any.whl/revfyawo/mongo/document.py | 0.828176 | 0.154823 | document.py | pypi |
"""Contains functions to interface with GitHub."""
import logging
import threading
import requests
from review.bot.misc import _get_gh_token
LOG = logging.getLogger(__name__)
LOG.setLevel("DEBUG")
def _fetch_file_content(file_data, headers):
    """Fetch the content of a single changed file.

    Mutates *file_data* in place, adding a ``file_text`` key with the raw
    file contents downloaded from the GitHub contents API.
    """
    meta_response = requests.get(
        file_data["contents_url"], headers=headers, timeout=10
    )
    if meta_response.status_code != 200:
        raise RuntimeError("Error fetching file content")
    meta = meta_response.json()
    file_data["file_text"] = requests.get(meta["download_url"], timeout=10).text
def get_changed_files_and_contents(owner, repo, pull_number, gh_access_token=None):
    r"""Retrieve the filenames, status, and contents of files changed in a GitHub PR.

    Parameters
    ----------
    owner : str
        The owner of the repository where the pull request was made.
    repo : str
        The name of the repository where the pull request was made.
    pull_number : int
        The number of the pull request to retrieve the changed files for.
    gh_access_token : str, optional
        GitHub token needed to communicate with the repository. By default, ``None``,
        which means it will try to read an existing env variable named ``GITHUB_TOKEN``.

    Returns
    -------
    list[dict]
        A list of dictionaries, where each dictionary represents a file that
        was changed in the pull request. The dictionary contains the following
        keys:

        - filename: str, the name of the file
        - status: str, the status of the file change ('added', 'modified', or 'removed')
        - file_text: str, the contents of the file as a string

    Raises
    ------
    RuntimeError
        If the list of changed files or any file content cannot be fetched.

    Notes
    -----
    This function uses the GitHub API to retrieve the list of changed files in
    a pull request, along with the contents of each changed file. It requires a
    GitHub access token with appropriate permissions to access the repository.

    Example
    -------
    >>> files = get_changed_files_and_contents('my-org', 'my-repo', 123)
    >>> print(files[0]['filename'])
    'path/to/my-file.py'
    >>> print(files[0]['status'])
    'modified'
    >>> print(files[0]['file_text'])
    'print("Hello, world!")\n'
    """
    access_token = _get_gh_token() if gh_access_token is None else gh_access_token
    # BUG FIX: a first request to the /pulls/{n} endpoint was issued and its
    # JSON immediately discarded (the result was overwritten below); only the
    # /files listing is needed, so the dead request is removed.
    url = f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_number}/files"
    headers = {"Authorization": f"Bearer {access_token}"}
    response = requests.get(url, headers=headers, timeout=10)
    if response.status_code != 200:
        # BUG FIX: this path used to print a message and return None while
        # the other error path raised; raise consistently so callers never
        # have to special-case a None return.
        raise RuntimeError(
            f"Error fetching pull request files from:\n{url}\n\n{response.status_code}"
        )
    files = response.json()
    # Download each file's content concurrently; _fetch_file_content mutates
    # the per-file dicts in place (adds the "file_text" key).
    threads = []
    for file_data in files:
        LOG.info("Filename: %s", file_data["filename"])
        LOG.info("Status: %s", file_data["status"])
        thread = threading.Thread(target=_fetch_file_content, args=(file_data, headers))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    return files
return files | /review_bot-0.1.1-py3-none-any.whl/review/bot/gh_interface.py | 0.856677 | 0.224703 | gh_interface.py | pypi |
"""Interface module for local GIT files."""
import logging
import os
from pathlib import Path
LOG = logging.getLogger(__name__)
LOG.setLevel("DEBUG")
try:
from git import Repo
except ImportError as err:
LOG.warning(f"Error while importing git: {err}")
LOG.warning(f"Usage of review_bot might lead to errors.")
class LocalGit:
    """Class to extract information from a diff in a local repository.

    Parameters
    ----------
    repo_path : str
        Path to the local repository.
    """

    def __init__(self, repo_path: str):
        """Receives the path to the local repo."""
        self._repo = Repo(repo_path)
        self._repo_path = repo_path

    def _preprocess_patch(self):
        """Clean the string returned by the diff of many unwanted lines.

        Returns
        -------
        list
            Diff without unnecessary lines and separated by file.
        """
        # Diff the working tree's current state against the main branch.
        tree = self._repo.heads.main.commit.tree
        diff = self._repo.git.diff(tree)
        # Drop the three header lines ("diff --git ...", "index ...",
        # "--- a/...") that precede every "+++ b/<file>" marker.
        diff_lines = diff.split("\n")
        diff_lines_aux = []
        i = 0
        while i < len(diff_lines):
            if diff_lines[i].startswith("diff"):
                i += 3
            else:
                diff_lines_aux.append(diff_lines[i])
                i += 1
        # BUG FIX: join the *filtered* lines. The original joined the raw
        # ``diff_lines``, silently discarding the filtering above, so every
        # per-file chunk still contained the header lines of the next file.
        diff_processed = "\n".join(diff_lines_aux)
        # Split on the "+++" markers: one chunk per changed file.
        diff_files = diff_processed.split("+++")
        # first element is always empty
        return diff_files[1:]

    def get_filenames(self):
        """Get the filenames of the diff files.

        Returns
        -------
        list
            List with the filenames.
        """
        diff_files = self._preprocess_patch()
        # Each chunk starts with " b/<path>"; strip that prefix (3 chars).
        diff_filenames = []
        for file in diff_files:
            name = file.split("\n")[0][3:]
            if name != "":
                diff_filenames.append(name)
        return diff_filenames

    def get_local_patch(self):
        """Process the raw diff to extract the filename and useful info.

        Returns
        -------
        Dict
            Dict with the file name as key and the diff as content.
        """
        diff_files = self._preprocess_patch()
        diff_filenames = self.get_filenames()
        # associate filenames with the code changes
        patch_dict = {}
        for filename, file in zip(diff_filenames, diff_files):
            patch_dict[filename] = file
        return patch_dict

    def change_branch(self, branch_name: str):
        """Switch the branch of the repo to the required one.

        Parameters
        ----------
        branch_name : str
            Branch to switch to.
        """
        # TODO: Raise error if you have uncommitted changes
        git = self._repo.git
        git.checkout(branch_name)

    def get_file_sources(self):
        """Get the text from the code files of the diff.

        Returns
        -------
        Dict
            Dict with the file name as key and the source file as content.
        """
        file_sources = {}
        for filename in self.get_filenames():
            absolute_path = os.path.join(self._repo_path, filename)
            source = Path(absolute_path).read_text()
            file_sources[filename] = source
        return file_sources
return file_sources | /review_bot-0.1.1-py3-none-any.whl/review/bot/git_interface.py | 0.700997 | 0.286831 | git_interface.py | pypi |
"""Functions to interface with OpenAI."""
import logging
from typing import Dict, List
import openai
import review.bot.defaults as defaults
from review.bot.exceptions import EmptyOpenAIResponseException
from review.bot.gh_interface import get_changed_files_and_contents
from review.bot.git_interface import LocalGit
from review.bot.misc import _set_open_ai_config, add_line_numbers, parse_suggestions
LOG = logging.getLogger(__name__)
LOG.setLevel("DEBUG")
# Developer note:
# There is a significant improvement in the completion using gpt-4 vs gpt-3.5-turbo
OPEN_AI_MODEL = defaults.API_MODEL
def review_patch(
    owner,
    repo,
    pr,
    use_src=False,
    filter_filename=None,
    gh_access_token=None,
    config_file: str = None,
):
    """Review a patch in a pull request and generate suggestions for improvement.

    Parameters
    ----------
    owner : str
        The GitHub owner/organization of the repository.
    repo : str
        The name of the repository on GitHub.
    pr : str
        The pull request number.
    use_src : bool, default: False
        Use the source file as context for the patch. Works for small files and
        not for large ones.
    filter_filename : str, optional
        If set, filters out all but the file matching this string.
    gh_access_token : str, optional
        GitHub token needed to communicate with the repository. By default, ``None``,
        which means it will try to read an existing env variable named ``GITHUB_TOKEN``.
    config_file : str, optional
        Path to OpenAI configuration file. By default, ``None``.

    Returns
    -------
    list[dict]
        A dictionary containing suggestions for the reviewed patch.

    Raises
    ------
    ValueError
        If ``filter_filename`` matches none of the changed files.
    """
    # Fetch changed files and contents
    changed_files = get_changed_files_and_contents(
        owner, repo, pr, gh_access_token=gh_access_token
    )
    # assemble suggestions
    suggestions = []
    n_hits = 0
    for file_data in changed_files:
        filename = file_data["filename"]
        if filter_filename is not None and filename != filter_filename:
            LOG.debug(
                "Skipping %s due to filter_filename = %s", filename, filter_filename
            )
            continue
        patch = add_line_numbers(file_data["patch"])
        if use_src:
            # BUG FIX: embed the actual filename in the prompt header; it was
            # previously a literal "(unknown)" placeholder.
            file_src = f"FILENAME: {filename}\nCONTENT:\n{file_data['file_text']}"
            suggestions.extend(
                generate_suggestions_with_source(filename, file_src, patch, config_file)
            )
        else:
            # BUG FIX: forward config_file so a custom OpenAI configuration
            # is honored on this path too (it was silently dropped before).
            suggestions.extend(generate_suggestions(filename, patch, config_file))
        n_hits += 1
    if filter_filename is not None and n_hits < 1:
        raise ValueError(f"No files matching '{filter_filename}'")
    return suggestions
def review_patch_local(
    repo: str,
    branch: str = None,
    use_src=False,
    filter_filename=None,
    config_file: str = None,
):
    """Review a patch in a pull request and generate suggestions for improvement.

    Parameters
    ----------
    repo : str
        The path to the local repository.
    branch : str
        Name of the branch you want to compare to main. By default, current branch.
    use_src : bool, default: False
        Use the source file as context for the patch. Works for small files and
        not for large ones.
    filter_filename : str, optional
        If set, filters out all but the file matching this string.
    config_file : str, optional
        Path to OpenAI configuration file. By default, ``None``.

    Returns
    -------
    list[dict]
        A dictionary containing suggestions for the reviewed patch.

    Raises
    ------
    ValueError
        If ``filter_filename`` matches none of the changed files.
    """
    # load repo and change branch if it applies
    local_repo = LocalGit(repo)
    if branch is not None:
        local_repo.change_branch(branch_name=branch)
    # Fetch changed files and contents
    changed_files = local_repo.get_local_patch()
    file_sources = local_repo.get_file_sources()
    # assemble suggestions
    suggestions = []
    n_hits = 0
    for filename, patch in changed_files.items():
        if filter_filename is not None and filename != filter_filename:
            LOG.debug(
                "Skipping %s due to filter_filename = %s", filename, filter_filename
            )
            continue
        if use_src:
            # BUG FIX: get_file_sources() returns a dict keyed by filename;
            # the old code indexed it with the literal key 'file_text' (a
            # KeyError at runtime). Also embed the real filename in the
            # prompt header instead of the "(unknown)" placeholder.
            file_src = f"FILENAME: {filename}\nCONTENT:\n{file_sources[filename]}"
            suggestions.extend(
                generate_suggestions_with_source(filename, file_src, patch, config_file)
            )
        else:
            # BUG FIX: forward config_file so a custom OpenAI configuration
            # is honored on this path too (it was silently dropped before).
            suggestions.extend(generate_suggestions(filename, patch, config_file))
        n_hits += 1
    if filter_filename is not None and n_hits < 1:
        raise ValueError(f"No files matching '{filter_filename}'")
    return suggestions
def generate_suggestions_with_source(
    filename, file_src, patch, config_file: str = None
) -> List[Dict[str, str]]:
    """Generate suggestions for a given file source and patch.
    Parameters
    ----------
    filename : str
        Name of the file being patched.
    file_src : str
        The source file text including the file name and its contents.
    patch : str
        The patch text containing line numbers and changes.
    config_file : str, optional
        Path to OpenAI configuration file. By default, ``None``.
    Returns
    -------
    list[dict]
        A list of dictionaries containing suggestions for the patch.
    """
    # Configure OpenAI credentials/endpoint before issuing the request.
    _set_open_ai_config(config_file)
    LOG.debug("Generating suggestions for a given file source and patch.")
    LOG.debug("FILENAME: %s", filename)
    LOG.debug("PATCH: %s", patch)
    # The conversation primes the model with the full source, a scripted
    # "Ready for the patch." turn, then the numbered patch itself.
    # NOTE(review): ``engine`` is the Azure-OpenAI-style keyword; the plain
    # OpenAI API uses ``model`` -- confirm which backend is targeted.
    response = openai.ChatCompletion.create(
        engine=OPEN_AI_MODEL,
        messages=[
            {
                "role": "system",
                "content": """
You are a GitHub review bot. You first expect the full source of the file to be reviewed followed by the patch which contains the line number. You respond after the full source file with 'Ready for the patch.'. After the patch, you provide 'review items' to improve just the patch code using the context from the full source file. Do not include the line numbers in any code suggestions. There are 3 TYPEs of review items [GLOBAL, SUGGESTION, INFO]. Each review item must be in the format [<FILENAME>], [<LINE-START>(-<LINE-END>)], [TYPE]: <Review text>
Type: GLOBAL
This must always included. This is a general overview of the file patch. If the file looks good, simply respond with "No issues found, LGTM!". Otherwise, indicate the kind of comments and suggestions that will be given in the files tab. Make this section short and do not include any line numbers (i.e., leave [<LINE-START>(-<LINE-END>)] empty.
Type: SUGGESTION
This is where code must be changed or should be changed. If you are replacing code, it must use the GitHub markdown code block with ```suggestion, and the [<LINE-START>-<LINE-END>] must match the line(s) that will be replaced. If you are adding new code, you should only include the [<LINE-START>] where you expect the code to be inserted. Do not insert code that is outside of the patch.
Type: INFO
This is for comments that do not include code that you want to replace. These should be logical errors, style suggestions, or other issues with the code. You can feel free to include example code, and if you do use markdown formatting, but this is primarily for text comments.
            """,
            },
            {"role": "user", "content": file_src},
            {"role": "assistant", "content": "Ready for the patch."},
            {
                "role": "user",
                "content": f"{patch}\n\nReview the above code patch and provide recommendations for improvement or point out errors.",
            },
        ],
    )
    # Extract suggestions
    LOG.debug(response)
    text = response["choices"][0].message.content
    # An empty completion is surfaced as an error so callers can retry.
    if len(text) == 0:
        raise EmptyOpenAIResponseException()
    return parse_suggestions(text)
def generate_suggestions(
    filename, patch, config_file: str = None
) -> List[Dict[str, str]]:
    """
    Generate suggestions for a given file source and patch.
    Parameters
    ----------
    filename : str
        Name of the file being patched.
    patch : str
        The patch text containing line numbers and changes.
    config_file : str, optional
        Path to OpenAI configuration file. By default, ``None``.
    Returns
    -------
    list[dict]
        A list of dictionaries containing suggestions for the patch.
    """
    # Configure OpenAI credentials/endpoint before issuing the request.
    _set_open_ai_config(config_file)
    LOG.debug("Generating suggestions for a given file source and patch.")
    LOG.debug("FILENAME: %s", filename)
    LOG.debug("PATCH: %s", patch)
    # Unlike generate_suggestions_with_source, only the filename and the
    # numbered patch are sent -- no full file context.
    # NOTE(review): ``engine`` is the Azure-OpenAI-style keyword; the plain
    # OpenAI API uses ``model`` -- confirm which backend is targeted.
    response = openai.ChatCompletion.create(
        engine=OPEN_AI_MODEL,
        messages=[
            {
                "role": "system",
                "content": """
You are a GitHub review bot. You first expect full filename. You then expect a patch from a GitHub pull request and you provide 'review items' to improve just the patch code using the context from the full source file. Do not include the line numbers in any code suggestions. There are 3 TYPEs of review items [GLOBAL, SUGGESTION, COMMENT]. Each review item must be in the format [<FILENAME>], [<LINE-START>(-<LINE-END>)], [TYPE], always between brackets: <Review text>
Type: GLOBAL
This must always included. This is a general overview of the file patch. If the file looks good, simply respond with "No issues found, LGTM!". Otherwise, indicate the kind of comments and suggestions that follow. Make this section short and do not include any line numbers (i.e., leave [<LINE-START>(-<LINE-END>)] empty.
Type: SUGGESTION
This is where code must be changed or should be changed. If you are replacing code, it must use the GitHub markdown code block with ```suggestion, and the [<LINE-START>-<LINE-END>] must match the line(s) that will be replaced. If you are adding new code, you should only include the [<LINE-START>] where you expect the code to be inserted. Do not insert code that is outside of the patch.
Type: COMMENT
This is for comments that do not include code that you want to replace. These should be logical errors, style suggestions, or other issues with the code. You can feel free to include example code, and if you do use markdown formatting, but this is primarily for text comments.
            """,
            },
            {"role": "user", "content": filename},
            {"role": "assistant", "content": "Ready for the patch."},
            {
                "role": "user",
                "content": f"{patch}\n\nReview the above code patch and provide recommendations for improvement or point out errors.",
            },
        ],
        # n=3,
    )
    LOG.debug(response)
    # Extract suggestions
    text = response["choices"][0].message.content
    # An empty completion is surfaced as an error so callers can retry.
    if len(text) == 0:
        raise EmptyOpenAIResponseException()
return parse_suggestions(text) | /review_bot-0.1.1-py3-none-any.whl/review/bot/open_ai_interface.py | 0.889135 | 0.290301 | open_ai_interface.py | pypi |
from __future__ import annotations
import requests
import requests_cache
from dataclasses import dataclass
from datetime import datetime, timezone
import base64
import math
class RepoRetriveal:
"""
Class to download pulls and commit from a GitHub repository.
"""
@dataclass
class PullRequest:
"""
A pull request in a repository identified by its number.
Can access login name of the author, set of reviewers and date.
"""
number: int
author_login: str
reviewers: set[str]
date: datetime
def __hash__(self) -> int:
return self.number
@dataclass
class Commit:
"""
A commit in a repository identified by its sha.
Can acces login name of the author and date.
"""
sha: str
author_login: str
filesInfo: list
date: datetime
def __hash__(self) -> int:
return self.sha.__hash__()
@dataclass
class RepoFile:
"""
A text file in a repository.
Can access its path and its content.
"""
filepath: str
content: str
def __init__(self, owner, repo, token=None):
"""
Returns a RepoRetriveal instance.
Args:
owner: the name of the owner
repo: the name of the repository
token: the GitHub access token
"""
self.owner = owner
self.repo = repo
self.token = token
self.base_url = f'https://api.github.com/repos/{owner}/{repo}'
self.headers = {"Accept": "application/vnd.github+json"}
if token:
self.headers["Authorization"] = f"Bearer {token}"
self.s = requests_cache.CachedSession('data_retriveal_cache',
allowable_codes=(200, 404))
self.timeout = 10
def getFromUrl(self, url, params=None):
r = self.s.get(url, headers=self.headers,
params=params,
timeout=self.timeout)
r.raise_for_status()
return r.json()
def getPullByNumber(self, number):
"""
Returns a RepoRetriveal.PullRequest instance from its number.
Args:
number (int): The number of the pull request.
Raises:
requests.HTTPError: If there is no pull with this number.
"""
pull_url = f'{self.base_url}/pulls/{str(number)}'
reviews_url = f'{pull_url}/reviews'
data = self.getFromUrl(pull_url)
author_login = data['user']['login']
date_str = data['created_at']
date = datetime.fromisoformat(date_str[:-1])
date = date.replace(tzinfo=timezone.utc)
data = self.getFromUrl(reviews_url)
reviewers = set()
for review in data:
reviewer = review['user']['login']
if not reviewer == author_login:
reviewers.add(review['user']['login'])
return RepoRetriveal.PullRequest(number, author_login,
reviewers, date)
def getCommitBySha(self, sha):
"""
Returns a RepoRetriveal.Commit instance from its sha.
Args:
sha (string): The sha of the commit.
Raises:
requests.HTTPError: If there is no pull with this sha.
"""
commit_url = f'{self.base_url}/commits/{sha}'
data = self.getFromUrl(commit_url)
if data['author']:
author_login = data['author']['login']
else:
author_login = None
date_str = data['commit']['author']['date']
date = datetime.fromisoformat(date_str[:-1])
date = date.replace(tzinfo=timezone.utc)
filesInfo = data['files']
return RepoRetriveal.Commit(sha, author_login, filesInfo, date)
def getCommitsIterable(self, toDate: datetime, numberOfCommits):
"""
Returns a generator to be used in a for loop, that returns
a certain number of commits up to a certain date.
Args:
toDate(datetime.datetime): retrieve only commits before this date.
numberOfCommits: retrive this number of commit, if possible.
"""
MAX_PER_PAGE = 100
commit_url = f'{self.base_url}/commits'
date_str = toDate.isoformat()[:-6]+'Z'
if numberOfCommits <= MAX_PER_PAGE:
commitsPerPage = numberOfCommits
numOfPages = 1
else:
commitsPerPage = MAX_PER_PAGE
numOfPages = math.ceil(numberOfCommits/MAX_PER_PAGE)
currentPage = 1
currentNumOfCommits = 0
while currentPage <= numOfPages:
params = {'until': date_str,
'per_page': commitsPerPage,
'page': currentPage}
data = self.getFromUrl(commit_url, params=params)
for item in data:
if currentNumOfCommits >= numberOfCommits: break
commit = self.getCommitBySha(item['sha'])
if not commit.author_login: continue
yield commit
currentNumOfCommits += 1
currentPage += 1
def getPullIterable(self, toNumber, numberOfPulls):
"""
Returns a generator to be used in a for loop, that returns
a certain number of pull request up to a certain number.
Args:
toNumber(int): retrieve only pulls before and not including this number.
numberOfPulls: retrive this number of pulls, if possible.
"""
numOfPullsRetrieved = 0
numOfPullsBackward = 0
while numOfPullsRetrieved < numberOfPulls:
numOfPullsBackward += 1
pullNumber = toNumber - numOfPullsBackward
if pullNumber <= 0: break
try:
pull = self.getPullByNumber(toNumber - numOfPullsBackward)
numOfPullsRetrieved += 1
except requests.HTTPError as e:
if e.response.status_code == requests.codes['not_found']:
continue
else:
raise e
yield pull
def getPullFiles(self, pull: PullRequest):
"""
Returns a list of RepoRetriveal.RepoFile associated with a pull request.
Args:
pull(RepoRetriveal.PullRequest): the pull request.
Raises:
requests.HTTPError: If there is no such pull.
"""
files_url = f'{self.base_url}/pulls/{str(pull.number)}/files'
data = self.getFromUrl(files_url)
return self.getFileContentList(data)
    def getCommitFiles(self, commit: Commit):
        """
        Returns a list of RepoRetriveal.RepoFile associated with a commit.
        The file metadata is read from the commit object itself, so no
        extra commit lookup is performed.
        Args:
            commit(RepoRetriveal.Commit): the commit.
        Raises:
            requests.HTTPError: If fetching any referenced file's contents
                fails.
        """
        return self.getFileContentList(commit.filesInfo)
    def getFileContentList(self, files_data):
        """
        Builds RepoRetriveal.RepoFile objects from GitHub file metadata.
        Args:
            files_data: iterable of GitHub file dicts, each carrying
                'contents_url' and 'filename' keys.
        Returns:
            list of RepoRetriveal.RepoFile with UTF-8 decoded contents.
            Files whose content is not valid UTF-8 (e.g. binaries) are
            silently skipped.
        """
        files = []
        for item in files_data:
            content_url = item['contents_url']
            file_data = self.getFromUrl(content_url)
            raw_content = file_data['content']
            try:
                # GitHub serves file contents base64-encoded.
                decoded_content = base64.b64decode(raw_content).decode('utf-8')
            except UnicodeDecodeError:
                # Not text (likely a binary file) -- skip it.
                continue
            file = RepoRetriveal.RepoFile(item['filename'], decoded_content)
            files.append(file)
        return files | /review_recommender-1.0.3.tar.gz/review_recommender-1.0.3/src/review_recommender/data_retriveal.py | 0.719679 | 0.191554 | data_retriveal.py | pypi |
from __future__ import annotations
import math
from dataclasses import dataclass
@dataclass
class ItemReference:
    """Reference to an indexed item, carrying its squared vector length."""
    # The indexed item; must be hashable (it backs __hash__ below).
    item: object
    # Accumulated squared TF-IDF length, filled in by
    # InvertedFile.calculateIDF().
    # NOTE(review): InvertedFile.add() creates a separate ItemReference per
    # token of the same item, so each reference accumulates only its own
    # token's contribution -- confirm this is the intended document length
    # for the cosine normalisation in InvertedFile.getSimilar().
    length_squared: float
    def __hash__(self) -> int:
        # Hash by the wrapped item only. Note the dataclass-generated __eq__
        # still compares length_squared as well, so two references to the
        # same item with different lengths hash equally but compare unequal.
        return self.item.__hash__()
@dataclass
class TokenOccurence:
    """One entry in a token's occurrence list: which item, how many times."""
    # Reference to the item the token occurs in.
    item_ref: ItemReference
    # Number of times the token occurs in that item.
    count: int
@dataclass
class TokenInfo:
    """Per-token index data: IDF plus the token's occurrence list."""
    # Inverse document frequency; 0 until InvertedFile.calculateIDF() runs.
    idf: float
    # All occurrences of the token across indexed items.
    occ_list: list[TokenOccurence]
class InvertedFile:
    """
    A class that models a basic inverted file, to which you can add
    key value pairs of tokens to their count associated with an item
    (could be a document or anything else, the only condition is that
    it is hashable).
    Can be queried to get similar items based on cosine similarity.
    """
    def __init__(self):
        # Maps each token to its IDF and occurrence list.
        self.token2Items: dict[str, TokenInfo] = {}
        # Number of add() calls, i.e. the corpus size used for IDF.
        self.totalItems = 0
    def add(self, item, token_freq: dict[str, int]):
        """
        Add an item with its associated token frequencies:
        Args:
            token_freq(dict[str, int]): pairs of token and their count
        """
        for token, count in token_freq.items():
            if not token in self.token2Items:
                self.token2Items[token] = TokenInfo(idf=0, occ_list=[])
            # NOTE(review): a fresh ItemReference is created for every token
            # of the same item; see ItemReference for the consequences on
            # length accumulation and dict-key identity.
            itemRef = ItemReference(item, length_squared=0)
            self.token2Items[token].occ_list.append(TokenOccurence(itemRef, count))
        self.totalItems += 1
    def calculateIDF(self):
        """Compute each token's IDF and each reference's squared length."""
        # idf = log2(N / document frequency of the token).
        for tokenInfo in self.token2Items.values():
            tokenInfo.idf = math.log2(self.totalItems/len(tokenInfo.occ_list))
        # Accumulate squared TF-IDF weights into the item references.
        for tokenInfo in self.token2Items.values():
            idf = tokenInfo.idf
            for tokenOccurrence in tokenInfo.occ_list:
                count = tokenOccurrence.count
                tokenOccurrence.item_ref.length_squared += (idf * count)**2
    def getSimilar(self, tokenFreqs):
        """
        Returns a dictionary mapping retrieved items to their cosine
        similarity score against the query.
        Args:
            tokenFreqs(dict[str, int]): pairs of token and their count
                forming the query.
        """
        self.calculateIDF()
        retrievedRef2score: dict[ItemReference, float] = {}
        token2weights = {}
        for token, count in tokenFreqs.items():
            # Tokens never seen in the corpus cannot contribute.
            if not token in self.token2Items: continue
            tokenInfo = self.token2Items[token]
            idf = tokenInfo.idf
            weight = count * idf
            token2weights[token] = weight
            occList = tokenInfo.occ_list
            for occurrence in occList:
                itemRef = occurrence.item_ref
                countInItem = occurrence.count
                if not itemRef in retrievedRef2score: retrievedRef2score[itemRef] = 0
                # Dot-product contribution: query TF-IDF * item TF-IDF.
                retrievedRef2score[itemRef] += weight * idf * countInItem
        # Length of the query's TF-IDF vector.
        queryLengthSquared = 0
        for token, weight in token2weights.items():
            queryLengthSquared += weight**2
        queryLength = math.sqrt(queryLengthSquared)
        # Normalise raw dot products into cosine similarities.
        retrievedItem2score = {}
        for retrieved, score in retrievedRef2score.items():
            length = math.sqrt(retrieved.length_squared)
            retrievedItem2score[retrieved.item] = score/(queryLength * length)
        return retrievedItem2score
    def dump(self):
        """Debug helper: print every indexed token."""
        print(self.token2Items.keys()) | /review_recommender-1.0.3.tar.gz/review_recommender-1.0.3/src/review_recommender/inverted_files.py | 0.871803 | 0.381479 | inverted_files.py | pypi |
from collections import OrderedDict
import datetime
from dateutil.relativedelta import relativedelta
class BaseService(object):
    """Shared time-formatting and filtering helpers for review services."""
    def format_duration(self, created_at):
        """
        Formats the duration the review request is pending for
        Args:
            created_at (datetime.datetime): the date the review request was
                filed. Must be a naive UTC datetime, since it is compared
                against datetime.utcnow().
        Returns:
            a string of duration the review request is pending for
        """
        """
        find the relative time difference between now and
        review request filed to retrieve relative information
        """
        rel_diff = relativedelta(datetime.datetime.utcnow(),
                                 created_at)
        time_dict = OrderedDict([
            ('year', rel_diff.years),
            ('month', rel_diff.months),
            ('day', rel_diff.days),
            ('hour', rel_diff.hours),
            ('minute', rel_diff.minutes),
        ])
        result = []
        # Zero-valued units are omitted; units > 1 get a plural 's'.
        for k, v in time_dict.items():
            if v == 1:
                result.append('%s %s' % (v, k))
            elif v > 1:
                result.append('%s %ss' % (v, k))
        return ' '.join(result)
    def check_request_state(self, created_at,
                            state_, value, duration):
        """
        Checks if the review request is older or newer than specified
        time interval.
        Args:
            created_at (datetime.datetime): the date the review request was
                filed. Must be a naive UTC datetime, since it is compared
                against datetime.utcnow().
            state_ (str): state for review requests, e.g, older
                or newer
            value (int): The value in terms of duration for requests
                to be older or newer than
            duration (str): The duration in terms of period ('y', 'm', 'd',
                'h', 'min') for requests to be older or newer than
        Returns:
            True if the review request is older or newer than
            specified time interval, False otherwise.
            Also returns True when any of state_/value/duration is None,
            i.e. no filtering is applied.
        """
        # With any filter component missing, skip filtering entirely.
        if (state_ is not None and value is not None and
                duration is not None):
            """
            find the relative time difference between now and
            review request filed to retrieve relative information
            """
            rel_diff = relativedelta(datetime.datetime.utcnow(),
                                     created_at)
            """
            find the absolute time difference between now and
            review request filed to retrieve absolute information
            """
            abs_diff = datetime.datetime.utcnow() - created_at
            if state_ not in ('older', 'newer'):
                raise ValueError('Invalid state value: %s' % state_)
            if duration == 'y':
                """ use rel_diff to retrieve absolute year since
                value of absolute and relative year is same."""
                if state_ == 'older' and rel_diff.years < value:
                    return False
                elif state_ == 'newer' and rel_diff.years >= value:
                    return False
            elif duration == 'm':
                """ use rel_diff to calculate absolute time difference
                in months """
                abs_month = (rel_diff.years*12) + rel_diff.months
                if state_ == 'older' and abs_month < value:
                    return False
                elif state_ == 'newer' and abs_month >= value:
                    return False
            elif duration == 'd':
                """ use abs_diff to retrieve absolute time difference
                in days """
                if state_ == 'older' and abs_diff.days < value:
                    return False
                elif state_ == 'newer' and abs_diff.days >= value:
                    return False
            elif duration == 'h':
                """ use abs_diff to calculate absolute time difference
                in hours """
                abs_hour = abs_diff.total_seconds()/3600
                if state_ == 'older' and abs_hour < value:
                    return False
                elif state_ == 'newer' and abs_hour >= value:
                    return False
            elif duration == 'min':
                """ use abs_diff to calculate absolute time difference
                in minutes """
                abs_min = abs_diff.total_seconds()/60
                if state_ == 'older' and abs_min < value:
                    return False
                elif state_ == 'newer' and abs_min >= value:
                    return False
            else:
                raise ValueError("Invalid duration type: %s" % duration)
        return True
class BaseReview(object):
    """Simple value object describing a filed review request."""
    def __init__(self, user=None, title=None, url=None,
                 time=None, comments=None):
        self.user = user
        self.title = title
        self.url = url
        self.time = time
        self.comments = comments
    def __str__(self):
        """Return a human-readable summary of the review request.
        ``comments`` is only mentioned when it is a positive number; the
        original code compared ``None > 1``, which raises TypeError on
        Python 3 since ``comments`` defaults to None.
        """
        base = "@%s filed '%s' %s since %s" % (self.user, self.title,
                                               self.url, self.time)
        if self.comments == 1:
            return '%s with %s comment' % (base, self.comments)
        elif self.comments and self.comments > 1:
            return '%s with %s comments' % (base, self.comments)
        else:
            return base
# Version of this support module as a 5-tuple:
# (major, minor, micro, releaselevel, serial).
__version__ = (1, 0, 0, "final", 0)
import distutils.dist
import distutils.errors
class VersiontoolsEnchancedDistributionMetadata(distutils.dist.DistributionMetadata):
    """
    A subclass of distutils.dist.DistributionMetadata that uses versiontools
    Typically you would not instantiate this class directly. It is constructed
    by distutils.dist.Distribution.__init__() method. Since there is no other
    way to do it, this module monkey-patches distutils to override the original
    version of DistributionMetadata
    """
    # Reference to the original class. This is only required because distutils
    # was created before the introduction of new-style classes to python.
    __base = distutils.dist.DistributionMetadata
    def get_version(self):
        """
        Get distribution version.
        This method is enhanced compared to original distutils implementation.
        If the version string is set to a special value then instead of using
        the actual value the real version is obtained by querying versiontools.
        If versiontools package is not installed then the version is obtained
        from the standard section of the ``PKG-INFO`` file. This file is
        automatically created by any source distribution. This method is less
        useful as it cannot take advantage of version control information that
        is automatically loaded by versiontools. It has the advantage of not
        requiring versiontools installation and that it does not depend on
        ``setup_requires`` feature of ``setuptools``.
        """
        # The ':versiontools:' prefix in the version field is the sentinel
        # marking a version that must be resolved dynamically.
        if (self.name is not None and self.version is not None
                and self.version.startswith(":versiontools:")):
            # Try a live versiontools lookup first, then the frozen
            # PKG-INFO fallback, and finally bail out with a clear error.
            return (self.__get_live_version() or self.__get_frozen_version()
                    or self.__fail_to_get_any_version())
        else:
            return self.__base.get_version(self)
    def __get_live_version(self):
        """
        Get a live version string using versiontools
        """
        try:
            import versiontools
        except ImportError:
            # versiontools is not installed; let the caller fall back.
            return None
        else:
            # The version expression is resolved from the package name.
            return str(versiontools.Version.from_expression(self.name))
    def __get_frozen_version(self):
        """
        Get a fixed version string using an existing PKG-INFO file
        """
        try:
            # NOTE(review): constructs a plain DistributionMetadata with a
            # path argument so it parses PKG-INFO -- relies on distutils
            # accepting 'path'; confirm for all supported Python versions.
            return self.__base("PKG-INFO").version
        except IOError:
            return None
    def __fail_to_get_any_version(self):
        """
        Raise an informative exception
        """
        raise SystemExit(
"""This package requires versiontools for development or testing.
See http://versiontools.readthedocs.org/ for more information about
what versiontools is and why it is useful.
To install versiontools now please run:
    $ pip install versiontools
Note: versiontools works best when you have additional modules for
integrating with your preferred version control system. Refer to
the documentation for a full list of required modules.""")
# If DistributionMetadata is not already a subclass of
# VersiontoolsEnchancedDistributionMetadata then monkey patch it. This should
# prevent an (odd) case of multiple imports of this module re-patching.
if not issubclass(
        distutils.dist.DistributionMetadata,
        VersiontoolsEnchancedDistributionMetadata):
    distutils.dist.DistributionMetadata = VersiontoolsEnchancedDistributionMetadata | /reviewboard-svn-hooks-0.2.1-r20.tar.gz/reviewboard-svn-hooks-0.2.1-r20/versiontools_support.py | 0.657209 | 0.278024 | versiontools_support.py | pypi |
====================
Review Bot Extension
====================
`Review Bot`_ is a tool for automating tasks on code uploaded to `Review
Board`_, and posting the results as a code review. Review Bot was built to
automate the execution of static analysis tools.
The Review Bot extension integrates Review Board with one or more
`Review Bot workers`_. It manages the configuration rules which tell Review
Bot when and how to review code, and schedules new review requests for review.
.. _Review Bot: https://www.reviewboard.org/downloads/reviewbot/
.. _Review Bot workers: https://pypi.org/project/reviewbot-worker/
.. _Review Board: https://www.reviewboard.org/
Supported Code Checking Tools
=============================
The Review Bot extension can perform automated code reviews using any of the
following tools:
* `checkstyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/checkstyle/>`_
- A static analysis tool that provides a variety of checkers for Java code
* `Cppcheck
<https://www.reviewboard.org/docs/reviewbot/latest/tools/cppcheck/>`_
- A static analysis tool for C/C++ code
* `CppLint <https://www.reviewboard.org/docs/reviewbot/latest/tools/cpplint/>`_
- Checks C++ code against Google's style guide
* `flake8 <https://www.reviewboard.org/docs/reviewbot/latest/tools/flake8/>`_
- A wrapper around several Python code quality tools
* `PMD <https://www.reviewboard.org/docs/reviewbot/latest/tools/pmd/>`_
- A static analysis tool that provides checkers for many languages
* `pycodestyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/pycodestyle/>`_
- A code style checker for Python code
* `pydocstyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/pydocstyle/>`_
- A static analysis tool for Python docstring conventions
* `pyflakes <https://www.reviewboard.org/docs/reviewbot/latest/tools/pyflakes/>`_
- A static analysis tool for Python code
See the links above for installation and usage instructions.
Installing the Review Bot Extension
===================================
The extension is provided through the reviewbot-extension_ Python package.
See the documentation_ to learn how to install and configure the worker and
the rest of Review Bot.
.. _documentation:
https://www.reviewboard.org/docs/reviewbot/latest/
.. _reviewbot-extension: https://pypi.org/project/reviewbot-extension/
Getting Support
===============
We can help you get going with Review Bot, and diagnose any issues that may
come up. There are three levels of support: Public Community Support, Private
Basic Support, and Private Premium Support.
The public community support is available on our main `discussion list`_. We
generally respond to requests within a couple of days. This support works well
for general, non-urgent questions that don't need to expose confidential
information.
Private Support plans are available through support contracts. We offer
same-day support options, handled confidentially over e-mail or our support
tracker, and can assist with a wide range of requests.
See your `support options`_ for more information.
.. _discussion list: https://groups.google.com/group/reviewboard/
.. _support options: https://www.reviewboard.org/support/
Our Happy Users
===============
There are thousands of companies and organizations using Review Board today.
We respect the privacy of our users, but some of them have asked to feature them
on the `Happy Users page`_.
If you're using Review Board, and you're a happy user,
`let us know! <https://groups.google.com/group/reviewboard/>`_
.. _Happy Users page: https://www.reviewboard.org/users/
Reporting Bugs
==============
Hit a bug? Let us know by
`filing a bug report <https://www.reviewboard.org/bugs/new/>`_.
You can also look through the
`existing bug reports <https://www.reviewboard.org/bugs/>`_ to see if anyone
else has already filed the bug.
Contributing
============
Are you a developer? Do you want to help build new tools or features for
Review Bot? Great! Let's help you get started.
First off, read through our `Contributor Guide`_.
We accept patches to Review Bot, Review Board, RBTools, and other related
projects on `reviews.reviewboard.org <https://reviews.reviewboard.org/>`_.
(Please note that we *do not* accept pull requests.)
Got any questions about anything related to Review Board and development? Head
on over to our `development discussion list`_.
.. _`Contributor Guide`: https://www.reviewboard.org/docs/codebase/dev/
.. _`development discussion list`:
https://groups.google.com/group/reviewboard-dev/
Related Projects
================
* `Review Board`_ -
Our extensible, open source code review tool.
* RBTools_ -
The RBTools command line suite.
* `RB Gateway`_ -
Manages Git repositories, providing a full API enabling all of Review Board's
  features.
.. _RBTools: https://github.com/reviewboard/rbtools/
.. _ReviewBot: https://github.com/reviewboard/ReviewBot/
.. _RB Gateway: https://github.com/reviewboard/rb-gateway/
| /reviewbot-extension-3.2.tar.gz/reviewbot-extension-3.2/README.rst | 0.8777 | 0.735452 | README.rst | pypi |
from __future__ import unicode_literals
import json
from importlib import import_module
from django.forms.widgets import MultiWidget
from django.utils.html import format_html
from django.utils.safestring import mark_safe
class ToolOptionsWidget(MultiWidget):
    """Widget for showing tool-specific options.
    Review Bot tools may define some tool-specific options that can be
    specified through the admin UI. Because tools are located on the worker
    nodes and the extension doesn't actually have their implementations, they
    communicate what options are available and what form fields should be used
    to configure them via a big JSON blob.
    """
    def __init__(self, tools, attrs=None):
        """Initialize the widget.
        Args:
            tools (list of reviewbotext.models.Tool):
                The list of tools.
            attrs (dict, optional):
                Additional attributes for the widget (unused).
        """
        # Flatten every option of every tool into a single field list.
        # Each entry remembers which tool it belongs to via 'tool_id'.
        self.fields = []
        for tool in tools:
            for option in tool.tool_options:
                field_class = self._import_class(option['field_type'])
                field_options = option.get('field_options', {})
                widget_def = option.get('widget')
                if widget_def is None:
                    # Fall back to the field class's default widget.
                    widget = None
                else:
                    widget_class = self._import_class(widget_def['type'])
                    widget = widget_class(attrs=widget_def.get('attrs'))
                self.fields.append({
                    'tool_id': tool.pk,
                    'name': option['name'],
                    'default': option.get('default'),
                    'form_field': field_class(widget=widget, **field_options),
                })
        sub_widgets = [field['form_field'].widget for field in self.fields]
        super(ToolOptionsWidget, self).__init__(sub_widgets, attrs)
    def _import_class(self, class_path):
        """Import and return a class.
        Args:
            class_path (unicode):
                The module path of the class to import.
        Returns:
            type:
                The imported class.
        """
        # Split "pkg.module.Class" into its module path and class name.
        class_path = class_path.split('.')
        module_name = '.'.join(class_path[:-1])
        module = import_module(module_name)
        return getattr(module, class_path[-1])
    def render(self, name, value, attrs=None, *args, **kwargs):
        """Render the widget.
        This overrides MultiWidget's rendering to render the sub-fields more
        like their own rows.
        Args:
            name (unicode):
                The name of the field.
            value (unicode):
                The current value of the field.
            attrs (dict, optional):
                Any attributes to include on the HTML element.
            *args (tuple, optional):
                Additional arguments to pass through to contained widgets.
            **kwargs (dict, optional):
                Additional arguments to pass through to contained widgets.
        Returns:
            django.utils.safestring.SafeText:
                The rendered widget.
        """
        if self.is_localized:
            for widget in self.widgets:
                widget.is_localized = self.is_localized
        if not isinstance(value, list):
            value = self.decompress(value)
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id', None)
        assert len(self.widgets) == len(self.fields)
        for i, widget in enumerate(self.widgets):
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            if id_:
                # Give each sub-widget a unique, index-suffixed DOM id.
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            widget_name = '%s_%s' % (name, i)
            field = self.fields[i]
            if field['form_field'].required:
                label_class = 'required'
            else:
                label_class = ''
            if field['form_field'].help_text:
                help_text = format_html('<p class="help">{0}</p>',
                                        field['form_field'].help_text)
            else:
                help_text = ''
            # Rows start hidden; presumably shown client-side for the
            # currently selected tool via data-tool-id -- confirm against
            # the admin-side JavaScript.
            output.append(format_html(
                '<div class="form-row" data-tool-id="{tool_id}" '
                '     style="display: none;">'
                ' <label for="{widget_name}" '
                '        class="{label_class}">{field_label}:</label>'
                ' {widget}'
                ' {help_text}'
                '</div>',
                tool_id=field['tool_id'],
                field_label=field['form_field'].label,
                help_text=help_text,
                label_class=label_class,
                widget_name=widget_name,
                widget=widget.render(widget_name, widget_value, final_attrs,
                                     *args, **kwargs)))
        return mark_safe(''.join(output))
    def value_from_datadict(self, data, files, name):
        """Return the value for this widget's field from the form data.
        Only options belonging to the currently selected tool are collected;
        the result is serialized as a JSON object string.
        Args:
            data (dict):
                The form data.
            files (dict):
                Any files submitted along with the form.
            name (unicode):
                The name of the current field.
        """
        # This is a little bit sketchy because it means we're reaching into
        # data owned by a different form field (the tool). That said, I don't
        # know that there's a clean way to do this otherwise.
        selected_tool = int(data['tool'] or 0)
        result = {}
        for i, widget in enumerate(self.widgets):
            field = self.fields[i]
            if field['tool_id'] == selected_tool:
                key = field['name']
                value = widget.value_from_datadict(
                    data, files, '%s_%s' % (name, i))
                result[key] = value
        return json.dumps(result)
    def decompress(self, value):
        """Return an array of sub-field values given the field value.
        Args:
            value (unicode):
                The stored value for the top-level field (a JSON object
                string, as produced by value_from_datadict).
        Returns:
            list:
                A list of values for each sub-widget, falling back to each
                field's declared default when the value has no entry.
        """
        if value:
            values = json.loads(value)
        else:
            values = {}
        return [
            values.get(field['name'], field['default'])
            for field in self.fields
        ] | /reviewbot-extension-3.2.tar.gz/reviewbot-extension-3.2/reviewbotext/widgets.py | 0.855731 | 0.207155 | widgets.py | pypi |
=================
Review Bot Worker
=================
`Review Bot`_ is a tool for automating tasks on code uploaded to `Review
Board`_, and posting the results as a code review. Review Bot was built to
automate the execution of static analysis tools.
The Review Bot worker is the service component of Review Bot that performs the
code reviews, managed by the `Review Bot extension`_. Workers can be installed
on a single server, or can be distributed across several, helping to review
large numbers of changes at once.
.. _Review Bot: https://www.reviewboard.org/downloads/reviewbot/
.. _Review Bot extension: https://pypi.org/project/reviewbot-extension/
.. _Review Board: https://www.reviewboard.org/
Supported Code Checking Tools
=============================
The Review Bot worker can perform automated code reviews using any of the
following tools:
* `checkstyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/checkstyle/>`_
- A static analysis tool that provides a variety of checkers for Java code
* `Cppcheck
<https://www.reviewboard.org/docs/reviewbot/latest/tools/cppcheck/>`_
- A static analysis tool for C/C++ code
* `CppLint <https://www.reviewboard.org/docs/reviewbot/latest/tools/cpplint/>`_
- Checks C++ code against Google's style guide
* `flake8 <https://www.reviewboard.org/docs/reviewbot/latest/tools/flake8/>`_
- A wrapper around several Python code quality tools
* `PMD <https://www.reviewboard.org/docs/reviewbot/latest/tools/pmd/>`_
- A static analysis tool that provides checkers for many languages
* `pycodestyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/pycodestyle/>`_
- A code style checker for Python code
* `pydocstyle
<https://www.reviewboard.org/docs/reviewbot/latest/tools/pydocstyle/>`_
- A static analysis tool for Python docstring conventions
* `pyflakes <https://www.reviewboard.org/docs/reviewbot/latest/tools/pyflakes/>`_
- A static analysis tool for Python code
See the links above for installation and usage instructions.
Installing the Review Bot Worker
================================
The worker is provided through the reviewbot-worker_ Python package.
See the documentation_ to learn how to install and configure the worker and
the rest of Review Bot.
.. _documentation:
https://www.reviewboard.org/docs/reviewbot/latest/
.. _reviewbot-worker: https://pypi.org/project/reviewbot-worker/
Getting Support
===============
We can help you get going with Review Bot, and diagnose any issues that may
come up. There are three levels of support: Public Community Support, Private
Basic Support, and Private Premium Support.
The public community support is available on our main `discussion list`_. We
generally respond to requests within a couple of days. This support works well
for general, non-urgent questions that don't need to expose confidential
information.
Private Support plans are available through support contracts. We offer
same-day support options, handled confidentially over e-mail or our support
tracker, and can assist with a wide range of requests.
See your `support options`_ for more information.
.. _discussion list: https://groups.google.com/group/reviewboard/
.. _support options: https://www.reviewboard.org/support/
Our Happy Users
===============
There are thousands of companies and organizations using Review Board today.
We respect the privacy of our users, but some of them have asked to feature them
on the `Happy Users page`_.
If you're using Review Board, and you're a happy user,
`let us know! <https://groups.google.com/group/reviewboard/>`_
.. _Happy Users page: https://www.reviewboard.org/users/
Reporting Bugs
==============
Hit a bug? Let us know by
`filing a bug report <https://www.reviewboard.org/bugs/new/>`_.
You can also look through the
`existing bug reports <https://www.reviewboard.org/bugs/>`_ to see if anyone
else has already filed the bug.
Contributing
============
Are you a developer? Do you want to help build new tools or features for
Review Bot? Great! Let's help you get started.
First off, read through our `Contributor Guide`_.
We accept patches to Review Bot, Review Board, RBTools, and other related
projects on `reviews.reviewboard.org <https://reviews.reviewboard.org/>`_.
(Please note that we *do not* accept pull requests.)
Got any questions about anything related to Review Board and development? Head
on over to our `development discussion list`_.
.. _`Contributor Guide`: https://www.reviewboard.org/docs/codebase/dev/
.. _`development discussion list`:
https://groups.google.com/group/reviewboard-dev/
Related Projects
================
* `Review Board`_ -
Our extensible, open source code review tool.
* RBTools_ -
The RBTools command line suite.
* `RB Gateway`_ -
Manages Git repositories, providing a full API enabling all of Review Board's
  features.
.. _RBTools: https://github.com/reviewboard/rbtools/
.. _ReviewBot: https://github.com/reviewboard/ReviewBot/
.. _RB Gateway: https://github.com/reviewboard/rb-gateway/
| /reviewbot-worker-3.2.tar.gz/reviewbot-worker-3.2/README.rst | 0.880116 | 0.741159 | README.rst | pypi |
from __future__ import unicode_literals
import warnings
class BaseRemovedInReviewBotVersionWarning(DeprecationWarning):
    """Common base for all Review Bot deprecation warnings.
    Every version-specific deprecation warning derives from this class, so
    callers can catch or filter Review Bot deprecations generically without
    naming a particular release.
    """
    @classmethod
    def warn(cls, message, stacklevel=2):
        """Emit a deprecation warning of this category.
        Thin convenience wrapper around :py:func:`warnings.warn` that bumps
        the stack level by one, so the reported location points at the
        caller of the deprecated API rather than at this helper.
        Args:
            message (unicode):
                The message to show in the deprecation warning.
            stacklevel (int, optional):
                The stack level for the warning.
        """
        adjusted_level = stacklevel + 1
        warnings.warn(message, category=cls, stacklevel=adjusted_level)
class RemovedInReviewBot40Warning(BaseRemovedInReviewBotVersionWarning):
    """Deprecations for features removed in Review Bot 4.0.
    Note that this class will itself be removed in Review Bot 4.0. If you need
    to check against Review Bot deprecation warnings, please see
    :py:class:`BaseRemovedInReviewBotVersionWarning`. Warnings of this type
    are emitted via the inherited :py:meth:`warn` classmethod.
    """
class RemovedInReviewBot50Warning(BaseRemovedInReviewBotVersionWarning):
    """Deprecations for features removed in Review Bot 5.0.
    Note that this class will itself be removed in Review Bot 5.0. If you need
    to check against Review Bot deprecation warnings, please see
    :py:class:`BaseRemovedInReviewBotVersionWarning`. Alternatively, you can
    use the alias for this class,
    :py:data:`RemovedInNextReviewBotVersionWarning`. Warnings of this type
    are emitted via the inherited :py:meth:`warn` classmethod.
    """
#: An alias for the next release of Review Bot where features would be removed.
RemovedInNextReviewBotVersionWarning = RemovedInReviewBot40Warning
# Show each of our deprecation warnings once per occurrence.
# DeprecationWarning subclasses are otherwise hidden by default.
for _warning_cls in (RemovedInReviewBot40Warning, RemovedInReviewBot50Warning):
    warnings.simplefilter('once', _warning_cls, 0) | /reviewbot-worker-3.2.tar.gz/reviewbot-worker-3.2/reviewbot/deprecation.py | 0.848157 | 0.279838 | deprecation.py | pypi |
from __future__ import unicode_literals
import os
import subprocess
import sys
from reviewbot.utils.log import get_logger
# Module-level logger shared by the process helpers below.
logger = get_logger(__name__)
def execute(command,
            env=None,
            split_lines=False,
            ignore_errors=False,
            extra_ignore_errors=(),
            translate_newlines=True,
            with_errors=True,
            return_errors=False,
            none_on_ignored_error=False):
    """Execute a command and return the output.
    Args:
        command (list of unicode):
            The command to run.
        env (dict, optional):
            The environment variables to use when running the process.
        split_lines (bool, optional):
            Whether to return the output as a list (split on newlines) or a
            single string.
        ignore_errors (bool, optional):
            Whether to ignore non-zero return codes from the command.
        extra_ignore_errors (tuple of int, optional):
            Process return codes to ignore.
        translate_newlines (bool, optional):
            Whether to convert platform-specific newlines (such as \\r\\n) to
            the regular newline (\\n) character.
        with_errors (bool, optional):
            Whether the stderr output should be merged in with the stdout
            output or just ignored.
        return_errors (bool, optional):
            Whether to return the content of the stderr stream. If set, this
            argument takes precedence over the ``with_errors`` argument.
        none_on_ignored_error (bool, optional):
            Whether to return ``None`` if there was an ignored error (instead
            of the process output).
    Returns:
        object:
        This returns a single value or 2-tuple, depending on the arguments.
        If ``return_errors`` is ``True``, this will return the standard output
        and standard errors as strings in a tuple. Otherwise, this will just
        return the standard output as a string.
        If ``split_lines`` is ``True``, those strings will instead be lists
        of lines (preserving newlines).
        All resulting strings will be Unicode.
    """
    # Log the full command line before running it.
    if isinstance(command, list):
        logger.debug(subprocess.list2cmdline(command))
    else:
        logger.debug(command)
    if env:
        # NOTE(review): update() layers os.environ *on top of* the
        # caller-supplied env, so caller values for variables that also
        # exist in os.environ are overridden -- confirm this is intended.
        env.update(os.environ)
    else:
        env = os.environ.copy()
    # Run tools under a consistent English UTF-8 locale so output parses
    # predictably.
    env['LC_ALL'] = 'en_US.UTF-8'
    env['LANGUAGE'] = 'en_US.UTF-8'
    if with_errors and not return_errors:
        # Interleave stderr into the stdout stream.
        errors_output = subprocess.STDOUT
    else:
        errors_output = subprocess.PIPE
    if sys.platform.startswith('win'):
        # Same invocation as the branch below, minus close_fds, which is
        # not passed on Windows.
        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             universal_newlines=translate_newlines,
                             env=env)
    else:
        p = subprocess.Popen(command,
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=errors_output,
                             shell=False,
                             close_fds=True,
                             universal_newlines=translate_newlines,
                             env=env)
    data, errors = p.communicate()
    if isinstance(data, bytes):
        # With translate_newlines=False, communicate() returns bytes;
        # normalize stdout to text.
        # NOTE(review): unlike data, errors is never decoded from bytes.
        data = data.decode('utf-8')
    if split_lines:
        data = data.splitlines(True)
    if return_errors:
        if split_lines:
            errors = errors.splitlines(True)
    else:
        # stderr was either merged into stdout or deliberately dropped.
        errors = None
    rc = p.wait()
    if rc and not ignore_errors and rc not in extra_ignore_errors:
        raise Exception('Failed to execute command: %s\n%s' % (command, data))
    if rc and none_on_ignored_error:
        # An ignored failure: optionally suppress the (likely partial)
        # output.
        data = None
    if return_errors:
        return data, errors
    else:
        return data
def is_exe_in_path(name, cache={}):
    """Check whether an executable is in the user's search path.

    If the provided filename is an absolute path, it will be checked
    directly without looking in the search path.

    Version Changed:
        3.0:
        Added the ``cache`` parameter.

    Args:
        name (unicode):
            The name of the executable, without any platform-specific
            executable extension. The extension will be appended if necessary.

        cache (dict, optional):
            A result cache, to avoid repeated lookups.

            This will store the paths to any files that are found (or ``None``
            if not found).

            By default, the cache is shared across all calls. A custom cache
            can be provided instead.

            Note: the mutable default argument is intentional here, serving
            as the process-wide shared cache.

    Returns:
        boolean:
        True if the executable can be found in the execution path.
    """
    if sys.platform == 'win32' and not name.endswith('.exe'):
        name += '.exe'

    if name in cache:
        # The cache stores paths (or None), not booleans. Normalize here so
        # cache hits return the same type as cache misses. Previously, a
        # cache hit returned the stored path string itself, contradicting
        # the documented boolean return type.
        return cache[name] is not None

    path = None

    if os.path.isabs(name):
        # An absolute path is checked directly, without consulting PATH.
        if os.path.exists(name):
            path = name
    else:
        for dirname in os.environ['PATH'].split(os.pathsep):
            temp_path = os.path.abspath(os.path.join(dirname, name))

            if os.path.exists(temp_path):
                path = temp_path
                break

    cache[name] = path

    return path is not None
"""Review Bot tool to run cpplint."""
from __future__ import unicode_literals
import re
from reviewbot.config import config
from reviewbot.tools.base import BaseTool
from reviewbot.utils.process import execute
class CPPLintTool(BaseTool):
    """Review Bot tool to run cpplint."""

    name = 'cpplint'
    version = '1.0'
    description = "Checks code for style errors using Google's cpplint tool."
    timeout = 30

    exe_dependencies = ['cpplint']

    #: C/C++/CUDA source and header file extensions handled by this tool.
    #:
    #: Note: ``.cxx`` was previously listed without the ``*`` glob prefix,
    #: which meant files with that extension were never matched.
    file_patterns = [
        '*.c', '*.cc', '*.cpp', '*.cxx', '*.c++', '*.cu',
        '*.h', '*.hh', '*.hpp', '*.hxx', '*.h++', '*.cuh',
    ]

    options = [
        {
            'name': 'verbosity',
            'field_type': 'django.forms.IntegerField',
            'default': 1,
            'min_value': 1,
            'max_value': 5,
            'field_options': {
                'label': 'Verbosity level for cpplint',
                'help_text': (
                    'Which level of messages should be displayed. '
                    '1=All, 5=Few.'
                ),
                'required': True,
            },
        },
        {
            'name': 'excluded_checks',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Tests to exclude',
                'help_text': (
                    'Comma-separated list of tests to exclude (run cpplint '
                    '--filter= to see all possible choices).'
                ),
                'required': False,
            },
        },
    ]

    #: Matches one cpplint error line:
    #: ``<path>:<linenum>: <text> [<category>] [<confidence 0-5>]``.
    ERROR_RE = re.compile(
        r'^[^:]+:(?P<linenum>\d+):\s+(?P<text>.*?)\s+'
        r'\[(?P<category>[^\]]+)\] \[[0-5]\]$',
        re.M)

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings
        verbosity = settings['verbosity']
        excluded_checks = settings.get('excluded_checks')

        cmdline = [
            config['exe_paths']['cpplint'],
            '--verbose=%s' % verbosity,
        ]

        if excluded_checks:
            cmdline.append('--filter=%s' % excluded_checks)

        return cmdline

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run cpplint.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        output = execute(base_command + [path],
                         ignore_errors=True)

        for m in self.ERROR_RE.finditer(output):
            # Note that some errors may have a line number of 0 (indicating
            # that a copyright header isn't present). We'll be converting this
            # to 1.
            f.comment(text=m.group('text'),
                      first_line=int(m.group('linenum')) or 1,
                      error_code=m.group('category'))
from __future__ import unicode_literals
from reviewbot.config import config
from reviewbot.tools import BaseTool
from reviewbot.utils.process import execute
class PycodestyleTool(BaseTool):
    """Review Bot tool that checks Python files with pycodestyle."""

    name = 'pycodestyle'
    version = '1.0'
    description = 'Checks Python code for style errors.'
    timeout = 30

    exe_dependencies = ['pycodestyle']
    file_patterns = ['*.py']

    options = [
        {
            'name': 'max_line_length',
            'field_type': 'django.forms.IntegerField',
            'default': 79,
            'field_options': {
                'label': 'Maximum line length',
                'help_text': (
                    'The maximum length allowed for lines. Any lines longer '
                    'than this length will cause an issue to be filed.'
                ),
                'required': True,
            },
        },
        {
            'name': 'ignore',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Ignore',
                'help_text': (
                    'A comma-separated list of errors and warnings to '
                    'ignore. This will be passed to the --ignore command '
                    'line argument (e.g. E4,W).'
                ),
                'required': False,
            },
        },
    ]

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        This applies the configured maximum line length and, when set, the
        list of check codes to ignore.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings

        # The --format template is parsed back apart in handle_file().
        cmdline = [
            config['exe_paths']['pycodestyle'],
            '--max-line-length=%s' % settings['max_line_length'],
            '--format=%(code)s:%(row)d:%(col)d:%(text)s',
        ]

        ignored_checks = settings.get('ignore', '').strip()

        if ignored_checks:
            cmdline.append('--ignore=%s' % ignored_checks)

        return cmdline

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Each reported style violation is turned into a comment on the
        matching line/column of the file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run pycodestyle.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        lines = execute(base_command + [path],
                        split_lines=True,
                        ignore_errors=True)

        for line in lines:
            # Each line follows the --format template built above:
            # "<code>:<row>:<col>:<text>".
            try:
                error_code, row, col, text = line.split(':', 3)
                row = int(row)
                col = int(col)
            except Exception as e:
                self.logger.error('Cannot parse pycodestyle line "%s": %s',
                                  line, e)
                continue

            f.comment(text=text.strip(),
                      first_line=row,
                      start_column=col,
                      error_code=error_code)
from __future__ import unicode_literals
import json
import os
import re
from collections import OrderedDict
import six
from reviewbot.config import config
from reviewbot.tools.base import BaseTool, FullRepositoryToolMixin
from reviewbot.utils.process import execute
class GoTool(FullRepositoryToolMixin, BaseTool):
    """Review Bot tool to run Go Tools."""

    name = 'GoTool'
    version = '1.0'
    description = (
        'Checks Go code for test errors using built-in Go tools "go test", '
        'and "go vet".'
    )
    timeout = 90

    exe_dependencies = ['go']
    file_patterns = ['*.go']

    options = [
        {
            'name': 'test',
            'field_type': 'django.forms.BooleanField',
            'default': False,
            'field_options': {
                'label': 'Run tests',
                'required': False,
                'help_text': 'Run unit tests using "go test".',
            },
        },
        {
            'name': 'vet',
            'field_type': 'django.forms.BooleanField',
            'default': True,
            'field_options': {
                'label': 'Vet code',
                'required': False,
                'help_text': 'Run lint checks against code using "go vet".',
            },
        },
    ]

    #: Matches package header lines ("# <package>") in Go tool output.
    #:
    #: NOTE(review): This does not appear to be referenced anywhere in this
    #: class; it may be left over from an earlier implementation. Verify
    #: before removing.
    PACKAGE_LINE_RE = re.compile(r'^# (.*)$')

    #: Matches a single `go vet` error line of the form
    #: ``<path>.go:<line>:<column>: <message>``, with an optional
    #: "vet: " prefix.
    VET_ERROR_RE = re.compile(
        r'^(vet: )?(?P<path>.*\.go):(?P<linenum>\d+):(?P<column>\d+): '
        r'(?P<text>.*)$',
        re.M)

    def get_can_handle_file(self, review_file, **kwargs):
        """Return whether this tool can handle a given file.

        Args:
            review_file (reviewbot.processing.review.File):
                The file to check.

            **kwargs (dict, unused):
                Additional keyword arguments passed to :py:meth:`execute`.
                This is intended for future expansion.

        Returns:
            bool:
            ``True`` if the file can be handled. ``False`` if it cannot.
        """
        # Go test files (*_test.go) are excluded from per-file handling;
        # presumably they're covered by the "go test" run instead —
        # TODO confirm.
        return (
            super(GoTool, self).get_can_handle_file(review_file, **kwargs) and
            not review_file.dest_file.lower().endswith('_test.go')
        )

    def handle_files(self, files, review, **kwargs):
        """Perform a review of all files.

        Args:
            files (list of reviewbot.processing.review.File):
                The files to process.

            review (reviewbot.processing.review.Review):
                The review that comments will apply to.

            **kwargs (dict):
                Additional keyword arguments.
        """
        packages = set()
        patched_files_map = {}

        # First pass: handle_file() populates the set of package directories
        # and the path -> File mapping for every file in the review.
        super(GoTool, self).handle_files(files=files,
                                         packages=packages,
                                         patched_files_map=patched_files_map,
                                         **kwargs)

        settings = self.settings
        run_test = settings.get('test', False)

        # NOTE(review): This falls back to False when the setting is absent,
        # even though the 'vet' option's declared default above is True.
        # Settings normally include the configured value, so this may be
        # intentional — verify.
        run_vet = settings.get('vet', False)

        # Second pass: run the enabled Go tools once per package.
        for package in packages:
            if run_test:
                self.run_go_test(package, review)

            if run_vet:
                self.run_go_vet(package, patched_files_map)

    def handle_file(self, f, path, packages, patched_files_map, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            packages (set of unicode):
                A set of all package names. This function will add the file's
                package to this set.

            patched_files_map (dict):
                A mapping of paths to files being reviewed. This function
                will add the path and file to this map.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        # The containing directory identifies the Go package for this file.
        packages.add(os.path.dirname(path))
        patched_files_map[path] = f

    def run_go_test(self, package, review):
        """Execute 'go test' on a given package

        Args:
            package (unicode):
                Name of the go package.

            review (reviewbot.processing.review.Review):
                The review object.
        """
        # -json makes "go test" emit one JSON object per line, and -vet=off
        # disables vet during the test run (vet is run separately in
        # run_go_vet()).
        output = execute(
            [
                config['exe_paths']['go'],
                'test',
                '-json',
                '-vet=off',
                './%s' % package,
            ],
            split_lines=True,
            ignore_errors=True)

        test_results = OrderedDict()
        found_json_errors = False

        for line in output:
            try:
                entry = json.loads(line)
            except ValueError:
                # Non-JSON lines indicate that "go test" itself had a
                # problem (e.g., a build failure). Remember this so it can
                # be reported below if no test results were parsed.
                found_json_errors = True
                continue

            if 'Test' in entry:
                action = entry['Action']

                if action in ('fail', 'output'):
                    test_name = entry['Test']

                    # Note: this rebinds the ``package`` argument to the
                    # package reported for this test entry.
                    package = entry['Package']

                    if test_name not in test_results:
                        test_results[test_name] = {
                            'failed': False,
                            'output': [],
                            'package': package,
                        }

                    test_result = test_results[test_name]

                    # Accumulate the test's output across "output" entries;
                    # a "fail" entry marks the test as failed.
                    if action == 'output':
                        test_result['output'].append(entry['Output'])
                    elif action == 'fail':
                        test_result['failed'] = True

        if test_results:
            # Report each failed test, along with its captured output.
            for test_name, test_result in six.iteritems(test_results):
                if test_result['failed']:
                    review.general_comment(
                        '%s failed in the %s package:\n'
                        '\n'
                        '```%s```'
                        % (test_name,
                           test_result['package'],
                           ''.join(test_result['output']).strip()),
                        rich_text=True)
        elif found_json_errors:
            # Nothing parsed at all, and at least one non-JSON line was
            # seen: report the raw output as a general comment.
            review.general_comment(
                'Unable to run `go test` on the %s package:\n'
                '\n'
                '```%s```'
                % (package, ''.join(output).strip()),
                rich_text=True)

    def run_go_vet(self, package, patched_files_map):
        """Execute 'go vet' on a given package

        Args:
            package (unicode):
                Name of the go package.

            patched_files_map (dict):
                Mapping from filename to
                :py:class:`~reviewbot.processing.review.File` to add comments.
        """
        # Ideally, we would use -json, but unfortunately `go vet` doesn't
        # always respect this for all errors. Rather than checking for both
        # JSON output and non-JSON output, we'll just parse the usual way.
        #
        # with_errors=True merges stderr into the captured output, since
        # vet reports its findings on stderr.
        output = execute(
            [
                config['exe_paths']['go'],
                'vet',
                './%s' % package,
            ],
            with_errors=True,
            ignore_errors=True)

        for m in self.VET_ERROR_RE.finditer(output):
            path = m.group('path')
            linenum = int(m.group('linenum'))
            column = int(m.group('column'))
            text = m.group('text')

            f = patched_files_map.get(path)

            if f is None:
                # vet reported a path we didn't patch; log rather than
                # attach a comment to the wrong file.
                self.logger.error('Could not find path "%s" in patched '
                                  'file map %r',
                                  path, patched_files_map)
            else:
                f.comment(text=text,
                          first_line=linenum,
                          start_column=column)
from __future__ import unicode_literals
import json
from reviewbot.config import config
from reviewbot.tools.base import BaseTool
from reviewbot.utils.process import execute
from reviewbot.utils.text import split_comma_separated
class RubocopTool(BaseTool):
    """Review Bot tool to run rubocop."""

    name = 'RuboCop'
    version = '1.0'
    description = (
        'Checks Ruby code for style errors based on the community Ruby style '
        'guide using RuboCop.'
    )
    timeout = 60

    exe_dependencies = ['rubocop']
    file_patterns = ['*.rb']

    options = [
        {
            'name': 'except',
            'field_type': 'django.forms.CharField',
            'default': '',
            'field_options': {
                'label': 'Except',
                'help_text': (
                    'Run all cops enabled by configuration except the '
                    'specified cop(s) and/or departments. This will be '
                    'passed to the --except command line argument (e.g. '
                    'Lint/UselessAssignment).'
                ),
                'required': False,
            },
        },
    ]

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        settings = self.settings
        except_list = split_comma_separated(settings.get('except', '').strip())

        cmdline = [
            config['exe_paths']['rubocop'],
            '--format=json',
            '--display-style-guide',
        ]

        if except_list:
            cmdline.append('--except=%s' % ','.join(except_list))

        return cmdline

    def handle_file(self, f, path, base_command, **kwargs):
        """Perform a review of a single file.

        Args:
            f (reviewbot.processing.review.File):
                The file to process.

            path (unicode):
                The local path to the patched file to review.

            base_command (list of unicode):
                The base command used to run rubocop.

            **kwargs (dict, unused):
                Additional keyword arguments.
        """
        output = execute(base_command + [path],
                         ignore_errors=True)

        try:
            results = json.loads(output)
        except ValueError:
            # There's an error here. It *should* be in the first line.
            # Subsequent lines may contain stack traces or mostly-empty
            # result JSON payloads.
            lines = output.splitlines()

            if lines:
                error_details = lines[0].strip()
            else:
                # RuboCop produced no output at all (e.g., it failed to
                # launch). Previously this crashed with an IndexError on
                # lines[0]; report a generic message instead.
                error_details = 'No output from rubocop.'

            f.comment('RuboCop could not analyze this file, due to the '
                      'following errors:\n'
                      '\n'
                      '```%s```'
                      % error_details,
                      first_line=None,
                      rich_text=True)
            return

        if results['summary']['offense_count'] > 0:
            for offense in results['files'][0]['offenses']:
                cop_name = offense['cop_name']
                message = offense['message']
                location = offense['location']

                # Strip away the cop name prefix, if found.
                prefix = '%s: ' % cop_name

                if message.startswith(prefix):
                    message = message[len(prefix):]

                # Check the old and new fields, for compatibility.
                first_line = location.get('start_line', location['line'])
                last_line = location.get('last_line', location['line'])
                start_column = location.get('start_column', location['column'])

                f.comment(message,
                          first_line=first_line,
                          num_lines=last_line - first_line + 1,
                          start_column=start_column,
                          severity=offense.get('severity'),
                          error_code=cop_name,
                          rich_text=True)
from __future__ import unicode_literals
from reviewbot.deprecation import RemovedInReviewBot40Warning
from reviewbot.tools.base import BaseTool, FullRepositoryToolMixin
class Tool(BaseTool):
    """Legacy base class for tools.

    Deprecated:
        3.0:
        Subclasses should instead inherit from
        :py:class:`reviewbot.tools.base.tool.BaseTool` (or a more specific
        subclass).

        This will be removed in Review Bot 4.0.
    """

    #: Internal state for marking this as a legacy tool.
    #:
    #: Do not change this. It is necessary for legacy tools to continue to
    #: work in Review Bot 3.0.
    #:
    #: Version Added:
    #:     3.0
    #:
    #: Type:
    #:     bool
    legacy_tool = True

    def __new__(cls, *args, **kwargs):
        """Create an instance of the tool.

        A deprecation warning is emitted, describing the upcoming removal
        and the changes subclasses will need to make.

        Args:
            *args (tuple):
                Positional arguments to pass to the constructor.

            **kwargs (dict):
                Keyword arguments to pass to the constructor.

        Returns:
            Tool:
            A new instance of the tool.
        """
        message = (
            '%s must subclass reviewbot.tools.base.BaseTool. All '
            'overridden methods, including __init__() and handle_file(), '
            'must take a **kwargs argument, and self.settings should be '
            'accessed for tool-specific settings. Legacy support will be '
            'removed in Review Bot 4.0.'
            % cls.__name__)

        RemovedInReviewBot40Warning.warn(message)

        return super(Tool, cls).__new__(cls)
class RepositoryTool(FullRepositoryToolMixin, BaseTool):
    """Legacy base class for tools that need access to the entire repository.

    Deprecated:
        3.0:
        Subclasses should instead inherit from
        :py:class:`reviewbot.tools.base.tool.BaseTool` (or a more specific
        subclass) and mix in
        :py:class:`reviewbot.tools.base.mixins.FullRepositoryToolMixin`.

        This will be removed in Review Bot 4.0.
    """

    #: Internal state for marking this as a legacy tool.
    #:
    #: Do not change this. It is necessary for legacy tools to continue to
    #: work in Review Bot 3.0.
    #:
    #: Version Added:
    #:     3.0
    #:
    #: Type:
    #:     bool
    legacy_tool = True

    def __new__(cls, *args, **kwargs):
        """Create an instance of the tool.

        A deprecation warning is emitted, describing the upcoming removal
        and the changes subclasses will need to make.

        Args:
            *args (tuple):
                Positional arguments to pass to the constructor.

            **kwargs (dict):
                Keyword arguments to pass to the constructor.

        Returns:
            Tool:
            A new instance of the tool.
        """
        message = (
            '%s must subclass reviewbot.tools.base.BaseTool, and mix in '
            'reviewbot.tools.base.mixins.FullRepositoryToolMixin. All '
            'overridden methods, including __init__() and handle_file(), '
            'must take a **kwargs argument, and self.settings should be '
            'accessed for tool-specific settings. Legacy support will be '
            'removed in Review Bot 4.0.'
            % cls.__name__)

        RemovedInReviewBot40Warning.warn(message)

        return super(RepositoryTool, cls).__new__(cls)
from __future__ import unicode_literals
import os
import re
from reviewbot.config import config
from reviewbot.utils.filesystem import chdir, ensure_dirs_exist
from reviewbot.utils.process import execute
from reviewbot.utils.text import split_comma_separated
# Python 3.4+ includes glob.escape, but older versions do not. Optimistically,
# we'll use glob.escape, and we'll fall back on a custom implementation.
try:
    from glob import escape as glob_escape
except ImportError:
    _glob_escape_pattern = re.compile(r'([*?[])')

    def glob_escape(path):
        """Escape all glob special characters in a path.

        This mirrors the behavior of Python 3.4+'s :py:func:`glob.escape`.

        Args:
            path (unicode):
                The path to escape.

        Returns:
            unicode:
            The path, with all glob wildcard characters escaped.
        """
        # Split off only the drive portion (e.g. "C:"), matching
        # glob.escape(). The previous implementation used os.path.split()
        # here, which left the entire directory portion unescaped and
        # dropped the path separator when rejoining.
        drive, path = os.path.splitdrive(path)

        return '%s%s' % (drive, _glob_escape_pattern.sub(r'[\1]', path))
class FilePatternsFromSettingMixin(object):
    """Mixin to set file patterns based on a configured tool setting.

    Subclasses can derive their file patterns from either a setting holding
    a comma-separated list of file patterns, or one holding a comma-separated
    list of file extensions. When both are configured, the file patterns
    setting wins over the extensions setting.

    If the user configured neither, the subclass's default list of file
    patterns (if any) is left in place.

    Version Added:
        3.0
    """

    #: The name of a tool setting for a comma-separated list of extensions.
    #:
    #: Type:
    #:     unicode
    file_extensions_setting = None

    #: The name of a tool setting for a comma-separated list of patterns.
    #:
    #: Type:
    #:     unicode
    file_patterns_setting = None

    #: Whether to include default file patterns in the resulting list.
    #:
    #: Type:
    #:     boolean
    include_default_file_patterns = True

    def __init__(self, **kwargs):
        """Initialize the tool.

        Args:
            **kwargs (dict):
                Keyword arguments for the tool.
        """
        super(FilePatternsFromSettingMixin, self).__init__(**kwargs)

        settings = self.settings
        patterns = None

        # Prefer explicit file patterns, if that setting is configured.
        if self.file_patterns_setting:
            setting_value = settings.get(self.file_patterns_setting,
                                         '').strip()

            if setting_value:
                patterns = split_comma_separated(setting_value)

        # Otherwise, build "*.<ext>" patterns from the extensions setting.
        if not patterns and self.file_extensions_setting:
            setting_value = settings.get(self.file_extensions_setting,
                                         '').strip()
            patterns = [
                '*.%s' % glob_escape(ext.lstrip('.'))
                for ext in split_comma_separated(setting_value)
            ]

        if patterns:
            if self.include_default_file_patterns and self.file_patterns:
                patterns += self.file_patterns

            # Store a sorted, de-duplicated list, dropping empty entries.
            self.file_patterns = sorted(
                pattern
                for pattern in set(patterns)
                if pattern
            )
class FullRepositoryToolMixin(object):
    """Mixin for tools that need access to the entire repository.

    This will take care of checking out a copy of the repository and applying
    patches from the diff being reviewed.

    Version Added:
        3.0:
        This replaced the legacy :py:class:`reviewbot.tools.RepositoryTool`.
    """

    #: Tools using this mixin always require a working directory to hold
    #: the repository checkout.
    working_directory_required = True

    def execute(self, review, repository=None, base_commit_id=None, **kwargs):
        """Perform a review using the tool.

        This syncs the repository, checks out the base commit, applies each
        reviewed file's patch in the checkout, and then runs the tool via
        the parent class's :py:meth:`execute`.

        Args:
            review (reviewbot.processing.review.Review):
                The review object.

            repository (reviewbot.repositories.Repository, optional):
                The repository.

                Note that while this is optional in the signature, it must
                be provided in practice, as it is used unconditionally below.

            base_commit_id (unicode, optional):
                The ID of the commit that the patch should be applied to.

            **kwargs (dict):
                Additional keyword arguments passed to the parent class's
                :py:meth:`execute`.
        """
        repository.sync()
        working_dir = repository.checkout(base_commit_id)

        # Patch all the files first.
        with chdir(working_dir):
            for f in review.files:
                self.logger.debug('Patching %s', f.dest_file)
                f.apply_patch(working_dir)

            # Now run the tool for everything.
            super(FullRepositoryToolMixin, self).execute(review, **kwargs)
class JavaToolMixin(object):
    """Mixin for Java-based tools.

    This handles checking for a usable Java runtime, validating configured
    classpaths, and building the base :command:`java` command line for
    invoking the tool's main class.

    Version Added:
        3.0
    """

    #: Main class to call to run the Java application.
    #:
    #: Type:
    #:     unicode
    java_main = None

    #: The key identifying the classpaths to use.
    #:
    #: Type:
    #:     unicode
    java_classpaths_key = None

    exe_dependencies = ['java']

    @classmethod
    def set_has_java_runtime(cls, has_runtime):
        """Set whether there's a Java runtime installed.

        Args:
            has_runtime (bool):
                Whether there's a runtime installed.
        """
        # This is stored on JavaToolMixin itself (not cls), so the result
        # is shared by all Java-based tools.
        JavaToolMixin._has_java_runtime = has_runtime

    @classmethod
    def clear_has_java_runtime(cls):
        """Clear whether there's a Java runtime installed.

        This will force the next dependency check to re-check for a runtime.
        """
        try:
            delattr(JavaToolMixin, '_has_java_runtime')
        except AttributeError:
            pass

    def check_dependencies(self):
        """Verify the tool's dependencies are installed.

        This will invoke the base class's dependency checking, ensuring that
        :command:`java` is available, and will then attempt to run the
        configured Java class (:py:attr:`java_main`), checking that it could
        be found.

        Returns:
            bool:
            True if all dependencies for the tool are satisfied. If this
            returns False, the worker will not listen for this Tool's queue,
            and a warning will be logged.
        """
        # Run any standard dependency checks.
        if not super(JavaToolMixin, self).check_dependencies():
            return False

        # Make sure that `java` has a suitable runtime. It's not enough
        # to just be present in the path.
        if not hasattr(JavaToolMixin, '_has_java_runtime'):
            try:
                execute([config['exe_paths']['java'], '-version'])
                JavaToolMixin.set_has_java_runtime(True)
            except Exception:
                JavaToolMixin.set_has_java_runtime(False)

        if not JavaToolMixin._has_java_runtime:
            return False

        # If there's a classpath set, make sure the modules we need are in it.
        if self.java_classpaths_key is not None:
            classpath = \
                config['java_classpaths'].get(self.java_classpaths_key, [])

            if not self._check_java_classpath(classpath):
                return False

        # If this tool is invoked directly through Java with a Main class,
        # check it now.
        if self.java_main:
            output = execute(self._build_java_command(),
                             ignore_errors=True)

            return 'Could not find or load main class' not in output

        return True

    def build_base_command(self, **kwargs):
        """Build the base command line used to review files.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line.
        """
        return self._build_java_command(**kwargs)

    def _build_java_command(self, **kwargs):
        """Return the base Java command for running the class.

        This will build the class path and command line for running
        :py:attr:`java_main`.

        Note: this accepts (and ignores) keyword arguments, since
        :py:meth:`build_base_command` forwards its ``**kwargs`` here.
        Previously, the signature accepted none, which would have raised a
        TypeError had any keyword arguments been passed through.

        Args:
            **kwargs (dict, unused):
                Additional keyword arguments.

        Returns:
            list of unicode:
            The base command line for running the Java class.
        """
        classpath = ':'.join(
            config['java_classpaths'].get(self.java_classpaths_key, []))

        cmdline = [config['exe_paths']['java']]

        if classpath:
            cmdline += ['-cp', classpath]

        cmdline.append(self.java_main)

        return cmdline

    def _check_java_classpath(self, classpath):
        """Return whether all entries in a classpath exist.

        Args:
            classpath (list of unicode):
                The classpath locations.

        Returns:
            bool:
            ``True`` if all entries exist on the filesystem. ``False`` if
            one or more are missing.
        """
        if not classpath:
            return False

        for path in classpath:
            if not path or not os.path.exists(path):
                return False

        return True
from __future__ import unicode_literals
import pkg_resources
import six
from reviewbot.utils.log import get_logger
logger = get_logger(__name__,
is_task_logger=False)
#: A mapping of tool IDs to tool classes.
#:
#: Type:
#: dict
_registered_tools = {}
def register_tool_class(tool_cls):
    """Register a tool class for later lookup.

    The class must carry a ``tool_id`` attribute (set directly in unit
    tests, or via :py:meth:`load_tool_classes`), and that ID must not
    already be taken by another registered tool.

    Args:
        tool_cls (type):
            A tool class to register (subclass of
            :py:class:`reviewbot.tools.base.tool.BaseTool)`.

    Raises:
        ValueError:
            The tool could not be registered, either due to a missing
            ``tool_id`` or due to a conflict with another tool.
    """
    tool_id = getattr(tool_cls, 'tool_id', None)

    if tool_id is None:
        raise ValueError('The tool class %r is missing a tool_id attribute.'
                         % (tool_cls,))

    existing_cls = _registered_tools.get(tool_id)

    if existing_cls is not None:
        raise ValueError(
            'Another tool with the ID "%s" is already registered (%r).'
            % (tool_id, existing_cls))

    _registered_tools[tool_id] = tool_cls
def unregister_tool_class(tool_id):
    """Unregister a tool class by its ID.

    Args:
        tool_id (type):
            The ID of the tool to unregister.

    Raises:
        KeyError:
            The tool could not be found.
    """
    removed_cls = _registered_tools.pop(tool_id, None)

    if removed_cls is None:
        raise KeyError('A tool with the ID "%s" was not registered.'
                       % tool_id)
def get_tool_class(tool_id):
    """Return the tool class with a given ID.

    Args:
        tool_id (unicode):
            The ID of the tool to return.

    Returns:
        type:
        The tool class, or ``None`` if no tool with that ID exists.
    """
    try:
        return _registered_tools[tool_id]
    except KeyError:
        return None
def get_tool_classes():
    """Return all registered tool classes.

    This will be sorted in alphabetical order, based on the ID.

    Returns:
        list of type:
        A list of tool classes (subclasses of
        :py:class:`reviewbot.tools.base.tool.BaseTool`).
    """
    return sorted(_registered_tools.values(),
                  key=lambda tool_cls: tool_cls.tool_id)
def load_tool_classes():
    """Load tool classes provided by Review Bot and other packages.

    This will scan for any Python packages that provide ``reviewbot.tools``
    entrypoints, loading the tools into the tool registry.

    Any existing tools will be cleared out before this begins, and any errors
    will be logged.
    """
    _registered_tools.clear()

    entry_points = pkg_resources.iter_entry_points(group='reviewbot.tools')

    for entry_point in entry_points:
        tool_id = entry_point.name

        try:
            tool_cls = entry_point.load()
            tool_cls.tool_id = tool_id
            register_tool_class(tool_cls)
        except Exception as e:
            logger.error('Unable to load tool "%s": %s',
                         tool_id, e,
                         exc_info=True)
from __future__ import unicode_literals
from fnmatch import fnmatchcase
from reviewbot.config import config
from reviewbot.utils.log import get_logger
from reviewbot.utils.process import is_exe_in_path
class BaseTool(object):
"""The base class all Review Bot tools should inherit from.
This class provides base functionality specific to tools which
process each file separately.
Most tools will override :py:meth:`handle_file`, performing a code review
on the provided file.
If a tool would like to perform a different style of analysis, it can
override :py:meth:`handle_files`.
Attributes:
settings (dict):
Settings configured for this tool in Review Board, based on
:py:attr:`options`.
"""
#: The displayed name of the tool.
#:
#: Type:
#: unicode
name = ''
#: A short description of the tool.
#:
#: Type:
#: unicode
description = ''
#: The compatibility version of the tool.
#:
#: This should only be changed for major breaking updates. It will break
#: compatibility with existing integration configurations, requiring
#: manual updates to those configurations. Any existing configurations
#: referencing the old version will not be run, unless an older version
#: of the tool is being handled through another Review Bot worker providing
#: the older tool.
#:
#: Type:
#: unicode
version = '1'
#: A list of executable tools required by the tool.
#:
#: Each is the name of an executable on the filesystem, either in the
#: :envvar:`PATH` or defined in the ``exe_paths`` configuration.
#:
#: These will be checked when the worker starts. If a dependency for a
#: tool is missing, the worker will not enable it.
#:
#: Version Added:
#: 3.0:
#: Tools that previously implemented :py:meth:`check_dependencies`
#: may want to be updated to use this.
#:
#: Type:
#: dict
exe_dependencies = []
#: A list of filename patterns this tool can process.
#:
#: This is intended for tools that have a fixed list of file extensions
#: or specific filenames they should process. Each entry is a
#: glob file pattern (e.g., ``*.py``, ``.config/*.xml``, ``dockerfile``,
#: etc.), and must be lowercase (as filenames will be normalized to
#: lowercase for comparison). See :py:mod:`fnmatch` for pattern rules.
#:
#: Tools can leave this empty to process all files, or can override
#: :py:meth:`get_can_handle_file` to implement custom logic (e.g., basing
#: matching off a tool's settings, or providing case-sensitive matches).
#:
#: Version Added:
#: 3.0
#:
#: Type:
#: list of unicode
file_patterns = []
#: Configurable options defined for the tool.
#:
#: Each item in the list is a dictionary representing a form field to
#: display in the Review Board administration UI. Keys include:
#:
#: ``field_type`` (:py:class:`unicode`):
#: The full path as a string to a Django form field class to render.
#:
#: ``name`` (:py:class:`unicode`):
#: The name/ID of the field. This will map to the key in the
#: settings provided to :py:meth:`handle_files` and
#: :py:meth:`handle_file`.
#:
#: ``default`` (:py:class:`object`, optional):
#: The default value for the field.
#:
#: ``field_options`` (:py:class:`dict`, optional):
#: Additional options to pass to the form field's constructor.
#:
#: ``widget`` (:py:class:`dict`, optional):
#: Information on the Django form field widget class used to render
#: the field. This dictionary includes the following keys:
#:
#: ``type`` (:py:class:`unicode`):
#: The full path as a string to a Django form field widget class.
#:
#: ``attrs`` (:py:class:`dict`, optional):
#: A dictionary of attributes passed to the widget's constructor.
#:
#: Type:
#: list
options = []
#: Whether this tool requires a full checkout and working directory to run.
#:
#: Type:
#: bool
working_directory_required = False
#: Timeout for tool execution, in seconds.
#:
#: Type:
#: int
timeout = None
def __init__(self, settings=None, **kwargs):
"""Initialize the tool.
Version Changed:
3.0:
Added ``settings`` and ``**kwargs`` arguments. Subclasses must
provide this by Review Bot 4.0.
Args:
settings (dict, optional):
Settings provided to the tool.
**kwargs (dict):
Additional keyword arguments, for future expansion.
"""
self.settings = settings or {}
self.output = None
self._logger = None
@property
def logger(self):
"""The logger for the tool.
This logger will contain information on the process, the task (if
it's running in a task), and the tool name.
Version Added:
3.0
Type:
logging.Logger
"""
if self._logger is None:
from reviewbot.celery import get_celery
self._logger = get_logger(
self.name,
is_task_logger=get_celery().current_task is not None)
return self._logger
def check_dependencies(self, **kwargs):
"""Verify the tool's dependencies are installed.
By default, this will check :py:attr:`exe_dependencies`, ensuring each
is available to the tool.
For each entry in :py:attr:`exe_dependencies`, :envvar:`PATH` will be
checked. If the dependency name is found in the ``exe_paths`` mapping
in the Review Bot configuration, that path will be checked.
Subclasses can implement this if they need more advanced checks.
Args:
**kwargs (dict, unused):
Additional keyword arguments. This is intended for future
expansion.
Returns:
bool:
True if all dependencies for the tool are satisfied. If this
returns False, the worker will not listen for this Tool's queue,
and a warning will be logged.
"""
exe_paths = config['exe_paths']
for exe in self.exe_dependencies:
path = exe_paths.get(exe, exe)
if not path or not is_exe_in_path(path, cache=exe_paths):
return False
return True
def get_can_handle_file(self, review_file, **kwargs):
    """Return whether this tool can handle a given file.

    By default, this checks the full path of the destination file against
    the patterns in :py:attr:`file_patterns`. If the file path matches, or
    that list is empty, this will allow the file to be handled.

    Subclasses can override this to provide custom matching logic.

    Version Added:
        3.0

    Args:
        review_file (reviewbot.processing.review.File):
            The file to check.

        **kwargs (dict, unused):
            Additional keyword arguments passed to :py:meth:`execute`.
            This is intended for future expansion.

    Returns:
        bool:
        ``True`` if the file can be handled. ``False`` if it cannot.
    """
    patterns = self.file_patterns

    # No patterns configured means every file is accepted.
    if not patterns:
        return True

    # Matching is case-insensitive on the destination path.
    filename = review_file.dest_file.lower()

    return any(fnmatchcase(filename, pattern) for pattern in patterns)
def execute(self, review, repository=None, base_commit_id=None, **kwargs):
    """Perform a review using the tool.

    Version Changed:
        3.0:
        ``settings`` is deprecated in favor of the :py:attr:`settings`
        attribute on the instance. It's still provided in 3.0.
        ``**kwargs`` is now expected.

    Args:
        review (reviewbot.processing.review.Review):
            The review object.

        repository (reviewbot.repositories.Repository, optional):
            The repository.

        base_commit_id (unicode, optional):
            The ID of the commit that the patch should be applied to.

        **kwargs (dict, unused):
            Additional keyword arguments, for future expansion.
    """
    is_legacy = getattr(self, 'legacy_tool', False)

    if not is_legacy:
        # Modern tools receive the shared base command line and the
        # review object through keyword arguments.
        kwargs['base_command'] = self.build_base_command()
        kwargs['review'] = review

    self.handle_files(review.files, **kwargs)
def build_base_command(self, **kwargs):
    """Build the base command line used to review files.

    This will be passed to :py:meth:`handle_file` for each file. It's
    useful for constructing a common command line and arguments that
    will apply to each file in a diff.

    Version Added:
        3.0

    Args:
        **kwargs (dict, unused):
            Additional keyword arguments, for future expansion.

    Returns:
        list of unicode:
        The base command line.
    """
    # No common command line by default; subclasses override as needed.
    return []
def handle_files(self, files, **kwargs):
    """Perform a review of all files.

    This may be overridden by subclasses for tools that process all files
    at once.

    Version Changed:
        3.0:
        ``settings`` is deprecated in favor of the :py:attr:`settings`
        attribute on the instance. It's still provided in 3.0.
        ``**kwargs`` is now expected. These will be enforced in
        Review Bot 4.0.

    Args:
        files (list of reviewbot.processing.review.File):
            The files to process.

        **kwargs (dict):
            Additional keyword arguments passed to :py:meth:`execute`.
            This is intended for future expansion.
    """
    is_legacy = getattr(self, 'legacy_tool', False)

    for review_file in files:
        if not self.get_can_handle_file(review_file=review_file, **kwargs):
            continue

        if is_legacy:
            # Legacy tools receive the file object only.
            self.handle_file(review_file, **kwargs)
            continue

        # Modern tools only see files whose patched content is available
        # on disk.
        path = review_file.get_patched_file_path()

        if path:
            self.handle_file(review_file, path=path, **kwargs)
def handle_file(self, f, path=None, base_command=None, **kwargs):
    """Perform a review of a single file.

    Subclasses override this to process one file at a time. The default
    implementation does nothing.

    Version Changed:
        3.0:
        ``settings`` is deprecated in favor of the :py:attr:`settings`
        attribute on the instance. ``path`` was added (the result of
        :py:meth:`~reviewbot.processing.File.get_patched_file_path`,
        which must be valid for this method to be called), and
        ``base_command`` was added (the result of
        :py:meth:`build_base_command`). ``**kwargs`` is now expected.
        These will be enforced in Review Bot 4.0.

    Args:
        f (reviewbot.processing.review.File):
            The file to process.

        path (unicode, optional):
            The local path to the patched file to review. This won't be
            passed for legacy tools.

        base_command (list of unicode, optional):
            The common base command line used for reviewing a file, if
            returned from :py:meth:`build_base_command`.

        **kwargs (dict):
            Additional keyword arguments passed to :py:meth:`handle_files`.
            This is intended for future expansion.
    """
    pass
import argparse
import collections
import pandas as pd
# Columns that must be present in every input form-response table.
EXPECTED_COLUMNS = {"Path", "Verdict", "Confidence"}

# Value in the "Confidence" column that marks a high-confidence verdict.
HIGH_CONFIDENCE = "high"

# Output column names added by the discordance computation.
DISCORDANT_VERDICT_COLUMN = "Discordant Verdict"
DISCORDANT_SCORE_COLUMN = "Discordance Score"
DISCORDANT_TEXT_COLUMN = "Discordance Text"


def parse_args():
    """Parse command-line arguments and load and validate both input tables.

    Returns:
        tuple: ``(args, df1, df2)`` where ``args`` is the parsed argparse
        namespace and each DataFrame is indexed by its "Path" column with
        NaNs replaced by empty strings. Exits with an argparse error if a
        table cannot be parsed, is missing required columns, is empty, or
        the two tables share no Path values.
    """
    p = argparse.ArgumentParser(description="Check concordance between two form response tables created by 2 users "
                                            "reviewing the same images. Rows in the two tables are matched by their same Path column.")
    p.add_argument("-s1", "--suffix1", help="Suffix to append to column names from table1", default="1")
    p.add_argument("-s2", "--suffix2", help="Suffix to append to column names from table2", default="2")
    p.add_argument("-o", "--output-table", help="Path of output .tsv or .xls", default="combined.tsv")
    p.add_argument("table1", help="Path of form response table .tsv")
    p.add_argument("table2", help="Path of form response table .tsv")
    args = p.parse_args()

    def read_and_validate(path):
        """Load one table, validate it, and return it indexed by Path."""
        try:
            if path.endswith((".xls", ".xlsx")):
                # BUG FIX: the original forced engine="openpyxl", but
                # openpyxl cannot read legacy .xls files. Let pandas pick
                # the appropriate engine based on the file type.
                df = pd.read_excel(path)
            else:
                df = pd.read_table(path)
        except Exception as e:
            p.error(f"Error parsing {path}: {e}")

        missing = EXPECTED_COLUMNS - set(df.columns)
        if missing:
            p.error(f"{path} is missing columns: " + ", ".join(missing))
        if len(df) == 0:
            p.error(f"{path} is empty")

        return df.set_index("Path").fillna("")

    df1 = read_and_validate(args.table1)
    df2 = read_and_validate(args.table2)

    if len(set(df1.index) & set(df2.index)) == 0:
        p.error(f"{args.table1} Path column values have 0 overlap with {args.table2} Path column values. Tables can only "
                f"be combined if they have the same Paths.")

    return args, df1, df2
def compute_discordance_columns_func(suffix1, suffix2):
    """Return a row-wise callable that fills in the discordance columns.

    Args:
        suffix1 (str): Suffix appended to the first reviewer's columns.
        suffix2 (str): Suffix appended to the second reviewer's columns.

    Returns:
        callable: A function suitable for ``DataFrame.apply(..., axis=1)``
        that adds the discordant-verdict flag, score, and text columns to
        rows where both reviewers entered a verdict.
    """
    verdict_column1, verdict_column2 = f"Verdict_{suffix1}", f"Verdict_{suffix2}"
    confidence_column1, confidence_column2 = f"Confidence_{suffix1}", f"Confidence_{suffix2}"

    def compute_discordance_columns(row):
        verdict1 = row[verdict_column1]
        verdict2 = row[verdict_column2]

        # Leave rows untouched unless both reviewers entered a verdict.
        if not verdict1 or not verdict2:
            return row

        conf1 = row[confidence_column1]
        conf2 = row[confidence_column2]
        high1 = conf1 == HIGH_CONFIDENCE
        high2 = conf2 == HIGH_CONFIDENCE

        if verdict1 == verdict2:
            # Same verdict: discordance only comes from confidence levels.
            row[DISCORDANT_VERDICT_COLUMN] = 0
            row[DISCORDANT_SCORE_COLUMN] = 0
            row[DISCORDANT_TEXT_COLUMN] = "same verdict"
            if conf1 and conf2:
                if high1 and high2:
                    row[DISCORDANT_SCORE_COLUMN] = 0
                    row[DISCORDANT_TEXT_COLUMN] = "same verdict, both high confidence"
                elif high1 or high2:
                    row[DISCORDANT_SCORE_COLUMN] = 1
                    row[DISCORDANT_TEXT_COLUMN] = "same verdict, one high confidence"
                else:
                    row[DISCORDANT_SCORE_COLUMN] = 0
                    row[DISCORDANT_TEXT_COLUMN] = "same verdict, zero high confidence"
        else:
            # Different verdicts: confidence levels raise the base score.
            row[DISCORDANT_VERDICT_COLUMN] = 1
            row[DISCORDANT_SCORE_COLUMN] = 2
            row[DISCORDANT_TEXT_COLUMN] = "different verdict"
            if conf1 or conf2:
                if high1 and high2:
                    row[DISCORDANT_SCORE_COLUMN] = 4
                    row[DISCORDANT_TEXT_COLUMN] = "different verdict, both high confidence"
                elif high1 or high2:
                    row[DISCORDANT_SCORE_COLUMN] = 3
                    row[DISCORDANT_TEXT_COLUMN] = "different verdict, one high confidence"
                else:
                    row[DISCORDANT_SCORE_COLUMN] = 2
                    row[DISCORDANT_TEXT_COLUMN] = "different verdict, zero high confidence"
        return row

    return compute_discordance_columns
def main():
    """Combine the two tables, print concordance stats, and write the output."""
    args, df1, df2 = parse_args()
    df_joined = df1.join(df2, lsuffix=f"_{args.suffix1}", rsuffix=f"_{args.suffix2}", how="outer").reset_index()
    df_joined = df_joined.fillna("")

    def get_counts_string(df, column, label="", sep=", "):
        """Return per-value counts of the given column as a formatted string."""
        return sep.join(
            [f"{count:2d} {label} {key}" for key, count in sorted(collections.Counter(df[column].fillna("<empty>")).items())])

    def print_table_stats(table_path, df):
        """Print verdict counts and the high-confidence fraction for one table."""
        verdicts_counter = get_counts_string(df, "Verdict")
        num_verdicts = sum(df['Verdict'].str.len() > 0)
        num_high_confidence = collections.Counter(df['Confidence']).get(HIGH_CONFIDENCE, 0)
        # Guard against ZeroDivisionError when a table has no verdicts at all.
        high_confidence_fraction = num_high_confidence / num_verdicts if num_verdicts else 0
        print(f"{table_path}: {num_verdicts} verdicts. {verdicts_counter}. High confidence for {100*high_confidence_fraction:4.1f}% of them")

    # print stats about input tables
    print("-"*20)
    # BUG FIX: the original printed args.table2's filename with df1's stats
    # and args.table1's filename with df2's stats; each table now gets its
    # own stats.
    print_table_stats(args.table1, df1)
    print_table_stats(args.table2, df2)

    # compute concordance
    df_joined = df_joined.apply(compute_discordance_columns_func(args.suffix1, args.suffix2), axis=1)

    # print concordance stats
    print("-"*20)
    # Use Series.sum() (which skips NaN) instead of the builtin sum(): rows
    # where either verdict was missing never get the discordance columns
    # filled in, leaving NaNs that would poison the builtin sum.
    num_discordant_verdicts = int(df_joined[DISCORDANT_VERDICT_COLUMN].sum())
    print(f"{num_discordant_verdicts} out of {len(df_joined)} ({100*num_discordant_verdicts/len(df_joined):0.1f}%) of "
          f"verdicts differed between the two tables")
    print(f"\nDiscordance score = {df_joined[DISCORDANT_SCORE_COLUMN].sum()}:")
    print(get_counts_string(df_joined, DISCORDANT_TEXT_COLUMN, label="review comparisons:", sep="\n"))

    # write the combined output table, most-discordant rows first
    print("-"*20)
    verdict_column1 = f"Verdict_{args.suffix1}"
    confidence_column1 = f"Confidence_{args.suffix1}"
    df_joined.sort_values(
        [DISCORDANT_SCORE_COLUMN, DISCORDANT_TEXT_COLUMN, verdict_column1, confidence_column1],
        ascending=False, inplace=True)
    if args.output_table.endswith((".xls", ".xlsx")):
        df_joined.to_excel(args.output_table, header=True, index=False)
    else:
        df_joined.to_csv(args.output_table, header=True, sep="\t", index=False)
    print(f"Wrote {len(df_joined)} rows to {args.output_table}")


if __name__ == "__main__":
    main()
# Reviser
[](https://pypi.org/project/reviser/)
[](https://gitlab.com/rocket-boosters/reviser/commits/main)
[](https://gitlab.com/rocket-boosters/reviser/commits/main)
[](https://github.com/psf/black)
[](https://gitlab.com/pycqa/flake8)
[](http://mypy-lang.org/)
[](https://pypi.org/project/reviser/)
Reviser is a tool for AWS Lambda function and layer version deployment and
alias management specifically for Python runtimes where the actual
infrastructure is managed separately, mostly likely by CloudFormation or
Terraform. There are a number of ways to manage AWS Lambda functions and layers
already, but their generality and all-encompassing approaches don't integrate
well with certain workflows and can be overly complex for many needs.
Reviser is scoped to facilitate the deployment and updating of AWS Lambda
Python functions and layers for all version-specific configurations,
e.g. code bundles, environment variables, memory size, and timeout lengths.
The expectation is that functions are created by other means and then
configuration for versions is managed with the reviser through an interactive
or scripted shell of commands.
- [Basic Usage](#basic-usage)
- [Shell Commands](#shell-commands)
- [alias](#alias)
- [bundle](#bundle)
- [configs](#configs)
- [deploy](#deploy)
- [exit](#exit)
- [help (?)](#help-)
- [list](#list)
- [prune](#prune)
- [push](#push)
- [region](#region)
- [reload](#reload)
- [select](#select)
- [shell](#shell)
- [status](#status)
- [tail](#tail)
- [Configuration Files](#configuration-files)
- [bucket(s)](#buckets)
- [AWS region](#aws-region)
- [targets](#targets)
- [targets[N].kind](#targetsnkind)
- [targets[N].name(s)](#targetsnnames)
- [targets[N].region](#targetsnregion)
- [targets[N].dependencies](#targetsndependencies)
- [targets[N].dependencies.skip](#targetsndependenciesskip)
- [targets[N].dependencies(kind="pipper")](#targetsndependencieskindpipper)
- [targets[N].dependencies(kind="poetry")](#targetsndependencieskindpoetry)
- [targets[N].bundle](#targetsnbundle)
- [targets[N].bundle.include(s)](#targetsnbundleincludes)
- [targets[N].bundle.exclude(s)](#targetsnbundleexcludes)
- [targets[N].bundle.exclude_package(s)](#targetsnbundleexclude_packages)
- [targets[N].bundle.omit_package(s)](#targetsnbundleomit_packages)
- [targets[N].bundle.handler](#targetsnbundlehandler)
- [function targets](#function-targets)
- [(function) targets[N].image](#function-targetsnimage)
- [(function) targets[N].image.uri](#function-targetsnimageuri)
- [(function) targets[N].image.entrypoint](#function-targetsnimageentrypoint)
- [(function) targets[N].image.cmd](#function-targetsnimagecmd)
- [(function) targets[N].image.workingdir](#function-targetsnimageworkingdir)
- [(function) targets[N].layer(s)](#function-targetsnlayers)
- [(function) targets[N].memory](#function-targetsnmemory)
- [(function) targets[N].timeout](#function-targetsntimeout)
- [(function) targets[N].variable(s)](#function-targetsnvariables)
- [(function) targets[N].ignore(s)](#function-targetsnignores)
- [run](#run)
- [Shared Dependencies](#shared-dependencies)
- [Local Execution](#local-execution)
# Basic Usage
A project defines one or more lambda function configuration targets in a
`lambda.yaml` file in the root project directory. The most basic configuration
looks like this:
```yaml
bucket: name-of-s3-bucket-for-code-uploads
targets:
- kind: function
name: foo-function
```
This configuration defines a single `foo-function` lambda function target that
will be managed by reviser. The expectation is that this function exists and
was created by another means, e.g. CloudFormation or Terraform. A bucket must
be specified to indicate where the zipped code bundles will be uploaded prior
to them being applied to the target(s). The bucket must already exist as well.
By default the package will include no external, e.g. pip, package
dependencies. It will search for the first folder in the directory where the
`lambda.yaml` file is located that contains an `__init__.py` file, identifying
that folder as a Python source package for the function. It will also look for
a `lambda_function.py` alongside the `lambda.yaml` file to serve as the
entrypoint. These will be included in the uploaded and deployed code bundle
when a `push` or a `deploy` command is executed. These default settings can
all be configured along with many more as will be outlined below.
To deploy this example project, install the reviser python library and
start the shell with the command `reviser` in your terminal of choice
in the directory where the `lambda.yaml` file resides. Docker must be running
and available in the terminal in which you execute this command, as reviser
is a containerized shell environment that runs within a container that mimics
the actual AWS Lambda runtime environment. Then run the `push` command within
the launched shell to create and upload the bundled source code and publish
a new version of the `foo-function` lambda function with the uploaded results.
# Shell commands
The reviser command starts an interactive shell within a Docker container
compatible with the AWS Python Lambda runtime. This shell contains various
commands for deploying and managing deployments of lambda functions and layers
defined in a project's `lambda.yaml` configuration file, the format of which
is described later in this document. The shell commands are:
## alias
Assign an alias to the specified version of the selected or specified lambda
function.
```
usage: alias [--function FUNCTION] [--yes] [--create] alias version
positional arguments:
alias Name of an existing alias to move to the specified
version, or the name of an alias to create and assign
to the specified function version if the --create flag
is included to allow for creating a new alias.
version Version of the function that the alias should be
assigned to. This will either be an integer value or
$LATEST. To see what versions are available for a given
function use the list command.
options:
--function FUNCTION The alias command only acts on one function. This can
be achieved either by selecting the function target via
the select command, or specifying the function name to
apply this change to with this flag.
--yes By default this command will require input confirmation
before carrying out the change. Specify this flag to
skip input confirmation and proceed without a breaking
prompt.
--create When specified the alias will be created instead of
reassigned. Use this to create and assign new aliases
to a function. When this flag is not specified, the
command will fail if the alias doesn't exist, which
helps prevent accidental alias creation.
```
This command moves an existing alias to the specified version, or creates a new
alias and assigns it to that version when the `--create` flag is included. For
example, to assign an existing `test` alias to version 42 of the selected
function, the command would be:
```
> alias test 42
```
If multiple functions are currently selected, use `--function=<NAME>`
to identify the function to which the alias change will be applied.
## bundle
Install dependencies and copies includes into a zipped file ready for
deployment.
```
usage: bundle [--reinstall] [--output OUTPUT]
options:
--reinstall Add this flag to reinstall dependencies on a repeated
bundle operation. By default, dependencies will remain
cached for the lifetime of the shell to speed up the
bundling process. This will force dependencies to be
installed even if they had been installed previously.
--output OUTPUT, -o OUTPUT
Output the bundled artifacts into the specified output
path.
```
The resulting zip file is structured correctly to be deployed to the lambda
function/layer target via an S3 upload and subsequent publish command.
## configs
Display the configs merged from its source file, dynamic values and defaults.
```
usage: configs
```
Use this to inspect and validate that the loaded configuration meets expectations when
parsed into the reviser shell.
## deploy
Upload the bundled contents to the upload S3 bucket and then publish a new
version.
```
usage: deploy [--description DESCRIPTION] [--dry-run]
options:
--description DESCRIPTION
Specify a message to assign to the version published
by the deploy command.
--dry-run If set, the deploy operation will be exercised without
actually carrying out the actions. This can be useful
to validate the deploy process without side effects.
```
This will be carried out for each of the lambda targets with that new bundle and
any modified settings between the current configuration and that target's
existing configuration. This command will fail if a target being deployed
has not already been bundled.
## exit
Exit the shell and returns to the parent terminal.
```
usage: exit
```
## help (?)
Display help information on the commands available within the shell.
```
usage: help
```
Additional help on each command can be found using the --help flag on the command in
question.
## list
List versions of the specified lambda targets with info about each version.
```
usage: list
```
## prune
Remove old function and/or layer versions for the selected targets.
```
usage: prune [--start START] [--end END] [--dry-run] [-y]
options:
--start START Keep versions lower (earlier/before) this one. A negative
value can be specified for relative indexing in the same
fashion as Python lists.
--end END Do not prune versions higher than this value. A negative
value can be specified for relative indexing in the same
fashion as Python lists.
--dry-run Echo pruning operation without actually executing it.
-y, --yes Run the prune process without reviewing first.
```
## push
Combined single command for bundling and deploying the selected targets.
```
usage: push [--reinstall] [--output OUTPUT] [--description DESCRIPTION]
[--dry-run]
options:
--reinstall Add this flag to reinstall dependencies on a repeated
bundle operation. By default, dependencies will remain
cached for the lifetime of the shell to speed up the
bundling process. This will force dependencies to be
installed even if they had been installed previously.
--output OUTPUT, -o OUTPUT
Output the bundled artifacts into the specified output
path.
--description DESCRIPTION
Specify a message to assign to the version published
by the deploy command.
--dry-run If set, the deploy operation will be exercised without
actually carrying out the actions. This can be useful
to validate the deploy process without side effects.
```
## region
Switch the target region.
```
usage: region
[{us-east-2,us-east-1,us-west-1,us-west-2,af-south-1,ap-east-1,ap-south-1,ap-northeast-3,ap-northeast-2,ap-southeast-1,ap-southeast-2,ap-northeast-1,ca-central-1,cn-north-1,cn-northwest-1,eu-central-1,eu-west-1,eu-west-2,eu-south-1,eu-west-3,eu-north-1,me-south-1,sa-east-1,us-gov-east-1,us-gov-west-1}]
positional arguments:
{us-east-2,us-east-1,us-west-1,us-west-2,af-south-1,ap-east-1,ap-south-1,ap-northeast-3,ap-northeast-2,ap-southeast-1,ap-southeast-2,ap-northeast-1,ca-central-1,cn-north-1,cn-northwest-1,eu-central-1,eu-west-1,eu-west-2,eu-south-1,eu-west-3,eu-north-1,me-south-1,sa-east-1,us-gov-east-1,us-gov-west-1}
AWS region name for the override. Leave it blank to
return to the default region for the initially loaded
credentials and/or environment variables.
```
## reload
Reload the lambda.yaml configuration file from disk.
```
usage: reload
```
## select
Allow for selecting subsets of the targets within the loaded configuration.
```
usage: select [--functions] [--layers] [--exact] [name ...]
positional arguments:
name Specifies the value to match against the function and
layer target names available from the configuration.
This can include shell-style wildcards and will also
match against partial strings. If the --exact flag is
specified, this value must exactly match one of the
targets instead of the default fuzzy matching
behavior.
options:
--functions, --function, --func, -f
When specified, functions will be selected. This will
default to true if neither of --functions or --layers
is specified. Will default to false if --layers is
specified.
--layers, --layer, -l
When specified, layers will be selected. This will
default to true if neither of --functions or --layers
is specified. Will default to false if --functions is
specified.
--exact Forces the match to be exact instead of fuzzy.
```
The subsets are fuzzy-matched unless the --exact flag is used.
## shell
Macro command to convert to interactive shell operation.
```
usage: shell
```
This is a special command to use in run command groups/macros to start interactive
command mode for the terminal. Useful when in scenarios where you wish to prefix an
interactive session with commonly executed commands. For example, if you want to select
certain targets with the select command as part of starting the shell, you could create
a run command group/macro in your lambda.yaml that executes the select command and then
executes the shell command. This would update the selection and then, with the shell
command, start the shell in interactive mode. Without specifying the shell command
here, the run command group/macro would just set a selection and then exit.
## status
Show the current status information for each of the selected lambda targets.
```
usage: status [qualifier]
positional arguments:
qualifier Specifies a version or alias to show status for. If not
specified, $LATEST will be used for functions and the latest
version will be dynamically determined for layers.
```
## tail
Tail the logs for the selected lambda functions.
```
usage: tail
```
More detail on any of these commands can be found from within the shell by
executing them with the `--help` flag.
The reviser application also supports non-interactive batch command
execution via `run` macros that behave similarly to how `npm run <command>`
commands are defined. For more details see the `run` attribute section of the
configuration file definitions below.
# Configuration Files
Configuration files, named `lambda.yaml` define the lambda targets to be
managed within a project. The top-level keys in the configuration file are:
## bucket(s)
This key defines the bucket or buckets where zipped source bundles will be
uploaded before they are deployed to their lambda function and/or layer
targets. Basic usage is to specify the bucket as a key:
```yaml
bucket: bucket-name
```
It's also possible for multi-account scenarios to specify multiple buckets as
a key-value pairing where the keys are the AWS account IDs (as strings) and
the values are the bucket names associated with those IDs. In this case the
bucket selection is made dynamically based on the AWS session loaded during
shell initialization. Specifying multiple buckets looks like:
```yaml
buckets:
"123456789": bucket-in-account-123456789
"987654321": bucket-in-account-987654321
```
Multiple region buckets can also be specified using the AWS region as the key:
```yaml
buckets:
us-east-1: bucket-in-region-us-east-1
us-west-2: bucket-in-region-us-west-2
```
These can be combined to define buckets for multiple accounts and multiple
regions as:
```yaml
buckets:
"123456789":
us-east-1: bucket-123456789-in-region-us-east-1
us-west-2: bucket-123456789-in-region-us-west-2
"987654321":
us-east-1: bucket-987654321-in-region-us-east-1
us-west-2: bucket-987654321-in-region-us-west-2
```
## AWS region
The AWS region in which the resources reside can be specified at the top
level of the file if desired. It is recommended that the region be specified
within the calling AWS profile if possible for flexibility, but there are
situations where it makes more sense to make it explicit within the
configuration file instead. If no region is found either in the configuration
file or in the AWS profile the `us-east-1` region will be used as the default
in keeping with AWS region defaulting conventions. Specify the region with
the top-level key:
```yaml
region: us-east-2
```
## targets
Targets is where the bulk of the configuration resides. Each item
is either of the *function* or *layer* kind and has associated
configuration and bundling settings according to the type. Common
to both *function* and *layer* kinds are the keys:
### targets[N].kind
As mentioned already, each target must specify its object type using
the kind key:
```yaml
targets:
- kind: function
...
- kind: layer
...
```
### targets[N].name(s)
The name specifies the name of the target object, not the ARN. For example,
a function named foo would be represented as:
```yaml
targets:
- kind: function
name: foo
```
A single target can point to multiple functions. This is useful in cases
where a single target could be for both development and production functions
or where a single code-base is shared across multiple functions for logical
or integration reasons. In this case a list of names is supplied instead:
```yaml
targets:
- kind: function
names:
- foo-devel
- foo-prod
```
### targets[N].region
In the same fashion as regions can be explicitly set as a top-level
configuration key, they can also be set on a per-target basis. If set,
the target region will take precedence over the top-level value and
the profile-specified value. This makes deploying code across regions
within a single configuration file possible.
### targets[N].dependencies
Dependencies is a list of external dependency sources to install as
site packages in the lambda function or layer. Multiple package managers
are supported and specified by the `kind` attribute:
```yaml
targets:
- kind: layer
name: foo
dependencies:
- kind: pip
- kind: pipper
- kind: poetry
```
Currently `pip`, `pipper` and `poetry` package managers are supported. For any of the
package managers, the dependencies can be specified explicitly with the
`package(s)` key.
```yaml
targets:
- kind: layer
name: foo
dependencies:
- kind: pip
packages:
- spam
        - ham
- kind: pipper
package: spammer
```
It's also possible to specify a file to where the package dependencies
have been defined.
```yaml
targets:
- kind: layer
name: foo
dependencies:
- kind: pip
file: requirements.layer.txt
- kind: pipper
file: pipper.layer.json
```
If no packages or file is specified, the default file for the given package
manager will be used by default (e.g. `requirements.txt` for pip,
`pipper.json` for pipper, and `pyproject.toml` for poetry).
It is also possible to specify the same kind of package manager multiple
times in this list to aggregate dependencies from multiple locations.
### targets[N].dependencies.skip
It is possible to specify inline dependencies to skip during the bundling installation
process. This can be useful, for example, when a particular dependency is specific to
platforms other than the lambda environment. Or perhaps a package like boto3 that is
already available in the lambda function should be skipped to save bundling space while
still wanting to include it in the package's dependencies for beyond-lambda deployment
purposes.
As shown below, specify the packages to skip within the dependency as part of the
dependency definition:
```yaml
targets:
- kind: function
name: foo
dependencies:
- kind: pip
skip:
- boto3
```
### targets[N].dependencies(kind="pipper")
Pipper repositories have additional configuration not associated with pip
packages. To support pipper libraries, there are two additional attributes
that can be specified: `bucket` and `prefix`.
The `bucket` is required as it specifies the S3 bucket used as the package
source and should be read-accessible by the profile invoking reviser.
The `prefix` is an optional alternate package prefix within the S3 bucket.
Use this only if you are using an alternate prefix for your pipper
package.
```yaml
targets:
- kind: layer
name: foo
dependencies:
- kind: pipper
file: pipper.layer.json
bucket: bucket-name-where-pipper-package-resides
prefix: a/prefix/that/is/not/just/pipper
```
### targets[N].dependencies(kind="poetry")
Poetry repositories have additional `extras` configuration that can be used to
specify optional dependency groups to install in the lambda. This can be useful
to separate dependencies by function.
```yaml
targets:
- kind: layer
name: foo
dependencies:
- kind: poetry
extras:
- group
```
### targets[N].bundle
The target bundle object contains the attributes that define the bundle
that will be created and uploaded to the functions or layers in a given
target as part of the deployment process. Its primary purpose is to define
what files should be included in the bundling process, which it achieves
with the following attributes.
#### targets[N].bundle.include(s)
The `include(s)` key is a string or list of Python glob-styled includes
to add to the bundle. If no includes are specified, the default behavior is:
- **function targets**: copy the first directory found that contains an
*__init__.py* file.
- **layer targets**: do not copy anything and assume dependencies are the
only files to copy into the bundle.
All paths should be referenced relative to the root path where the
`lambda.yaml` is located. For a recursive matching pattern, the glob syntax
should be used as `**/*.txt` or if restricted to a folder inside of the root
directory then `folder/**/*.txt`. To include the entire contents of a
directory, specify the path to the folder.
```yaml
targets:
- kind: function
name: foo
bundle:
includes:
# This is shorthand for "foo_library/**/*"
- foo_library
# All Python files in the "bin/" folder recursively.
- bin/**/*.py
# All Jinja2 files in the root directory that begin "template_".
- template_*.jinja2
```
#### targets[N].bundle.exclude(s)
The `exclude(s)` key is an optional one that is also a string or list of
Python glob-styled paths to remove from the matching `include(s)`. These
are applied to the files found via the includes and do not need to be
comprehensive of all files in the root directory. Building on the example
from above:
```yaml
targets:
- kind: function
name: foo
bundle:
includes:
# This is shorthand for "foo_library/**/*"
- foo_library
# All Python files in the "bin/" folder recursively.
- bin/**/*.py
# All Jinja2 files in the root directory that begin "template_".
- template_*.jinja2
excludes:
- template_local.jinja2
- template_testing.jinja2
```
This example would remove two of the template file matches from the includes
from the files copied into the bundle for deployment.
All `__pycache__`, `*.pyc` and `.DS_Store` files/directories are
excluded from the copying process in all cases and do not need to be
specified explicitly.
#### targets[N].bundle.package_exclude(s)
The `package_exclude(s)` key is an optional one that is also a string or list of
Python glob-styled paths. However, these are for paths to exclude when adding
site-packages to the bundle. Building on the example from above:
```yaml
targets:
- kind: function
name: foo
bundle:
includes:
# This is shorthand for "foo_library/**/*"
- foo_library
# All Python files in the "bin/" folder recursively.
- bin/**/*.py
# All Jinja2 files in the root directory that begin "template_".
- template_*.jinja2
excludes:
- template_local.jinja2
- template_testing.jinja2
package_excludes:
- foo/foo_windows.py
dependencies:
- kind: pip
```
This example would not include the `site-packages/foo/foo_windows.py` from the
bundled zip file for the lambda function. In this case, the reason for omitting
this file is that "Windows" code isn't needed in a linux runtime, so you want to
save some space. This is more likely useful for large packages that include
unneeded components, and it is desirable to save the space. This should be used
very carefully as it can cause external libraries to fail.
#### targets[N].bundle.omit_package(s)
There can be cases where dependencies install dependencies of their own that
you may not want copied over to the bundle. The most common case is a
dependency that requires `boto3`, which is available by default in lambda
functions already. In that case it can be useful to list site packages that
should not be copied into the bundle but may have been installed as a side
effect of the dependency installation process.
```yaml
targets:
- kind: function
name: foo
bundle:
omit_package: boto3
dependencies:
- kind: pip
# Installs a package that requires boto3, which is therefore installed
# into the site-packages bundle directory as a result.
# https://github.com/awslabs/aws-lambda-powertools-python
package: aws-lambda-powertools
```
In the above example `aws-lambda-powertools` causes `boto3` to be installed
as well. However, since lambda functions have `boto3` installed by default,
it's possible to omit that package from the bundling process so that it isn't
installed twice.
Note, however, that installing `boto3` directly in a bundle can be beneficial
because it gives you the ability to install the version that is compatible
with your given source code and dependencies. The `boto3` version on the lambda
function can be aged and stale.
#### targets[N].bundle.handler
This attribute only applies to function targets and gives the location of
file and function entrypoint for the lambda function(s) in the target. The
format matches the expected value for lambda functions, which is
`<filename_without_extension>.<function_name>`.
```yaml
targets:
- kind: function
name: foo
bundle:
handler: function:main
```
In this case the bundler would expect to find `function.py` in the top-level
directory alongside `lambda.yaml` and inside it there would be a
`main(event, context)` function that would be called when the function(s)
are invoked.
If this value is omitted, the default value of `lambda_function.lambda_handler`
will be used as this matches the AWS lambda Python function documentation.
## function targets
In addition to the common attributes described above that are shared between
both function and layer targets, there are a number of additional
attributes that apply only to function targets. These are:
### (function) targets[N].image
Specifies the configuration of the image for image based lambda functions.
This cannot be used with `targets[N].bundle`. With the exception of `uri`
all subfields are optional.
```yaml
image:
uri: 123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag
entrypoint: /my/entrypoint
cmd:
- params
- to
- entrypoint
workingdir: /the/working/dir
```
#### (function) targets[N].image.uri
The image uri for the function's image. This must be a ECR uri that resides
within the same region as the lambda function. If the lambda function is
deployed to a single region this can be configured with a string:
```yaml
uri: 123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag
```
If the lambda function is deployed to multiple regions it can be configured
with a dictionary mapping region names to images.
```yaml
uri:
us-west-2: 123456789012.dkr.ecr.us-west-2.amazonaws.com/repo:tag
us-east-2: 123456789012.dkr.ecr.us-east-2.amazonaws.com/repo:tag
```
#### (function) targets[N].image.entrypoint
A custom entrypoint to use for the image. If this is not specified the
entrypoint of the image will be used. This can be specified as a list or
as a single string that will be treated as a list with one element.
```yaml
entrypoint: /my/entrypoint
```
or
```yaml
entrypoint:
- /my/entrypoint
```
#### (function) targets[N].image.cmd
A custom command to use for the image. If this is not specified the default
command of the image will be used. This can be specified as a list or
as a single string that will be treated as a list with one element.
```yaml
cmd: a_command
```
or
```yaml
cmd:
- a_command
- with
- multiple
- words
```
#### (function) targets[N].image.workingdir
A custom working directory to set for the image. If this is not specified
the default working directory of the image will be used.
```yaml
workingdir: /my/working/dir
```
### (function) targets[N].layer(s)
Specifies one or more layers that should be attached to the targeted
function(s). Layers can be specified as fully-qualified ARNs for externally
specified layers, e.g. a layer created in another AWS account, or by name
for layers specified within the account and layers defined within the targets
of the configuration file.
```yaml
targets:
- kind: function
name: foo
layer: arn:aws:lambda:us-west-2:999999999:layer:bar
...
```
or for multiple layers:
```yaml
targets:
- kind: function
name: foo
layers:
# A layer defined in another account is specified by ARN.
- arn:aws:lambda:us-west-2:999999999:layer:bar
# A layer in this account is specified by name. This layer may also be
# a target in this configuration file.
- baz
...
- kind: layer
name: baz
...
```
By default, deployments will use the latest available version of each layer,
but this can be overridden by specifying the layer ARN with its version:
```yaml
targets:
- kind: function
name: foo
layer: arn:aws:lambda:us-west-2:999999999:layer:bar:42
...
```
In the above example the layer will remain at version 42 until explicitly
modified in the configuration file.
Layers can also be defined as objects instead of attributes. The two-layer
example from above could be rewritten as:
```yaml
targets:
- kind: function
name: foo
layers:
- arn: arn:aws:lambda:us-west-2:999999999:layer:bar
- name: baz
...
```
When specified as an object with attributes, there are a number of additional
attributes that can be specified as well. First, `version` can be specified
as a separate key from the arn or name, which in many cases can make it easier
to work with than appending it to the end of the arn or function itself for
programmatic/automation:
```yaml
targets:
- kind: function
name: foo
layers:
- arn: arn:aws:lambda:us-west-2:999999999:layer:bar
version: 42
- name: baz
version: 123
...
```
Next is that the layer objects accept `only` and `except` keys that can be
used to attach the layers to certain functions in the target and not others.
This can be useful in cases where development and production targets share
a lot in common, but perhaps point to different versions of a layer or perhaps
separate development and production layers entirely. It can also be useful
when a target of functions share a common codebase but don't all need the
same dependencies. For performance optimization, restricting the layer
inclusions only to those that need the additional dependencies can be
beneficial.
The `only` and `except` attributes can be specified as a single string
or a list of strings that match against *unix pattern matching*. For example,
expanding on the example from above:
```yaml
targets:
- kind: function
names:
- foo-devel
- foo-devel-worker
- foo-prod
- foo-prod-worker
layers:
- name: baz-devel
only: foo-devel*
- name: baz-devel-worker
only: foo-devel-worker
- name: baz-prod
only: foo-prod*
- name: baz-prod-worker
only: foo-prod-worker
...
```
this example shows 4 layers that are conditionally applied using the only
keyword. The example could be rewritten with the `except` key instead:
```yaml
targets:
- kind: function
names:
- foo-devel
- foo-devel-worker
- foo-prod
- foo-prod-worker
layers:
- name: baz-devel
except: foo-prod*
- name: baz-devel-worker
except:
- foo-prod*
- foo-devel
- name: baz-prod
except: foo-devel*
- name: baz-prod-worker
except:
- foo-devel*
- foo-prod
...
```
And either way works. The two (`only` and `except`) can also be combined
when that makes more sense. For example, the `baz-devel-worker` from above
could also be written as:
```yaml
- name: baz-devel-worker
only: foo-devel*
except: foo-devel
```
Note that if `only` is specified it is processed first and then `except` is
removed from the matches found by `only`.
### (function) targets[N].memory
This specifies the function memory in megabytes either as an integer or
a string with an `MB` suffix.
```yaml
targets:
- kind: function
name: foo
memory: 256MB
```
### (function) targets[N].timeout
This specifies the function timeout in seconds either as an integer or
a string with an `s` suffix.
```yaml
targets:
- kind: function
name: foo
timeout: 42s
```
### (function) targets[N].variable(s)
Variables contains a list of environment variables to assign to the function.
They can be specified simply with as a string `<KEY>=<value>` syntax:
```yaml
targets:
- kind: function
name: foo
variable: MODE=read-only
```
Here a single environment variable is specified that maps `"MODE"` to the
value *"read-only"*. A more programmatic-friendly way is to specify the
name and value as attributes of a variable:
```yaml
targets:
- kind: function
name: foo
variables:
- name: MODE
value: read-only
```
Some environment variables may be managed through other means, e.g.
terraform that created the function in the first place or another command
interface used to update the function. For those cases, the `preserve`
attribute should be set to true and no value specified.
```yaml
targets:
- kind: function
name: foo
variables:
- name: MODE
preserve: true
```
In this case the `MODE` environment variable value will be preserved between
function deployments to contain the value that was already set.
Finally, variables support the same `only` and `except` attributes that
are found for target layers so that environment variables can be specified
differently for subsets of targets.
The `only` and `except` attributes can be specified as a single string
or a list of strings that match against *unix pattern matching*. For example,
expanding on the example from above:
```yaml
targets:
- kind: function
names:
- foo-prod
- foo-devel
variables:
- name: MODE
value: write
only: '*prod'
- name: MODE
value: read-only
except: '*prod'
```
### (function) targets[N].ignore(s)
Ignores allows you to specify one or more configuration keys within a function
target that should be ignored during deployments. For cases where any of the
configuration values:
- `memory`
- `timeout`
- `variables`
are managed by external systems, they can be specified by the ignores to
prevent changes being applied by reviser.
```yaml
targets:
- kind: function
name: foo
ignores:
- memory
- timeout
```
## run
The run attribute contains an optional object of batch non-interactive commands
to run when the shell is called with that run key. This is useful for
orchestrating actions for CI/CD purposes as the commands will be processed
within a shell environment without user prompts and then the shell will exit
when complete without waiting for additional input.
```yaml
run:
deploy-prod:
- select function *prod
- push --description="($CI_COMMIT_SHORT_SHA): $CI_COMMIT_TITLE"
- alias test -1
targets:
- kind: function
names:
- foo-prod
- foo-devel
```
In the example above, the `deploy-prod` run command macro/group would start
the shell and then non-interactively execute the three commands in order
to first select the *foo-prod* function, then to build and deploy that function
with a description created from CI environment variables and finally move the
*test* alias to the newly deployed version using a negative version index of
*-1*. After those three commands are executed reviser will exit the
shell automatically, successfully ending that process.
There is also a special `shell` command that can be used in run command
macros/groups that will start the shell in interactive mode. This is useful
for using run command macros/groups for pre-configuration during startup of
the interactive shell. Building on the previous example,
```yaml
run:
deploy-prod:
- select function *prod
- push --description="($CI_COMMIT_SHORT_SHA): $CI_COMMIT_TITLE"
- alias test -1
devel:
- select * *devel
- bundle
- shell
targets:
- kind: function
names:
- foo-prod
- foo-devel
- kind: layer
names:
- bar-devel
- bar-prod
```
here we've added a `devel` run command macro/group that will select the devel
function and layer and bundle those but not deploy them. After that's complete
the shell command will kick off the interactive session and ask for user
input. The benefit of this particular run command macro/group is to select
the development targets and pre-build them to cache the dependencies for the
shell user while they continue to develop and deploy the source code to the
function.
## Shared Dependencies
It is possible to share dependencies across targets. This is useful if the dependencies
are the same but other configurations differ. The configuration will look something
like this:
```yaml
dependencies:
# Each shared dependency must be named, but the name can be any valid yaml key that
# you want.
shared_by_my_foo_and_bar:
- kind: pip
file: requirements.functions.txt
shared_by_others:
- kind: pip
file: requirements.layer.txt
targets:
- kind: function
names:
- foo-prod
- foo-devel
timeout: 30s
memory: 256
dependencies: shared_by_my_foo_and_bar
- kind: function
names:
- bar-prod
- bar-devel
timeout: 500s
memory: 2048
dependencies: shared_by_my_foo_and_bar
- kind: function
names:
- baz-prod
- baz-devel
timeout: 10s
memory: 128
dependencies: shared_by_others
- kind: layer
names:
- spam-prod
- spam-devel
dependencies: shared_by_others
```
Shared dependencies will be installed once and reused by each target configured to use
it. Each named shared dependency has the same structure and available options as a
regular target dependencies definition.
# Local Execution
When running reviser in your current environment instead of launching the shell within
a new container, you will want to use the command `reviser-shell`. This is the local
version of the CLI that is meant to be used within a suitable container environment
that mimics the lambda runtime environment. It is merely a change in entrypoint, and
has all the shell functionality described for the `reviser` command above.
Also, to run the `reviser-shell` successfully, you must install the extra shell
dependencies with the installation:
```shell
$ pip install reviser[shell]
```
Without the shell extras install, the `reviser-shell` will fail. This is how you would
use reviser in a containerized CI environment as well. | /reviser-0.4.0.tar.gz/reviser-0.4.0/README.md | 0.791096 | 0.877004 | README.md | pypi |
# Revizor




<!--  -->
Revizor is a security-oriented fuzzer for detecting information leaks in CPUs, such as [Spectre and Meltdown](https://meltdownattack.com/).
It tests CPUs against [Leakage Contracts](https://arxiv.org/abs/2006.03841) and searches for unexpected leaks.
For more details, see our [Paper](https://dl.acm.org/doi/10.1145/3503222.3507729) (open access [here](https://arxiv.org/abs/2105.06872)), and the [follow-up paper](https://arxiv.org/pdf/2301.07642.pdf).
## Installation
**Warning**:
Keep in mind that Revizor runs randomly-generated code in kernel space.
As you can imagine, things could go wrong.
Make sure you're not running Revizor on an important machine.
### 1. Check Requirements
* Architecture: Revizor supports Intel and AMD x86-64 CPUs.
We also have experimental support for ARM CPUs (see `arm-port` branch) but it is at very early stages, use it at your own peril.
* No virtualization: You will need a bare-metal OS installation.
Testing from inside a VM is not (yet) supported.
* OS: The target machine has to be running Linux v4.15 or later.
### 2. Install Revizor Python Package
If you use `pip`, you can install Revizor with:
```bash
pip install revizor-fuzzer
```
Alternatively, install Revizor from sources:
```bash
# run from the project root directory
make install
```
If the installation fails with `'revizor-fuzzer' requires a different Python:`, you'll have to install Python 3.9 and run Revizor from a virtual environment:
```bash
sudo apt install python3.9 python3.9-venv
/usr/bin/python3.9 -m pip install virtualenv
/usr/bin/python3.9 -m virtualenv ~/venv-revizor
source ~/venv-revizor/bin/activate
pip install revizor-fuzzer
```
### 3. Install Revizor Executor (kernel module)
Then build and install the kernel module:
```bash
# building a kernel module require kernel headers
sudo apt-get install linux-headers-$(uname -r)
# get the source code
git clone https://github.com/microsoft/sca-fuzzer.git
# build the executor
cd sca-fuzzer/src/x86/executor
make uninstall # the command will give an error message, but it's ok!
make clean
make
make install
```
### 4. Download ISA spec
```bash
rvzr download_spec -a x86-64 --extensions BASE SSE SSE2 CLFLUSHOPT CLFSH --outfile base.json
```
### 5. (Optional) System Configuration
For more stable results, disable hyperthreading (there's usually a BIOS option for it).
If you do not disable hyperthreading, you will see a warning every time you invoke Revizor; you can ignore it.
Optionally (and it *really* is optional), you can boot the kernel on a single core by adding `-maxcpus=1` to the boot parameters ([how to add a boot parameter](https://wiki.ubuntu.com/Kernel/KernelBootParameters)).
## Command Line Interface
The fuzzer is controlled via a single command line interface `rvzr` (or `revizor.py` if you're running directly from the source directory).
It accepts the following arguments:
* `-s, --instruction-set PATH` - path to the ISA description file
* `-c, --config PATH` - path to the fuzzing configuration file
* `-n , --num-test-cases N` - number of test cases to be tested
* `-i , --num-inputs N` - number of input classes per test case. The number of actual inputs = input classes * inputs_per_class, which is a configuration option
* `-t , --testcase PATH` - use an existing test case instead of generating random test cases
* `--timeout TIMEOUT` - run fuzzing with a time limit [seconds]
* `-w` - working directory where the detected violations will be stored
For example, this command
```bash
rvzr fuzz -s base.json -n 100 -i 10 -c config.yaml -w ./violations
```
will run the fuzzer for 100 iterations (i.e., 100 test cases), with 10 inputs per test case.
The fuzzer will use the ISA spec stored in the `base.json` file, and will read the configuration from `config.yaml`. If the fuzzer finds a violation, it will be stored in the `./violations` directory.
See [docs](https://microsoft.github.io/sca-fuzzer/cli/) for more details.
## How To Fuzz With Revizor
The fuzzing process is controlled by a configuration file in the YAML format, passed via `--config` option. At the very minimum, this file should contain the following fields:
* `contract_observation_clause` and `contract_execution_clause` describe the contract that the CPU-under-test is tested against. See [this page](https://microsoft.github.io/sca-fuzzer/config/) for a list of available contracts. If you don't know what a contract is, Sec. 3 of [this paper](https://arxiv.org/pdf/2105.06872.pdf) will give you a high-level introduction to contracts, and [this paper](https://www.microsoft.com/en-us/research/publication/hardware-software-contracts-for-secure-speculation/) will provide a deep dive into contracts.
* `instruction_categories` is a list of instruction types that will be tested. Effectively, Revizor uses this list to filter out instructions from `base.json` (the file you downloaded via `rvzr download_spec`).
For a full list of configuration options, see [docs](https://microsoft.github.io/sca-fuzzer/config/).
### Baseline Experiment
After a fresh installation, it is normally a good idea to do a quick test run to check that everything works ok.
For example, we can create a configuration file `config.yaml` with only simple arithmetic instructions. As this instruction set does not include any instructions that would trigger speculation on Intel or AMD CPUs (at least that we know of), the expected contract would be `CT-SEQ`:
```yaml
# config.yaml
instruction_categories:
- BASE-BINARY # arithmetic instructions
max_bb_per_function: 1 # no branches!
min_bb_per_function: 1
contract_observation_clause: loads+stores+pc # aka CT
contract_execution_clause:
- no_speculation # aka SEQ
```
Start the fuzzer:
```bash
rvzr fuzz -s base.json -i 50 -n 100 -c config.yaml -w .
```
This command should terminate with no violations.
### Detection of a Simple Contract Violation
Next, we could intentionally make a mistake in a contract to check that Revizor can detect it.
To this end, we can modify the config file from the previous example to include instructions that trigger speculation (e.g., conditional branches) but keep the contract the same:
```yaml
# config.yaml
instruction_categories:
- BASE-BINARY # arithmetic instructions
- BASE-COND_BR
max_bb_per_function: 5 # up to 5 branches per test case
min_bb_per_function: 1
contract_observation_clause: loads+stores+pc # aka CT
contract_execution_clause:
- no_speculation # aka SEQ
```
Start the fuzzer:
```bash
rvzr fuzz -s base.json -i 50 -n 1000 -c config.yaml -w .
```
As your CPU-under-test almost definitely implements branch prediction, Revizor should detect a violation within a few minutes, with a message similar to this:
```
================================ Violations detected ==========================
Contract trace (hash):
0111010000011100111000001010010011110101110011110100000111010110
Hardware traces:
Inputs [907599882]:
.....^......^......^...........................................^
Inputs [2282448906]:
...................^.....^...................................^.^
```
You can find the violating test case as well as the violation report in the directory named `./violation-*/`.
It will contain an assembly file `program.asm` that surfaced a violation, a sequence of inputs `input-*.bin` to this program, and some details about the violation in `report.txt`.
### Full-Scale Fuzzing Campaign
To start a full-scale test, write your own configuration file (see description [here](config.md) and an example config [here](https://github.com/microsoft/sca-fuzzer/tree/main/src/tests/big-fuzz.yaml)), and launch the fuzzer.
Below is an example launch command, which will start a 24-hour fuzzing session, with 100 input classes per test case, and which uses [big-fuzz.yaml](https://github.com/microsoft/sca-fuzzer/tree/main/src/tests/big-fuzz.yaml) configuration:
```shell
rvzr fuzz -s base.json -c src/tests/big-fuzz.yaml -i 100 -n 100000000 --timeout 86400 -w `pwd` --nonstop
```
When you find a violation, you will have to do some manual investigation to understand the source of it; [this guide](fuzzing-guide.md) is an example of how to do such an investigation.
## Need Help with Revizor?
If you find a bug in Revizor, don't hesitate to [open an issue](https://github.com/microsoft/sca-fuzzer/issues).
If something is confusing or you need help in using Revizor, we have a [discussion page](https://github.com/microsoft/sca-fuzzer/discussions).
## Documentation
For more details, see [the website](https://microsoft.github.io/sca-fuzzer/).
## Contributing
See [CONTRIBUTING.md](CONTRIBUTING.md).
## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft
trademarks or logos is subject to and must follow
[Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general).
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship.
Any use of third-party trademarks or logos are subject to those third-party's policies.
| /revizor_fuzzer-1.2.3.tar.gz/revizor_fuzzer-1.2.3/README.md | 0.440951 | 0.901878 | README.md | pypi |
from datetime import datetime
from typing import NoReturn, Dict
from pprint import pformat
from traceback import print_stack
from .interfaces import EquivalenceClass
from .config import CONF
MASK_64BIT = pow(2, 64)
POW2_64 = pow(2, 64)
TWOS_COMPLEMENT_MASK_64 = pow(2, 64) - 1
class StatisticsCls:
_borg_shared_state: Dict = {}
test_cases: int = 0
num_inputs: int = 0
eff_classes: int = 0
single_entry_classes: int = 0
required_priming: int = 0
flaky_violations: int = 0
violations: int = 0
coverage: int = 0
analysed_test_cases: int = 0
spec_filter: int = 0
observ_filter: int = 0
# Implementation of Borg pattern
def __init__(self) -> None:
self.__dict__ = self._borg_shared_state
def __str__(self):
total_clss = self.eff_classes + self.single_entry_classes
effectiveness = self.eff_classes / total_clss if total_clss else 0
total_clss_per_test_case = total_clss / self.analysed_test_cases \
if self.analysed_test_cases else 0
effective_clss = self.eff_classes / self.analysed_test_cases \
if self.analysed_test_cases else 0
iptc = self.num_inputs / self.test_cases if self.test_cases else 0
s = "================================ Statistics ===================================\n"
s += f"Test Cases: {self.test_cases}\n"
s += f"Inputs per test case: {iptc:.1f}\n"
s += f"Flaky violations: {self.flaky_violations}\n"
s += f"Required priming: {self.required_priming}\n"
s += f"Violations: {self.violations}\n"
s += "Effectiveness: \n"
s += f" Effectiveness: {effectiveness:.1f}\n"
s += f" Total Cls: {total_clss_per_test_case:.1f}\n"
s += f" Effective Cls: {effective_clss:.1f}\n"
s += "Filters:\n"
s += f" Speculation Filter: {self.spec_filter}\n"
s += f" Observation Filter: {self.observ_filter}\n"
return s
def get_brief(self):
if self.test_cases == 0:
return ""
else:
if self.analysed_test_cases:
all_cls = (self.eff_classes + self.single_entry_classes) / self.analysed_test_cases
eff_cls = self.eff_classes / self.analysed_test_cases
else:
all_cls = 0
eff_cls = 0
s = f"Cls:{eff_cls:.1f}/{all_cls:.1f},"
s += f"In:{self.num_inputs / self.test_cases:.1f},"
s += f"Cv:{self.coverage},"
s += f"SpF:{self.spec_filter},"
s += f"ObF:{self.observ_filter},"
s += f"Prm:{self.required_priming}," \
f"Flk:{self.flaky_violations}," \
f"Vio:{self.violations}"
return s
STAT = StatisticsCls()
class Logger:
"""
A global object responsible for printing stuff.
Has the following levels of logging:
- Error: Critical error. Prints a message and exits
- Warning: Non-critical error. Always printed, but does not cause an exit
- Info: Useful info. Printed only if enabled in CONF.logging_modes
- Debug: Detailed info. Printed if both enabled in CONF.logging_modes and if __debug__ is set.
"""
one_percent_progress: float = 0.0
progress: float = 0.0
progress_percent: int = 0
msg: str = ""
line_ending: str = ""
redraw_mode: bool = True
# info modes
info: bool = False
stat: bool = False
debug: bool = False
# debugging specific modules
dbg_timestamp: bool = False
dbg_violation: bool = False
dbg_traces: bool = False
dbg_model: bool = False
dbg_coverage: bool = False
dbg_generator: bool = False
def __init__(self) -> None:
self.update_logging_modes()
def update_logging_modes(self):
for mode in CONF.logging_modes:
if not mode:
continue
if getattr(self, mode, None) is None:
self.error(f"Unknown value '{mode}' of config variable 'logging_modes'")
setattr(self, mode, True)
if "dbg" in mode: # enable debug mode if any debug mode is enabled
self.debug = True
if not __debug__:
if self.dbg_timestamp or self.dbg_model or self.dbg_coverage or self.dbg_traces\
or self.dbg_generator:
self.warning(
"", "Current value of `logging_modes` requires debugging mode!\n"
"Remove '-O' from python arguments")
def error(self, msg: str, print_tb: bool = False) -> NoReturn:
if self.redraw_mode:
print("")
if print_tb:
print("Encountered an unrecoverable error\nTraceback:")
print_stack()
print("\n")
print(f"ERROR: {msg}")
exit(1)
def warning(self, src, msg) -> None:
if self.redraw_mode:
print("")
print(f"WARNING: [{src}] {msg}")
def inform(self, src, msg, end="\n") -> None:
if self.info:
if self.redraw_mode:
print("")
print(f"INFO: [{src}] {msg}", end=end, flush=True)
def dbg(self, src, msg) -> None:
if self.debug:
if self.redraw_mode:
print("")
print(f"DBG: [{src}] {msg}")
# ==============================================================================================
# Generator
def dbg_gen_instructions(self, instructions):
if not __debug__:
return
if not self.dbg_generator:
return
instructions_by_category = {i.category: set() for i in instructions}
for i in instructions:
instructions_by_category[i.category].add(i.name)
self.dbg("generator", "Instructions under test:")
for k, instruction_list in instructions_by_category.items():
print(" - " + k + ": " + pformat(sorted(instruction_list), indent=4, compact=True))
print("")
# ==============================================================================================
# Fuzzer
def fuzzer_start(self, iterations: int, start_time):
if self.info:
self.one_percent_progress = iterations / 100
self.progress = 0
self.progress_percent = 0
self.msg = ""
self.line_ending = '\n' if CONF.multiline_output else ''
self.redraw_mode = False if CONF.multiline_output else True
self.start_time = start_time
self.inform("fuzzer", start_time.strftime('Starting at %H:%M:%S'))
def fuzzer_start_round(self, round_id):
if self.info:
if STAT.test_cases > self.progress:
self.progress += self.one_percent_progress
self.progress_percent += 1
if STAT.test_cases == 0:
msg = ""
else:
msg = f"\r{STAT.test_cases:<6}({self.progress_percent:>2}%)| Stats: "
msg += STAT.get_brief()
print(msg + " ", end=self.line_ending, flush=True)
self.msg = msg
if not __debug__:
return
if self.dbg_timestamp and round_id and round_id % 1000 == 0:
self.dbg(
"fuzzer", f"Time: {datetime.today()} | "
f" Duration: {(datetime.today() - self.start_time).total_seconds()} seconds")
def fuzzer_priming(self, num_violations: int):
if self.info:
print(
self.msg + "> Prime " + str(num_violations) + " ",
end=self.line_ending,
flush=True)
def fuzzer_nesting_increased(self):
if self.info:
print(
self.msg + "> Nest " + str(CONF.model_max_nesting) + " ",
end=self.line_ending,
flush=True)
def fuzzer_timeout(self):
self.inform("fuzzer", "\nTimeout expired")
def fuzzer_finish(self):
if self.info:
now = datetime.today()
print("") # new line after the progress bar
if self.stat:
print(STAT)
print(f"Duration: {(now - self.start_time).total_seconds():.1f}")
print(datetime.today().strftime('Finished at %H:%M:%S'))
def trc_fuzzer_dump_traces(self, model, inputs, htraces, ctraces, hw_feedback):
if __debug__:
if self.dbg_traces:
print("\n================================ Collected Traces "
"=============================")
if CONF.contract_observation_clause == 'l1d':
for i in range(len(htraces)):
if i > 100:
self.warning("fuzzer", "Trace output is limited to 100 traces")
break
ctrace = ctraces[i]
print(" ")
print(f"CTr{i:<2} {pretty_trace(ctrace, ctrace > pow(2, 64), ' ')}")
print(f"HTr{i:<2} {pretty_trace(htraces[i])}")
print(f"Feedback{i}: {hw_feedback[i]}")
return
org_debug_state = self.dbg_model
self.dbg_model = False
for i in range(len(htraces)):
if i > 100:
self.warning("fuzzer", "Trace output is limited to 100 traces")
break
ctrace_full = model.dbg_get_trace_detailed(inputs[i], 1)
print(" ")
print(f"CTr{i}: {ctrace_full}")
print(f"HTr{i}: {pretty_trace(htraces[i])}")
print(f"Feedback{i}: {hw_feedback[i]}")
self.dbg_model = org_debug_state
    def fuzzer_report_violations(self, violation: EquivalenceClass, model):
        """Pretty-print a detected contract violation.

        Prints the contract trace of the violating equivalence class, the
        hardware traces of its members, and — in debug builds with
        `dbg_violation` set — a detailed per-instruction model trace for one
        input of each hardware-trace group.
        """
        print("\n\n================================ Violations detected ==========================")
        print("Contract trace:")
        if CONF.contract_observation_clause != 'l1d':
            print(f" {violation.ctrace} (hash)")
        else:
            # NOTE(review): boundary uses `<=`; 2**64 itself does not fit into
            # a 64-bit field ({:064b} would overflow to 65 chars) — confirm
            # whether `<` was intended
            if violation.ctrace <= pow(2, 64):
                print(f" {violation.ctrace:064b}")
            else:
                # wide trace: low 64 bits tagged [ns], high bits tagged [s]
                print(f" {violation.ctrace % MASK_64BIT:064b} [ns]\n"
                      f" {(violation.ctrace >> 64) % MASK_64BIT:064b} [s]\n")
        print("Hardware traces:")
        for htrace, measurements in violation.htrace_map.items():
            inputs = [m.input_id for m in measurements]
            if len(inputs) < 4:
                print(f" Inputs {inputs}:")
            else:
                # keep the line short: show only the first four input IDs
                print(f" Inputs {inputs[:4]} (+ {len(inputs) - 4} ):")
            print(f" {pretty_trace(htrace)}")
        print("")
        if not __debug__:
            return
        if self.dbg_violation:
            # print details
            print("================================ Debug Trace ==================================")
            for htrace, measurements in violation.htrace_map.items():
                print(f" ##### Input {measurements[0].input_id} #####")
                # force model debugging on for the re-trace, then restore it
                model_debug_state = self.dbg_model
                self.dbg_model = True
                model.trace_test_case([measurements[0].input_], 1)
                self.dbg_model = model_debug_state
            print("\n\n")
# ==============================================================================================
# Model
def dbg_model_header(self, input_id):
if not __debug__:
return
if not self.dbg_model:
return
print(f"\n ##### Input {input_id} #####")
def dbg_model_mem_access(self, normalized_address, value, address, size, is_store, model):
if not __debug__:
return
if not self.dbg_model:
return
val = value if is_store else int.from_bytes(
model.emulator.mem_read(address, size), byteorder='little')
type_ = "store to" if is_store else "load from"
print(f" > {type_} +0x{normalized_address:x} value 0x{val:x}")
def dbg_model_instruction(self, normalized_address, model):
if not __debug__:
return
if not self.dbg_model:
return
name = model.test_case.address_map[normalized_address]
if model.in_speculation:
print(f"transient 0x{normalized_address:<2x}: {name}")
else:
print(f"0x{normalized_address:<2x}: {name}")
model.print_state(oneline=True)
def dbg_model_rollback(self, address, base):
if not __debug__:
return
if not self.dbg_model:
return
print(f"ROLLBACK to 0x{address - base:x}")
# ==============================================================================================
# Coverage
def dbg_report_coverage(self, round_id, msg):
if __debug__:
if self.dbg_coverage and round_id and round_id % 100 == 0:
print(f"\nDBG: [coverage] {msg}")
# ==================================================================================================
# Small helper functions
# ==================================================================================================
def bit_count(n):
    """Return the number of set bits (population count) of a non-negative int.

    Raises:
        ValueError: if `n` is negative. (The previous implementation looped
            forever on negative inputs, since `n >>= 1` never reaches zero
            for a negative Python int.)
    """
    if n < 0:
        raise ValueError("bit_count() requires a non-negative integer")
    # bin() renders the value without leading zeros; count the '1' digits
    return bin(n).count("1")
def pretty_trace(bits: int, merged=False, offset: str = ""):
    """Render a trace bit-string for terminal output.

    `.` marks a zero bit and `^` a one bit. When `merged` is True, `bits`
    is treated as two concatenated 64-bit words: the low word is printed
    first (tagged [ns]) and the high word on a second line (tagged [s]),
    indented by `offset`. With CONF.color set, alternating 8-bit groups are
    colorized with ANSI escapes, reset after bit 64.
    """
    if not merged:
        s = f"{bits:064b}"
    else:
        # NOTE(review): assumes MASK_64BIT == 2**64 so `%` keeps the low 64
        # bits; if MASK_64BIT were 2**64 - 1, `&` would be required — confirm
        s = f"{bits % MASK_64BIT:064b} [ns]\n" \
            f"{offset}{(bits >> 64) % MASK_64BIT:064b} [s]"
    s = s.replace("0", ".").replace("1", "^")
    if CONF.color:
        # alternate blue/green every 8 bits; reset color after the 64th bit
        s = '\033[33;34m' + s[0:8] + '\033[33;32m' + s[8:16] \
            + '\033[33;34m' + s[16:24] + '\033[33;32m' + s[24:32] \
            + '\033[33;34m' + s[32:40] + '\033[33;32m' + s[40:48] \
            + '\033[33;34m' + s[48:56] + '\033[33;32m' + s[56:64] \
            + "\033[0m" + s[64:]
    return s
class NotSupportedException(Exception):
    """Raised when a requested feature or instruction is not supported."""
    pass
class UnreachableCode(Exception):
    """Raised from code paths that should be impossible to reach."""
    pass
from typing import List, Dict
from .x86 import x86_config
class ConfigException(SystemExit):
    """Configuration error; subclasses SystemExit so an uncaught error
    terminates the process without a traceback."""
    pass
class ConfCls:
    """Global fuzzer configuration (Borg pattern: all instances share state).

    Every assignment goes through `__setattr__`, which rejects unknown and
    internal variables, enforces the declared type, and validates values
    against the ISA-specific option lists installed by `update_arch`.
    """
    config_path: str = ""
    # ==============================================================================================
    # Fuzzer
    fuzzer: str = "basic"
    """ fuzzer: type of the fuzzing algorithm """
    ignore_flaky_violations: bool = True
    """ ignore_flaky_violations: if True, don't report non-reproducible violations """
    enable_priming: bool = True
    """ enable_priming: whether to check violations with priming """
    enable_speculation_filter: bool = False
    """ enable_speculation_filter: if True, discard test cases that don't trigger speculation"""
    enable_observation_filter: bool = False
    """ enable_observation_filter: if True,discard test cases that don't leave speculative traces"""
    # ==============================================================================================
    # Execution Environment
    permitted_faults: List[str] = []
    """ permitted_faults: a list of faults that are permitted to happen during testing """
    # ==============================================================================================
    # Program Generator
    generator: str = "random"
    """ generator: type of the program generator """
    instruction_set: str = "x86-64"
    """ instruction_set: ISA under test """
    instruction_categories: List[str] = []
    """ instruction_categories: list of instruction categories to use for generating programs """
    instruction_blocklist: List[str] = []
    """ instruction_blocklist: list of instruction that will NOT be used for generating programs """
    program_generator_seed: int = 0
    """ program_generator_seed: seed of the program generator """
    program_size: int = 24
    """ program_size: size of generated programs """
    avg_mem_accesses: int = 12
    """ avg_mem_accesses: average number of memory accesses in generated programs """
    min_bb_per_function: int = 2
    """ min_bb_per_function: minimal number of basic blocks per function in generated programs """
    max_bb_per_function: int = 2
    """ max_bb_per_function: maximum number of basic blocks per function in generated programs """
    min_successors_per_bb: int = 1
    """ min_bb_per_function: min. number of successors for each basic block in generated programs
    Note 1: this config option is a *hint*; it could be ignored if the instruction set does not
    have the necessary instructions to satisfy it, or if a certain number of successor is required
    for correctness
    Note 2: If min_successors_per_bb > max_successors_per_bb, the value is
    overwritten with max_successors_per_bb """
    max_successors_per_bb: int = 2
    """ min_bb_per_function: min. number of successors for each basic block in generated programs
    Note: this config option is a *hint*; it could be ignored if the instruction set does not
    have the necessary instructions to satisfy it, or if a certain number of successor is required
    for correctness """
    register_blocklist: List[str] = []
    """ register_blocklist: list of registers that will NOT be used for generating programs """
    avoid_data_dependencies: bool = False
    """ [DEPRECATED] avoid_data_dependencies: """
    generate_memory_accesses_in_pairs: bool = False
    """ [DEPRECATED] generate_memory_accesses_in_pairs: """
    feedback_driven_generator: bool = False
    """ [DEPRECATED] feedback_driven_generator: """
    # ==============================================================================================
    # Input Generator
    input_generator: str = 'random'
    """ input_generator: type of the input generator """
    input_gen_seed: int = 10
    """ input_gen_seed: input generation seed; will use a random seed if set to zero """
    input_gen_entropy_bits: int = 16
    """ input_gen_entropy_bits: entropy of the random values created by the input generator """
    memory_access_zeroed_bits: int = 0
    """ [DEPRECATED] memory_access_zeroed_bits: """
    inputs_per_class: int = 2
    """ inputs_per_class: number of inputs per input class """
    input_main_region_size: int = 4096
    """ input_main_region_size: """
    input_faulty_region_size: int = 4096
    """ input_faulty_region_size: """
    input_register_region_size: int = 64
    """ input_register_region_size: """
    # ==============================================================================================
    # Contract Model
    model: str = 'x86-unicorn'
    """ model: """
    contract_execution_clause: List[str] = ["seq"]
    """ contract_execution_clause: """
    contract_observation_clause: str = 'ct'
    """ contract_observation_clause: """
    model_max_nesting: int = 5
    """ model_max_nesting: """
    model_max_spec_window: int = 250
    """ model_max_spec_window: """
    # ==============================================================================================
    # Executor
    executor: str = 'default'
    """ executor: executor type """
    executor_mode: str = 'P+P'
    """ executor_mode: hardware trace collection mode """
    executor_warmups: int = 50
    """ executor_warmups: number of warmup rounds executed before starting to collect
    hardware traces """
    executor_repetitions: int = 10
    """ executor_repetitions: number of repetitions while collecting hardware traces """
    executor_max_outliers: int = 1
    """ executor_max_outliers: """
    executor_taskset: int = 0
    """ executor_taskset: id of the CPU core on which the executor is running test cases """
    enable_pre_run_flush: bool = True
    """ enable_pre_run_flush: ff enabled, the executor will do its best to flush
    the microarchitectural state before running test cases """
    # ==============================================================================================
    # Analyser
    analyser: str = 'equivalence-classes'
    """ analyser: analyser type """
    analyser_permit_subsets: bool = True
    """ analyser_permit_subsets: if enabled, the analyser will not label hardware traces
    as mismatching if they form a subset relation """
    # ==============================================================================================
    # Coverage
    coverage_type: str = 'none'
    """ coverage_type: coverage type """
    # ==============================================================================================
    # Minimizer
    minimizer: str = 'violation'
    """ minimizer: type of the test case minimizer """
    # ==============================================================================================
    # Output
    multiline_output: bool = False
    """ multiline_output: """
    logging_modes: List[str] = ["info", "stat"]
    """ logging_modes: """
    color: bool = False
    # ==============================================================================================
    # Internal
    _borg_shared_state: Dict = {}
    _no_generation: bool = False
    _option_values: Dict[str, List] = {}  # set by ISA-specific config.py
    _default_instruction_blocklist: List[str] = []

    # Implementation of Borg pattern
    def __init__(self) -> None:
        self.setattr_internal("__dict__", self._borg_shared_state)

    def __setattr__(self, name, value):
        """Validated assignment: checks name, type, and permitted values."""
        # print(f"CONF: setting {name} to {value}")

        # Sanity checks
        if name[0] == "_":
            raise ConfigException(
                f"ERROR: Attempting to set an internal configuration variable {name}.")
        if getattr(self, name, None) is None:
            raise ConfigException(f"ERROR: Unknown configuration variable {name}.\n"
                                  f"It's likely a typo in the configuration file.")
        if type(self.__getattribute__(name)) != type(value):
            raise ConfigException(f"ERROR: Wrong type of the configuration variable {name}.\n"
                                  f"It's likely a typo in the configuration file.")
        if name == "executor_max_outliers" and value > 20:
            # warn about the value being assigned (previously this printed the
            # stale old value of executor_max_outliers)
            print(f"WARNING: Configuration: Are you sure you want to"
                  f" ignore {value} outliers?")
        if name == "coverage_type" and value != "none":
            # was `value > "none"` — a lexicographic string comparison that
            # silently missed values sorting before "none" (e.g. "model");
            # also store a real boolean instead of the truthy string "False"
            super().__setattr__("feedback_driven_generator", False)

        # value checks
        if self._option_values.get(name, '') != '':
            invalid = False
            if isinstance(value, List):
                for v in value:
                    if v not in self._option_values[name]:
                        invalid = True
                        break
            else:
                invalid = value not in self._option_values[name]
            if invalid:
                raise ConfigException(
                    f"ERROR: Unknown value '{value}' of config variable '{name}'\n"
                    f"Possible options: {self._option_values[name]}")

        # NOTE(review): the two checks below validate the *current* state, so
        # an invalid assignment is only caught on the next assignment — confirm
        # whether checking `value` directly was intended
        if (self.input_main_region_size % 4096 != 0) or \
           (self.input_faulty_region_size % 4096 != 0):
            raise ConfigException("ERROR: Inputs must be page-aligned")
        if self.input_gen_entropy_bits + self.memory_access_zeroed_bits > 32:
            raise ConfigException(
                "ERROR: The sum of input_gen_entropy_bits and memory_access_zeroed_bits"
                " must be less or equal to 32 bits")

        # special handling
        if name == "instruction_set":
            super().__setattr__("instruction_set", value)
            self.update_arch()
            return
        if name == "instruction_blocklist":
            # blocklist entries accumulate in the internal default list;
            # the public attribute itself is intentionally left unchanged
            self._default_instruction_blocklist.extend(value)
            return

        super().__setattr__(name, value)

    def update_arch(self):
        """Load the arch-specific defaults (option values, blocklists, ...)
        for the currently selected instruction set."""
        # arch-specific config
        if self.instruction_set == "x86-64":
            config = x86_config
            prefix = "x86_"
        else:
            raise ConfigException(f"ERROR: Unknown architecture {self.instruction_set}")

        options = [i for i in dir(config) if i.startswith(prefix)]
        for option in options:
            values = getattr(config, option)
            trimmed_name = option.removeprefix(prefix)
            if trimmed_name == "option_values":
                self.setattr_internal("_option_values", values)
                continue
            if hasattr(self, trimmed_name):
                setattr(self, trimmed_name, values)
            else:
                super().__setattr__(option, values)

    def setattr_internal(self, name, val):
        """ Bypass value checks and set an internal config variable. Use with caution! """
        super().__setattr__(name, val)
# the single shared configuration object (Borg: all instances share state)
CONF = ConfCls()
CONF.update_arch()
from __future__ import annotations
import random
import abc
import re
from typing import List, Dict
from subprocess import CalledProcessError, run
from collections import OrderedDict
from .isa_loader import InstructionSet
from .interfaces import Generator, TestCase, Operand, RegisterOperand, FlagsOperand, \
MemoryOperand, ImmediateOperand, AgenOperand, LabelOperand, OT, Instruction, BasicBlock, \
Function, OperandSpec, InstructionSpec, CondOperand, TargetDesc
from .util import NotSupportedException, Logger
from .config import CONF
# Helpers
class GeneratorException(Exception):
    """Raised on internal errors of the program generator."""
    pass
class AsmParserException(Exception):
    """Raised when a line of an assembly test case cannot be parsed."""

    def __init__(self, line_number, explanation):
        # line numbers are reported 1-based
        super().__init__(
            f"Could not parse line {line_number + 1}\n Reason: {explanation}")
def parser_assert(condition: bool, line_number: int, explanation: str):
    """Raise AsmParserException(line_number, explanation) unless `condition` holds."""
    if condition:
        return
    raise AsmParserException(line_number, explanation)
# ==================================================================================================
# Generator Interface
# ==================================================================================================
class Pass(abc.ABC):
    """A transformation pass applied in place to a generated test case."""

    @abc.abstractmethod
    def run_on_test_case(self, test_case: TestCase) -> None:
        pass
class Printer(abc.ABC):
    """Serializes a TestCase into an assembly output file."""
    prologue_template: List[str]
    epilogue_template: List[str]

    @abc.abstractmethod
    def print(self, test_case: TestCase, outfile: str) -> None:
        pass
class ConfigurableGenerator(Generator, abc.ABC):
    """
    The interface description for Generator classes.

    Implements the ISA-independent parts of test-case generation: seeding,
    assembling, and parsing/loading of existing assembly test cases.
    """
    instruction_set: InstructionSet
    test_case: TestCase
    passes: List[Pass]  # set by subclasses
    printer: Printer  # set by subclasses
    target_desc: TargetDesc  # set by subclasses
    LOG: Logger  # name capitalized to make logging easily distinguishable from the main logic

    def __init__(self, instruction_set: InstructionSet, seed: int):
        super().__init__(instruction_set, seed)
        self.LOG = Logger()
        self.LOG.dbg_gen_instructions(instruction_set.instructions)

        # partition the instruction pool; generation needs at least one
        # non-control-flow instruction, and control-flow instructions whenever
        # multi-BB functions are requested
        self.control_flow_instructions = \
            [i for i in self.instruction_set.instructions if i.control_flow]
        assert self.control_flow_instructions or CONF.max_bb_per_function <= 1, \
            "The instruction set is insufficient to generate a test case"

        self.non_control_flow_instructions = \
            [i for i in self.instruction_set.instructions if not i.control_flow]
        assert self.non_control_flow_instructions, \
            "The instruction set is insufficient to generate a test case"

        self.non_memory_access_instructions = \
            [i for i in self.non_control_flow_instructions if not i.has_mem_operand]
        if CONF.avg_mem_accesses != 0:
            memory_access_instructions = \
                [i for i in self.non_control_flow_instructions if i.has_mem_operand]
            self.load_instruction = [i for i in memory_access_instructions if not i.has_write]
            self.store_instructions = [i for i in memory_access_instructions if i.has_write]
            assert self.load_instruction or self.store_instructions, \
                "The instruction set does not have memory accesses while `avg_mem_accesses > 0`"
        else:
            self.load_instruction = []
            self.store_instructions = []

    def set_seed(self, seed: int) -> None:
        """Set the seed for the next generated test case."""
        self._state = seed

    def create_test_case(self, asm_file: str, disable_assembler: bool = False) -> TestCase:
        """Generate a random test case, write it to `asm_file`, and (unless
        `disable_assembler` is set) assemble it and map binary addresses."""
        self.test_case = TestCase(self._state)

        # set seeds; a zero seed is replaced with a random one
        if self._state == 0:
            self._state = random.randint(1, 1000000)
            self.LOG.inform("prog_gen",
                            f"Setting program_generator_seed to random value: {self._state}")
        random.seed(self._state)
        self._state += 1

        # create the main function
        func = self.generate_function(".function_main")

        # fill the function with instructions
        self.add_terminators_in_function(func)
        self.add_instructions_in_function(func)

        # add it to the test case
        self.test_case.functions.append(func)
        self.test_case.main = func

        # process the test case
        for p in self.passes:
            p.run_on_test_case(self.test_case)

        self.printer.print(self.test_case, asm_file)
        self.test_case.asm_path = asm_file

        if disable_assembler:
            return self.test_case

        bin_file = asm_file[:-4] + ".o"
        self.assemble(asm_file, bin_file)
        self.test_case.bin_path = bin_file

        self.map_addresses(self.test_case, bin_file)

        return self.test_case

    @staticmethod
    def assemble(asm_file: str, bin_file: str) -> None:
        """Assemble the test case into a stripped binary"""

        def pretty_error_msg(error_msg):
            # map assembler line numbers back to the offending source lines
            with open(asm_file, "r") as f:
                lines = f.read().split("\n")

            msg = "Error appeared while assembling the test case:\n"
            for line in error_msg.split("\n"):
                line = line.removeprefix(asm_file + ":")
                line_num_str = re.search(r"(\d+):", line)
                if not line_num_str:
                    msg += line
                else:
                    parsed = lines[int(line_num_str.group(1)) - 1]
                    msg += f"\n Line {line}\n (the line was parsed as {parsed})"
            return msg

        try:
            # SECURITY NOTE: file names are interpolated into a shell command
            # (shell=True); asm_file/bin_file must never come from untrusted input
            out = run(f"as {asm_file} -o {bin_file}", shell=True, check=True, capture_output=True)
        except CalledProcessError as e:
            error_msg = e.stderr.decode()
            if "Assembler messages:" in error_msg:
                print(pretty_error_msg(error_msg))
            else:
                print(error_msg)
            raise e

        output = out.stderr.decode()
        if "Assembler messages:" in output:
            print("WARNING: [generator]" + pretty_error_msg(output))

        run(f"strip --remove-section=.note.gnu.property {bin_file}", shell=True, check=True)
        run(f"objcopy {bin_file} -O binary {bin_file}", shell=True, check=True)

    def load(self, asm_file: str) -> TestCase:
        """Parse an existing assembly file into a TestCase object."""
        test_case = TestCase(0)
        test_case.asm_path = asm_file

        # prepare regexes
        re_redundant_spaces = re.compile(r"(?<![a-zA-Z0-9]) +")

        # prepare a map of all instruction specs
        instruction_map: Dict[str, List[InstructionSpec]] = {}
        for spec in self.instruction_set.instructions:
            if spec.name in instruction_map:
                instruction_map[spec.name].append(spec)
            else:
                instruction_map[spec.name] = [spec]

        # add an entry for direct opcodes
        dummy_spec = InstructionSpec()
        dummy_spec.name = "OPCODE"
        dummy_spec.category = "OPCODE"
        instruction_map["OPCODE"] = [dummy_spec]

        # load the text and clean it up
        lines = []
        started = False
        with open(asm_file, "r") as f:
            for line in f:
                # remove extra spaces
                line = line.strip()
                line = re_redundant_spaces.sub("", line)

                # skip comments and empty lines
                if not line or line[0] in ["", "#", "/"]:
                    continue

                # skip footer and header
                if not started:
                    started = (line == ".test_case_enter:")
                    if line[0] != ".":
                        test_case.num_prologue_instructions += 1
                    continue
                if line == ".test_case_exit:":
                    break

                lines.append(line)

        # set defaults in case the main function is implicit
        if not lines or not lines[0].startswith(".function_main:"):
            lines = [".function_main:"] + lines

        # map lines to functions and basic blocks
        current_function = ""
        current_bb = ".bb_main.entry"
        test_case_map: Dict[str, Dict[str, List[str]]] = OrderedDict()
        for line in lines:
            # opcode: directly-encoded instruction bytes
            # (bug fix: the original compared `line[:4]` against the 5-char
            # literal ".bcd " — which can never match — and `line[6:]`, i.e.
            # the *tail* of the line, against prefixes; use prefix tests)
            if line.startswith((".bcd ", ".byte", ".long", ".quad",
                                ".value", ".2byte", ".4byte", ".8byte")):
                test_case_map[current_function][current_bb].append("OPCODE")
                continue

            # instruction
            if not line.startswith("."):
                test_case_map[current_function][current_bb].append(line)
                continue

            # function
            if line.startswith(".function_"):
                current_function = line[:-1]
                test_case_map[current_function] = OrderedDict()

                current_bb = ".bb_" + current_function.removeprefix(".function_") + ".entry"
                test_case_map[current_function][current_bb] = []
                continue

            # basic block
            current_bb = line[:-1]
            if current_bb not in test_case_map[current_function]:
                test_case_map[current_function][current_bb] = []

        # parse lines and create their object representations
        line_id = 1
        for func_name, bbs in test_case_map.items():
            # print(func_name)
            line_id += 1
            func = Function(func_name)
            test_case.functions.append(func)
            if func_name == ".function_main":
                test_case.main = func

            for bb_name, lines in bbs.items():
                # print(">>", bb_name)
                line_id += 1
                if bb_name.endswith("entry"):
                    bb = func.entry
                elif bb_name.endswith("exit"):
                    bb = func.exit
                else:
                    bb = BasicBlock(bb_name)
                    func.insert(bb)

                terminators_started = False
                for line in lines:
                    # print(f" {line}")
                    line_id += 1
                    inst = self.parse_line(line, line_id, instruction_map)
                    if inst.control_flow and not self.target_desc.is_call(inst):
                        terminators_started = True
                        bb.insert_terminator(inst)
                    else:
                        parser_assert(not terminators_started, line_id,
                                      "Terminator not at the end of BB")
                        bb.insert_after(bb.get_last(), inst)

        # connect basic blocks
        bb_names = {bb.name.upper(): bb for func in test_case for bb in func}
        bb_names[".TEST_CASE_EXIT"] = func.exit
        previous_bb = None
        for func in test_case:
            for bb in func:
                # fallthrough
                if previous_bb:  # skip the first BB
                    # there is a fallthrough only if the last terminator is not a direct jump
                    if not previous_bb.terminators or \
                       not self.target_desc.is_unconditional_branch(previous_bb.terminators[-1]):
                        previous_bb.successors.append(bb)
                previous_bb = bb

                # taken branches
                for terminator in bb.terminators:
                    for op in terminator.operands:
                        if isinstance(op, LabelOperand):
                            successor = bb_names[op.value]
                            bb.successors.append(successor)

        bin_file = asm_file[:-4] + ".o"
        self.assemble(asm_file, bin_file)
        test_case.bin_path = bin_file
        self.map_addresses(test_case, bin_file)

        return test_case

    @abc.abstractmethod
    def parse_line(self, line: str, line_num: int,
                   instruction_map: Dict[str, List[InstructionSpec]]) -> Instruction:
        pass

    @abc.abstractmethod
    def map_addresses(self, test_case: TestCase, bin_file: str) -> None:
        pass

    @abc.abstractmethod
    def generate_function(self, name: str) -> Function:
        pass

    @abc.abstractmethod
    def generate_instruction(self, spec: InstructionSpec) -> Instruction:
        pass

    def generate_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        """Dispatch operand generation to the type-specific generator."""
        generators = {
            OT.REG: self.generate_reg_operand,
            OT.MEM: self.generate_mem_operand,
            OT.IMM: self.generate_imm_operand,
            OT.LABEL: self.generate_label_operand,
            OT.AGEN: self.generate_agen_operand,
            OT.FLAGS: self.generate_flags_operand,
            OT.COND: self.generate_cond_operand,
        }
        return generators[spec.type](spec, parent)

    @abc.abstractmethod
    def generate_reg_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_mem_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_imm_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_label_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_agen_operand(self, _: OperandSpec, __: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_flags_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def generate_cond_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        pass

    @abc.abstractmethod
    def add_terminators_in_function(self, func: Function):
        pass

    @abc.abstractmethod
    def add_instructions_in_function(self, func: Function):
        pass
# ==================================================================================================
# ISA-independent Generators
# ==================================================================================================
class RandomGenerator(ConfigurableGenerator, abc.ABC):
    """
    Implements an ISA-independent logic of random test case generation.
    Subclasses are responsible for the ISA-specific parts.
    """
    # toggled by _pick_random_instruction_spec when pairing memory accesses
    had_recent_memory_access: bool = False

    def __init__(self, instruction_set: InstructionSet, seed: int):
        super().__init__(instruction_set, seed)
        # every control-flow instruction except the unconditional jump can
        # serve as a conditional-branch terminator
        uncond_name = self.get_unconditional_jump_instruction().name.lower()
        self.cond_branches = \
            [i for i in self.control_flow_instructions if i.name.lower() != uncond_name]

    def generate_function(self, label: str):
        """ Generates a random DAG of basic blocks within a function """
        func = Function(label)

        # Define the maximum allowed number of successors for any BB
        if self.instruction_set.has_conditional_branch:
            max_successors = CONF.max_successors_per_bb if CONF.max_successors_per_bb < 2 else 2
            min_successors = CONF.min_successors_per_bb \
                if CONF.min_successors_per_bb < max_successors else max_successors
        else:
            max_successors = 1
            min_successors = 1

        # Create basic blocks
        node_count = random.randint(CONF.min_bb_per_function, CONF.max_bb_per_function)
        func_name = label.removeprefix(".function_")
        nodes = [BasicBlock(f".bb_{func_name}.{i}") for i in range(node_count)]

        # Connect BBs into a graph
        for i in range(node_count):
            current_bb = nodes[i]

            # the last node has only one successor - exit
            if i == node_count - 1:
                current_bb.successors = [func.exit]
                break

            # the rest of the node have a random number of successors
            successor_count = random.randint(min_successors, max_successors)
            if successor_count + i > node_count:
                # the number is adjusted to the position when close to the end
                successor_count = node_count - i

            # one of the targets (the first successor) is always the next node - to avoid dead code
            current_bb.successors.append(nodes[i + 1])

            # all other successors are random, selected from next nodes
            options = nodes[i + 2:]
            options.append(func.exit)
            for j in range(1, successor_count):
                target = random.choice(options)
                options.remove(target)
                current_bb.successors.append(target)

        func.entry.successors = [nodes[0]]

        # Function return
        if label != ".function_main":
            func.exit.terminators = [self.get_return_instruction()]

        # Finalize the function
        func.insert_multiple(nodes)
        return func

    def generate_instruction(self, spec: InstructionSpec) -> Instruction:
        """Instantiate `spec` with randomly generated operands."""
        # fill up with random operands, following the spec
        inst = Instruction.from_spec(spec)

        # generate explicit operands
        for operand_spec in spec.operands:
            operand = self.generate_operand(operand_spec, inst)
            inst.operands.append(operand)

        # generate implicit operands
        for operand_spec in spec.implicit_operands:
            operand = self.generate_operand(operand_spec, inst)
            inst.implicit_operands.append(operand)

        return inst

    def generate_reg_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        """Pick a random register of the requested type and width."""
        reg_type = spec.values[0]
        if reg_type == 'GPR':
            choices = self.target_desc.registers[spec.width]
        elif reg_type == "SIMD":
            choices = self.target_desc.simd_registers[spec.width]
        else:
            choices = spec.values

        if not CONF.avoid_data_dependencies:
            reg = random.choice(choices)
            return RegisterOperand(reg, spec.width, spec.src, spec.dest)

        # avoid_data_dependencies: reuse the last register operand when possible
        if parent.latest_reg_operand and parent.latest_reg_operand.value in choices:
            return parent.latest_reg_operand

        reg = random.choice(choices)
        op = RegisterOperand(reg, spec.width, spec.src, spec.dest)
        parent.latest_reg_operand = op
        return op

    def generate_mem_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        """Pick a random 64-bit address register for a memory operand."""
        if spec.values:
            address_reg = random.choice(spec.values)
        else:
            address_reg = random.choice(self.target_desc.registers[64])
        return MemoryOperand(address_reg, spec.width, spec.src, spec.dest)

    def generate_imm_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        """Generate a random immediate within the spec's declared range/width."""
        if spec.values:
            if spec.values[0] == "bitmask":
                # FIXME: this implementation always returns the same bitmask
                # make it random
                value = str(pow(2, spec.width) - 2)
            else:
                # a "[min-max]" range specification
                assert "[" in spec.values[0], spec.values
                range_ = spec.values[0][1:-1].split("-")
                if range_[0] == "":
                    # negative lower bound: the '-' sign was consumed by split
                    range_ = range_[1:]
                    range_[0] = "-" + range_[0]
                assert len(range_) == 2
                value = str(random.randint(int(range_[0]), int(range_[1])))
        else:
            # no constraints: any signed value that fits the operand width
            value = str(random.randint(pow(2, spec.width - 1) * -1, pow(2, spec.width - 1) - 1))
        return ImmediateOperand(value, spec.width)

    def generate_label_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        return LabelOperand("")  # the actual label will be set in add_terminators_in_function

    def generate_agen_operand(self, spec: OperandSpec, __: Instruction) -> Operand:
        """Generate a random address-generation expression: reg [+ reg [+ imm]]."""
        n_operands = random.randint(1, 3)
        reg1 = random.choice(self.target_desc.registers[64])
        if n_operands == 1:
            return AgenOperand(reg1, spec.width)

        reg2 = random.choice(self.target_desc.registers[64])
        if n_operands == 2:
            return AgenOperand(reg1 + " + " + reg2, spec.width)

        imm = str(random.randint(0, pow(2, 16) - 1))
        return AgenOperand(reg1 + " + " + reg2 + " + " + imm, spec.width)

    def generate_flags_operand(self, spec: OperandSpec, parent: Instruction) -> Operand:
        """Derive the flags operand, merging implicit flags with the parent's
        condition operand (if any)."""
        cond_op = parent.get_cond_operand()
        if not cond_op:
            return FlagsOperand(spec.values)

        flag_values = self.target_desc.branch_conditions[cond_op.value]
        if not spec.values:
            return FlagsOperand(flag_values)

        # combine implicit flags with the condition
        merged_flags = []
        for flag_pair in zip(flag_values, spec.values):
            if "undef" in flag_pair:
                merged_flags.append("undef")
            elif "r/w" in flag_pair:
                merged_flags.append("r/w")
            elif "w" in flag_pair:
                if "r" in flag_pair:
                    merged_flags.append("r/w")
                else:
                    merged_flags.append("w")
            elif "cw" in flag_pair:
                if "r" in flag_pair:
                    merged_flags.append("r/cw")
                else:
                    merged_flags.append("cw")
            elif "r" in flag_pair:
                merged_flags.append("r")
            else:
                merged_flags.append("")
        return FlagsOperand(merged_flags)

    def generate_cond_operand(self, spec: OperandSpec, _: Instruction) -> Operand:
        """Pick a random branch condition supported by the target."""
        cond = random.choice(list(self.target_desc.branch_conditions))
        return CondOperand(cond)

    def add_terminators_in_function(self, func: Function):
        """Append branch terminators to every BB according to its successors."""
        def add_fallthrough(bb: BasicBlock, destination: BasicBlock):
            # create an unconditional branch and add it
            terminator = self.get_unconditional_jump_instruction()
            terminator.operands = [LabelOperand(destination.name)]
            bb.terminators.append(terminator)

        for bb in func:
            if len(bb.successors) == 0:
                # Return instruction
                continue

            elif len(bb.successors) == 1:
                # Unconditional branch
                dest = bb.successors[0]
                if dest == func.exit:
                    # DON'T insert a branch to the exit
                    # the last basic block always falls through implicitly
                    continue
                add_fallthrough(bb, dest)

            elif len(bb.successors) == 2:
                # Conditional branch
                spec = random.choice(self.cond_branches)
                terminator = self.generate_instruction(spec)
                label = terminator.get_label_operand()
                assert label
                label.value = bb.successors[0].name
                bb.terminators.append(terminator)

                add_fallthrough(bb, bb.successors[1])
            else:
                # Indirect jump
                raise NotSupportedException()

    def add_instructions_in_function(self, func: Function):
        """Fill the function's inner BBs with CONF.program_size random instructions."""
        # evenly fill all BBs with random instructions
        basic_blocks_to_fill = func.get_all()[1:-1]
        for _ in range(0, CONF.program_size):
            bb = random.choice(basic_blocks_to_fill)
            spec = self._pick_random_instruction_spec()
            inst = self.generate_instruction(spec)
            bb.insert_after(bb.get_last(), inst)

    def _pick_random_instruction_spec(self) -> InstructionSpec:
        """Choose a random spec, biased to hit CONF.avg_mem_accesses on average."""
        # ensure the requested avg. number of mem. accesses
        search_for_memory_access = False
        memory_access_probability = CONF.avg_mem_accesses / CONF.program_size
        if CONF.generate_memory_accesses_in_pairs:
            # force a second access right after the previous one
            memory_access_probability = 1 if self.had_recent_memory_access else \
                (CONF.avg_mem_accesses / 2) / (CONF.program_size - CONF.avg_mem_accesses / 2)

        if random.random() < memory_access_probability:
            search_for_memory_access = True
            self.had_recent_memory_access = not self.had_recent_memory_access

        if self.store_instructions:
            search_for_store = random.random() < 0.5  # 50% probability of stores
        else:
            search_for_store = False

        # select a random instruction spec for generation
        if not search_for_memory_access:
            return random.choice(self.non_memory_access_instructions)

        if search_for_store:
            return random.choice(self.store_instructions)

        return random.choice(self.load_instruction)

    @abc.abstractmethod
    def get_return_instruction(self) -> Instruction:
        pass

    @abc.abstractmethod
    def get_unconditional_jump_instruction(self) -> Instruction:
        pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.